VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp@ 25915

Last change on this file since 25915 was 25915, checked in by vboxsync, 15 years ago

PGMInvalidatePage: invalidate the TLB entry; might already be done by InvalidatePage (@todo)

1/* $Id: PGMAll.cpp 25915 2010-01-19 15:50:43Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM
26#include <VBox/pgm.h>
27#include <VBox/cpum.h>
28#include <VBox/selm.h>
29#include <VBox/iom.h>
30#include <VBox/sup.h>
31#include <VBox/mm.h>
32#include <VBox/stam.h>
33#include <VBox/csam.h>
34#include <VBox/patm.h>
35#include <VBox/trpm.h>
36#include <VBox/rem.h>
37#include <VBox/em.h>
38#include <VBox/hwaccm.h>
39#include <VBox/hwacc_vmx.h>
40#include "PGMInternal.h"
41#include <VBox/vm.h>
42#include <iprt/assert.h>
43#include <iprt/asm.h>
44#include <iprt/string.h>
45#include <VBox/log.h>
46#include <VBox/param.h>
47#include <VBox/err.h>
48
49
50/*******************************************************************************
51* Structures and Typedefs *
52*******************************************************************************/
53/**
54 * State structure for PGM_GST_NAME(HandlerVirtualUpdate) that's
55 * passed to PGM_GST_NAME(VirtHandlerUpdateOne) during enumeration.
56 */
57typedef struct PGMHVUSTATE
58{
59 /** The VM handle. */
60 PVM pVM;
61 /** The VMCPU handle. */
62 PVMCPU pVCpu;
63 /** The todo flags. */
64 RTUINT fTodo;
65 /** The CR4 register value. */
66 uint32_t cr4;
67} PGMHVUSTATE, *PPGMHVUSTATE;
68
69
70/*******************************************************************************
71* Internal Functions *
72*******************************************************************************/
73DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
74DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
75
76/*
77 * Shadow - 32-bit mode
78 */
79#define PGM_SHW_TYPE PGM_TYPE_32BIT
80#define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
81#include "PGMAllShw.h"
82
83/* Guest - real mode */
84#define PGM_GST_TYPE PGM_TYPE_REAL
85#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
86#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
87#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
88#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
89#include "PGMGstDefs.h"
90#include "PGMAllGst.h"
91#include "PGMAllBth.h"
92#undef BTH_PGMPOOLKIND_PT_FOR_PT
93#undef BTH_PGMPOOLKIND_ROOT
94#undef PGM_BTH_NAME
95#undef PGM_GST_TYPE
96#undef PGM_GST_NAME
97
98/* Guest - protected mode */
99#define PGM_GST_TYPE PGM_TYPE_PROT
100#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
101#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
102#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
103#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
104#include "PGMGstDefs.h"
105#include "PGMAllGst.h"
106#include "PGMAllBth.h"
107#undef BTH_PGMPOOLKIND_PT_FOR_PT
108#undef BTH_PGMPOOLKIND_ROOT
109#undef PGM_BTH_NAME
110#undef PGM_GST_TYPE
111#undef PGM_GST_NAME
112
113/* Guest - 32-bit mode */
114#define PGM_GST_TYPE PGM_TYPE_32BIT
115#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
116#define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
117#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
118#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
119#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
120#include "PGMGstDefs.h"
121#include "PGMAllGst.h"
122#include "PGMAllBth.h"
123#undef BTH_PGMPOOLKIND_PT_FOR_BIG
124#undef BTH_PGMPOOLKIND_PT_FOR_PT
125#undef BTH_PGMPOOLKIND_ROOT
126#undef PGM_BTH_NAME
127#undef PGM_GST_TYPE
128#undef PGM_GST_NAME
129
130#undef PGM_SHW_TYPE
131#undef PGM_SHW_NAME
132
133
134/*
135 * Shadow - PAE mode
136 */
137#define PGM_SHW_TYPE PGM_TYPE_PAE
138#define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
139#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
140#include "PGMAllShw.h"
141
142/* Guest - real mode */
143#define PGM_GST_TYPE PGM_TYPE_REAL
144#define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
145#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
146#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
147#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
148#include "PGMGstDefs.h"
149#include "PGMAllBth.h"
150#undef BTH_PGMPOOLKIND_PT_FOR_PT
151#undef BTH_PGMPOOLKIND_ROOT
152#undef PGM_BTH_NAME
153#undef PGM_GST_TYPE
154#undef PGM_GST_NAME
155
156/* Guest - protected mode */
157#define PGM_GST_TYPE PGM_TYPE_PROT
158#define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
159#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
160#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
161#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
162#include "PGMGstDefs.h"
163#include "PGMAllBth.h"
164#undef BTH_PGMPOOLKIND_PT_FOR_PT
165#undef BTH_PGMPOOLKIND_ROOT
166#undef PGM_BTH_NAME
167#undef PGM_GST_TYPE
168#undef PGM_GST_NAME
169
170/* Guest - 32-bit mode */
171#define PGM_GST_TYPE PGM_TYPE_32BIT
172#define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
173#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
174#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
175#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
176#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
177#include "PGMGstDefs.h"
178#include "PGMAllBth.h"
179#undef BTH_PGMPOOLKIND_PT_FOR_BIG
180#undef BTH_PGMPOOLKIND_PT_FOR_PT
181#undef BTH_PGMPOOLKIND_ROOT
182#undef PGM_BTH_NAME
183#undef PGM_GST_TYPE
184#undef PGM_GST_NAME
185
186
187/* Guest - PAE mode */
188#define PGM_GST_TYPE PGM_TYPE_PAE
189#define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
190#define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
191#define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
192#define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
193#define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
194#include "PGMGstDefs.h"
195#include "PGMAllGst.h"
196#include "PGMAllBth.h"
197#undef BTH_PGMPOOLKIND_PT_FOR_BIG
198#undef BTH_PGMPOOLKIND_PT_FOR_PT
199#undef BTH_PGMPOOLKIND_ROOT
200#undef PGM_BTH_NAME
201#undef PGM_GST_TYPE
202#undef PGM_GST_NAME
203
204#undef PGM_SHW_TYPE
205#undef PGM_SHW_NAME
206
207
208#ifndef IN_RC /* AMD64 implies VT-x/AMD-V */
209/*
210 * Shadow - AMD64 mode
211 */
212# define PGM_SHW_TYPE PGM_TYPE_AMD64
213# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
214# include "PGMAllShw.h"
215
216/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
217# define PGM_GST_TYPE PGM_TYPE_PROT
218# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
219# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
220# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
221# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
222# include "PGMGstDefs.h"
223# include "PGMAllBth.h"
224# undef BTH_PGMPOOLKIND_PT_FOR_PT
225# undef BTH_PGMPOOLKIND_ROOT
226# undef PGM_BTH_NAME
227# undef PGM_GST_TYPE
228# undef PGM_GST_NAME
229
230# ifdef VBOX_WITH_64_BITS_GUESTS
231/* Guest - AMD64 mode */
232# define PGM_GST_TYPE PGM_TYPE_AMD64
233# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
234# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
235# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
236# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
237# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
238# include "PGMGstDefs.h"
239# include "PGMAllGst.h"
240# include "PGMAllBth.h"
241# undef BTH_PGMPOOLKIND_PT_FOR_BIG
242# undef BTH_PGMPOOLKIND_PT_FOR_PT
243# undef BTH_PGMPOOLKIND_ROOT
244# undef PGM_BTH_NAME
245# undef PGM_GST_TYPE
246# undef PGM_GST_NAME
247# endif /* VBOX_WITH_64_BITS_GUESTS */
248
249# undef PGM_SHW_TYPE
250# undef PGM_SHW_NAME
251
252
253/*
254 * Shadow - Nested paging mode
255 */
256# define PGM_SHW_TYPE PGM_TYPE_NESTED
257# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED(name)
258# include "PGMAllShw.h"
259
260/* Guest - real mode */
261# define PGM_GST_TYPE PGM_TYPE_REAL
262# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
263# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_REAL(name)
264# include "PGMGstDefs.h"
265# include "PGMAllBth.h"
266# undef PGM_BTH_NAME
267# undef PGM_GST_TYPE
268# undef PGM_GST_NAME
269
270/* Guest - protected mode */
271# define PGM_GST_TYPE PGM_TYPE_PROT
272# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
273# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PROT(name)
274# include "PGMGstDefs.h"
275# include "PGMAllBth.h"
276# undef PGM_BTH_NAME
277# undef PGM_GST_TYPE
278# undef PGM_GST_NAME
279
280/* Guest - 32-bit mode */
281# define PGM_GST_TYPE PGM_TYPE_32BIT
282# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
283# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT(name)
284# include "PGMGstDefs.h"
285# include "PGMAllBth.h"
286# undef PGM_BTH_NAME
287# undef PGM_GST_TYPE
288# undef PGM_GST_NAME
289
290/* Guest - PAE mode */
291# define PGM_GST_TYPE PGM_TYPE_PAE
292# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
293# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE(name)
294# include "PGMGstDefs.h"
295# include "PGMAllBth.h"
296# undef PGM_BTH_NAME
297# undef PGM_GST_TYPE
298# undef PGM_GST_NAME
299
300# ifdef VBOX_WITH_64_BITS_GUESTS
301/* Guest - AMD64 mode */
302# define PGM_GST_TYPE PGM_TYPE_AMD64
303# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
304# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64(name)
305# include "PGMGstDefs.h"
306# include "PGMAllBth.h"
307# undef PGM_BTH_NAME
308# undef PGM_GST_TYPE
309# undef PGM_GST_NAME
310# endif /* VBOX_WITH_64_BITS_GUESTS */
311
312# undef PGM_SHW_TYPE
313# undef PGM_SHW_NAME
314
315
316/*
317 * Shadow - EPT
318 */
319# define PGM_SHW_TYPE PGM_TYPE_EPT
320# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
321# include "PGMAllShw.h"
322
323/* Guest - real mode */
324# define PGM_GST_TYPE PGM_TYPE_REAL
325# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
326# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
327# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
328# include "PGMGstDefs.h"
329# include "PGMAllBth.h"
330# undef BTH_PGMPOOLKIND_PT_FOR_PT
331# undef PGM_BTH_NAME
332# undef PGM_GST_TYPE
333# undef PGM_GST_NAME
334
335/* Guest - protected mode */
336# define PGM_GST_TYPE PGM_TYPE_PROT
337# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
338# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
339# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
340# include "PGMGstDefs.h"
341# include "PGMAllBth.h"
342# undef BTH_PGMPOOLKIND_PT_FOR_PT
343# undef PGM_BTH_NAME
344# undef PGM_GST_TYPE
345# undef PGM_GST_NAME
346
347/* Guest - 32-bit mode */
348# define PGM_GST_TYPE PGM_TYPE_32BIT
349# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
350# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
351# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
352# include "PGMGstDefs.h"
353# include "PGMAllBth.h"
354# undef BTH_PGMPOOLKIND_PT_FOR_PT
355# undef PGM_BTH_NAME
356# undef PGM_GST_TYPE
357# undef PGM_GST_NAME
358
359/* Guest - PAE mode */
360# define PGM_GST_TYPE PGM_TYPE_PAE
361# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
362# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
363# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
364# include "PGMGstDefs.h"
365# include "PGMAllBth.h"
366# undef BTH_PGMPOOLKIND_PT_FOR_PT
367# undef PGM_BTH_NAME
368# undef PGM_GST_TYPE
369# undef PGM_GST_NAME
370
371# ifdef VBOX_WITH_64_BITS_GUESTS
372/* Guest - AMD64 mode */
373# define PGM_GST_TYPE PGM_TYPE_AMD64
374# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
375# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
376# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
377# include "PGMGstDefs.h"
378# include "PGMAllBth.h"
379# undef BTH_PGMPOOLKIND_PT_FOR_PT
380# undef PGM_BTH_NAME
381# undef PGM_GST_TYPE
382# undef PGM_GST_NAME
383# endif /* VBOX_WITH_64_BITS_GUESTS */
384
385# undef PGM_SHW_TYPE
386# undef PGM_SHW_NAME
387
388#endif /* !IN_RC */
389
390
391#ifndef IN_RING3
392/**
393 * #PF Handler.
394 *
395 * @returns VBox status code (appropriate for trap handling and GC return).
396 * @param pVCpu VMCPU handle.
397 * @param uErr The trap error code.
398 * @param pRegFrame Trap register frame.
399 * @param pvFault The fault address.
400 */
401VMMDECL(int) PGMTrap0eHandler(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
402{
403 PVM pVM = pVCpu->CTX_SUFF(pVM);
404
405 Log(("PGMTrap0eHandler: uErr=%RGu pvFault=%RGv eip=%04x:%RGv\n", uErr, pvFault, pRegFrame->cs, (RTGCPTR)pRegFrame->rip));
406 STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
407 STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
408
409
410#ifdef VBOX_WITH_STATISTICS
411 /*
412 * Error code stats.
413 */
414 if (uErr & X86_TRAP_PF_US)
415 {
416 if (!(uErr & X86_TRAP_PF_P))
417 {
418 if (uErr & X86_TRAP_PF_RW)
419 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentWrite);
420 else
421 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNotPresentRead);
422 }
423 else if (uErr & X86_TRAP_PF_RW)
424 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSWrite);
425 else if (uErr & X86_TRAP_PF_RSVD)
426 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSReserved);
427 else if (uErr & X86_TRAP_PF_ID)
428 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSNXE);
429 else
430 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eUSRead);
431 }
432 else
433 { /* Supervisor */
434 if (!(uErr & X86_TRAP_PF_P))
435 {
436 if (uErr & X86_TRAP_PF_RW)
437 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentWrite);
438 else
439 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVNotPresentRead);
440 }
441 else if (uErr & X86_TRAP_PF_RW)
442 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVWrite);
443 else if (uErr & X86_TRAP_PF_ID)
444 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSNXE);
445 else if (uErr & X86_TRAP_PF_RSVD)
446 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eSVReserved);
447 }
448#endif /* VBOX_WITH_STATISTICS */
449
450 /*
451 * Call the worker.
452 */
453 pgmLock(pVM);
454 int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault);
455 Assert(PGMIsLockOwner(pVM));
456 pgmUnlock(pVM);
457 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
458 rc = VINF_SUCCESS;
459
460# ifdef IN_RING0
461 /* Note: hack alert for a difficult-to-reproduce problem. */
462 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
463 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
464 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
465 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
466 {
467 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pRegFrame->rip));
468 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
469 rc = VINF_SUCCESS;
470 }
471# endif
472
473 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eGuestPF); });
474 STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
475 pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2Misc; });
476 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
477 return rc;
478}
479#endif /* !IN_RING3 */
480
481
482/**
483 * Prefetch a page
484 *
485 * Typically used to sync commonly used pages before entering raw mode
486 * after a CR3 reload.
487 *
488 * @returns VBox status code suitable for scheduling.
489 * @retval VINF_SUCCESS on success.
490 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
491 * @param pVCpu VMCPU handle.
492 * @param GCPtrPage Page to invalidate.
493 */
494VMMDECL(int) PGMPrefetchPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
495{
496 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
497 int rc = PGM_BTH_PFN(PrefetchPage, pVCpu)(pVCpu, GCPtrPage);
498 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,Prefetch), a);
499 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
500 return rc;
501}
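
/*
 * Illustrative usage sketch (not part of the original sources, never compiled):
 * a helper that pre-syncs a handful of hot pages after a CR3 reload, as the
 * comment above describes.  The helper name and the address list are made up;
 * only the PGMPrefetchPage() calling convention and the VINF_PGM_SYNC_CR3
 * handling are taken from this file.
 */
#if 0
static void pgmSamplePrefetchHotPages(PVMCPU pVCpu)
{
    /* Hypothetical list of commonly used guest pages. */
    static const RTGCPTR s_aGCPtrHot[] = { UINT32_C(0xc0000000), UINT32_C(0xc0001000) };
    for (unsigned i = 0; i < RT_ELEMENTS(s_aGCPtrHot); i++)
    {
        int rc = PGMPrefetchPage(pVCpu, s_aGCPtrHot[i]);
        if (rc == VINF_PGM_SYNC_CR3)
            break;  /* Out of shadow pages; a full CR3 sync is pending, so stop prefetching. */
        AssertRC(rc);
    }
}
#endif
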
502
503
504/**
505 * Gets the mapping corresponding to the specified address (if any).
506 *
507 * @returns Pointer to the mapping.
508 * @returns NULL if not found.
509 *
510 * @param pVM The virtual machine.
511 * @param GCPtr The guest context pointer.
512 */
513PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr)
514{
515 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
516 while (pMapping)
517 {
518 if ((uintptr_t)GCPtr < (uintptr_t)pMapping->GCPtr)
519 break;
520 if ((uintptr_t)GCPtr - (uintptr_t)pMapping->GCPtr < pMapping->cb)
521 return pMapping;
522 pMapping = pMapping->CTX_SUFF(pNext);
523 }
524 return NULL;
525}
526
527
528/**
529 * Verifies a range of pages for read or write access
530 *
531 * Only checks the guest's page tables
532 *
533 * @returns VBox status code.
534 * @param pVCpu VMCPU handle.
535 * @param Addr Guest virtual address to check
536 * @param cbSize Access size
537 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
538 * @remarks Currently not in use.
539 */
540VMMDECL(int) PGMIsValidAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
541{
542 /*
543 * Validate input.
544 */
545 if (fAccess & ~(X86_PTE_US | X86_PTE_RW))
546 {
547 AssertMsgFailed(("PGMIsValidAccess: invalid access type %08x\n", fAccess));
548 return VERR_INVALID_PARAMETER;
549 }
550
551 uint64_t fPage;
552 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPage, NULL);
553 if (RT_FAILURE(rc))
554 {
555 Log(("PGMIsValidAccess: access violation for %RGv rc=%d\n", Addr, rc));
556 return VINF_EM_RAW_GUEST_TRAP;
557 }
558
559 /*
560 * Check if the access would cause a page fault
561 *
562 * Note that hypervisor page directories are not present in the guest's tables, so this check
563 * is sufficient.
564 */
565 bool fWrite = !!(fAccess & X86_PTE_RW);
566 bool fUser = !!(fAccess & X86_PTE_US);
567 if ( !(fPage & X86_PTE_P)
568 || (fWrite && !(fPage & X86_PTE_RW))
569 || (fUser && !(fPage & X86_PTE_US)) )
570 {
571 Log(("PGMIsValidAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPage, fWrite, fUser));
572 return VINF_EM_RAW_GUEST_TRAP;
573 }
574 if ( RT_SUCCESS(rc)
575 && PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize))
576 return PGMIsValidAccess(pVCpu, Addr + PAGE_SIZE, (cbSize > PAGE_SIZE) ? cbSize - PAGE_SIZE : 1, fAccess);
577 return rc;
578}
579
580
581/**
582 * Verifies a range of pages for read or write access
583 *
584 * Supports handling of pages marked for dirty bit tracking and CSAM
585 *
586 * @returns VBox status code.
587 * @param pVCpu VMCPU handle.
588 * @param Addr Guest virtual address to check
589 * @param cbSize Access size
590 * @param fAccess Access type (r/w, user/supervisor (X86_PTE_*))
591 */
592VMMDECL(int) PGMVerifyAccess(PVMCPU pVCpu, RTGCPTR Addr, uint32_t cbSize, uint32_t fAccess)
593{
594 PVM pVM = pVCpu->CTX_SUFF(pVM);
595
596 AssertMsg(!(fAccess & ~(X86_PTE_US | X86_PTE_RW)), ("PGMVerifyAccess: invalid access type %08x\n", fAccess));
597
598 /*
599 * Get going.
600 */
601 uint64_t fPageGst;
602 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)Addr, &fPageGst, NULL);
603 if (RT_FAILURE(rc))
604 {
605 Log(("PGMVerifyAccess: access violation for %RGv rc=%d\n", Addr, rc));
606 return VINF_EM_RAW_GUEST_TRAP;
607 }
608
609 /*
610 * Check if the access would cause a page fault
611 *
612 * Note that hypervisor page directories are not present in the guest's tables, so this check
613 * is sufficient.
614 */
615 const bool fWrite = !!(fAccess & X86_PTE_RW);
616 const bool fUser = !!(fAccess & X86_PTE_US);
617 if ( !(fPageGst & X86_PTE_P)
618 || (fWrite && !(fPageGst & X86_PTE_RW))
619 || (fUser && !(fPageGst & X86_PTE_US)) )
620 {
621 Log(("PGMVerifyAccess: access violation for %RGv attr %#llx vs %d:%d\n", Addr, fPageGst, fWrite, fUser));
622 return VINF_EM_RAW_GUEST_TRAP;
623 }
624
625 if (!HWACCMIsNestedPagingActive(pVM))
626 {
627 /*
628 * Next step is to verify if we protected this page for dirty bit tracking or for CSAM scanning
629 */
630 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, NULL, NULL);
631 if ( rc == VERR_PAGE_NOT_PRESENT
632 || rc == VERR_PAGE_TABLE_NOT_PRESENT)
633 {
634 /*
635 * Page is not present in our page tables.
636 * Try to sync it!
637 */
638 Assert(X86_TRAP_PF_RW == X86_PTE_RW && X86_TRAP_PF_US == X86_PTE_US);
639 uint32_t uErr = fAccess & (X86_TRAP_PF_RW | X86_TRAP_PF_US);
640 rc = PGM_BTH_PFN(VerifyAccessSyncPage, pVCpu)(pVCpu, Addr, fPageGst, uErr);
641 if (rc != VINF_SUCCESS)
642 return rc;
643 }
644 else
645 AssertMsg(rc == VINF_SUCCESS, ("PGMShwGetPage %RGv failed with %Rrc\n", Addr, rc));
646 }
647
648#if 0 /* def VBOX_STRICT; triggers too often now */
649 /*
650 * This check is a bit paranoid, but useful.
651 */
652 /** @note this will assert when writing to monitored pages (a bit annoying actually) */
653 uint64_t fPageShw;
654 rc = PGMShwGetPage(pVCpu, (RTGCPTR)Addr, &fPageShw, NULL);
655 if ( (rc == VERR_PAGE_NOT_PRESENT || RT_FAILURE(rc))
656 || (fWrite && !(fPageShw & X86_PTE_RW))
657 || (fUser && !(fPageShw & X86_PTE_US)) )
658 {
659 AssertMsgFailed(("Unexpected access violation for %RGv! rc=%Rrc write=%d user=%d\n",
660 Addr, rc, fWrite && !(fPageShw & X86_PTE_RW), fUser && !(fPageShw & X86_PTE_US)));
661 return VINF_EM_RAW_GUEST_TRAP;
662 }
663#endif
664
665 if ( RT_SUCCESS(rc)
666 && ( PAGE_ADDRESS(Addr) != PAGE_ADDRESS(Addr + cbSize - 1)
667 || Addr + cbSize < Addr))
668 {
669 /* Don't recursively call PGMVerifyAccess as we might run out of stack. */
670 for (;;)
671 {
672 Addr += PAGE_SIZE;
673 if (cbSize > PAGE_SIZE)
674 cbSize -= PAGE_SIZE;
675 else
676 cbSize = 1;
677 rc = PGMVerifyAccess(pVCpu, Addr, 1, fAccess);
678 if (rc != VINF_SUCCESS)
679 break;
680 if (PAGE_ADDRESS(Addr) == PAGE_ADDRESS(Addr + cbSize - 1))
681 break;
682 }
683 }
684 return rc;
685}
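
/*
 * Illustrative usage sketch (not part of the original sources, never compiled):
 * verifying that the guest may write a small buffer before an instruction is
 * interpreted.  The helper name and its parameters are made up; the calling
 * convention and the VINF_EM_RAW_GUEST_TRAP return come from the function
 * above.
 */
#if 0
static int pgmSampleCheckGuestWrite(PVMCPU pVCpu, RTGCPTR GCPtrDst, uint32_t cbWrite)
{
    /* Check for a user-mode write; X86_PTE_US | X86_PTE_RW mirrors the fAccess contract above. */
    int rc = PGMVerifyAccess(pVCpu, GCPtrDst, cbWrite, X86_PTE_US | X86_PTE_RW);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
        Log(("Guest write to %RGv/%#x would fault; forward the #PF to the guest\n", GCPtrDst, cbWrite));
    return rc;
}
#endif
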
686
687
688/**
689 * Emulation of the invlpg instruction (HC only actually).
690 *
691 * @returns VBox status code, special care required.
692 * @retval VINF_PGM_SYNC_CR3 - handled.
693 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
694 * @retval VERR_REM_FLUSHED_PAGES_OVERFLOW - not handled.
695 *
696 * @param pVCpu VMCPU handle.
697 * @param GCPtrPage Page to invalidate.
698 *
699 * @remark ASSUMES the page table entry or page directory is valid. Fairly
700 * safe, but there could be edge cases!
701 *
702 * @todo Flush page or page directory only if necessary!
703 */
704VMMDECL(int) PGMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
705{
706 PVM pVM = pVCpu->CTX_SUFF(pVM);
707 int rc;
708 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
709
710#ifndef IN_RING3
711 /*
712 * Notify the recompiler so it can record this instruction.
713 * Failure happens when it's out of space. We'll return to HC in that case.
714 */
715 rc = REMNotifyInvalidatePage(pVM, GCPtrPage);
716 if (rc != VINF_SUCCESS)
717 return rc;
718#endif /* !IN_RING3 */
719
720
721#ifdef IN_RC
722 /*
723 * Check for conflicts and pending CR3 monitoring updates.
724 */
725 if (!pVM->pgm.s.fMappingsFixed)
726 {
727 if ( pgmGetMapping(pVM, GCPtrPage)
728 && PGMGstGetPage(pVCpu, GCPtrPage, NULL, NULL) != VERR_PAGE_TABLE_NOT_PRESENT)
729 {
730 LogFlow(("PGMGCInvalidatePage: Conflict!\n"));
731 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
732 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgConflict);
733 return VINF_PGM_SYNC_CR3;
734 }
735
736 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
737 {
738 LogFlow(("PGMGCInvalidatePage: PGM_SYNC_MONITOR_CR3 -> reinterpret instruction in R3\n"));
739 STAM_COUNTER_INC(&pVM->pgm.s.StatRCInvlPgSyncMonCR3);
740 return VINF_EM_RAW_EMULATE_INSTR;
741 }
742 }
743#endif /* IN_RC */
744
745 /*
746 * Call paging mode specific worker.
747 */
748 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
749 pgmLock(pVM);
750 rc = PGM_BTH_PFN(InvalidatePage, pVCpu)(pVCpu, GCPtrPage);
751 pgmUnlock(pVM);
752 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,InvalidatePage), a);
753
754 /* Invalidate the TLB entry; might already be done by InvalidatePage (@todo) */
755 PGM_INVL_PG(pVCpu, GCPtrPage);
756
757#ifdef IN_RING3
758 /*
759 * Check if we have a pending update of the CR3 monitoring.
760 */
761 if ( RT_SUCCESS(rc)
762 && (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3))
763 {
764 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
765 Assert(!pVM->pgm.s.fMappingsFixed);
766 }
767
768 /*
769 * Inform CSAM about the flush
770 *
771 * Note: This is to check if monitored pages have been changed; when we implement
772 * callbacks for virtual handlers, this will no longer be required.
773 */
774 CSAMR3FlushPage(pVM, GCPtrPage);
775#endif /* IN_RING3 */
776
777 /* Ignore all irrelevant error codes. */
778 if ( rc == VERR_PAGE_NOT_PRESENT
779 || rc == VERR_PAGE_TABLE_NOT_PRESENT
780 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
781 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
782 rc = VINF_SUCCESS;
783
784 return rc;
785}
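
/*
 * Illustrative usage sketch (not part of the original sources, never compiled):
 * how an INVLPG emulation might call PGMInvalidatePage() and what status codes
 * it should expect, per the documentation above.  The helper name is made up.
 */
#if 0
static int pgmSampleEmulateInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    int rc = PGMInvalidatePage(pVCpu, GCPtrPage);
    AssertMsg(   rc == VINF_SUCCESS
              || rc == VINF_PGM_SYNC_CR3                /* handled; a full CR3 sync is pending */
              || rc == VINF_EM_RAW_EMULATE_INSTR        /* not handled (RC only) */
              || rc == VERR_REM_FLUSHED_PAGES_OVERFLOW, /* not handled */
              ("rc=%Rrc\n", rc));
    return rc;                                          /* pass the scheduling hint up to the caller */
}
#endif
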
786
787
788/**
789 * Executes an instruction using the interpreter.
790 *
791 * @returns VBox status code (appropriate for trap handling and GC return).
792 * @param pVM VM handle.
793 * @param pVCpu VMCPU handle.
794 * @param pRegFrame Register frame.
795 * @param pvFault Fault address.
796 */
797VMMDECL(int) PGMInterpretInstruction(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
798{
799 uint32_t cb;
800 int rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault, &cb);
801 if (rc == VERR_EM_INTERPRETER)
802 rc = VINF_EM_RAW_EMULATE_INSTR;
803 if (rc != VINF_SUCCESS)
804 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", rc, pvFault));
805 return rc;
806}
807
808
809/**
810 * Gets effective page information (from the VMM page directory).
811 *
812 * @returns VBox status.
813 * @param pVCpu VMCPU handle.
814 * @param GCPtr Guest Context virtual address of the page.
815 * @param pfFlags Where to store the flags. These are X86_PTE_*.
816 * @param pHCPhys Where to store the HC physical address of the page.
817 * This is page aligned.
818 * @remark You should use PGMMapGetPage() for pages in a mapping.
819 */
820VMMDECL(int) PGMShwGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
821{
822 pgmLock(pVCpu->CTX_SUFF(pVM));
823 int rc = PGM_SHW_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pHCPhys);
824 pgmUnlock(pVCpu->CTX_SUFF(pVM));
825 return rc;
826}
827
828
829/**
830 * Sets (replaces) the page flags for a range of pages in the shadow context.
831 *
832 * @returns VBox status.
833 * @param pVCpu VMCPU handle.
834 * @param GCPtr The address of the first page.
835 * @param cb The size of the range in bytes.
836 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
837 * @remark You must use PGMMapSetPage() for pages in a mapping.
838 */
839VMMDECL(int) PGMShwSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
840{
841 return PGMShwModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
842}
843
844
845/**
846 * Modify page flags for a range of pages in the shadow context.
847 *
848 * The existing flags are ANDed with the fMask and ORed with the fFlags.
849 *
850 * @returns VBox status code.
851 * @param pVCpu VMCPU handle.
852 * @param GCPtr Virtual address of the first page in the range.
853 * @param cb Size (in bytes) of the range to apply the modification to.
854 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
855 * @param fMask The AND mask - page flags X86_PTE_*.
856 * Be very CAREFUL when ~'ing constants which could be 32-bit!
857 * @remark You must use PGMMapModifyPage() for pages in a mapping.
858 */
859VMMDECL(int) PGMShwModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
860{
861 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
862 Assert(cb);
863
864 /*
865 * Align the input.
866 */
867 cb += GCPtr & PAGE_OFFSET_MASK;
868 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
869 GCPtr = (GCPtr & PAGE_BASE_GC_MASK); /** @todo this ain't necessary, right... */
870
871 /*
872 * Call worker.
873 */
874 PVM pVM = pVCpu->CTX_SUFF(pVM);
875 pgmLock(pVM);
876 int rc = PGM_SHW_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
877 pgmUnlock(pVM);
878 return rc;
879}
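
/*
 * Illustrative usage sketches (not part of the original sources, never
 * compiled): the fFlags/fMask contract of PGMShwModifyPage().  The existing
 * flags are ANDed with fMask and ORed with fFlags, and the ~'ed constant must
 * be widened to 64 bits first, as the remark above warns.  The helper names
 * are made up.
 */
#if 0
static int pgmSampleWriteProtectShwPage(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    /* Clear X86_PTE_RW and leave everything else alone: OR in nothing, AND with ~RW (64-bit!). */
    return PGMShwModifyPage(pVCpu, GCPtrPage, PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);
}

static int pgmSampleMakeShwPageWritable(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    /* Set X86_PTE_RW and keep all other flags: AND with all ones, OR in RW. */
    return PGMShwModifyPage(pVCpu, GCPtrPage, PAGE_SIZE, X86_PTE_RW, UINT64_MAX);
}
#endif
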
880
881/**
882 * Gets the shadow page directory for the specified address, PAE.
883 *
884 * @returns Pointer to the shadow PD.
885 * @param pVCpu The VMCPU handle.
886 * @param GCPtr The address.
887 * @param pGstPdpe Guest PDPT entry
888 * @param ppPD Receives address of page directory
889 */
890int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
891{
892 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
893 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(&pVCpu->pgm.s);
894 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
895 PVM pVM = pVCpu->CTX_SUFF(pVM);
896 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
897 PPGMPOOLPAGE pShwPage;
898 int rc;
899
900 Assert(PGMIsLockOwner(pVM));
901
902 /* Allocate page directory if not present. */
903 if ( !pPdpe->n.u1Present
904 && !(pPdpe->u & X86_PDPE_PG_MASK))
905 {
906 RTGCPTR64 GCPdPt;
907 PGMPOOLKIND enmKind;
908
909# if defined(IN_RC)
910 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
911 PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
912# endif
913
914 if (HWACCMIsNestedPagingActive(pVM) || !CPUMIsGuestPagingEnabled(pVCpu))
915 {
916 /* AMD-V nested paging or real/protected mode without paging */
917 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
918 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
919 }
920 else
921 {
922 Assert(pGstPdpe);
923
924 if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
925 {
926 if (!pGstPdpe->n.u1Present)
927 {
928 /* PD not present; guest must reload CR3 to change it.
929 * No need to monitor anything in this case.
930 */
931 Assert(!HWACCMIsEnabled(pVM));
932
933 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
934 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
935 pGstPdpe->n.u1Present = 1;
936 }
937 else
938 {
939 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
940 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
941 }
942 }
943 else
944 {
945 GCPdPt = CPUMGetGuestCR3(pVCpu);
946 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
947 }
948 }
949
950 /* Create a reference back to the PDPT by using the index in its shadow page. */
951 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, &pShwPage);
952 AssertRCReturn(rc, rc);
953
954 /* The PD was cached or created; hook it up now. */
955 pPdpe->u |= pShwPage->Core.Key
956 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
957
958# if defined(IN_RC)
959 /* In 32-bit PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during the CR3 load, so any
960 * non-present PDPT will continue to cause page faults.
961 */
962 ASMReloadCR3();
963 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
964# endif
965 }
966 else
967 {
968 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
969 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
970 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
971
972 pgmPoolCacheUsed(pPool, pShwPage);
973 }
974 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
975 return VINF_SUCCESS;
976}
977
978
979/**
980 * Gets the pointer to the shadow page directory entry for an address, PAE.
981 *
982 * @returns Pointer to the PDE.
983 * @param pPGM Pointer to the PGMCPU instance data.
984 * @param GCPtr The address.
985 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
986 */
987DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGMCPU pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
988{
989 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
990 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
991
992 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
993
994 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
995 if (!pPdpt->a[iPdPt].n.u1Present)
996 {
997 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, pPdpt->a[iPdPt].u));
998 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
999 }
1000 AssertMsg(pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1001
1002 /* Fetch the pgm pool shadow descriptor. */
1003 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(PGMCPU2PGM(pPGM)->CTX_SUFF(pPool), pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1004 AssertReturn(pShwPde, VERR_INTERNAL_ERROR);
1005
1006 *ppShwPde = pShwPde;
1007 return VINF_SUCCESS;
1008}
1009
1010#ifndef IN_RC
1011
1012/**
1013 * Syncs the SHADOW page directory pointer for the specified address.
1014 *
1015 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1016 *
1017 * The caller is responsible for making sure the guest has a valid PD before
1018 * calling this function.
1019 *
1020 * @returns VBox status.
1021 * @param pVCpu VMCPU handle.
1022 * @param GCPtr The address.
1023 * @param pGstPml4e Guest PML4 entry
1024 * @param pGstPdpe Guest PDPT entry
1025 * @param ppPD Receives address of page directory
1026 */
1027int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD)
1028{
1029 PPGMCPU pPGM = &pVCpu->pgm.s;
1030 PVM pVM = pVCpu->CTX_SUFF(pVM);
1031 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1032 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1033 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1034 bool fNestedPagingOrNoGstPaging = HWACCMIsNestedPagingActive(pVM) || !CPUMIsGuestPagingEnabled(pVCpu);
1035 PPGMPOOLPAGE pShwPage;
1036 int rc;
1037
1038 Assert(PGMIsLockOwner(pVM));
1039
1040 /* Allocate page directory pointer table if not present. */
1041 if ( !pPml4e->n.u1Present
1042 && !(pPml4e->u & X86_PML4E_PG_MASK))
1043 {
1044 RTGCPTR64 GCPml4;
1045 PGMPOOLKIND enmKind;
1046
1047 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1048
1049 if (fNestedPagingOrNoGstPaging)
1050 {
1051 /* AMD-V nested paging or real/protected mode without paging */
1052 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT;
1053 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1054 }
1055 else
1056 {
1057 Assert(pGstPml4e && pGstPdpe);
1058
1059 GCPml4 = pGstPml4e->u & X86_PML4E_PG_MASK;
1060 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1061 }
1062
1063 /* Create a reference back to the PDPT by using the index in its shadow page. */
1064 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
1065 AssertRCReturn(rc, rc);
1066 }
1067 else
1068 {
1069 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1070 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1071
1072 pgmPoolCacheUsed(pPool, pShwPage);
1073 }
1074 /* The PDPT was cached or created; hook it up now. */
1075 pPml4e->u |= pShwPage->Core.Key
1076 | (pGstPml4e->u & ~(X86_PML4E_PG_MASK | X86_PML4E_AVL_MASK | X86_PML4E_PCD | X86_PML4E_PWT));
1077
1078 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1079 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1080 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1081
1082 /* Allocate page directory if not present. */
1083 if ( !pPdpe->n.u1Present
1084 && !(pPdpe->u & X86_PDPE_PG_MASK))
1085 {
1086 RTGCPTR64 GCPdPt;
1087 PGMPOOLKIND enmKind;
1088
1089 if (fNestedPagingOrNoGstPaging)
1090 {
1091 /* AMD-V nested paging or real/protected mode without paging */
1092 GCPdPt = (RTGCPTR64)iPdPt << X86_PDPT_SHIFT;
1093 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1094 }
1095 else
1096 {
1097 Assert(pGstPdpe);
1098
1099 GCPdPt = pGstPdpe->u & X86_PDPE_PG_MASK;
1100 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1101 }
1102
1103 /* Create a reference back to the PDPT by using the index in its shadow page. */
1104 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
1105 AssertRCReturn(rc, rc);
1106 }
1107 else
1108 {
1109 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
1110 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1111
1112 pgmPoolCacheUsed(pPool, pShwPage);
1113 }
1114 /* The PD was cached or created; hook it up now. */
1115 pPdpe->u |= pShwPage->Core.Key
1116 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
1117
1118 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1119 return VINF_SUCCESS;
1120}
1121
1122
1123/**
1124 * Gets the SHADOW page directory pointer for the specified address (long mode).
1125 *
1126 * @returns VBox status.
1127 * @param pVCpu VMCPU handle.
1128 * @param GCPtr The address.
1129 * @param ppPdpt Receives address of pdpt
1130 * @param ppPD Receives address of page directory
1131 */
1132DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1133{
1134 PPGMCPU pPGM = &pVCpu->pgm.s;
1135 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1136 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
1137
1138 Assert(PGMIsLockOwner(PGMCPU2VM(pPGM)));
1139
1140 AssertReturn(pPml4e, VERR_INTERNAL_ERROR);
1141 if (ppPml4e)
1142 *ppPml4e = (PX86PML4E)pPml4e;
1143
1144 Log4(("pgmShwGetLongModePDPtr %VGv (%VHv) %RX64\n", GCPtr, pPml4e, pPml4e->u));
1145
1146 if (!pPml4e->n.u1Present)
1147 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1148
1149 PVM pVM = pVCpu->CTX_SUFF(pVM);
1150 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1151 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPml4e->u & X86_PML4E_PG_MASK);
1152 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1153
1154 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1155 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1156 if (!pPdpt->a[iPdPt].n.u1Present)
1157 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1158
1159 pShwPage = pgmPoolGetPage(pPool, pPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
1160 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1161
1162 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1163 return VINF_SUCCESS;
1164}
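
/*
 * Note on the index arithmetic used by the long mode walkers above, assuming
 * the usual long mode layout of 9 address bits (512 entries) per level:
 * bits 39-47 select the PML4 entry (X86_PML4_SHIFT), bits 30-38 the PDPT entry
 * (X86_PDPT_SHIFT) and bits 21-29 the PD entry.  For example, GCPtr =
 * 0x40000000 (1 GiB) yields iPml4 = 0, iPdPt = 1 and a PD index of 0.
 */
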
1165
1166
1167/**
1168 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1169 * backing pages in case the PDPT or PML4 entry is missing.
1170 *
1171 * @returns VBox status.
1172 * @param pVCpu VMCPU handle.
1173 * @param GCPtr The address.
1174 * @param ppPdpt Receives address of pdpt
1175 * @param ppPD Receives address of page directory
1176 */
1177int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1178{
1179 PPGMCPU pPGM = &pVCpu->pgm.s;
1180 PVM pVM = pVCpu->CTX_SUFF(pVM);
1181 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1182 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1183 PEPTPML4 pPml4;
1184 PEPTPML4E pPml4e;
1185 PPGMPOOLPAGE pShwPage;
1186 int rc;
1187
1188 Assert(HWACCMIsNestedPagingActive(pVM));
1189 Assert(PGMIsLockOwner(pVM));
1190
1191 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
1192 Assert(pPml4);
1193
1194 /* Allocate page directory pointer table if not present. */
1195 pPml4e = &pPml4->a[iPml4];
1196 if ( !pPml4e->n.u1Present
1197 && !(pPml4e->u & EPT_PML4E_PG_MASK))
1198 {
1199 Assert(!(pPml4e->u & EPT_PML4E_PG_MASK));
1200 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1201
1202 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
1203 AssertRCReturn(rc, rc);
1204 }
1205 else
1206 {
1207 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1208 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1209
1210 pgmPoolCacheUsed(pPool, pShwPage);
1211 }
1212 /* The PDPT was cached or created; hook it up now and fill with the default value. */
1213 pPml4e->u = pShwPage->Core.Key;
1214 pPml4e->n.u1Present = 1;
1215 pPml4e->n.u1Write = 1;
1216 pPml4e->n.u1Execute = 1;
1217
1218 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1219 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1220 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1221
1222 if (ppPdpt)
1223 *ppPdpt = pPdpt;
1224
1225 /* Allocate page directory if not present. */
1226 if ( !pPdpe->n.u1Present
1227 && !(pPdpe->u & EPT_PDPTE_PG_MASK))
1228 {
1229 RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
1230
1231 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
1232 AssertRCReturn(rc, rc);
1233 }
1234 else
1235 {
1236 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1237 AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
1238
1239 pgmPoolCacheUsed(pPool, pShwPage);
1240 }
1241 /* The PD was cached or created; hook it up now and fill with the default value. */
1242 pPdpe->u = pShwPage->Core.Key;
1243 pPdpe->n.u1Present = 1;
1244 pPdpe->n.u1Write = 1;
1245 pPdpe->n.u1Execute = 1;
1246
1247 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
1248 return VINF_SUCCESS;
1249}
1250
1251#endif /* !IN_RC */
1252
1253/**
1254 * Gets effective Guest OS page information.
1255 *
1256 * When GCPtr is in a big page, the function will return as if it was a normal
1257 * 4KB page. If the need to distinguish between big and normal pages arises
1258 * at a later point, a dedicated API will be created for that
1259 * purpose.
1260 *
1261 * @returns VBox status.
1262 * @param pVCpu VMCPU handle.
1263 * @param GCPtr Guest Context virtual address of the page.
1264 * @param pfFlags Where to store the flags. These are X86_PTE_*, even for big pages.
1265 * @param pGCPhys Where to store the GC physical address of the page.
1266 * This is page aligned.
1267 */
1268VMMDECL(int) PGMGstGetPage(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys)
1269{
1270 return PGM_GST_PFN(GetPage, pVCpu)(pVCpu, GCPtr, pfFlags, pGCPhys);
1271}
1272
1273
1274/**
1275 * Checks if the page is present.
1276 *
1277 * @returns true if the page is present.
1278 * @returns false if the page is not present.
1279 * @param pVCpu VMCPU handle.
1280 * @param GCPtr Address within the page.
1281 */
1282VMMDECL(bool) PGMGstIsPagePresent(PVMCPU pVCpu, RTGCPTR GCPtr)
1283{
1284 int rc = PGMGstGetPage(pVCpu, GCPtr, NULL, NULL);
1285 return RT_SUCCESS(rc);
1286}
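
/*
 * Illustrative usage sketch (not part of the original sources, never compiled):
 * translating a guest linear address into a guest physical address and checking
 * writability via PGMGstGetPage().  The helper name and its out parameters are
 * made up; the calling convention is the one documented above.
 */
#if 0
static int pgmSampleGstTranslate(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys, bool *pfWritable)
{
    uint64_t fFlags;
    int rc = PGMGstGetPage(pVCpu, GCPtr, &fFlags, pGCPhys);
    if (RT_SUCCESS(rc))
    {
        *pGCPhys   |= GCPtr & PAGE_OFFSET_MASK;     /* pGCPhys is page aligned; add back the offset. */
        *pfWritable = RT_BOOL(fFlags & X86_PTE_RW);
    }
    return rc;
}
#endif
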
1287
1288
1289/**
1290 * Sets (replaces) the page flags for a range of pages in the guest's tables.
1291 *
1292 * @returns VBox status.
1293 * @param pVCpu VMCPU handle.
1294 * @param GCPtr The address of the first page.
1295 * @param cb The size of the range in bytes.
1296 * @param fFlags Page flags X86_PTE_*, excluding the page mask of course.
1297 */
1298VMMDECL(int) PGMGstSetPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags)
1299{
1300 return PGMGstModifyPage(pVCpu, GCPtr, cb, fFlags, 0);
1301}
1302
1303
1304/**
1305 * Modify page flags for a range of pages in the guest's tables
1306 *
1307 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1308 *
1309 * @returns VBox status code.
1310 * @param pVCpu VMCPU handle.
1311 * @param GCPtr Virtual address of the first page in the range.
1312 * @param cb Size (in bytes) of the range to apply the modification to.
1313 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1314 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
1315 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1316 */
1317VMMDECL(int) PGMGstModifyPage(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
1318{
1319 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1320
1321 /*
1322 * Validate input.
1323 */
1324 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1325 Assert(cb);
1326
1327 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
1328
1329 /*
1330 * Adjust input.
1331 */
1332 cb += GCPtr & PAGE_OFFSET_MASK;
1333 cb = RT_ALIGN_Z(cb, PAGE_SIZE);
1334 GCPtr = (GCPtr & PAGE_BASE_GC_MASK);
1335
1336 /*
1337 * Call worker.
1338 */
1339 int rc = PGM_GST_PFN(ModifyPage, pVCpu)(pVCpu, GCPtr, cb, fFlags, fMask);
1340
1341 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,GstModifyPage), a);
1342 return rc;
1343}
1344
1345#ifdef IN_RING3
1346
1347/**
1348 * Performs the lazy mapping of the 32-bit guest PD.
1349 *
1350 * @returns Pointer to the mapping.
1351 * @param pPGM The PGM instance data.
1352 */
1353PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM)
1354{
1355 Assert(!pPGM->CTX_SUFF(pGst32BitPd));
1356 PVM pVM = PGMCPU2VM(pPGM);
1357 pgmLock(pVM);
1358
1359 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1360 AssertReturn(pPage, NULL);
1361
1362 RTHCPTR HCPtrGuestCR3;
1363 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3);
1364 AssertRCReturn(rc, NULL);
1365
1366 pPGM->pGst32BitPdR3 = (R3PTRTYPE(PX86PD))HCPtrGuestCR3;
1367# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1368 pPGM->pGst32BitPdR0 = (R0PTRTYPE(PX86PD))HCPtrGuestCR3;
1369# endif
1370
1371 pgmUnlock(pVM);
1372 return pPGM->CTX_SUFF(pGst32BitPd);
1373}
1374
1375
1376/**
1377 * Performs the lazy mapping of the PAE guest PDPT.
1378 *
1379 * @returns Pointer to the mapping.
1380 * @param pPGM The PGM instance data.
1381 */
1382PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM)
1383{
1384 Assert(!pPGM->CTX_SUFF(pGstPaePdpt));
1385 PVM pVM = PGMCPU2VM(pPGM);
1386 pgmLock(pVM);
1387
1388 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1389 AssertReturn(pPage, NULL);
1390
1391 RTHCPTR HCPtrGuestCR3;
1392 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_PAE_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysR3 masking isn't necessary. */
1393 AssertRCReturn(rc, NULL);
1394
1395 pPGM->pGstPaePdptR3 = (R3PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1396# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1397 pPGM->pGstPaePdptR0 = (R0PTRTYPE(PX86PDPT))HCPtrGuestCR3;
1398# endif
1399
1400 pgmUnlock(pVM);
1401 return pPGM->CTX_SUFF(pGstPaePdpt);
1402}
1403
1404#endif /* IN_RING3 */
1405
1406#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1407/**
1408 * Performs the lazy mapping / updating of a PAE guest PD.
1409 *
1410 * @returns Pointer to the mapping.
1411 * @param pPGM The PGM instance data.
1412 * @param iPdpt Which PD entry to map (0..3).
1413 */
1414PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt)
1415{
1416 PVM pVM = PGMCPU2VM(pPGM);
1417 pgmLock(pVM);
1418
1419 PX86PDPT pGuestPDPT = pPGM->CTX_SUFF(pGstPaePdpt);
1420 Assert(pGuestPDPT);
1421 Assert(pGuestPDPT->a[iPdpt].n.u1Present);
1422 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
1423 bool const fChanged = pPGM->aGCPhysGstPaePDs[iPdpt] != GCPhys;
1424
1425 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
1426 if (RT_LIKELY(pPage))
1427 {
1428 int rc = VINF_SUCCESS;
1429 RTRCPTR RCPtr = NIL_RTRCPTR;
1430 RTHCPTR HCPtr = NIL_RTHCPTR;
1431#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1432 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &HCPtr);
1433 AssertRC(rc);
1434#endif
1435 if (RT_SUCCESS(rc) && fChanged)
1436 {
1437 RCPtr = (RTRCPTR)(RTRCUINTPTR)(pVM->pgm.s.GCPtrCR3Mapping + (1 + iPdpt) * PAGE_SIZE);
1438 rc = PGMMap(pVM, (RTRCUINTPTR)RCPtr, PGM_PAGE_GET_HCPHYS(pPage), PAGE_SIZE, 0);
1439 }
1440 if (RT_SUCCESS(rc))
1441 {
1442 pPGM->apGstPaePDsR3[iPdpt] = (R3PTRTYPE(PX86PDPAE))HCPtr;
1443# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1444 pPGM->apGstPaePDsR0[iPdpt] = (R0PTRTYPE(PX86PDPAE))HCPtr;
1445# endif
1446 if (fChanged)
1447 {
1448 pPGM->aGCPhysGstPaePDs[iPdpt] = GCPhys;
1449 pPGM->apGstPaePDsRC[iPdpt] = (RCPTRTYPE(PX86PDPAE))RCPtr;
1450 }
1451
1452 pgmUnlock(pVM);
1453 return pPGM->CTX_SUFF(apGstPaePDs)[iPdpt];
1454 }
1455 }
1456
1457 /* Invalid page or some failure, invalidate the entry. */
1458 pPGM->aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
1459 pPGM->apGstPaePDsR3[iPdpt] = 0;
1460# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1461 pPGM->apGstPaePDsR0[iPdpt] = 0;
1462# endif
1463 pPGM->apGstPaePDsRC[iPdpt] = 0;
1464
1465 pgmUnlock(pVM);
1466 return NULL;
1467}
1468#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1469
1470
1471#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3
1472/**
1473 * Performs the lazy mapping of the AMD64 guest PML4.
1474 *
1475 * @returns Pointer to the mapping.
1476 * @param pPGM The PGM instance data.
1477 */
1478PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM)
1479{
1480 Assert(!pPGM->CTX_SUFF(pGstAmd64Pml4));
1481 PVM pVM = PGMCPU2VM(pPGM);
1482 pgmLock(pVM);
1483
1484 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, pPGM->GCPhysCR3);
1485 AssertReturn(pPage, NULL);
1486
1487 RTHCPTR HCPtrGuestCR3;
1488 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pPGM->GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
1489 AssertRCReturn(rc, NULL);
1490
1491 pPGM->pGstAmd64Pml4R3 = (R3PTRTYPE(PX86PML4))HCPtrGuestCR3;
1492# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
1493 pPGM->pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
1494# endif
1495
1496 pgmUnlock(pVM);
1497 return pPGM->CTX_SUFF(pGstAmd64Pml4);
1498}
1499#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R3 */
1500
1501
1502/**
1503 * Gets the specified page directory pointer table entry.
1504 *
1505 * @returns PDP entry
1506 * @param pVCpu VMCPU handle.
1507 * @param iPdpt PDPT index
1508 */
1509VMMDECL(X86PDPE) PGMGstGetPaePDPtr(PVMCPU pVCpu, unsigned iPdpt)
1510{
1511 Assert(iPdpt <= 3);
1512 return pgmGstGetPaePDPTPtr(&pVCpu->pgm.s)->a[iPdpt & 3];
1513}
1514
1515
1516/**
1517 * Gets the current CR3 register value for the shadow memory context.
1518 * @returns CR3 value.
1519 * @param pVCpu VMCPU handle.
1520 */
1521VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
1522{
1523 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
1524 AssertPtrReturn(pPoolPage, 0);
1525 return pPoolPage->Core.Key;
1526}
1527
1528
1529/**
1530 * Gets the current CR3 register value for the nested memory context.
1531 * @returns CR3 value.
1532 * @param pVCpu VMCPU handle.
1533 */
1534VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVMCPU pVCpu, PGMMODE enmShadowMode)
1535{
1536 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1537 return pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
1538}
1539
1540
1541/**
1542 * Gets the current CR3 register value for the HC intermediate memory context.
1543 * @returns CR3 value.
1544 * @param pVM The VM handle.
1545 */
1546VMMDECL(RTHCPHYS) PGMGetInterHCCR3(PVM pVM)
1547{
1548 switch (pVM->pgm.s.enmHostMode)
1549 {
1550 case SUPPAGINGMODE_32_BIT:
1551 case SUPPAGINGMODE_32_BIT_GLOBAL:
1552 return pVM->pgm.s.HCPhysInterPD;
1553
1554 case SUPPAGINGMODE_PAE:
1555 case SUPPAGINGMODE_PAE_GLOBAL:
1556 case SUPPAGINGMODE_PAE_NX:
1557 case SUPPAGINGMODE_PAE_GLOBAL_NX:
1558 return pVM->pgm.s.HCPhysInterPaePDPT;
1559
1560 case SUPPAGINGMODE_AMD64:
1561 case SUPPAGINGMODE_AMD64_GLOBAL:
1562 case SUPPAGINGMODE_AMD64_NX:
1563 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
1564 return pVM->pgm.s.HCPhysInterPaePDPT;
1565
1566 default:
1567 AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode));
1568 return ~0;
1569 }
1570}
1571
1572
1573/**
1574 * Gets the current CR3 register value for the RC intermediate memory context.
1575 * @returns CR3 value.
1576 * @param pVM The VM handle.
1577 * @param pVCpu VMCPU handle.
1578 */
1579VMMDECL(RTHCPHYS) PGMGetInterRCCR3(PVM pVM, PVMCPU pVCpu)
1580{
1581 switch (pVCpu->pgm.s.enmShadowMode)
1582 {
1583 case PGMMODE_32_BIT:
1584 return pVM->pgm.s.HCPhysInterPD;
1585
1586 case PGMMODE_PAE:
1587 case PGMMODE_PAE_NX:
1588 return pVM->pgm.s.HCPhysInterPaePDPT;
1589
1590 case PGMMODE_AMD64:
1591 case PGMMODE_AMD64_NX:
1592 return pVM->pgm.s.HCPhysInterPaePML4;
1593
1594 case PGMMODE_EPT:
1595 case PGMMODE_NESTED:
1596 return 0; /* not relevant */
1597
1598 default:
1599 AssertMsgFailed(("enmShadowMode=%d\n", pVCpu->pgm.s.enmShadowMode));
1600 return ~0;
1601 }
1602}
1603
1604
1605/**
1606 * Gets the CR3 register value for the 32-Bit intermediate memory context.
1607 * @returns CR3 value.
1608 * @param pVM The VM handle.
1609 */
1610VMMDECL(RTHCPHYS) PGMGetInter32BitCR3(PVM pVM)
1611{
1612 return pVM->pgm.s.HCPhysInterPD;
1613}
1614
1615
1616/**
1617 * Gets the CR3 register value for the PAE intermediate memory context.
1618 * @returns CR3 value.
1619 * @param pVM The VM handle.
1620 */
1621VMMDECL(RTHCPHYS) PGMGetInterPaeCR3(PVM pVM)
1622{
1623 return pVM->pgm.s.HCPhysInterPaePDPT;
1624}
1625
1626
1627/**
1628 * Gets the CR3 register value for the AMD64 intermediate memory context.
1629 * @returns CR3 value.
1630 * @param pVM The VM handle.
1631 */
1632VMMDECL(RTHCPHYS) PGMGetInterAmd64CR3(PVM pVM)
1633{
1634 return pVM->pgm.s.HCPhysInterPaePML4;
1635}
1636
1637
1638/**
1639 * Performs and schedules necessary updates following a CR3 load or reload.
1640 *
1641 * This will normally involve mapping the guest PD or nPDPT.
1642 *
1643 * @returns VBox status code.
1644 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
1645 * safely be ignored and overridden since the FF will be set too then.
1646 * @param pVCpu VMCPU handle.
1647 * @param cr3 The new cr3.
1648 * @param fGlobal Indicates whether this is a global flush or not.
1649 */
1650VMMDECL(int) PGMFlushTLB(PVMCPU pVCpu, uint64_t cr3, bool fGlobal)
1651{
1652 PVM pVM = pVCpu->CTX_SUFF(pVM);
1653
1654 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1655
1656 /*
1657 * Always flag the necessary updates; this is required for hardware acceleration.
1658 */
1659 /** @todo optimize this, it shouldn't always be necessary. */
1660 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1661 if (fGlobal)
1662 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1663 LogFlow(("PGMFlushTLB: cr3=%RX64 OldCr3=%RX64 fGlobal=%d\n", cr3, pVCpu->pgm.s.GCPhysCR3, fGlobal));
1664
1665 /*
1666 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1667 */
1668 int rc = VINF_SUCCESS;
1669 RTGCPHYS GCPhysCR3;
1670 switch (pVCpu->pgm.s.enmGuestMode)
1671 {
1672 case PGMMODE_PAE:
1673 case PGMMODE_PAE_NX:
1674 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1675 break;
1676 case PGMMODE_AMD64:
1677 case PGMMODE_AMD64_NX:
1678 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1679 break;
1680 default:
1681 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1682 break;
1683 }
1684
1685 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1686 {
1687 RTGCPHYS GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
1688 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1689 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1690 if (RT_LIKELY(rc == VINF_SUCCESS))
1691 {
1692 if (!pVM->pgm.s.fMappingsFixed)
1693 {
1694 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1695 }
1696 }
1697 else
1698 {
1699 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
1700 Assert(VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
1701 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
1702 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
1703 if (!pVM->pgm.s.fMappingsFixed)
1704 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
1705 }
1706
1707 if (fGlobal)
1708 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
1709 else
1710 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBNewCR3));
1711 }
1712 else
1713 {
1714# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
1715 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1716 if (pPool->cDirtyPages)
1717 {
1718 pgmLock(pVM);
1719 pgmPoolResetDirtyPages(pVM);
1720 pgmUnlock(pVM);
1721 }
1722# endif
1723 /*
1724 * Check if we have a pending update of the CR3 monitoring.
1725 */
1726 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1727 {
1728 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1729 Assert(!pVM->pgm.s.fMappingsFixed);
1730 }
1731 if (fGlobal)
1732 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
1733 else
1734 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLBSameCR3));
1735 }
1736
1737 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,FlushTLB), a);
1738 return rc;
1739}
1740
1741
1742/**
1743 * Performs and schedules necessary updates following a CR3 load or reload when
1744 * using nested or extended paging.
1745 *
1746 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
1747 * TLB and triggering a SyncCR3.
1748 *
1749 * This will normally involve mapping the guest PD or nPDPT.
1750 *
1751 * @returns VBox status code.
1752 * @retval VINF_SUCCESS.
1753 * @retval (If called when not in nested paging mode: VINF_PGM_SYNC_CR3 if monitoring
1754 * requires a CR3 sync. This can safely be ignored and overridden since
1755 * the FF will be set in that case as well.)
1756 * @param pVCpu VMCPU handle.
1757 * @param cr3 The new cr3.
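 *
 * @remarks Rough usage sketch for a nested paging / EPT caller; the CPUM getter
 *          and the fGlobal flag are assumptions, details differ in the real
 *          HWACCM code:
 * @code
 *      if (HWACCMIsNestedPagingActive(pVM))
 *          rc = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
 *      else
 *          rc = PGMFlushTLB(pVCpu, CPUMGetGuestCR3(pVCpu), fGlobal);
 * @endcode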
1758 */
1759VMMDECL(int) PGMUpdateCR3(PVMCPU pVCpu, uint64_t cr3)
1760{
1761 PVM pVM = pVCpu->CTX_SUFF(pVM);
1762
1763 LogFlow(("PGMUpdateCR3: cr3=%RX64 OldCr3=%RX64\n", cr3, pVCpu->pgm.s.GCPhysCR3));
1764
1765 /* We assume we're only called in nested paging mode. */
1766 Assert(pVM->pgm.s.fMappingsFixed);
1767 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3));
1768 Assert(HWACCMIsNestedPagingActive(pVM) || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
1769
1770 /*
1771 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
1772 */
1773 int rc = VINF_SUCCESS;
1774 RTGCPHYS GCPhysCR3;
1775 switch (pVCpu->pgm.s.enmGuestMode)
1776 {
1777 case PGMMODE_PAE:
1778 case PGMMODE_PAE_NX:
1779 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1780 break;
1781 case PGMMODE_AMD64:
1782 case PGMMODE_AMD64_NX:
1783 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1784 break;
1785 default:
1786 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1787 break;
1788 }
1789 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1790 {
1791 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1792 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1793 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
1794 }
1795 return rc;
1796}
1797
1798
1799/**
1800 * Synchronize the paging structures.
1801 *
1802 * This function is called in response to the VM_FF_PGM_SYNC_CR3 and
1803 * VM_FF_PGM_SYNC_CR3_NONGLOBAL. Those two force action flags are set
1804 * in several places, most importantly whenever the CR3 is loaded.
1805 *
1806 * @returns VBox status code.
1807 * @param pVCpu VMCPU handle.
1808 * @param cr0 Guest context CR0 register
1809 * @param cr3 Guest context CR3 register
1810 * @param cr4 Guest context CR4 register
1811 * @param fGlobal Including global page directories or not
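 *
 * @remarks Simplified sketch of how a caller might react to the force action
 *          flags (illustrative only; the real logic lives in the EM loop and
 *          the CPUM getters are assumed):
 * @code
 *      if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
 *          rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu),
 *                          CPUMGetGuestCR4(pVCpu),
 *                          VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3) /*fGlobal*/);
 * @endcode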
1812 */
1813VMMDECL(int) PGMSyncCR3(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
1814{
1815 PVM pVM = pVCpu->CTX_SUFF(pVM);
1816 int rc;
1817
1818 /*
1819 * The pool may have pending stuff and even require a return to ring-3 to
1820 * clear the whole thing.
1821 */
1822 rc = pgmPoolSyncCR3(pVCpu);
1823 if (rc != VINF_SUCCESS)
1824 return rc;
1825
1826 /*
1827 * We might be called when we shouldn't.
1828 *
1829 * The mode switching will ensure that the PD is resynced
1830 * after every mode switch. So, if we find ourselves here
1831 * when in protected or real mode we can safely disable the
1832 * FF and return immediately.
1833 */
1834 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
1835 {
1836 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
1837 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1838 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1839 return VINF_SUCCESS;
1840 }
1841
1842 /* If global pages are not supported, then all flushes are global. */
1843 if (!(cr4 & X86_CR4_PGE))
1844 fGlobal = true;
1845 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
1846 VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
1847
1848 /*
1849 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
1850 * This should be done before SyncCR3.
1851 */
1852 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
1853 {
1854 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
1855
1856 RTGCPHYS GCPhysCR3Old = pVCpu->pgm.s.GCPhysCR3;
1857 RTGCPHYS GCPhysCR3;
1858 switch (pVCpu->pgm.s.enmGuestMode)
1859 {
1860 case PGMMODE_PAE:
1861 case PGMMODE_PAE_NX:
1862 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAE_PAGE_MASK);
1863 break;
1864 case PGMMODE_AMD64:
1865 case PGMMODE_AMD64_NX:
1866 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_AMD64_PAGE_MASK);
1867 break;
1868 default:
1869 GCPhysCR3 = (RTGCPHYS)(cr3 & X86_CR3_PAGE_MASK);
1870 break;
1871 }
1872
1873 if (pVCpu->pgm.s.GCPhysCR3 != GCPhysCR3)
1874 {
1875 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
1876 rc = PGM_BTH_PFN(MapCR3, pVCpu)(pVCpu, GCPhysCR3);
1877 }
1878#ifdef IN_RING3
1879 if (rc == VINF_PGM_SYNC_CR3)
1880 rc = pgmPoolSyncCR3(pVCpu);
1881#else
1882 if (rc == VINF_PGM_SYNC_CR3)
1883 {
1884 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3Old;
1885 return rc;
1886 }
1887#endif
1888 AssertRCReturn(rc, rc);
1889 AssertRCSuccessReturn(rc, VERR_INTERNAL_ERROR);
1890 }
1891
1892 /*
1893 * Let the 'Bth' function do the work and we'll just keep track of the flags.
1894 */
1895 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1896 rc = PGM_BTH_PFN(SyncCR3, pVCpu)(pVCpu, cr0, cr3, cr4, fGlobal);
1897 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
1898 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1899 if (rc == VINF_SUCCESS)
1900 {
1901 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
1902 {
1903 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1904 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
1905 }
1906
1907 /*
1908 * Check if we have a pending update of the CR3 monitoring.
1909 */
1910 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MONITOR_CR3)
1911 {
1912 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
1913 Assert(!pVM->pgm.s.fMappingsFixed);
1914 }
1915 }
1916
1917 /*
1918 * Now flush the CR3 (guest context).
1919 */
1920 if (rc == VINF_SUCCESS)
1921 PGM_INVL_VCPU_TLBS(pVCpu);
1922 return rc;
1923}
1924
1925
1926/**
1927 * Called whenever CR0 or CR4 changes in a way which may affect
1928 * the paging mode.
1929 *
1930 * @returns VBox status code, with the following informational code for
1931 * VM scheduling.
1932 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
1933 * @retval VINF_PGM_CHANGE_MODE if we're in RC or R0 and the mode changes.
1934 * (I.e. not in R3.)
1935 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
1936 *
1937 * @param pVCpu VMCPU handle.
1938 * @param cr0 The new cr0.
1939 * @param cr4 The new cr4.
1940 * @param efer The new extended feature enable register.
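 *
 * @remarks Hypothetical caller sketch for a CR0/CR4/EFER write (the actual
 *          callers are in CPUM/EM; the CPUM getters are assumptions):
 * @code
 *      int rc = PGMChangeMode(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR4(pVCpu),
 *                             CPUMGetGuestEFER(pVCpu));
 *      if (rc == VINF_PGM_CHANGE_MODE)
 *          return rc;  // ring-3 performs the actual switch via PGMR3ChangeMode.
 * @endcode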
1941 */
1942VMMDECL(int) PGMChangeMode(PVMCPU pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer)
1943{
1944 PVM pVM = pVCpu->CTX_SUFF(pVM);
1945 PGMMODE enmGuestMode;
1946
1947 /*
1948 * Calc the new guest mode.
1949 */
1950 if (!(cr0 & X86_CR0_PE))
1951 enmGuestMode = PGMMODE_REAL;
1952 else if (!(cr0 & X86_CR0_PG))
1953 enmGuestMode = PGMMODE_PROTECTED;
1954 else if (!(cr4 & X86_CR4_PAE))
1955 enmGuestMode = PGMMODE_32_BIT;
1956 else if (!(efer & MSR_K6_EFER_LME))
1957 {
1958 if (!(efer & MSR_K6_EFER_NXE))
1959 enmGuestMode = PGMMODE_PAE;
1960 else
1961 enmGuestMode = PGMMODE_PAE_NX;
1962 }
1963 else
1964 {
1965 if (!(efer & MSR_K6_EFER_NXE))
1966 enmGuestMode = PGMMODE_AMD64;
1967 else
1968 enmGuestMode = PGMMODE_AMD64_NX;
1969 }
1970
1971 /*
1972 * Did it change?
1973 */
1974 if (pVCpu->pgm.s.enmGuestMode == enmGuestMode)
1975 return VINF_SUCCESS;
1976
1977 /* Flush the TLB */
1978 PGM_INVL_VCPU_TLBS(pVCpu);
1979
1980#ifdef IN_RING3
1981 return PGMR3ChangeMode(pVM, pVCpu, enmGuestMode);
1982#else
1983 LogFlow(("PGMChangeMode: returns VINF_PGM_CHANGE_MODE.\n"));
1984 return VINF_PGM_CHANGE_MODE;
1985#endif
1986}
1987
1988
1989/**
1990 * Gets the current guest paging mode.
1991 *
1992 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
1993 *
1994 * @returns The current paging mode.
1995 * @param pVCpu VMCPU handle.
1996 */
1997VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
1998{
1999 return pVCpu->pgm.s.enmGuestMode;
2000}
2001
2002
2003/**
2004 * Gets the current shadow paging mode.
2005 *
2006 * @returns The current paging mode.
2007 * @param pVCpu VMCPU handle.
2008 */
2009VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
2010{
2011 return pVCpu->pgm.s.enmShadowMode;
2012}
2013
2014/**
2015 * Gets the current host paging mode.
2016 *
2017 * @returns The current paging mode.
2018 * @param pVM The VM handle.
2019 */
2020VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
2021{
2022 switch (pVM->pgm.s.enmHostMode)
2023 {
2024 case SUPPAGINGMODE_32_BIT:
2025 case SUPPAGINGMODE_32_BIT_GLOBAL:
2026 return PGMMODE_32_BIT;
2027
2028 case SUPPAGINGMODE_PAE:
2029 case SUPPAGINGMODE_PAE_GLOBAL:
2030 return PGMMODE_PAE;
2031
2032 case SUPPAGINGMODE_PAE_NX:
2033 case SUPPAGINGMODE_PAE_GLOBAL_NX:
2034 return PGMMODE_PAE_NX;
2035
2036 case SUPPAGINGMODE_AMD64:
2037 case SUPPAGINGMODE_AMD64_GLOBAL:
2038 return PGMMODE_AMD64;
2039
2040 case SUPPAGINGMODE_AMD64_NX:
2041 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
2042 return PGMMODE_AMD64_NX;
2043
2044 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
2045 }
2046
2047 return PGMMODE_INVALID;
2048}
2049
2050
2051/**
2052 * Get mode name.
2053 *
2054 * @returns read-only name string.
2055 * @param enmMode The mode whose name is desired.
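 *
 * @remarks Typically used for logging, e.g. (sketch):
 * @code
 *      Log(("Guest paging mode: %s\n", PGMGetModeName(PGMGetGuestMode(pVCpu))));
 * @endcode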
2056 */
2057VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
2058{
2059 switch (enmMode)
2060 {
2061 case PGMMODE_REAL: return "Real";
2062 case PGMMODE_PROTECTED: return "Protected";
2063 case PGMMODE_32_BIT: return "32-bit";
2064 case PGMMODE_PAE: return "PAE";
2065 case PGMMODE_PAE_NX: return "PAE+NX";
2066 case PGMMODE_AMD64: return "AMD64";
2067 case PGMMODE_AMD64_NX: return "AMD64+NX";
2068 case PGMMODE_NESTED: return "Nested";
2069 case PGMMODE_EPT: return "EPT";
2070 default: return "unknown mode value";
2071 }
2072}
2073
2074
2075/**
2076 * Check if any pgm pool pages are marked dirty (not monitored)
2077 *
2078 * @returns true if any pgm pool pages are marked dirty, false if not.
2079 * @param pVM The VM to operate on.
2080 */
2081VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
2082{
2083 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
2084}
2085
2086/**
2087 * Check if the PGM lock is currently taken.
2088 *
2089 * @returns bool locked/not locked
2090 * @param pVM The VM to operate on.
2091 */
2092VMMDECL(bool) PGMIsLocked(PVM pVM)
2093{
2094 return PDMCritSectIsOwned(&pVM->pgm.s.CritSect);
2095}
2096
2097
2098/**
2099 * Check if this VCPU currently owns the PGM lock.
2100 *
2101 * @returns bool owner/not owner
2102 * @param pVM The VM to operate on.
2103 */
2104VMMDECL(bool) PGMIsLockOwner(PVM pVM)
2105{
2106 return PDMCritSectIsOwner(&pVM->pgm.s.CritSect);
2107}
2108
2109
2110/**
2111 * Acquire the PGM lock.
2112 *
2113 * @returns VBox status code
2114 * @param pVM The VM to operate on.
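 *
 * @remarks Typical pairing (sketch; mirrors the usage elsewhere in this file):
 * @code
 *      pgmLock(pVM);
 *      // ... touch PGM internal structures ...
 *      pgmUnlock(pVM);
 * @endcode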
2115 */
2116int pgmLock(PVM pVM)
2117{
2118 int rc = PDMCritSectEnter(&pVM->pgm.s.CritSect, VERR_SEM_BUSY);
2119#if defined(IN_RC) || defined(IN_RING0)
2120 if (rc == VERR_SEM_BUSY)
2121 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_LOCK, 0);
2122#endif
2123 AssertMsg(rc == VINF_SUCCESS, ("%Rrc\n", rc));
2124 return rc;
2125}
2126
2127
2128/**
2129 * Release the PGM lock.
2130 *
2131 * @returns VBox status code
2132 * @param pVM The VM to operate on.
2133 */
2134void pgmUnlock(PVM pVM)
2135{
2136 PDMCritSectLeave(&pVM->pgm.s.CritSect);
2137}
2138
2139#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2140
2141/**
2142 * Temporarily maps one guest page specified by GC physical address.
2143 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2144 *
2145 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2146 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2147 *
2148 * @returns VBox status.
2149 * @param pVM VM handle.
2150 * @param GCPhys GC Physical address of the page.
2151 * @param ppv Where to store the address of the mapping.
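 *
 * @remarks Mapping sketch (the destination buffer pvDst is hypothetical;
 *          GCPhys must be page aligned as asserted below):
 * @code
 *      void *pv;
 *      int rc = PGMDynMapGCPage(pVM, GCPhys, &pv);
 *      if (RT_SUCCESS(rc))
 *          memcpy(pvDst, pv, PAGE_SIZE);   // use it before the small mapping area wraps around
 * @endcode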
2152 */
2153VMMDECL(int) PGMDynMapGCPage(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2154{
2155 AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp\n", GCPhys));
2156
2157 /*
2158 * Get the ram range.
2159 */
2160 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2161 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2162 pRam = pRam->CTX_SUFF(pNext);
2163 if (!pRam)
2164 {
2165 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2166 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2167 }
2168
2169 /*
2170 * Pass it on to PGMDynMapHCPage.
2171 */
2172 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2173 //Log(("PGMDynMapGCPage: GCPhys=%RGp HCPhys=%RHp\n", GCPhys, HCPhys));
2174#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2175 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2176#else
2177 PGMDynMapHCPage(pVM, HCPhys, ppv);
2178#endif
2179 return VINF_SUCCESS;
2180}
2181
2182
2183/**
2184 * Temporarily maps one guest page specified by unaligned GC physical address.
2185 * These pages must have a physical mapping in HC, i.e. they cannot be MMIO pages.
2186 *
2187 * Be WARNED that the dynamic page mapping area is small, 8 pages, thus the space is
2188 * reused after 8 mappings (or perhaps a few more if you score with the cache).
2189 *
2190 * The caller is aware that only the specified page is mapped and that really bad things
2191 * will happen if writing beyond the page!
2192 *
2193 * @returns VBox status.
2194 * @param pVM VM handle.
2195 * @param GCPhys GC Physical address within the page to be mapped.
2196 * @param ppv Where to store the mapping address corresponding to GCPhys.
2197 */
2198VMMDECL(int) PGMDynMapGCPageOff(PVM pVM, RTGCPHYS GCPhys, void **ppv)
2199{
2200 /*
2201 * Get the ram range.
2202 */
2203 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2204 while (pRam && GCPhys - pRam->GCPhys >= pRam->cb)
2205 pRam = pRam->CTX_SUFF(pNext);
2206 if (!pRam)
2207 {
2208 AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
2209 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
2210 }
2211
2212 /*
2213 * Pass it on to PGMDynMapHCPage.
2214 */
2215 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]);
2216#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
2217 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
2218#else
2219 PGMDynMapHCPage(pVM, HCPhys, ppv);
2220#endif
2221 *ppv = (void *)((uintptr_t)*ppv | (GCPhys & PAGE_OFFSET_MASK));
2222 return VINF_SUCCESS;
2223}
2224
2225# ifdef IN_RC
2226
2227/**
2228 * Temporarily maps one host page specified by HC physical address.
2229 *
2230 * Be WARNED that the dynamic page mapping area is small, 16 pages, thus the space is
2231 * reused after 16 mappings (or perhaps a few more if you score with the cache).
2232 *
2233 * @returns VINF_SUCCESS, will bail out to ring-3 on failure.
2234 * @param pVM VM handle.
2235 * @param HCPhys HC Physical address of the page.
2236 * @param ppv Where to store the address of the mapping. This is the
2237 * address of the PAGE not the exact address corresponding
2238 * to HCPhys. Use PGMDynMapHCPageOff if you care for the
2239 * page offset.
2240 */
2241VMMDECL(int) PGMDynMapHCPage(PVM pVM, RTHCPHYS HCPhys, void **ppv)
2242{
2243 AssertMsg(!(HCPhys & PAGE_OFFSET_MASK), ("HCPhys=%RHp\n", HCPhys));
2244
2245 /*
2246 * Check the cache.
2247 */
2248 register unsigned iCache;
2249 for (iCache = 0; iCache < RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache); iCache++)
2250 {
2251 static const uint8_t au8Trans[MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT][RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache)] =
2252 {
2253 { 0, 9, 10, 11, 12, 13, 14, 15},
2254 { 0, 1, 10, 11, 12, 13, 14, 15},
2255 { 0, 1, 2, 11, 12, 13, 14, 15},
2256 { 0, 1, 2, 3, 12, 13, 14, 15},
2257 { 0, 1, 2, 3, 4, 13, 14, 15},
2258 { 0, 1, 2, 3, 4, 5, 14, 15},
2259 { 0, 1, 2, 3, 4, 5, 6, 15},
2260 { 0, 1, 2, 3, 4, 5, 6, 7},
2261 { 8, 1, 2, 3, 4, 5, 6, 7},
2262 { 8, 9, 2, 3, 4, 5, 6, 7},
2263 { 8, 9, 10, 3, 4, 5, 6, 7},
2264 { 8, 9, 10, 11, 4, 5, 6, 7},
2265 { 8, 9, 10, 11, 12, 5, 6, 7},
2266 { 8, 9, 10, 11, 12, 13, 6, 7},
2267 { 8, 9, 10, 11, 12, 13, 14, 7},
2268 { 8, 9, 10, 11, 12, 13, 14, 15},
2269 };
2270 AssertCompile(RT_ELEMENTS(au8Trans) == 16);
2271 AssertCompile(RT_ELEMENTS(au8Trans[0]) == 8);
2272
2273 if (pVM->pgm.s.aHCPhysDynPageMapCache[iCache] == HCPhys)
2274 {
2275 int iPage = au8Trans[pVM->pgm.s.iDynPageMapLast][iCache];
2276
2277 /* The cache can get out of sync with locked entries. (10 locked, 2 overwrites its cache position, last = 11, lookup 2 -> page 10 instead of 2) */
2278 if ((pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u & X86_PTE_PG_MASK) == HCPhys)
2279 {
2280 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2281 *ppv = pv;
2282 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheHits);
2283 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d iCache=%d\n", HCPhys, pv, iPage, iCache));
2284 return VINF_SUCCESS;
2285 }
2286 LogFlow(("Out of sync entry %d\n", iPage));
2287 }
2288 }
2289 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) == 8);
2290 AssertCompile((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) == 16);
2291 STAM_COUNTER_INC(&pVM->pgm.s.StatRCDynMapCacheMisses);
2292
2293 /*
2294 * Update the page tables.
2295 */
2296 unsigned iPage = pVM->pgm.s.iDynPageMapLast;
2297 unsigned i;
2298 for (i = 0; i < (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT); i++)
2299 {
2300 pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
2301 if (!pVM->pgm.s.aLockedDynPageMapCache[iPage])
2302 break;
2303 iPage++;
2304 }
2305 AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
2306
2307 pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
2308 pVM->pgm.s.paDynPageMap32BitPTEsGC[iPage].u = (uint32_t)HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2309 pVM->pgm.s.paDynPageMapPaePTEsGC[iPage].u = HCPhys | X86_PTE_P | X86_PTE_A | X86_PTE_D;
2310 pVM->pgm.s.aLockedDynPageMapCache[iPage] = 0;
2311
2312 void *pv = pVM->pgm.s.pbDynPageMapBaseGC + (iPage << PAGE_SHIFT);
2313 *ppv = pv;
2314 ASMInvalidatePage(pv);
2315 Log4(("PGMGCDynMapHCPage: HCPhys=%RHp pv=%p iPage=%d\n", HCPhys, pv, iPage));
2316 return VINF_SUCCESS;
2317}
2318
2319
2320/**
2321 * Temporarily lock a dynamic page to prevent it from being reused.
2322 *
2323 * @param pVM VM handle.
2324 * @param GCPage GC address of page
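 *
 * @remarks Lock/unlock sketch for keeping a mapping alive across further
 *          PGMDynMapHCPage calls (illustrative only; pv is a previously
 *          returned dynamic mapping):
 * @code
 *      PGMDynLockHCPage(pVM, (uint8_t *)pv);
 *      // ... more PGMDynMapHCPage calls that must not recycle pv ...
 *      PGMDynUnlockHCPage(pVM, (uint8_t *)pv);
 * @endcode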
2325 */
2326VMMDECL(void) PGMDynLockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2327{
2328 unsigned iPage;
2329
2330 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2331 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2332 ASMAtomicIncU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2333 Log4(("PGMDynLockHCPage %RRv iPage=%d\n", GCPage, iPage));
2334}
2335
2336
2337/**
2338 * Unlock a dynamic page
2339 *
2340 * @param pVM VM handle.
2341 * @param GCPage GC address of page
2342 */
2343VMMDECL(void) PGMDynUnlockHCPage(PVM pVM, RCPTRTYPE(uint8_t *) GCPage)
2344{
2345 unsigned iPage;
2346
2347 AssertCompile(RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache) == 2* RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache));
2348 AssertCompileMemberSize(VM, pgm.s.aLockedDynPageMapCache, sizeof(uint32_t) * (MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)));
2349
2350 Assert(GCPage >= pVM->pgm.s.pbDynPageMapBaseGC && GCPage < (pVM->pgm.s.pbDynPageMapBaseGC + MM_HYPER_DYNAMIC_SIZE));
2351 iPage = ((uintptr_t)(GCPage - pVM->pgm.s.pbDynPageMapBaseGC)) >> PAGE_SHIFT;
2352 Assert(pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2353 ASMAtomicDecU32(&pVM->pgm.s.aLockedDynPageMapCache[iPage]);
2354 Log4(("PGMDynUnlockHCPage %RRv iPage=%d\n", GCPage, iPage));
2355}
2356
2357
2358# ifdef VBOX_STRICT
2359/**
2360 * Check for lock leaks.
2361 *
2362 * @param pVM VM handle.
2363 */
2364VMMDECL(void) PGMDynCheckLocks(PVM pVM)
2365{
2366 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.aLockedDynPageMapCache); i++)
2367 Assert(!pVM->pgm.s.aLockedDynPageMapCache[i]);
2368}
2369# endif /* VBOX_STRICT */
2370
2371# endif /* IN_RC */
2372#endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
2373
2374#if !defined(IN_R0) || defined(LOG_ENABLED)
2375
2376/** Format handler for PGMPAGE.
2377 * @copydoc FNRTSTRFORMATTYPE */
2378static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2379 const char *pszType, void const *pvValue,
2380 int cchWidth, int cchPrecision, unsigned fFlags,
2381 void *pvUser)
2382{
2383 size_t cch;
2384 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
2385 if (VALID_PTR(pPage))
2386 {
2387 char szTmp[64+80];
2388
2389 cch = 0;
2390
2391 /* The single char state stuff. */
2392 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
2393 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE(pPage)];
2394
2395#define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
2396 if (IS_PART_INCLUDED(5))
2397 {
2398 static const char s_achHandlerStates[4] = { '-', 't', 'w', 'a' };
2399 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)];
2400 szTmp[cch++] = s_achHandlerStates[PGM_PAGE_GET_HNDL_VIRT_STATE(pPage)];
2401 }
2402
2403 /* The type. */
2404 if (IS_PART_INCLUDED(4))
2405 {
2406 szTmp[cch++] = ':';
2407 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
2408 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][0];
2409 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][1];
2410 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE(pPage)][2];
2411 }
2412
2413 /* The numbers. */
2414 if (IS_PART_INCLUDED(3))
2415 {
2416 szTmp[cch++] = ':';
2417 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
2418 }
2419
2420 if (IS_PART_INCLUDED(2))
2421 {
2422 szTmp[cch++] = ':';
2423 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
2424 }
2425
2426 if (IS_PART_INCLUDED(6))
2427 {
2428 szTmp[cch++] = ':';
2429 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
2430 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS(pPage)];
2431 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
2432 }
2433#undef IS_PART_INCLUDED
2434
2435 cch = pfnOutput(pvArgOutput, szTmp, cch);
2436 }
2437 else
2438 cch = pfnOutput(pvArgOutput, "<bad-pgmpage-ptr>", sizeof("<bad-pgmpage-ptr>") - 1);
2439 return cch;
2440}
2441
2442
2443/** Format handler for PGMRAMRANGE.
2444 * @copydoc FNRTSTRFORMATTYPE */
2445static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
2446 const char *pszType, void const *pvValue,
2447 int cchWidth, int cchPrecision, unsigned fFlags,
2448 void *pvUser)
2449{
2450 size_t cch;
2451 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
2452 if (VALID_PTR(pRam))
2453 {
2454 char szTmp[80];
2455 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
2456 cch = pfnOutput(pvArgOutput, szTmp, cch);
2457 }
2458 else
2459 cch = pfnOutput(pvArgOutput, "<bad-pgmramrange-ptr>", sizeof("<bad-pgmramrange-ptr>") - 1);
2460 return cch;
2461}
2462
2463/** Format type handlers to be registered/deregistered. */
2464static const struct
2465{
2466 char szType[24];
2467 PFNRTSTRFORMATTYPE pfnHandler;
2468} g_aPgmFormatTypes[] =
2469{
2470 { "pgmpage", pgmFormatTypeHandlerPage },
2471 { "pgmramrange", pgmFormatTypeHandlerRamRange }
2472};
2473
2474#endif /* !IN_R0 || LOG_ENABLED */
2475
2476
2477/**
2478 * Registers the global string format types.
2479 *
2480 * This should be called at module load time or in some other manner that ensures
2481 * that it's called exactly one time.
2482 *
2483 * @returns IPRT status code on RTStrFormatTypeRegister failure.
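 *
 * @remarks Once registered, the custom types can be used from log statements,
 *          e.g. (sketch):
 * @code
 *      Log(("%R[pgmpage] in %R[pgmramrange]\n", pPage, pRam));
 * @endcode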
2484 */
2485VMMDECL(int) PGMRegisterStringFormatTypes(void)
2486{
2487#if !defined(IN_R0) || defined(LOG_ENABLED)
2488 int rc = VINF_SUCCESS;
2489 unsigned i;
2490 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2491 {
2492 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2493# ifdef IN_RING0
2494 if (rc == VERR_ALREADY_EXISTS)
2495 {
2496 /* in case of cleanup failure in ring-0 */
2497 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2498 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
2499 }
2500# endif
2501 }
2502 if (RT_FAILURE(rc))
2503 while (i-- > 0)
2504 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2505
2506 return rc;
2507#else
2508 return VINF_SUCCESS;
2509#endif
2510}
2511
2512
2513/**
2514 * Deregisters the global string format types.
2515 *
2516 * This should be called at module unload time or in some other manner that
2517 * ensures that it's called exactly one time.
2518 */
2519VMMDECL(void) PGMDeregisterStringFormatTypes(void)
2520{
2521#if !defined(IN_R0) || defined(LOG_ENABLED)
2522 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
2523 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
2524#endif
2525}
2526
2527#ifdef VBOX_STRICT
2528
2529/**
2530 * Asserts that there are no mapping conflicts.
2531 *
2532 * @returns Number of conflicts.
2533 * @param pVM The VM Handle.
2534 */
2535VMMDECL(unsigned) PGMAssertNoMappingConflicts(PVM pVM)
2536{
2537 unsigned cErrors = 0;
2538
2539 /* Only applies to raw mode -> 1 VCPU */
2540 Assert(pVM->cCpus == 1);
2541 PVMCPU pVCpu = &pVM->aCpus[0];
2542
2543 /*
2544 * Check for mapping conflicts.
2545 */
2546 for (PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
2547 pMapping;
2548 pMapping = pMapping->CTX_SUFF(pNext))
2549 {
2550 /** @todo This is slow and should be optimized, but since it's just assertions I don't care now. */
2551 for (RTGCPTR GCPtr = pMapping->GCPtr;
2552 GCPtr <= pMapping->GCPtrLast;
2553 GCPtr += PAGE_SIZE)
2554 {
2555 int rc = PGMGstGetPage(pVCpu, (RTGCPTR)GCPtr, NULL, NULL);
2556 if (rc != VERR_PAGE_TABLE_NOT_PRESENT)
2557 {
2558 AssertMsgFailed(("Conflict at %RGv with %s\n", GCPtr, R3STRING(pMapping->pszDesc)));
2559 cErrors++;
2560 break;
2561 }
2562 }
2563 }
2564
2565 return cErrors;
2566}
2567
2568
2569/**
2570 * Asserts that everything related to the guest CR3 is correctly shadowed.
2571 *
2572 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
2573 * and assert the correctness of the guest CR3 mapping before asserting that the
2574 * shadow page tables are in sync with the guest page tables.
2575 *
2576 * @returns Number of conflicts.
2577 * @param pVM The VM Handle.
2578 * @param pVCpu VMCPU handle.
2579 * @param cr3 The current guest CR3 register value.
2580 * @param cr4 The current guest CR4 register value.
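 *
 * @remarks Strict-build usage sketch (the CPUM getters are assumptions):
 * @code
 *      unsigned cErrors = PGMAssertCR3(pVM, pVCpu, CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu));
 *      Assert(cErrors == 0);
 * @endcode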
2581 */
2582VMMDECL(unsigned) PGMAssertCR3(PVM pVM, PVMCPU pVCpu, uint64_t cr3, uint64_t cr4)
2583{
2584 STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2585 pgmLock(pVM);
2586 unsigned cErrors = PGM_BTH_PFN(AssertCR3, pVCpu)(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
2587 pgmUnlock(pVM);
2588 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncCR3), a);
2589 return cErrors;
2590}
2591
2592#endif /* VBOX_STRICT */