VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMInternal.h@ 26263

Last change on this file since 26263 was 26202, checked in by vboxsync, 15 years ago

Broke up guest page fault and dirty page checking to avoid taking the big pgm lock. (risky change)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 153.7 KB
Line 
1/* $Id: PGMInternal.h 26202 2010-02-03 15:19:36Z vboxsync $ */
2/** @file
3 * PGM - Internal header file.
4 */
5
6/*
7 * Copyright (C) 2006-2010 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22#ifndef ___PGMInternal_h
23#define ___PGMInternal_h
24
25#include <VBox/cdefs.h>
26#include <VBox/types.h>
27#include <VBox/err.h>
28#include <VBox/stam.h>
29#include <VBox/param.h>
30#include <VBox/vmm.h>
31#include <VBox/mm.h>
32#include <VBox/pdmcritsect.h>
33#include <VBox/pdmapi.h>
34#include <VBox/dis.h>
35#include <VBox/dbgf.h>
36#include <VBox/log.h>
37#include <VBox/gmm.h>
38#include <VBox/hwaccm.h>
39#include <iprt/asm.h>
40#include <iprt/assert.h>
41#include <iprt/avl.h>
42#include <iprt/critsect.h>
43#include <iprt/sha.h>
44
45
46
47/** @defgroup grp_pgm_int Internals
48 * @ingroup grp_pgm
49 * @internal
50 * @{
51 */
52
53
/** @name PGM Compile Time Config
 * @{
 */

/**
 * Indicates that there are no guest mappings to care about.
 * Currently only raw-mode related code uses mappings, i.e. RC and R3 code.
 */
#if defined(IN_RING0) || !defined(VBOX_WITH_RAW_MODE)
# define PGM_WITHOUT_MAPPINGS
#endif

/**
 * Solve page-is-out-of-sync issues inside Guest Context (in PGMGC.cpp).
 * Comment it out if it will break something.
 */
#define PGM_OUT_OF_SYNC_IN_GC

/**
 * Check and skip global PDEs for non-global flushes.
 */
#define PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH

/**
 * Optimization for PAE page tables that are modified often.
 * (Not available in raw-mode context.)
 */
//#if 0 /* disabled again while debugging */
#ifndef IN_RC
# define PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
#endif
//#endif

/**
 * Sync N pages instead of a whole page table.
 */
#define PGM_SYNC_N_PAGES

/**
 * Number of pages to sync during a page fault.
 *
 * When PGMPOOL_WITH_GCPHYS_TRACKING is enabled using high values here
 * causes a lot of unnecessary extents and also is slower than taking more \#PFs.
 *
 * Note that \#PFs are much more expensive in the VT-x/AMD-V case due to
 * world switch overhead, so let's sync more.
 */
# ifdef IN_RING0
/* Chose 32 based on the compile test in #4219; 64 shows worse stats.
 * 32 again shows better results than 16; slightly more overhead in the \#PF handler,
 * but ~5% fewer faults.
 */
# define PGM_SYNC_NR_PAGES              32
#else
# define PGM_SYNC_NR_PAGES              8
#endif

/**
 * Number of PGMPhysRead/Write cache entries (must be <= sizeof(uint64_t),
 * since each entry occupies one bit in a uint64_t alignment-check mask).
 */
#define PGM_MAX_PHYSCACHE_ENTRIES       64
#define PGM_MAX_PHYSCACHE_ENTRIES_MASK  (PGM_MAX_PHYSCACHE_ENTRIES-1)


/** @def PGMPOOL_CFG_MAX_GROW
 * The maximum number of pages to add to the pool in one go.
 */
#define PGMPOOL_CFG_MAX_GROW            (_256K >> PAGE_SHIFT)

/** @def VBOX_STRICT_PGM_HANDLER_VIRTUAL
 * Enables some extra assertions for virtual handlers (mainly phys2virt related).
 */
#ifdef VBOX_STRICT
# define VBOX_STRICT_PGM_HANDLER_VIRTUAL
#endif

/** @def VBOX_WITH_NEW_LAZY_PAGE_ALLOC
 * Enables the experimental lazy page allocation code. */
/*#define VBOX_WITH_NEW_LAZY_PAGE_ALLOC */

/** @def VBOX_WITH_REAL_WRITE_MONITORED_PAGES
 * Enables real write monitoring of pages, i.e. mapping them read-only and
 * only making them writable when getting a write access \#PF. */
#define VBOX_WITH_REAL_WRITE_MONITORED_PAGES

/** @} */
139
140
/** @name PDPT and PML4 flags.
 * These are placed in the three bits available for system programs in
 * the PDPT and PML4 entries.
 * @{ */
/** The entry is a permanent one and it must always be present.
 * Never free such an entry. */
#define PGM_PLXFLAGS_PERMANENT          RT_BIT_64(10)
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PLXFLAGS_MAPPING            RT_BIT_64(11)
/** @} */

/** @name Page directory flags.
 * These are placed in the three bits available for system programs in
 * the page directory entries.
 * @{ */
/** Mapping (hypervisor allocated pagetable). */
#define PGM_PDFLAGS_MAPPING             RT_BIT_64(10)
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PDFLAGS_TRACK_DIRTY         RT_BIT_64(11)
/** @} */

/** @name Page flags.
 * These are placed in the three bits available for system programs in
 * the page entries.
 * @{ */
/** Made read-only to facilitate dirty bit tracking. */
#define PGM_PTFLAGS_TRACK_DIRTY         RT_BIT_64(9)

#ifndef PGM_PTFLAGS_CSAM_VALIDATED
/** Scanned and approved by CSAM (tm).
 * NOTE: Must be identical to the one defined in CSAMInternal.h!!
 * @todo Move PGM_PTFLAGS_* and PGM_PDFLAGS_* to VBox/pgm.h. */
#define PGM_PTFLAGS_CSAM_VALIDATED      RT_BIT_64(11)
#endif

/** @} */
177
/** @name Defines used to indicate the shadow and guest paging in the templates.
 * @{ */
#define PGM_TYPE_REAL                   1
#define PGM_TYPE_PROT                   2
#define PGM_TYPE_32BIT                  3
#define PGM_TYPE_PAE                    4
#define PGM_TYPE_AMD64                  5
#define PGM_TYPE_NESTED                 6
#define PGM_TYPE_EPT                    7
#define PGM_TYPE_MAX                    PGM_TYPE_EPT
/** @} */

/** Macro for checking if the guest is using paging.
 *
 * True when the guest type is one of the paged modes (32-bit, PAE, AMD64)
 * and the shadow type is neither nested paging nor EPT.
 *
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_PAGING(uGstType, uShwType) \
    (!(   (uGstType) <  PGM_TYPE_32BIT \
       || (uShwType) == PGM_TYPE_NESTED \
       || (uShwType) == PGM_TYPE_EPT))

/** Macro for checking if the guest supports the NX bit.
 *
 * True when the guest type is PAE or AMD64 and the shadow type is neither
 * nested paging nor EPT.
 *
 * @param   uGstType    PGM_TYPE_*
 * @param   uShwType    PGM_TYPE_*
 * @remark  ASSUMES certain order of the PGM_TYPE_* values.
 */
#define PGM_WITH_NX(uGstType, uShwType) \
    (!(   (uGstType) <  PGM_TYPE_PAE \
       || (uShwType) == PGM_TYPE_NESTED \
       || (uShwType) == PGM_TYPE_EPT))
209
210
/** @def PGM_HCPHYS_2_PTR
 * Maps a HC physical page pool address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   HCPhys  The HC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In RC this uses PGMDynMapHCPage(), so it will consume part of the
 *          small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
     PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
     pgmR0DynMapHCPageInlined(&(pVM)->pgm.s, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
     MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
#endif

/** @def PGM_HCPHYS_2_PTR_BY_PGM
 * Maps a HC physical page pool address to a virtual address, taking the PGM
 * instance data instead of the VM handle.
 *
 * @returns VBox status code.
 * @param   pPGM    The PGM instance data.
 * @param   HCPhys  The HC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In RC this uses PGMDynMapHCPage(), so it will consume part of the
 *          small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
     pgmR0DynMapHCPageInlined(pPGM, HCPhys, (void **)(ppv))
#else
# define PGM_HCPHYS_2_PTR_BY_PGM(pPGM, HCPhys, ppv) \
     PGM_HCPHYS_2_PTR(PGM2VM(pPGM), HCPhys, (void **)(ppv))
#endif

/** @def PGM_GCPHYS_2_PTR
 * Maps a GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In RC this uses PGMDynMapGCPage(), so it will consume part of the
 *          small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef IN_RC
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
     PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
     pgmR0DynMapGCPageInlined(&(pVM)->pgm.s, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif

/** @def PGM_GCPHYS_2_PTR_BY_PGMCPU
 * Maps a GC physical page address to a virtual address, taking the per-CPU
 * PGM instance data instead of the VM handle.
 *
 * @returns VBox status code.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In RC this uses PGMDynMapGCPage(), so it will consume part of the
 *          small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
# define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
     pgmR0DynMapGCPageInlined(PGMCPU2PGM(pPGM), GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_BY_PGMCPU(pPGM, GCPhys, ppv) \
     PGM_GCPHYS_2_PTR(PGMCPU2VM(pPGM), GCPhys, ppv)
#endif

/** @def PGM_GCPHYS_2_PTR_EX
 * Maps an unaligned GC physical page address to a virtual address.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 * @param   GCPhys  The GC physical address to map to a virtual one.
 * @param   ppv     Where to store the virtual address. No need to cast this.
 *
 * @remark  In RC this uses PGMDynMapGCPageOff(), so it will consume part of
 *          the small page window employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
     PGMDynMapGCPageOff(pVM, GCPhys, (void **)(ppv))
#else
# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
#endif
316
/** @def PGM_INVL_PG
 * Invalidates a page.
 *
 * @param   pVCpu       The VMCPU handle.
 * @param   GCVirt      The virtual address of the page to invalidate.
 *
 * @remark  The ring-0 and ring-3 variants expand to the same HWACCM call.
 */
#ifdef IN_RC
# define PGM_INVL_PG(pVCpu, GCVirt)             ASMInvalidatePage((void *)(GCVirt))
#elif defined(IN_RING0)
# define PGM_INVL_PG(pVCpu, GCVirt)             HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
#else
# define PGM_INVL_PG(pVCpu, GCVirt)             HWACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))
#endif

/** @def PGM_INVL_PG_ALL_VCPU
 * Invalidates a page on all VCPUs.
 *
 * @param   pVM         The VM handle.
 * @param   GCVirt      The virtual address of the page to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      ASMInvalidatePage((void *)(GCVirt))
#elif defined(IN_RING0)
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
#else
# define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt)      HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))
#endif

/** @def PGM_INVL_BIG_PG
 * Invalidates a 4MB page directory entry.
 * (In RC this reloads CR3 since single big-page invalidation isn't done there.)
 *
 * @param   pVCpu       The VMCPU handle.
 * @param   GCVirt      The virtual address within the page directory to invalidate.
 */
#ifdef IN_RC
# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HWACCMFlushTLB(pVCpu)
#else
# define PGM_INVL_BIG_PG(pVCpu, GCVirt)         HWACCMFlushTLB(pVCpu)
#endif

/** @def PGM_INVL_VCPU_TLBS()
 * Invalidates the TLBs of the specified VCPU.
 *
 * @param   pVCpu       The VMCPU handle.
 */
#ifdef IN_RC
# define PGM_INVL_VCPU_TLBS(pVCpu)              ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_VCPU_TLBS(pVCpu)              HWACCMFlushTLB(pVCpu)
#else
# define PGM_INVL_VCPU_TLBS(pVCpu)              HWACCMFlushTLB(pVCpu)
#endif

/** @def PGM_INVL_ALL_VCPU_TLBS()
 * Invalidates the TLBs of all VCPUs.
 *
 * @param   pVM         The VM handle.
 */
#ifdef IN_RC
# define PGM_INVL_ALL_VCPU_TLBS(pVM)            ASMReloadCR3()
#elif defined(IN_RING0)
# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HWACCMFlushTLBOnAllVCpus(pVM)
#else
# define PGM_INVL_ALL_VCPU_TLBS(pVM)            HWACCMFlushTLBOnAllVCpus(pVM)
#endif
384
/** Size of the GCPtrConflict array in PGMMAPPING.
 * @remarks Must be a power of two. */
#define PGMMAPPING_CONFLICT_MAX         8

/**
 * Structure for tracking GC Mappings.
 *
 * This structure is used in a linked list in both GC and HC.
 */
typedef struct PGMMAPPING
{
    /** Pointer to next entry. */
    R3PTRTYPE(struct PGMMAPPING *)      pNextR3;
    /** Pointer to next entry. */
    R0PTRTYPE(struct PGMMAPPING *)      pNextR0;
    /** Pointer to next entry. */
    RCPTRTYPE(struct PGMMAPPING *)      pNextRC;
    /** Indicate whether this entry is finalized. */
    bool                                fFinalized;
    /** Start Virtual address. */
    RTGCPTR                             GCPtr;
    /** Last Virtual address (inclusive). */
    RTGCPTR                             GCPtrLast;
    /** Range size (bytes). */
    RTGCPTR                             cb;
    /** Pointer to relocation callback function. */
    R3PTRTYPE(PFNPGMRELOCATE)           pfnRelocate;
    /** User argument to the callback. */
    R3PTRTYPE(void *)                   pvUser;
    /** Mapping description / name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
    /** Last 8 addresses that caused conflicts (ring buffer of
     *  PGMMAPPING_CONFLICT_MAX entries). */
    RTGCPTR                             aGCPtrConflicts[PGMMAPPING_CONFLICT_MAX];
    /** Number of conflicts for this hypervisor mapping. */
    uint32_t                            cConflicts;
    /** Number of page tables. */
    uint32_t                            cPTs;

    /** Array of page table mapping data. Each entry
     * describes one page table. The array can be longer
     * than the declared length (variable-size tail).
     */
    struct
    {
        /** The HC physical address of the page table. */
        RTHCPHYS                        HCPhysPT;
        /** The HC physical address of the first PAE page table. */
        RTHCPHYS                        HCPhysPaePT0;
        /** The HC physical address of the second PAE page table. */
        RTHCPHYS                        HCPhysPaePT1;
        /** The HC virtual address of the 32-bit page table. */
        R3PTRTYPE(PX86PT)               pPTR3;
        /** The HC virtual address of the two PAE page tables. (i.e. 1024 entries instead of 512) */
        R3PTRTYPE(PX86PTPAE)            paPaePTsR3;
        /** The RC virtual address of the 32-bit page table. */
        RCPTRTYPE(PX86PT)               pPTRC;
        /** The RC virtual address of the two PAE page tables. */
        RCPTRTYPE(PX86PTPAE)            paPaePTsRC;
        /** The R0 virtual address of the 32-bit page table. */
        R0PTRTYPE(PX86PT)               pPTR0;
        /** The R0 virtual address of the two PAE page tables. */
        R0PTRTYPE(PX86PTPAE)            paPaePTsR0;
    } aPTs[1];
} PGMMAPPING;
/** Pointer to structure for tracking GC Mappings. */
typedef struct PGMMAPPING *PPGMMAPPING;
451
452
/**
 * Physical page access handler structure.
 *
 * This is used to keep track of physical address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMPHYSHANDLER
{
    /** Core node for the AVL tree keyed by GC physical range. */
    AVLROGCPHYSNODECORE                 Core;
    /** Access type. */
    PGMPHYSHANDLERTYPE                  enmType;
    /** Number of pages to update. */
    uint32_t                            cPages;
    /** Pointer to R3 callback function. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)      pfnHandlerR3;
    /** User argument for R3 handlers. */
    R3PTRTYPE(void *)                   pvUserR3;
    /** Pointer to R0 callback function. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)      pfnHandlerR0;
    /** User argument for R0 handlers. */
    R0PTRTYPE(void *)                   pvUserR0;
    /** Pointer to RC callback function. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)      pfnHandlerRC;
    /** User argument for RC handlers. */
    RCPTRTYPE(void *)                   pvUserRC;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
} PGMPHYSHANDLER;
/** Pointer to a physical page access handler structure. */
typedef PGMPHYSHANDLER *PPGMPHYSHANDLER;


/**
 * Cache node for the physical addresses covered by a virtual handler.
 */
typedef struct PGMPHYS2VIRTHANDLER
{
    /** Core node for the tree based on physical ranges. */
    AVLROGCPHYSNODECORE                 Core;
    /** Offset from this struct to the PGMVIRTHANDLER structure. */
    int32_t                             offVirtHandler;
    /** Offset of the next alias relative to this one.
     * Bit 0 is used for indicating whether we're in the tree.
     * Bit 1 is used for indicating that we're the head node.
     */
    int32_t                             offNextAlias;
} PGMPHYS2VIRTHANDLER;
/** Pointer to a phys to virtual handler structure. */
typedef PGMPHYS2VIRTHANDLER *PPGMPHYS2VIRTHANDLER;

/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the tree. */
#define PGMPHYS2VIRTHANDLER_IN_TREE     RT_BIT(0)
/** The bit in PGMPHYS2VIRTHANDLER::offNextAlias used to indicate that the
 * node is in the head of an alias chain.
 * The PGMPHYS2VIRTHANDLER_IN_TREE is always set if this bit is set. */
#define PGMPHYS2VIRTHANDLER_IS_HEAD     RT_BIT(1)
/** The mask to apply to PGMPHYS2VIRTHANDLER::offNextAlias to get the offset. */
#define PGMPHYS2VIRTHANDLER_OFF_MASK    (~(int32_t)3)


/**
 * Virtual page access handler structure.
 *
 * This is used to keep track of virtual address ranges
 * which are being monitored in some kind of way.
 */
typedef struct PGMVIRTHANDLER
{
    /** Core node for the tree based on virtual ranges. */
    AVLROGCPTRNODECORE                  Core;
    /** Size of the range (in bytes). */
    RTGCPTR                             cb;
    /** Number of cache pages. */
    uint32_t                            cPages;
    /** Access type. */
    PGMVIRTHANDLERTYPE                  enmType;
    /** Pointer to the RC callback function. */
    RCPTRTYPE(PFNPGMRCVIRTHANDLER)      pfnHandlerRC;
#if HC_ARCH_BITS == 64
    /** Explicit alignment padding (64-bit hosts only). */
    RTRCPTR                             padding;
#endif
    /** Pointer to the R3 callback function for invalidation. */
    R3PTRTYPE(PFNPGMR3VIRTINVALIDATE)   pfnInvalidateR3;
    /** Pointer to the R3 callback function. */
    R3PTRTYPE(PFNPGMR3VIRTHANDLER)      pfnHandlerR3;
    /** Description / Name. For easing debugging. */
    R3PTRTYPE(const char *)             pszDesc;
#ifdef VBOX_WITH_STATISTICS
    /** Profiling of this handler. */
    STAMPROFILE                         Stat;
#endif
    /** Array of cached physical addresses for the monitored range. */
    PGMPHYS2VIRTHANDLER                 aPhysToVirt[HC_ARCH_BITS == 32 ? 1 : 2];
} PGMVIRTHANDLER;
/** Pointer to a virtual page access handler structure. */
typedef PGMVIRTHANDLER *PPGMVIRTHANDLER;
554
555
/**
 * Page type.
 *
 * @remarks This enum has to fit in a 3-bit field (see PGMPAGE::u3Type).
 * @remarks This is used in the saved state, so changes to it require bumping
 *          the saved state version.
 * @todo    So, convert to \#defines!
 */
typedef enum PGMPAGETYPE
{
    /** The usual invalid zero entry. */
    PGMPAGETYPE_INVALID = 0,
    /** RAM page. (RWX) */
    PGMPAGETYPE_RAM,
    /** MMIO2 page. (RWX) */
    PGMPAGETYPE_MMIO2,
    /** MMIO2 page aliased over an MMIO page. (RWX)
     * See PGMHandlerPhysicalPageAlias(). */
    PGMPAGETYPE_MMIO2_ALIAS_MMIO,
    /** Shadowed ROM. (RWX) */
    PGMPAGETYPE_ROM_SHADOW,
    /** ROM page. (R-X) */
    PGMPAGETYPE_ROM,
    /** MMIO page. (---) */
    PGMPAGETYPE_MMIO,
    /** End of valid entries. */
    PGMPAGETYPE_END
} PGMPAGETYPE;
AssertCompile(PGMPAGETYPE_END <= 7);

/** @name Page type predicates.
 * @remarks These rely on the declaration order of the PGMPAGETYPE values.
 * @{ */
#define PGMPAGETYPE_IS_READABLE(type)   ( (type) <= PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_WRITEABLE(type)  ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_RWX(type)        ( (type) <= PGMPAGETYPE_ROM_SHADOW )
#define PGMPAGETYPE_IS_ROX(type)        ( (type) == PGMPAGETYPE_ROM )
#define PGMPAGETYPE_IS_NP(type)         ( (type) == PGMPAGETYPE_MMIO )
/** @} */


/**
 * A Physical Guest Page tracking structure.
 *
 * The format of this structure is complicated because we have to fit a lot
 * of information into as few bits as possible. The format is also subject
 * to change (there is one coming up soon). Which means that we'll be
 * using PGM_PAGE_GET_*, PGM_PAGE_IS_ and PGM_PAGE_SET_* macros for *all*
 * accesses to the structure.
 */
typedef struct PGMPAGE
{
    /** The physical address and the Page ID.
     * Bits 0-27 hold the Page ID, bits 28 and up hold the host page frame
     * number (HCPhys >> 12) — see PGM_PAGE_GET/SET_HCPHYS and _PAGEID. */
    RTHCPHYS    HCPhysAndPageID;
    /** Combination of:
     *  - [0-7]: u2HandlerPhysStateY - the physical handler state
     *    (PGM_PAGE_HNDL_PHYS_STATE_*).
     *  - [8-9]: u2HandlerVirtStateY - the virtual handler state
     *    (PGM_PAGE_HNDL_VIRT_STATE_*).
     *  - [15]: fWrittenToY - flag indicating that a write monitored page was
     *    written to when set.
     *  - [10-14]: 5 unused bits.
     * @remarks Warning! All accesses to the bits are hardcoded.
     *
     * @todo    Change this to a union with both bitfields, u8 and u accessors.
     *          That'll help deal with some of the hardcoded accesses.
     *
     * @todo    Include uStateY and uTypeY as well so it becomes 32-bit. This
     *          will make it possible to turn some of the 16-bit accesses into
     *          32-bit ones, which may be efficient (stalls).
     */
    RTUINT16U   u16MiscY;
    /** The page state.
     * Only 2 bits are really needed for this. */
    uint8_t     uStateY;
    /** The page type (PGMPAGETYPE).
     * Only 3 bits are really needed for this. */
    uint8_t     uTypeY;
    /** Usage tracking (page pool). */
    uint16_t    u16TrackingY;
    /** The number of read locks on this page. */
    uint8_t     cReadLocksY;
    /** The number of write locks on this page. */
    uint8_t     cWriteLocksY;
} PGMPAGE;
AssertCompileSize(PGMPAGE, 16);
/** Pointer to a physical guest page. */
typedef PGMPAGE *PPGMPAGE;
/** Pointer to a const physical guest page. */
typedef const PGMPAGE *PCPGMPAGE;
/** Pointer to a physical guest page pointer. */
typedef PPGMPAGE *PPPGMPAGE;
647
648
/**
 * Clears the page structure.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR(pPage) \
    do { \
        (pPage)->HCPhysAndPageID     = 0; \
        (pPage)->uStateY             = 0; \
        (pPage)->uTypeY              = 0; \
        (pPage)->u16MiscY.u          = 0; \
        (pPage)->u16TrackingY        = 0; \
        (pPage)->cReadLocksY         = 0; \
        (pPage)->cWriteLocksY        = 0; \
    } while (0)

/**
 * Initializes the page structure.
 *
 * Packs the host physical frame number (HCPhys >> 12) into bits 28 and up of
 * HCPhysAndPageID and the page ID into the low 28 bits, i.e.
 * (HCPhys >> 12) << 28 == HCPhys << (28-12).
 *
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_INIT(pPage, _HCPhys, _idPage, _uType, _uState) \
    do { \
        RTHCPHYS SetHCPhysTmp = (_HCPhys); \
        AssertFatal(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000))); \
        (pPage)->HCPhysAndPageID     = (SetHCPhysTmp << (28-12)) | ((_idPage) & UINT32_C(0x0fffffff)); \
        (pPage)->uStateY             = (_uState); \
        (pPage)->uTypeY              = (_uType); \
        (pPage)->u16MiscY.u          = 0; \
        (pPage)->u16TrackingY        = 0; \
        (pPage)->cReadLocksY         = 0; \
        (pPage)->cWriteLocksY        = 0; \
    } while (0)

/**
 * Initializes the page structure of a ZERO page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   pVM         The VM handle (for getting the zero page address).
 * @param   uType       The page type (PGMPAGETYPE).
 */
#define PGM_PAGE_INIT_ZERO(pPage, pVM, uType) \
    PGM_PAGE_INIT((pPage), (pVM)->pgm.s.HCPhysZeroPg, NIL_GMM_PAGEID, (uType), PGM_PAGE_STATE_ZERO)


/** @name The Page state, PGMPAGE::uStateY.
 * @{ */
/** The zero page.
 * This is a per-VM page that's never ever mapped writable. */
#define PGM_PAGE_STATE_ZERO             0
/** An allocated page.
 * This is a per-VM page allocated from the page pool (or wherever
 * we get MMIO2 pages from if the type is MMIO2).
 */
#define PGM_PAGE_STATE_ALLOCATED        1
/** An allocated page that's being monitored for writes.
 * The shadow page table mappings are read-only. When a write occurs, the
 * fWrittenTo member is set, the page remapped as read-write and the state
 * moved back to allocated. */
#define PGM_PAGE_STATE_WRITE_MONITORED  2
/** The page is shared, aka. copy-on-write.
 * This is a page that's shared with other VMs. */
#define PGM_PAGE_STATE_SHARED           3
/** @} */


/**
 * Gets the page state.
 * @returns page state (PGM_PAGE_STATE_*).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_STATE(pPage)       ( (pPage)->uStateY )

/**
 * Sets the page state.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new page state.
 */
#define PGM_PAGE_SET_STATE(pPage, _uState) do { (pPage)->uStateY = (_uState); } while (0)


/**
 * Gets the host physical address of the guest page.
 * (Drops the low 28 page-ID bits, then restores the 4KB page alignment.)
 * @returns host physical address (RTHCPHYS).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HCPHYS(pPage)      ( ((pPage)->HCPhysAndPageID >> 28) << 12 )

/**
 * Sets the host physical address of the guest page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _HCPhys     The new host physical address (must be page aligned).
 */
#define PGM_PAGE_SET_HCPHYS(pPage, _HCPhys) \
    do { \
        RTHCPHYS SetHCPhysTmp = (_HCPhys); \
        AssertFatal(!(SetHCPhysTmp & ~UINT64_C(0x0000fffffffff000))); \
        (pPage)->HCPhysAndPageID = ((pPage)->HCPhysAndPageID & UINT32_C(0x0fffffff)) \
                                 | (SetHCPhysTmp << (28-12)); \
    } while (0)
746
/**
 * Get the Page ID.
 * @returns The Page ID (low 28 bits of HCPhysAndPageID); NIL_GMM_PAGEID if
 *          it's a ZERO page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGEID(pPage)      ( (uint32_t)(pPage)->HCPhysAndPageID & UINT32_C(0x0fffffff) )

/**
 * Sets the Page ID, preserving the host physical frame bits (28 and up).
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _idPage     The new page ID; only the low 28 bits are stored.
 */
#define PGM_PAGE_SET_PAGEID(pPage, _idPage) \
    do { \
        (pPage)->HCPhysAndPageID &= ~UINT64_C(0x0fffffff); \
        (pPage)->HCPhysAndPageID |= (_idPage) & UINT32_C(0x0fffffff); \
    } while (0)
763
/**
 * Get the Chunk ID.
 * @returns The Chunk ID; NIL_GMM_CHUNKID if it's a ZERO page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_CHUNKID(pPage)     ( PGM_PAGE_GET_PAGEID(pPage) >> GMM_CHUNKID_SHIFT )

/**
 * Get the index of the page within the allocation chunk.
 * @returns The page index.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_PAGE_IN_CHUNK(pPage)   ( (uint32_t)((pPage)->HCPhysAndPageID & GMM_PAGEID_IDX_MASK) )

/**
 * Gets the page type.
 * @returns The page type.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @remarks NOTE(review): the expansion is not wrapped in parentheses; harmless
 *          for a postfix member access, but consider wrapping it.
 */
#define PGM_PAGE_GET_TYPE(pPage)        (pPage)->uTypeY

/**
 * Sets the page type.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _enmType    The new page type (PGMPAGETYPE).
 */
#define PGM_PAGE_SET_TYPE(pPage, _enmType)  do { (pPage)->uTypeY = (_enmType); } while (0)

/**
 * Checks if the page is marked for MMIO.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_MMIO(pPage)         ( (pPage)->uTypeY == PGMPAGETYPE_MMIO )

/**
 * Checks if the page is backed by the ZERO page.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_ZERO(pPage)         ( (pPage)->uStateY == PGM_PAGE_STATE_ZERO )

/**
 * Checks if the page is backed by a SHARED page.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_SHARED(pPage)       ( (pPage)->uStateY == PGM_PAGE_STATE_SHARED )


/**
 * Marks the page as written to (for GMM change monitoring).
 * Sets bit 15 of u16MiscY, i.e. bit 7 of its high byte.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_SET_WRITTEN_TO(pPage)  do { (pPage)->u16MiscY.au8[1] |= UINT8_C(0x80); } while (0)

/**
 * Clears the written-to indicator.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_CLEAR_WRITTEN_TO(pPage) do { (pPage)->u16MiscY.au8[1] &= UINT8_C(0x7f); } while (0)

/**
 * Checks if the page was marked as written-to.
 * @returns true/false.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_IS_WRITTEN_TO(pPage)   ( !!((pPage)->u16MiscY.au8[1] & UINT8_C(0x80)) )
832
833
/** Enabled optimized access handler tests.
 * These optimizations make ASSUMPTIONS about the state values and the u16MiscY
 * layout. When enabled, the compiler should normally generate more compact
 * code.
 */
#define PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS 1

/** @name Physical Access Handler State values (PGMPAGE::u2HandlerPhysStateY).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 *          the correct state for a page with different handlers installed.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_PHYS_STATE_NONE       0
/** Monitoring is temporarily disabled. */
#define PGM_PAGE_HNDL_PHYS_STATE_DISABLED   1
/** Write access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_WRITE      2
/** All access is monitored. */
#define PGM_PAGE_HNDL_PHYS_STATE_ALL        3
/** @} */

/**
 * Gets the physical access handler state of a page.
 * @returns PGM_PAGE_HNDL_PHYS_STATE_* value.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) \
    ( (pPage)->u16MiscY.au8[0] )

/**
 * Sets the physical access handler state of a page.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new state value.
 */
#define PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, _uState) \
    do { (pPage)->u16MiscY.au8[0] = (_uState); } while (0)

/**
 * Checks if the page has any physical access handlers, including temporarily disabled ones.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage) \
    ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE )

/**
 * Checks if the page has any active physical access handlers.
 * (Relies on WRITE < ALL and both being above DISABLED in the priority order.)
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage) \
    ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE )


/** @name Virtual Access Handler State values (PGMPAGE::u2HandlerVirtStateY).
 *
 * @remarks The values are assigned in order of priority, so we can calculate
 *          the correct state for a page with different handlers installed.
 * @{ */
/** No handler installed. */
#define PGM_PAGE_HNDL_VIRT_STATE_NONE       0
/* 1 is reserved so the lineup is identical with the physical ones. */
/** Write access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_WRITE      2
/** All access is monitored. */
#define PGM_PAGE_HNDL_VIRT_STATE_ALL        3
/** @} */

/**
 * Gets the virtual access handler state of a page.
 * @returns PGM_PAGE_HNDL_VIRT_STATE_* value.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) ( (pPage)->u16MiscY.au8[1] & UINT8_C(0x03) )

/**
 * Sets the virtual access handler state of a page.
 * (Preserves the other bits of the byte, e.g. the written-to flag.)
 * @param   pPage       Pointer to the physical guest page tracking structure.
 * @param   _uState     The new state value.
 */
#define PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, _uState) \
    do { \
        (pPage)->u16MiscY.au8[1] = ((pPage)->u16MiscY.au8[1] & UINT8_C(0xfc)) \
                                 | ((_uState) & UINT8_C(0x03)); \
    } while (0)

/**
 * Checks if the page has any virtual access handlers.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage) \
    ( PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) != PGM_PAGE_HNDL_VIRT_STATE_NONE )

/**
 * Same as PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS - can't disable pages in
 * virtual handlers.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage) \
    PGM_PAGE_HAS_ANY_VIRTUAL_HANDLERS(pPage)
937
938
/**
 * Checks if the page has any access handlers, including temporarily disabled ones.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
/* Optimized form: tests the low two bits of both state bytes (phys state in
   au8[0], virt state in au8[1]) with a single 16-bit AND. */
# define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
    ( ((pPage)->u16MiscY.u & UINT16_C(0x0303)) != 0 )
#else
# define PGM_PAGE_HAS_ANY_HANDLERS(pPage) \
    (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE \
     || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) != PGM_PAGE_HNDL_VIRT_STATE_NONE )
#endif

/**
 * Checks if the page has any active access handlers.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
/* Optimized form: bit 1 of a state value is set exactly for the active states
   WRITE (2) and ALL (3), so testing 0x0202 covers both state bytes at once. */
# define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
    ( ((pPage)->u16MiscY.u & UINT16_C(0x0202)) != 0 )
#else
# define PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) \
    (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) >= PGM_PAGE_HNDL_PHYS_STATE_WRITE \
     || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) >= PGM_PAGE_HNDL_VIRT_STATE_WRITE )
#endif

/**
 * Checks if the page has any active access handlers catching all accesses.
 * @returns true/false
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#ifdef PGM_PAGE_WITH_OPTIMIZED_HANDLER_ACCESS
/* Optimized form: ORs the two state bytes and checks for ALL (3).
   NOTE(review): this also yields 3 for phys==DISABLED(1) && virt==WRITE(2),
   which the unoptimized variant would report as false - presumably that
   combination cannot occur when this optimization is enabled; confirm. */
# define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
    (   ( ((pPage)->u16MiscY.au8[0] | (pPage)->u16MiscY.au8[1]) & UINT8_C(0x3) ) \
     == PGM_PAGE_HNDL_PHYS_STATE_ALL )
#else
# define PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage) \
    (   PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL \
     || PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL )
#endif
981
982
/** @def PGM_PAGE_GET_TRACKING
 * Gets the packed shadow page pool tracking data associated with a guest page.
 * @returns uint16_t containing the data.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TRACKING(pPage)        ( (pPage)->u16TrackingY )

/** @def PGM_PAGE_SET_TRACKING
 * Sets the packed shadow page pool tracking data associated with a guest page.
 * @param   pPage           Pointer to the physical guest page tracking structure.
 * @param   u16TrackingData The tracking data to store.
 */
#define PGM_PAGE_SET_TRACKING(pPage, u16TrackingData) \
    do { (pPage)->u16TrackingY = (u16TrackingData); } while (0)

/** @def PGM_PAGE_GET_TD_CREFS
 * Gets the @a cRefs tracking data member (unpacked from the tracking word
 * using the PGMPOOL_TD_CREFS_* shift/mask).
 * @returns cRefs.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TD_CREFS(pPage) \
    ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK)

/** @def PGM_PAGE_GET_TD_IDX
 * Gets the @a idx tracking data member (unpacked from the tracking word
 * using the PGMPOOL_TD_IDX_* shift/mask).
 * @returns idx.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_TD_IDX(pPage) \
    ((PGM_PAGE_GET_TRACKING(pPage) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK)
1013
1014
/** Max number of locks on a page. */
#define PGM_PAGE_MAX_LOCKS                  UINT8_C(254)

/** Get the read lock count.
 * @returns count.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_READ_LOCKS(pPage)      ( (pPage)->cReadLocksY )

/** Get the write lock count.
 * @returns count.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_GET_WRITE_LOCKS(pPage)     ( (pPage)->cWriteLocksY )

/* Note: none of the INC/DEC macros below guard against overflow past
   PGM_PAGE_MAX_LOCKS or underflow past zero; the caller is responsible. */

/** Decrement the read lock counter.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_DEC_READ_LOCKS(pPage)      do { --(pPage)->cReadLocksY; } while (0)

/** Decrement the write lock counter.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_DEC_WRITE_LOCKS(pPage)     do { --(pPage)->cWriteLocksY; } while (0)

/** Increment the read lock counter.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_INC_READ_LOCKS(pPage)      do { ++(pPage)->cReadLocksY; } while (0)

/** Increment the write lock counter.
 * @param   pPage       Pointer to the physical guest page tracking structure.
 */
#define PGM_PAGE_INC_WRITE_LOCKS(pPage)     do { ++(pPage)->cWriteLocksY; } while (0)
1049
1050
#if 0
/** Enables sanity checking of write monitoring using CRC-32. */
# define PGMLIVESAVERAMPAGE_WITH_CRC32
#endif

/**
 * Per page live save tracking data.
 *
 * One instance per RAM page; kept compact (4 bytes, or 8 with the optional
 * CRC-32) since it is allocated for every page in a range during live save.
 */
typedef struct PGMLIVESAVERAMPAGE
{
    /** Number of times it has been dirtied. */
    uint32_t    cDirtied : 24;
    /** Whether it is currently dirty. */
    uint32_t    fDirty : 1;
    /** Ignore the page.
     * This is used for pages that have been MMIO, MMIO2 or ROM pages once. We will
     * deal with these after pausing the VM and DevPCI has said its bit about
     * remappings. */
    uint32_t    fIgnore : 1;
    /** Was a ZERO page last time around. */
    uint32_t    fZero : 1;
    /** Was a SHARED page last time around. */
    uint32_t    fShared : 1;
    /** Whether the page is/was write monitored in a previous pass. */
    uint32_t    fWriteMonitored : 1;
    /** Whether the page is/was write monitored earlier in this pass. */
    uint32_t    fWriteMonitoredJustNow : 1;
    /** Bits reserved for future use. */
    uint32_t    u2Reserved : 2;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
    /** CRC-32 for the page. This is for internal consistency checks. */
    uint32_t    u32Crc;
#endif
} PGMLIVESAVERAMPAGE;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
AssertCompileSize(PGMLIVESAVERAMPAGE, 8);
#else
AssertCompileSize(PGMLIVESAVERAMPAGE, 4);
#endif
/** Pointer to the per page live save tracking data. */
typedef PGMLIVESAVERAMPAGE *PPGMLIVESAVERAMPAGE;

/** The max value of PGMLIVESAVERAMPAGE::cDirtied.
 * (Identifier is misspelled - missing an 'E' - but kept as-is since it is
 * referenced elsewhere.) */
#define PGMLIVSAVEPAGE_MAX_DIRTIED 0x00fffff0
1095
1096
/**
 * Ram range for GC Phys to HC Phys conversion.
 *
 * Can be used for HC Virt to GC Phys and HC Virt to HC Phys
 * conversions too, but we'll let MM handle that for now.
 *
 * This structure is used by linked lists in both GC and HC.
 *
 * @remarks Variable sized: aPages[] is the real page array, allocated to
 *          cover cb bytes worth of pages (the [1] is just the declared
 *          minimum), hence the alignment padding right before it.
 */
typedef struct PGMRAMRANGE
{
    /** Start of the range. Page aligned. */
    RTGCPHYS                            GCPhys;
    /** Size of the range. (Page aligned of course). */
    RTGCPHYS                            cb;
    /** Pointer to the next RAM range - for R3. */
    R3PTRTYPE(struct PGMRAMRANGE *)     pNextR3;
    /** Pointer to the next RAM range - for R0. */
    R0PTRTYPE(struct PGMRAMRANGE *)     pNextR0;
    /** Pointer to the next RAM range - for RC. */
    RCPTRTYPE(struct PGMRAMRANGE *)     pNextRC;
    /** PGM_RAM_RANGE_FLAGS_* flags. */
    uint32_t                            fFlags;
    /** Last address in the range (inclusive). Page aligned (-1). */
    RTGCPHYS                            GCPhysLast;
    /** Start of the HC mapping of the range. This is only used for MMIO2. */
    R3PTRTYPE(void *)                   pvR3;
    /** Live save per page tracking data. */
    R3PTRTYPE(PPGMLIVESAVERAMPAGE)      paLSPages;
    /** The range description. */
    R3PTRTYPE(const char *)             pszDesc;
    /** Pointer to self - R0 pointer. */
    R0PTRTYPE(struct PGMRAMRANGE *)     pSelfR0;
    /** Pointer to self - RC pointer. */
    RCPTRTYPE(struct PGMRAMRANGE *)     pSelfRC;
    /** Padding to make aPage aligned on sizeof(PGMPAGE). */
    uint32_t                            au32Alignment2[HC_ARCH_BITS == 32 ? 1 : 3];
    /** Array of physical guest page tracking structures. */
    PGMPAGE                             aPages[1];
} PGMRAMRANGE;
/** Pointer to Ram range for GC Phys to HC Phys conversion. */
typedef PGMRAMRANGE *PPGMRAMRANGE;

/** @name PGMRAMRANGE::fFlags
 * @{ */
/** The RAM range is floating around as an independent guest mapping. */
#define PGM_RAM_RANGE_FLAGS_FLOATING        RT_BIT(20)
/** Ad hoc RAM range for an ROM mapping. */
#define PGM_RAM_RANGE_FLAGS_AD_HOC_ROM      RT_BIT(21)
/** Ad hoc RAM range for an MMIO mapping. */
#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO     RT_BIT(22)
/** Ad hoc RAM range for an MMIO2 mapping. */
#define PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2    RT_BIT(23)
/** @} */

/** Tests if a RAM range is an ad hoc one or not.
 * @returns true/false.
 * @param   pRam    The RAM range.
 */
#define PGM_RAM_RANGE_IS_AD_HOC(pRam) \
    (!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2) ) )
1157
1158
/**
 * Per page tracking structure for ROM image.
 *
 * A ROM image may have a shadow page, in which case we may have two pages
 * backing it. This structure contains the PGMPAGE for both while
 * PGMRAMRANGE have a copy of the active one. It is important that these
 * aren't out of sync in any regard other than page pool tracking data.
 */
typedef struct PGMROMPAGE
{
    /** The page structure for the virgin ROM page. */
    PGMPAGE     Virgin;
    /** The page structure for the shadow RAM page. */
    PGMPAGE     Shadow;
    /** The current protection setting. */
    PGMROMPROT  enmProt;
    /** Live save status information. Makes use of unused alignment space. */
    struct
    {
        /** The previous protection value. */
        uint8_t u8Prot;
        /** Written to flag set by the handler. */
        bool    fWrittenTo;
        /** Whether the shadow page is dirty or not. */
        bool    fDirty;
        /** Whether it was dirtied recently. */
        bool    fDirtiedRecently;
    } LiveSave;
} PGMROMPAGE;
AssertCompileSizeAlignment(PGMROMPAGE, 8);
/** Pointer to a ROM page tracking structure. */
typedef PGMROMPAGE *PPGMROMPAGE;
1191
1192
/**
 * A registered ROM image.
 *
 * This is needed to keep track of ROM image since they generally intrude
 * into a PGMRAMRANGE. It also keeps track of additional info like the
 * two page sets (read-only virgin and read-write shadow), the current
 * state of each page.
 *
 * Because access handlers cannot easily be executed in a different
 * context, the ROM ranges needs to be accessible and in all contexts.
 */
typedef struct PGMROMRANGE
{
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMROMRANGE *)     pNextR3;
    /** Pointer to the next range - R0. */
    R0PTRTYPE(struct PGMROMRANGE *)     pNextR0;
    /** Pointer to the next range - RC. */
    RCPTRTYPE(struct PGMROMRANGE *)     pNextRC;
    /** Pointer alignment */
    RTRCPTR                             RCPtrAlignment;
    /** Address of the range. */
    RTGCPHYS                            GCPhys;
    /** Address of the last byte in the range. */
    RTGCPHYS                            GCPhysLast;
    /** Size of the range. */
    RTGCPHYS                            cb;
    /** The flags (PGMPHYS_ROM_FLAGS_*). */
    uint32_t                            fFlags;
    /** The saved state range ID. */
    uint8_t                             idSavedState;
    /** Alignment padding. */
    uint8_t                             au8Alignment[3];
    /** Alignment padding ensuring that aPages is sizeof(PGMROMPAGE) aligned.
     * (Identifier "Alignemnt" is misspelled but kept as-is - it is layout
     * padding only.) */
    uint32_t                            au32Alignemnt[HC_ARCH_BITS == 32 ? 6 : 2];
    /** Pointer to the original bits when PGMPHYS_ROM_FLAGS_PERMANENT_BINARY was specified.
     * This is used for strictness checks. */
    R3PTRTYPE(const void *)             pvOriginal;
    /** The ROM description. */
    R3PTRTYPE(const char *)             pszDesc;
    /** The per page tracking structures. */
    PGMROMPAGE                          aPages[1];
} PGMROMRANGE;
/** Pointer to a ROM range. */
typedef PGMROMRANGE *PPGMROMRANGE;
1238
1239
/**
 * Live save per page data for an MMIO2 page.
 *
 * Not using PGMLIVESAVERAMPAGE here because we cannot use normal write monitoring
 * of MMIO2 pages. The current approach is using some optimistic SHA-1 +
 * CRC-32 for detecting changes as well as special handling of zero pages. This
 * is a TEMPORARY measure which isn't perfect, but hopefully it is good enough
 * for speeding things up. (We're using SHA-1 and not SHA-256 or SHA-512
 * because of speed (2.5x and 6x slower).)
 *
 * @todo Implement dirty MMIO2 page reporting that can be enabled during live
 *       save but normally is disabled. Since we can write monitor guest
 *       accesses on our own, we only need this for host accesses. Shouldn't be
 *       too difficult for DevVGA, VMMDev might be doable, the planned
 *       networking fun will be fun since it involves ring-0.
 */
typedef struct PGMLIVESAVEMMIO2PAGE
{
    /** Set if the page is considered dirty. */
    bool        fDirty;
    /** The number of scans this page has remained unchanged for.
     * Only updated for dirty pages. */
    uint8_t     cUnchangedScans;
    /** Whether this page was zero at the last scan. */
    bool        fZero;
    /** Alignment padding. */
    bool        fReserved;
    /** CRC-32 for the first half of the page.
     * This is used together with u32CrcH2 to quickly detect changes in the page
     * during the non-final passes. */
    uint32_t    u32CrcH1;
    /** CRC-32 for the second half of the page. */
    uint32_t    u32CrcH2;
    /** SHA-1 for the saved page.
     * This is used in the final pass to skip pages without changes. */
    uint8_t     abSha1Saved[RTSHA1_HASH_SIZE];
} PGMLIVESAVEMMIO2PAGE;
/** Pointer to a live save status data for an MMIO2 page. */
typedef PGMLIVESAVEMMIO2PAGE *PPGMLIVESAVEMMIO2PAGE;
1279
/**
 * A registered MMIO2 (= Device RAM) range.
 *
 * There are a few reason why we need to keep track of these
 * registrations. One of them is the deregistration & cleanup stuff,
 * while another is that the PGMRAMRANGE associated with such a region may
 * have to be removed from the ram range list.
 *
 * Overlapping with a RAM range has to be 100% or none at all. The pages
 * in the existing RAM range must not be ROM nor MMIO. A guru meditation
 * will be raised if a partial overlap or an overlap of ROM pages is
 * encountered. On an overlap we will free all the existing RAM pages and
 * put in the ram range pages instead.
 */
typedef struct PGMMMIO2RANGE
{
    /** The owner of the range. (a device) */
    PPDMDEVINSR3                        pDevInsR3;
    /** Pointer to the ring-3 mapping of the allocation. */
    RTR3PTR                             pvR3;
    /** Pointer to the next range - R3. */
    R3PTRTYPE(struct PGMMMIO2RANGE *)   pNextR3;
    /** Whether it's mapped or not. */
    bool                                fMapped;
    /** Whether it's overlapping or not. */
    bool                                fOverlapping;
    /** The PCI region number.
     * @remarks This ASSUMES that nobody will ever really need to have multiple
     *          PCI devices with matching MMIO region numbers on a single device. */
    uint8_t                             iRegion;
    /** The saved state range ID. */
    uint8_t                             idSavedState;
    /** Alignment padding for putting the ram range on a PGMPAGE alignment boundary.
     * NOTE(review): both arms of the ternary are 12, so the expression is
     * currently a constant - presumably kept in this form for when the 32-bit
     * and 64-bit layouts diverge; confirm before changing. (Identifier
     * "Alignemnt" is misspelled but kept as-is.) */
    uint8_t                             abAlignemnt[HC_ARCH_BITS == 32 ? 12 : 12];
    /** Live save per page tracking data. */
    R3PTRTYPE(PPGMLIVESAVEMMIO2PAGE)    paLSPages;
    /** The associated RAM range. */
    PGMRAMRANGE                         RamRange;
} PGMMMIO2RANGE;
/** Pointer to a MMIO2 range. */
typedef PGMMMIO2RANGE *PPGMMMIO2RANGE;
1321
1322
1323
1324
/**
 * PGMPhysRead/Write cache entry
 */
typedef struct PGMPHYSCACHEENTRY
{
    /** R3 pointer to physical page. */
    R3PTRTYPE(uint8_t *)                pbR3;
    /** GC Physical address for cache entry */
    RTGCPHYS                            GCPhys;
#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
    RTGCPHYS                            u32Padding0; /**< alignment padding. */
#endif
} PGMPHYSCACHEENTRY;

/**
 * PGMPhysRead/Write cache to reduce REM memory access overhead
 */
typedef struct PGMPHYSCACHE
{
    /** Bitmap of valid cache entries (bit N corresponds to Entry[N]). */
    uint64_t                            aEntries;
    /** Cache entries */
    PGMPHYSCACHEENTRY                   Entry[PGM_MAX_PHYSCACHE_ENTRIES];
} PGMPHYSCACHE;
1349
1350
/** Pointer to an allocation chunk ring-3 mapping. */
typedef struct PGMCHUNKR3MAP *PPGMCHUNKR3MAP;
/** Pointer to an allocation chunk ring-3 mapping pointer. */
typedef PPGMCHUNKR3MAP *PPPGMCHUNKR3MAP;

/**
 * Ring-3 tracking structure for an allocation chunk ring-3 mapping.
 *
 * The primary tree (Core) uses the chunk id as key.
 * The secondary tree (AgeCore) is used for ageing and uses ageing sequence number as key.
 */
typedef struct PGMCHUNKR3MAP
{
    /** The key is the chunk id. */
    AVLU32NODECORE                      Core;
    /** The key is the ageing sequence number. */
    AVLLU32NODECORE                     AgeCore;
    /** The current age thingy. */
    uint32_t                            iAge;
    /** The current reference count. */
    uint32_t volatile                   cRefs;
    /** The current permanent reference count. */
    uint32_t volatile                   cPermRefs;
    /** The mapping address. */
    void                               *pv;
} PGMCHUNKR3MAP;

/**
 * Allocation chunk ring-3 mapping TLB entry.
 */
typedef struct PGMCHUNKR3MAPTLBE
{
    /** The chunk id. */
    uint32_t volatile                   idChunk;
#if HC_ARCH_BITS == 64
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
    /** The chunk map. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pChunk;
#else
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pChunk;
#endif
} PGMCHUNKR3MAPTLBE;
/** Pointer to the an allocation chunk ring-3 mapping TLB entry. */
typedef PGMCHUNKR3MAPTLBE *PPGMCHUNKR3MAPTLBE;

/** The number of TLB entries in PGMCHUNKR3MAPTLB.
 * @remark  Must be a power of two value. */
#define PGM_CHUNKR3MAPTLB_ENTRIES   64

/**
 * Allocation chunk ring-3 mapping TLB.
 *
 * @remarks We use a TLB to speed up lookups by avoiding walking the AVL.
 *          At first glance this might look kinda odd since AVL trees are
 *          supposed to give the most optimal lookup times of all trees
 *          due to their balancing. However, take a tree with 1023 nodes
 *          in it, that's 10 levels, meaning that most searches have to go
 *          down 9 levels before they find what they want. This isn't fast
 *          compared to a TLB hit. There is the factor of cache misses,
 *          and of course the problem with trees and branch prediction.
 *          This is why we use TLBs in front of most of the trees.
 *
 * @todo    Generalize this TLB + AVL stuff, shouldn't be all that
 *          difficult when we switch to the new inlined AVL trees (from kStuff).
 */
typedef struct PGMCHUNKR3MAPTLB
{
    /** The TLB entries. */
    PGMCHUNKR3MAPTLBE   aEntries[PGM_CHUNKR3MAPTLB_ENTRIES];
} PGMCHUNKR3MAPTLB;

/**
 * Calculates the index of a guest page in the Ring-3 Chunk TLB.
 * @returns Chunk TLB index.
 * @param   idChunk     The Chunk ID.
 */
#define PGM_CHUNKR3MAPTLB_IDX(idChunk)      ( (idChunk) & (PGM_CHUNKR3MAPTLB_ENTRIES - 1) )
1430
1431
/**
 * Ring-3 guest page mapping TLB entry.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLBE
{
    /** Address of the page. */
    RTGCPHYS volatile                   GCPhys;
    /** The guest page. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMPAGE) volatile        pPage;
#else
    R3R0PTRTYPE(PPGMPAGE) volatile      pPage;
#endif
    /** Pointer to the page mapping tracking structure, PGMCHUNKR3MAP. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(PPGMCHUNKR3MAP) volatile  pMap;
#else
    R3R0PTRTYPE(PPGMCHUNKR3MAP) volatile pMap;
#endif
    /** The address */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(void *) volatile          pv;
#else
    R3R0PTRTYPE(void *) volatile        pv;
#endif
#if HC_ARCH_BITS == 32
    uint32_t                            u32Padding; /**< alignment padding. */
#endif
} PGMPAGER3MAPTLBE;
/** Pointer to an entry in the HC physical TLB. */
typedef PGMPAGER3MAPTLBE *PPGMPAGER3MAPTLBE;


/** The number of entries in the ring-3 guest page mapping TLB.
 * @remarks The value must be a power of two. */
#define PGM_PAGER3MAPTLB_ENTRIES 256

/**
 * Ring-3 guest page mapping TLB.
 * @remarks used in ring-0 as well at the moment.
 */
typedef struct PGMPAGER3MAPTLB
{
    /** The TLB entries. */
    PGMPAGER3MAPTLBE            aEntries[PGM_PAGER3MAPTLB_ENTRIES];
} PGMPAGER3MAPTLB;
/** Pointer to the ring-3 guest page mapping TLB. */
typedef PGMPAGER3MAPTLB *PPGMPAGER3MAPTLB;

/**
 * Calculates the index of the TLB entry for the specified guest page.
 * @returns Physical TLB index.
 * @param   GCPhys      The guest physical address.
 */
#define PGM_PAGER3MAPTLB_IDX(GCPhys)    ( ((GCPhys) >> PAGE_SHIFT) & (PGM_PAGER3MAPTLB_ENTRIES - 1) )
1488
1489
/**
 * Mapping cache usage set entry.
 *
 * @remarks 16-bit ints were chosen as the set is not expected to be used beyond
 *          the dynamic ring-0 and (to some extent) raw-mode context mapping
 *          cache. If it's extended to include ring-3, well, then something will
 *          have be changed here...
 */
typedef struct PGMMAPSETENTRY
{
    /** The mapping cache index. */
    uint16_t                    iPage;
    /** The number of references.
     * The max is UINT16_MAX - 1. */
    uint16_t                    cRefs;
#if HC_ARCH_BITS == 64
    uint32_t                    alignment;
#endif
    /** Pointer to the page. */
    RTR0PTR                     pvPage;
    /** The physical address for this entry. */
    RTHCPHYS                    HCPhys;
} PGMMAPSETENTRY;
/** Pointer to a mapping cache usage set entry. */
typedef PGMMAPSETENTRY *PPGMMAPSETENTRY;

/**
 * Mapping cache usage set.
 *
 * This is used in ring-0 and the raw-mode context to track dynamic mappings
 * done during exits / traps. The set is
 * (NOTE(review): the sentence above is truncated in the original source;
 * from the fields below, the set holds up to 64 entries with a small hash
 * for HCPhys lookup and is marked closed via PGMMAPSET_CLOSED - confirm
 * intended wording against the PGMRZDynMap code.)
 */
typedef struct PGMMAPSET
{
    /** The number of occupied entries.
     * This is PGMMAPSET_CLOSED if the set is closed and we're not supposed to do
     * dynamic mappings. */
    uint32_t                    cEntries;
    /** The start of the current subset.
     * This is UINT32_MAX if no subset is currently open. */
    uint32_t                    iSubset;
    /** The index of the current CPU, only valid if the set is open. */
    int32_t                     iCpu;
    uint32_t                    alignment;
    /** The entries. */
    PGMMAPSETENTRY              aEntries[64];
    /** HCPhys -> iEntry fast lookup table.
     * Use PGMMAPSET_HASH for hashing.
     * The entries may or may not be valid, check against cEntries. */
    uint8_t                     aiHashTable[128];
} PGMMAPSET;
AssertCompileSizeAlignment(PGMMAPSET, 8);
/** Pointer to the mapping cache set. */
typedef PGMMAPSET *PPGMMAPSET;

/** PGMMAPSET::cEntries value for a closed set. */
#define PGMMAPSET_CLOSED            UINT32_C(0xdeadc0fe)

/** Hash function for aiHashTable. */
#define PGMMAPSET_HASH(HCPhys)      (((HCPhys) >> PAGE_SHIFT) & 127)

/** The max fill size (strict builds). */
#define PGMMAPSET_MAX_FILL          (64U * 80U / 100U)
1553
1554
/** @name Context neutral page mapper TLB.
 *
 * Hoping to avoid some code and bug duplication parts of the GCxxx->CCPtr
 * code is written in a kind of context neutral way. Time will show whether
 * this actually makes sense or not...
 *
 * @todo this needs to be reconsidered and dropped/redone since the ring-0
 *       context ends up using a global mapping cache on some platforms
 *       (darwin).
 *
 * @{ */
/** @typedef PPGMPAGEMAPTLB
 * The page mapper TLB pointer type for the current context. */
/** @typedef PPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer type for the current context. */
/** @typedef PPPGMPAGEMAPTLBE
 * The page mapper TLB entry pointer pointer type for the current context. */
/** @def PGM_PAGEMAPTLB_ENTRIES
 * The number of TLB entries in the page mapper TLB for the current context. */
/** @def PGM_PAGEMAPTLB_IDX
 * Calculate the TLB index for a guest physical address.
 * @returns The TLB index.
 * @param   GCPhys      The guest physical address. */
/** @typedef PPGMPAGEMAP
 * Pointer to a page mapper unit for current context. */
/** @typedef PPPGMPAGEMAP
 * Pointer to a page mapper unit pointer for current context. */
#ifdef IN_RC
// typedef PPGMPAGEGCMAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGEGCMAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGEGCMAPTLBE             *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES           PGM_PAGEGCMAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)       PGM_PAGEGCMAPTLB_IDX(GCPhys)
 typedef void *                           PPGMPAGEMAP;
 typedef void **                          PPPGMPAGEMAP;
//#elif IN_RING0
// typedef PPGMPAGER0MAPTLB               PPGMPAGEMAPTLB;
// typedef PPGMPAGER0MAPTLBE              PPGMPAGEMAPTLBE;
// typedef PPGMPAGER0MAPTLBE             *PPPGMPAGEMAPTLBE;
//# define PGM_PAGEMAPTLB_ENTRIES          PGM_PAGER0MAPTLB_ENTRIES
//# define PGM_PAGEMAPTLB_IDX(GCPhys)      PGM_PAGER0MAPTLB_IDX(GCPhys)
// typedef PPGMCHUNKR0MAP                 PPGMPAGEMAP;
// typedef PPPGMCHUNKR0MAP                PPPGMPAGEMAP;
#else
 typedef PPGMPAGER3MAPTLB                 PPGMPAGEMAPTLB;
 typedef PPGMPAGER3MAPTLBE                PPGMPAGEMAPTLBE;
 typedef PPGMPAGER3MAPTLBE               *PPPGMPAGEMAPTLBE;
# define PGM_PAGEMAPTLB_ENTRIES           PGM_PAGER3MAPTLB_ENTRIES
# define PGM_PAGEMAPTLB_IDX(GCPhys)       PGM_PAGER3MAPTLB_IDX(GCPhys)
 typedef PPGMCHUNKR3MAP                   PPGMPAGEMAP;
 typedef PPPGMCHUNKR3MAP                  PPPGMPAGEMAP;
#endif
/** @} */
1608
1609
/** @name PGM Pool Indexes.
 * Aka. the unique shadow page identifier.
 * @{ */
/** NIL page pool IDX. */
#define NIL_PGMPOOL_IDX                 0
/** The first special index (the special indexes are fixed root pages). */
#define PGMPOOL_IDX_FIRST_SPECIAL       1
/** Page directory (32-bit root). */
#define PGMPOOL_IDX_PD                  1
/** Page Directory Pointer Table (PAE root). */
#define PGMPOOL_IDX_PDPT                2
/** AMD64 CR3 level index.*/
#define PGMPOOL_IDX_AMD64_CR3           3
/** Nested paging root.*/
#define PGMPOOL_IDX_NESTED_ROOT         4
/** The first normal index. */
#define PGMPOOL_IDX_FIRST               5
/** The last valid index. (inclusive, 14 bits) */
#define PGMPOOL_IDX_LAST                0x3fff
/** @} */

/** The NIL index for the parent chain. */
#define NIL_PGMPOOL_USER_INDEX          ((uint16_t)0xffff)
/** NIL value for present indexes; same sentinel as NIL_PGMPOOL_USER_INDEX. */
#define NIL_PGMPOOL_PRESENT_INDEX       ((uint16_t)0xffff)
1634
/**
 * Node in the chain linking a shadowed page to its parent (user).
 */
#pragma pack(1)
typedef struct PGMPOOLUSER
{
    /** The index to the next item in the chain. NIL_PGMPOOL_USER_INDEX is no next. */
    uint16_t            iNext;
    /** The user page index. */
    uint16_t            iUser;
    /** Index into the user table. */
    uint32_t            iUserTable;
} PGMPOOLUSER, *PPGMPOOLUSER;
typedef const PGMPOOLUSER *PCPGMPOOLUSER;
#pragma pack()
1650
1651
/** The NIL index for the phys ext chain. */
#define NIL_PGMPOOL_PHYSEXT_INDEX       ((uint16_t)0xffff)

/**
 * Node in the chain of physical cross reference extents.
 * @todo Calling this an 'extent' is not quite right, find a better name.
 */
#pragma pack(1)
typedef struct PGMPOOLPHYSEXT
{
    /** The index to the next item in the chain. NIL_PGMPOOL_PHYSEXT_INDEX is no next. */
    uint16_t            iNext;
    /** The user page indexes - up to three references per extent node.
     * (NOTE(review): original comment said "index" singular; presumably these
     * are shadow page pool indexes of referencing pages - confirm usage.) */
    uint16_t            aidx[3];
} PGMPOOLPHYSEXT, *PPGMPOOLPHYSEXT;
typedef const PGMPOOLPHYSEXT *PCPGMPOOLPHYSEXT;
#pragma pack()
1669
1670
/**
 * The kind of page that's being shadowed.
 *
 * Naming convention: "Shw" describes the shadow (host-side) table format,
 * "Gst" the guest table format being shadowed.
 */
typedef enum PGMPOOLKIND
{
    /** The virtual invalid 0 entry. */
    PGMPOOLKIND_INVALID = 0,
    /** The entry is free (=unused). */
    PGMPOOLKIND_FREE,

    /** Shw: 32-bit page table;     Gst: no paging  */
    PGMPOOLKIND_32BIT_PT_FOR_PHYS,
    /** Shw: 32-bit page table;     Gst: 32-bit page table. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT,
    /** Shw: 32-bit page table;     Gst: 4MB page. */
    PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;        Gst: no paging  */
    PGMPOOLKIND_PAE_PT_FOR_PHYS,
    /** Shw: PAE page table;        Gst: 32-bit page table. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_PT,
    /** Shw: PAE page table;        Gst: Half of a 4MB page. */
    PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB,
    /** Shw: PAE page table;        Gst: PAE page table. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_PT,
    /** Shw: PAE page table;        Gst: 2MB page. */
    PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

    /** Shw: 32-bit page directory. Gst: 32-bit page directory. */
    PGMPOOLKIND_32BIT_PD,
    /** Shw: 32-bit page directory. Gst: no paging. */
    PGMPOOLKIND_32BIT_PD_PHYS,
    /** Shw: PAE page directory 0;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD,
    /** Shw: PAE page directory 1;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD,
    /** Shw: PAE page directory 2;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD,
    /** Shw: PAE page directory 3;  Gst: 32-bit page directory. */
    PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
    /** Shw: PAE page directory;    Gst: PAE page directory. */
    PGMPOOLKIND_PAE_PD_FOR_PAE_PD,
    /** Shw: PAE page directory;    Gst: no paging. */
    PGMPOOLKIND_PAE_PD_PHYS,

    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst 32 bits paging. */
    PGMPOOLKIND_PAE_PDPT_FOR_32BIT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst PAE PDPT. */
    PGMPOOLKIND_PAE_PDPT,
    /** Shw: PAE page directory pointer table (legacy, 4 entries);  Gst: no paging. */
    PGMPOOLKIND_PAE_PDPT_PHYS,

    /** Shw: 64-bit page directory pointer table;   Gst: 64-bit page directory pointer table. */
    PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT,
    /** Shw: 64-bit page directory pointer table;   Gst: no paging  */
    PGMPOOLKIND_64BIT_PDPT_FOR_PHYS,
    /** Shw: 64-bit page directory table;           Gst: 64-bit page directory table. */
    PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD,
    /** Shw: 64-bit page directory table;           Gst: no paging  */
    PGMPOOLKIND_64BIT_PD_FOR_PHYS, /* 22 */

    /** Shw: 64-bit PML4;           Gst: 64-bit PML4. */
    PGMPOOLKIND_64BIT_PML4,

    /** Shw: EPT page directory pointer table;  Gst: no paging  */
    PGMPOOLKIND_EPT_PDPT_FOR_PHYS,
    /** Shw: EPT page directory table;          Gst: no paging  */
    PGMPOOLKIND_EPT_PD_FOR_PHYS,
    /** Shw: EPT page table;                    Gst: no paging  */
    PGMPOOLKIND_EPT_PT_FOR_PHYS,

    /** Shw: Root Nested paging table. */
    PGMPOOLKIND_ROOT_NESTED,

    /** The last valid entry. */
    PGMPOOLKIND_LAST = PGMPOOLKIND_ROOT_NESTED
} PGMPOOLKIND;
1747
/**
 * The access attributes of the page; only applies to big pages.
 */
typedef enum
{
    PGMPOOLACCESS_DONTCARE = 0,     /**< Access attributes are irrelevant / not tracked. */
    PGMPOOLACCESS_USER_RW,          /**< User-mode, read/write. */
    PGMPOOLACCESS_USER_R,           /**< User-mode, read-only. */
    PGMPOOLACCESS_USER_RW_NX,       /**< User-mode, read/write, no-execute (NX). */
    PGMPOOLACCESS_USER_R_NX,        /**< User-mode, read-only, no-execute (NX). */
    PGMPOOLACCESS_SUPERVISOR_RW,    /**< Supervisor-mode, read/write. */
    PGMPOOLACCESS_SUPERVISOR_R,     /**< Supervisor-mode, read-only. */
    PGMPOOLACCESS_SUPERVISOR_RW_NX, /**< Supervisor-mode, read/write, no-execute (NX). */
    PGMPOOLACCESS_SUPERVISOR_R_NX   /**< Supervisor-mode, read-only, no-execute (NX). */
} PGMPOOLACCESS;
1763
/**
 * The tracking data for a page in the pool.
 *
 * Note: member order and sizes are layout sensitive; see the
 * AssertCompileMemberAlignment checks on PGMPOOL further down.
 */
typedef struct PGMPOOLPAGE
{
    /** AVL node code with the (R3) physical address of this page. */
    AVLOHCPHYSNODECORE  Core;
    /** Pointer to the R3 mapping of the page. */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    R3PTRTYPE(void *)   pvPageR3;
#else
    R3R0PTRTYPE(void *) pvPageR3;
#endif
    /** The guest physical address. */
#if HC_ARCH_BITS == 32 && GC_ARCH_BITS == 64
    uint32_t            Alignment0;     /**< Keeps GCPhys 64-bit aligned on 32-bit hosts. */
#endif
    RTGCPHYS            GCPhys;

    /** @name Access handler statistics used to determine whether the guest is
     *        (re)initializing a page table.
     * @{ */
    /** RIP of the last access handler invocation for this page. */
    RTGCPTR             pvLastAccessHandlerRip;
    /** Fault address of the last access handler invocation for this page. */
    RTGCPTR             pvLastAccessHandlerFault;
    /** Counter associated with the RIP/fault pair above.
     * NOTE(review): exact increment/reset policy lives in the access handler
     * code, not visible here — confirm there. */
    uint64_t            cLastAccessHandlerCount;
    /** @} */

    /** The kind of page we're shadowing. (This is really a PGMPOOLKIND enum.) */
    uint8_t             enmKind;
    /** The subkind of page we're shadowing. (This is really a PGMPOOLACCESS enum.) */
    uint8_t             enmAccess;
    /** The index of this page. */
    uint16_t            idx;
    /** The next entry in the list this page currently resides in.
     * It's either in the free list or in the GCPhys hash. */
    uint16_t            iNext;
    /** Head of the user chain. NIL_PGMPOOL_USER_INDEX if not currently in use. */
    uint16_t            iUserHead;
    /** The number of present entries. */
    uint16_t            cPresent;
    /** The first entry in the table which is present. */
    uint16_t            iFirstPresent;
    /** The number of modifications to the monitored page. */
    uint16_t            cModifications;
    /** The next modified page. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iModifiedNext;
    /** The previous modified page. NIL_PGMPOOL_IDX if head. */
    uint16_t            iModifiedPrev;
    /** The next page sharing access handler. NIL_PGMPOOL_IDX if tail. */
    uint16_t            iMonitoredNext;
    /** The previous page sharing access handler. NIL_PGMPOOL_IDX if head. */
    uint16_t            iMonitoredPrev;
    /** The next page in the age list. */
    uint16_t            iAgeNext;
    /** The previous page in the age list. */
    uint16_t            iAgePrev;
    /** Used to indicate that the page is zeroed. */
    bool                fZeroed;
    /** Used to indicate that a PT has non-global entries. */
    bool                fSeenNonGlobal;
    /** Used to indicate that we're monitoring writes to the guest page. */
    bool                fMonitored;
    /** Used to indicate that the page is in the cache (e.g. in the GCPhys hash).
     * (All pages are in the age list.) */
    bool                fCached;
    /** This is used by the R3 access handlers when invoked by an async thread.
     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    bool volatile       fReusedFlushPending;
    /** Used to mark the page as dirty (write monitoring is temporarily off). */
    bool                fDirty;

    /** Used to indicate that this page can't be flushed
     * (important for CR3 root pages and shadow PAE PD pages). */
    uint32_t            cLocked;
    /** Slot index used by the dirty page tracking while fDirty is set
     * (presumably into PGMPOOL::aIdxDirtyPages — verify in the pool code). */
    uint32_t            idxDirty;
    /** Fault address recorded by the dirty page tracking
     * (NOTE(review): set by the dirty-page code, not visible here). */
    RTGCPTR             pvDirtyFault;
} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
/** Pointer to a const pool page. */
typedef PGMPOOLPAGE const *PCPGMPOOLPAGE;
1839
1840
/** The hash table size (must be a power of two for the mask below). */
# define PGMPOOL_HASH_SIZE      0x40
/** The hash function.
 * Hashes on the guest page frame number; the power-of-two size makes the
 * AND-mask yield an index in [0, PGMPOOL_HASH_SIZE - 1]. */
# define PGMPOOL_HASH(GCPhys)   ( ((GCPhys) >> PAGE_SHIFT) & (PGMPOOL_HASH_SIZE - 1) )
1845
1846
/**
 * The shadow page pool instance data.
 *
 * It's all one big allocation made at init time, except for the
 * pages that is. The user nodes follow immediately after the
 * page structures.
 */
typedef struct PGMPOOL
{
    /** The VM handle - R3 Ptr. */
    PVMR3                       pVMR3;
    /** The VM handle - R0 Ptr. */
    PVMR0                       pVMR0;
    /** The VM handle - RC Ptr. */
    PVMRC                       pVMRC;
    /** The max pool size. This includes the special IDs. */
    uint16_t                    cMaxPages;
    /** The current pool size. */
    uint16_t                    cCurPages;
    /** The head of the free page list. */
    uint16_t                    iFreeHead;
    /** Padding. */
    uint16_t                    u16Padding;
    /** Head of the chain of free user nodes. */
    uint16_t                    iUserFreeHead;
    /** The number of user nodes we've allocated. */
    uint16_t                    cMaxUsers;
    /** The number of present page table entries in the entire pool. */
    uint32_t                    cPresent;
    /** Pointer to the array of user nodes - RC pointer. */
    RCPTRTYPE(PPGMPOOLUSER)     paUsersRC;
    /** Pointer to the array of user nodes - R3 pointer. */
    R3PTRTYPE(PPGMPOOLUSER)     paUsersR3;
    /** Pointer to the array of user nodes - R0 pointer. */
    R0PTRTYPE(PPGMPOOLUSER)     paUsersR0;
    /** Head of the chain of free phys ext nodes. */
    uint16_t                    iPhysExtFreeHead;
    /** The number of physical xref extent nodes we've allocated. */
    uint16_t                    cMaxPhysExts;
    /** Pointer to the array of physical xref extent nodes - RC pointer. */
    RCPTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsRC;
    /** Pointer to the array of physical xref extent nodes - R3 pointer. */
    R3PTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsR3;
    /** Pointer to the array of physical xref extent nodes - R0 pointer. */
    R0PTRTYPE(PPGMPOOLPHYSEXT)  paPhysExtsR0;
    /** Hash table for GCPhys addresses. */
    uint16_t                    aiHash[PGMPOOL_HASH_SIZE];
    /** The head of the age list. */
    uint16_t                    iAgeHead;
    /** The tail of the age list. */
    uint16_t                    iAgeTail;
    /** Set if the cache is enabled. */
    bool                        fCacheEnabled;
    /** Alignment padding. */
    bool                        afPadding1[3];
    /** Head of the list of modified pages. */
    uint16_t                    iModifiedHead;
    /** The current number of modified pages. */
    uint16_t                    cModifiedPages;
    /** Access handler, RC. */
    RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnAccessHandlerRC;
    /** Access handler, R0. */
    R0PTRTYPE(PFNPGMR0PHYSHANDLER)  pfnAccessHandlerR0;
    /** Access handler, R3. */
    R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnAccessHandlerR3;
    /** The access handler description (R3 ptr). */
    R3PTRTYPE(const char *)     pszAccessHandler;
# if HC_ARCH_BITS == 32
    /** Alignment padding. */
    uint32_t                    u32Padding2;
# endif
    /** Next available slot (index into aIdxDirtyPages). */
    uint32_t                    idxFreeDirtyPage;
    /** Number of active dirty pages. */
    uint32_t                    cDirtyPages;
    /** Array of current dirty pgm pool page indices. */
    uint16_t                    aIdxDirtyPages[16];
    /** Per dirty-page slot: 512 qwords (one full page worth of PTEs).
     * NOTE(review): presumably a cached copy of the guest page table content
     * used by the dirty page checking — confirm in the pool code. */
    uint64_t                    aDirtyPages[16][512];
    /** The number of pages currently in use. */
    uint16_t                    cUsedPages;
#ifdef VBOX_WITH_STATISTICS
    /** The high water mark for cUsedPages. */
    uint16_t                    cUsedPagesHigh;
    uint32_t                    Alignment1;     /**< Align the next member on a 64-bit boundary. */
    /** Profiling pgmPoolAlloc(). */
    STAMPROFILEADV              StatAlloc;
    /** Profiling pgmR3PoolClearDoIt(). */
    STAMPROFILE                 StatClearAll;
    /** Profiling pgmR3PoolReset(). */
    STAMPROFILE                 StatR3Reset;
    /** Profiling pgmPoolFlushPage(). */
    STAMPROFILE                 StatFlushPage;
    /** Profiling pgmPoolFree(). */
    STAMPROFILE                 StatFree;
    /** Counting explicit flushes by PGMPoolFlushPage(). */
    STAMCOUNTER                 StatForceFlushPage;
    /** Counting explicit flushes of dirty pages by PGMPoolFlushPage(). */
    STAMCOUNTER                 StatForceFlushDirtyPage;
    /** Counting flushes for reused pages. */
    STAMCOUNTER                 StatForceFlushReused;
    /** Profiling time spent zeroing pages. */
    STAMPROFILE                 StatZeroPage;
    /** Profiling of pgmPoolTrackDeref. */
    STAMPROFILE                 StatTrackDeref;
    /** Profiling pgmTrackFlushGCPhysPT. */
    STAMPROFILE                 StatTrackFlushGCPhysPT;
    /** Profiling pgmTrackFlushGCPhysPTs. */
    STAMPROFILE                 StatTrackFlushGCPhysPTs;
    /** Profiling pgmTrackFlushGCPhysPTsSlow. */
    STAMPROFILE                 StatTrackFlushGCPhysPTsSlow;
    /** Number of times we've been out of user records. */
    STAMCOUNTER                 StatTrackFreeUpOneUser;
    /** Nr of flushed entries. */
    STAMCOUNTER                 StatTrackFlushEntry;
    /** Nr of updated entries. */
    STAMCOUNTER                 StatTrackFlushEntryKeep;
    /** Profiling deref activity related to tracking GC physical pages. */
    STAMPROFILE                 StatTrackDerefGCPhys;
    /** Number of linear searches for a HCPhys in the ram ranges. */
    STAMCOUNTER                 StatTrackLinearRamSearches;
    /** The number of failing pgmPoolTrackPhysExtAlloc calls. */
    STAMCOUNTER                 StamTrackPhysExtAllocFailures;
    /** Profiling the RC/R0 access handler. */
    STAMPROFILE                 StatMonitorRZ;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER                 StatMonitorRZEmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the RC/R0 access handler. */
    STAMPROFILE                 StatMonitorRZFlushPage;
    /** Times we've detected a page table reinit. */
    STAMCOUNTER                 StatMonitorRZFlushReinit;
    /** Counting flushes for pages that are modified too often. */
    STAMCOUNTER                 StatMonitorRZFlushModOverflow;
    /** Times we've detected fork(). */
    STAMCOUNTER                 StatMonitorRZFork;
    /** Profiling the RC/R0 access we've handled (except REP STOSD). */
    STAMPROFILE                 StatMonitorRZHandled;
    /** Times we've failed interpreting a patch code instruction. */
    STAMCOUNTER                 StatMonitorRZIntrFailPatch1;
    /** Times we've failed interpreting a patch code instruction during flushing. */
    STAMCOUNTER                 StatMonitorRZIntrFailPatch2;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER                 StatMonitorRZRepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE                 StatMonitorRZRepStosd;
    /** Nr of handled PT faults. */
    STAMCOUNTER                 StatMonitorRZFaultPT;
    /** Nr of handled PD faults. */
    STAMCOUNTER                 StatMonitorRZFaultPD;
    /** Nr of handled PDPT faults. */
    STAMCOUNTER                 StatMonitorRZFaultPDPT;
    /** Nr of handled PML4 faults. */
    STAMCOUNTER                 StatMonitorRZFaultPML4;

    /** Profiling the R3 access handler. */
    STAMPROFILE                 StatMonitorR3;
    /** Times we've failed interpreting the instruction. */
    STAMCOUNTER                 StatMonitorR3EmulateInstr;
    /** Profiling the pgmPoolFlushPage calls made from the R3 access handler. */
    STAMPROFILE                 StatMonitorR3FlushPage;
    /** Times we've detected a page table reinit. */
    STAMCOUNTER                 StatMonitorR3FlushReinit;
    /** Counting flushes for pages that are modified too often. */
    STAMCOUNTER                 StatMonitorR3FlushModOverflow;
    /** Times we've detected fork(). */
    STAMCOUNTER                 StatMonitorR3Fork;
    /** Profiling the R3 access we've handled (except REP STOSD). */
    STAMPROFILE                 StatMonitorR3Handled;
    /** The number of times we've seen rep prefixes we can't handle. */
    STAMCOUNTER                 StatMonitorR3RepPrefix;
    /** Profiling the REP STOSD cases we've handled. */
    STAMPROFILE                 StatMonitorR3RepStosd;
    /** Nr of handled PT faults. */
    STAMCOUNTER                 StatMonitorR3FaultPT;
    /** Nr of handled PD faults. */
    STAMCOUNTER                 StatMonitorR3FaultPD;
    /** Nr of handled PDPT faults. */
    STAMCOUNTER                 StatMonitorR3FaultPDPT;
    /** Nr of handled PML4 faults. */
    STAMCOUNTER                 StatMonitorR3FaultPML4;
    /** The number of times we're called in an async thread and need to flush. */
    STAMCOUNTER                 StatMonitorR3Async;
    /** Times we've called pgmPoolResetDirtyPages (and there were dirty pages). */
    STAMCOUNTER                 StatResetDirtyPages;
    /** Times we've called pgmPoolAddDirtyPage. */
    STAMCOUNTER                 StatDirtyPage;
    /** Times we've had to flush duplicates for dirty page management. */
    STAMCOUNTER                 StatDirtyPageDupFlush;
    /** Times we've had to flush because of overflow. */
    STAMCOUNTER                 StatDirtyPageOverFlowFlush;

    /** The high water mark for cModifiedPages. */
    uint16_t                    cModifiedPagesHigh;
    uint16_t                    Alignment2[3];  /**< Align the next member on a 64-bit boundary. */

    /** The number of cache hits. */
    STAMCOUNTER                 StatCacheHits;
    /** The number of cache misses. */
    STAMCOUNTER                 StatCacheMisses;
    /** The number of times we've got a conflict of 'kind' in the cache. */
    STAMCOUNTER                 StatCacheKindMismatches;
    /** Number of times we've been out of pages. */
    STAMCOUNTER                 StatCacheFreeUpOne;
    /** The number of cacheable allocations. */
    STAMCOUNTER                 StatCacheCacheable;
    /** The number of uncacheable allocations. */
    STAMCOUNTER                 StatCacheUncacheable;
#else
    uint32_t                    Alignment3;     /**< Align the next member on a 64-bit boundary. */
#endif
    /** The AVL tree for looking up a page by its HC physical address. */
    AVLOHCPHYSTREE              HCPhysTree;
    uint32_t                    Alignment4;     /**< Align the next member on a 64-bit boundary. */
    /** Array of pages. (cMaxPages in length)
     * The Id is the index into this array.
     */
    PGMPOOLPAGE                 aPages[PGMPOOL_IDX_FIRST];
} PGMPOOL, *PPGMPOOL, **PPPGMPOOL;
AssertCompileMemberAlignment(PGMPOOL, iModifiedHead, 8);
AssertCompileMemberAlignment(PGMPOOL, aDirtyPages, 8);
AssertCompileMemberAlignment(PGMPOOL, cUsedPages, 8);
#ifdef VBOX_WITH_STATISTICS
AssertCompileMemberAlignment(PGMPOOL, StatAlloc, 8);
#endif
AssertCompileMemberAlignment(PGMPOOL, aPages, 8);
2071
2072
/** @def PGMPOOL_PAGE_2_PTR
 * Maps a pool page into the current context.
 *
 * @returns Pointer to the mapping of the pool page in the current context.
 * @param   pVM     The VM handle.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageInlined(&(pVM)->pgm.s, (pPage))
#elif defined(VBOX_STRICT)
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  pgmPoolMapPageStrict(pPage)
/** Strict-build variant: asserts that the R3 mapping is actually present
 * before returning it. */
DECLINLINE(void *) pgmPoolMapPageStrict(PPGMPOOLPAGE pPage)
{
    Assert(pPage && pPage->pvPageR3);
    return pPage->pvPageR3;
}
#else
# define PGMPOOL_PAGE_2_PTR(pVM, pPage)  ((pPage)->pvPageR3)
#endif
2098
/** @def PGMPOOL_PAGE_2_PTR_BY_PGM
 * Maps a pool page into the current context, taking a PGM instance pointer
 * instead of a VM handle.
 *
 * @returns Pointer to the mapping of the pool page in the current context.
 * @param   pPGM    Pointer to the PGM instance data.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)  pgmPoolMapPageInlined(pPGM, (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)  pgmPoolMapPageInlined(pPGM, (pPage))
#else
# define PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPage)  PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pPage)
#endif
2117
/** @def PGMPOOL_PAGE_2_PTR_BY_PGMCPU
 * Maps a pool page into the current context, taking a PGMCPU instance pointer
 * instead of a VM handle.
 *
 * @returns Pointer to the mapping of the pool page in the current context.
 * @param   pPGM    Pointer to the PGMCPU instance data.
 * @param   pPage   The pool page.
 *
 * @remark  In RC this uses PGMGCDynMapHCPage(), so it will consume one of the
 *          small page window entries employed by that function. Be careful.
 * @remark  There is no need to assert on the result.
 */
#if defined(IN_RC)
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage)  pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage)  pgmPoolMapPageInlined(PGMCPU2PGM(pPGM), (pPage))
#else
# define PGMPOOL_PAGE_2_PTR_BY_PGMCPU(pPGM, pPage)  PGMPOOL_PAGE_2_PTR(PGMCPU2VM(pPGM), pPage)
#endif
2136
2137
/** @name Per guest page tracking data.
 * This is currently as a 16-bit word in the PGMPAGE structure, the idea though
 * is to use more bits for it and split it up later on. But for now we'll play
 * safe and change as little as possible.
 *
 * The 16-bit word has two parts:
 *
 * The first 14 bits form the @a idx field. It is either the index of a page in
 * the shadow page pool, or an index into the extent list.
 *
 * The 2 topmost bits make up the @a cRefs field, which counts the number of
 * shadow page pool references to the page. If cRefs equals
 * PGMPOOL_CREFS_PHYSEXT, then the @a idx field is an index into the extent
 * (misnomer) table and not the shadow page pool.
 *
 * See PGM_PAGE_GET_TRACKING and PGM_PAGE_SET_TRACKING for how to get and set
 * the 16-bit word.
 *
 * @{ */
/** The shift count for getting to the cRefs part. */
#define PGMPOOL_TD_CREFS_SHIFT          14
/** The mask applied after shifting the tracking data down by
 * PGMPOOL_TD_CREFS_SHIFT. */
#define PGMPOOL_TD_CREFS_MASK           0x3
/** The cRefs value used to indicate that the idx is the head of a
 * physical cross reference list. */
#define PGMPOOL_TD_CREFS_PHYSEXT        PGMPOOL_TD_CREFS_MASK
/** The shift used to get idx. */
#define PGMPOOL_TD_IDX_SHIFT            0
/** The mask applied to the idx after shifting down by PGMPOOL_TD_IDX_SHIFT. */
#define PGMPOOL_TD_IDX_MASK             0x3fff
/** The idx value when we're out of PGMPOOLPHYSEXT entries or/and there are
 * simply too many mappings of this page. */
#define PGMPOOL_TD_IDX_OVERFLOWED       PGMPOOL_TD_IDX_MASK

/** @def PGMPOOL_TD_MAKE
 * Makes a 16-bit tracking data word.
 *
 * @returns tracking data.
 * @param   cRefs   The @a cRefs field. Must be within bounds!
 * @param   idx     The @a idx field. Must also be within bounds! */
#define PGMPOOL_TD_MAKE(cRefs, idx)     ( ((cRefs) << PGMPOOL_TD_CREFS_SHIFT) | (idx) )

/** @def PGMPOOL_TD_GET_CREFS
 * Get the @a cRefs field from a tracking data word.
 *
 * @returns The @a cRefs field
 * @param   u16     The tracking data word. */
#define PGMPOOL_TD_GET_CREFS(u16)       ( ((u16) >> PGMPOOL_TD_CREFS_SHIFT) & PGMPOOL_TD_CREFS_MASK )

/** @def PGMPOOL_TD_GET_IDX
 * Get the @a idx field from a tracking data word.
 *
 * @returns The @a idx field
 * @param   u16     The tracking data word. */
#define PGMPOOL_TD_GET_IDX(u16)         ( ((u16) >> PGMPOOL_TD_IDX_SHIFT) & PGMPOOL_TD_IDX_MASK )
/** @} */
2195
2196
/**
 * Collection of lookup trees for access handlers.
 *
 * The trees use self-relative offsets as pointers, so all the data, including
 * the root pointers, must live in the heap for HC and GC to share the same
 * layout.
 */
typedef struct PGMTREES
{
    /** Physical access handlers (AVL range+offsetptr tree). */
    AVLROGCPHYSTREE                 PhysHandlers;
    /** Virtual access handlers (AVL range + GC ptr tree). */
    AVLROGCPTRTREE                  VirtHandlers;
    /** Virtual access handlers (Phys range AVL range + offsetptr tree). */
    AVLROGCPHYSTREE                 PhysToVirtHandlers;
    /** Virtual access handlers for the hypervisor (AVL range + GC ptr tree). */
    AVLROGCPTRTREE                  HyperVirtHandlers;
} PGMTREES;
/** Pointer to PGM trees. */
typedef PGMTREES *PPGMTREES;
2215
2216
/** @name Paging mode macros
 *
 * These macros paste together context- and mode-specific function names
 * (and their string forms for symbol resolution).  PGM_CTX selects the
 * context suffix (R3/R0/RC), PGM_GST_* / PGM_SHW_* / PGM_BTH_* select the
 * guest, shadow, and combined (both) paging mode variants respectively.
 * @{ */
#ifdef IN_RC
# define PGM_CTX(a,b)                   a##RC##b
/* Note: the string form deliberately says "GC" - the legacy name for the
 * raw-mode (RC) context - to match the exported symbol names. */
# define PGM_CTX_STR(a,b)               a "GC" b
# define PGM_CTX_DECL(type)             VMMRCDECL(type)
#else
# ifdef IN_RING3
#  define PGM_CTX(a,b)                  a##R3##b
#  define PGM_CTX_STR(a,b)              a "R3" b
#  define PGM_CTX_DECL(type)            DECLCALLBACK(type)
# else
#  define PGM_CTX(a,b)                  a##R0##b
#  define PGM_CTX_STR(a,b)              a "R0" b
#  define PGM_CTX_DECL(type)            VMMDECL(type)
# endif
#endif

/* Guest paging mode name builders. */
#define PGM_GST_NAME_REAL(name)         PGM_CTX(pgm,GstReal##name)
#define PGM_GST_NAME_RC_REAL_STR(name)  "pgmRCGstReal" #name
#define PGM_GST_NAME_R0_REAL_STR(name)  "pgmR0GstReal" #name
#define PGM_GST_NAME_PROT(name)         PGM_CTX(pgm,GstProt##name)
#define PGM_GST_NAME_RC_PROT_STR(name)  "pgmRCGstProt" #name
#define PGM_GST_NAME_R0_PROT_STR(name)  "pgmR0GstProt" #name
#define PGM_GST_NAME_32BIT(name)        PGM_CTX(pgm,Gst32Bit##name)
#define PGM_GST_NAME_RC_32BIT_STR(name) "pgmRCGst32Bit" #name
#define PGM_GST_NAME_R0_32BIT_STR(name) "pgmR0Gst32Bit" #name
#define PGM_GST_NAME_PAE(name)          PGM_CTX(pgm,GstPAE##name)
#define PGM_GST_NAME_RC_PAE_STR(name)   "pgmRCGstPAE" #name
#define PGM_GST_NAME_R0_PAE_STR(name)   "pgmR0GstPAE" #name
#define PGM_GST_NAME_AMD64(name)        PGM_CTX(pgm,GstAMD64##name)
#define PGM_GST_NAME_RC_AMD64_STR(name) "pgmRCGstAMD64" #name
#define PGM_GST_NAME_R0_AMD64_STR(name) "pgmR0GstAMD64" #name
#define PGM_GST_PFN(name, pVCpu)        ((pVCpu)->pgm.s.PGM_CTX(pfn,Gst##name))
#define PGM_GST_DECL(type, name)        PGM_CTX_DECL(type) PGM_GST_NAME(name)

/* Shadow paging mode name builders. */
#define PGM_SHW_NAME_32BIT(name)        PGM_CTX(pgm,Shw32Bit##name)
#define PGM_SHW_NAME_RC_32BIT_STR(name) "pgmRCShw32Bit" #name
#define PGM_SHW_NAME_R0_32BIT_STR(name) "pgmR0Shw32Bit" #name
#define PGM_SHW_NAME_PAE(name)          PGM_CTX(pgm,ShwPAE##name)
#define PGM_SHW_NAME_RC_PAE_STR(name)   "pgmRCShwPAE" #name
#define PGM_SHW_NAME_R0_PAE_STR(name)   "pgmR0ShwPAE" #name
#define PGM_SHW_NAME_AMD64(name)        PGM_CTX(pgm,ShwAMD64##name)
#define PGM_SHW_NAME_RC_AMD64_STR(name) "pgmRCShwAMD64" #name
#define PGM_SHW_NAME_R0_AMD64_STR(name) "pgmR0ShwAMD64" #name
#define PGM_SHW_NAME_NESTED(name)       PGM_CTX(pgm,ShwNested##name)
#define PGM_SHW_NAME_RC_NESTED_STR(name) "pgmRCShwNested" #name
#define PGM_SHW_NAME_R0_NESTED_STR(name) "pgmR0ShwNested" #name
#define PGM_SHW_NAME_EPT(name)          PGM_CTX(pgm,ShwEPT##name)
#define PGM_SHW_NAME_RC_EPT_STR(name)   "pgmRCShwEPT" #name
#define PGM_SHW_NAME_R0_EPT_STR(name)   "pgmR0ShwEPT" #name
#define PGM_SHW_DECL(type, name)        PGM_CTX_DECL(type) PGM_SHW_NAME(name)
#define PGM_SHW_PFN(name, pVCpu)        ((pVCpu)->pgm.s.PGM_CTX(pfn,Shw##name))

/* Shw_Gst - combined shadow+guest mode name builders. */
#define PGM_BTH_NAME_32BIT_REAL(name)   PGM_CTX(pgm,Bth32BitReal##name)
#define PGM_BTH_NAME_32BIT_PROT(name)   PGM_CTX(pgm,Bth32BitProt##name)
#define PGM_BTH_NAME_32BIT_32BIT(name)  PGM_CTX(pgm,Bth32Bit32Bit##name)
#define PGM_BTH_NAME_PAE_REAL(name)     PGM_CTX(pgm,BthPAEReal##name)
#define PGM_BTH_NAME_PAE_PROT(name)     PGM_CTX(pgm,BthPAEProt##name)
#define PGM_BTH_NAME_PAE_32BIT(name)    PGM_CTX(pgm,BthPAE32Bit##name)
#define PGM_BTH_NAME_PAE_PAE(name)      PGM_CTX(pgm,BthPAEPAE##name)
#define PGM_BTH_NAME_AMD64_PROT(name)   PGM_CTX(pgm,BthAMD64Prot##name)
#define PGM_BTH_NAME_AMD64_AMD64(name)  PGM_CTX(pgm,BthAMD64AMD64##name)
#define PGM_BTH_NAME_NESTED_REAL(name)  PGM_CTX(pgm,BthNestedReal##name)
#define PGM_BTH_NAME_NESTED_PROT(name)  PGM_CTX(pgm,BthNestedProt##name)
#define PGM_BTH_NAME_NESTED_32BIT(name) PGM_CTX(pgm,BthNested32Bit##name)
#define PGM_BTH_NAME_NESTED_PAE(name)   PGM_CTX(pgm,BthNestedPAE##name)
#define PGM_BTH_NAME_NESTED_AMD64(name) PGM_CTX(pgm,BthNestedAMD64##name)
#define PGM_BTH_NAME_EPT_REAL(name)     PGM_CTX(pgm,BthEPTReal##name)
#define PGM_BTH_NAME_EPT_PROT(name)     PGM_CTX(pgm,BthEPTProt##name)
#define PGM_BTH_NAME_EPT_32BIT(name)    PGM_CTX(pgm,BthEPT32Bit##name)
#define PGM_BTH_NAME_EPT_PAE(name)      PGM_CTX(pgm,BthEPTPAE##name)
#define PGM_BTH_NAME_EPT_AMD64(name)    PGM_CTX(pgm,BthEPTAMD64##name)

#define PGM_BTH_NAME_RC_32BIT_REAL_STR(name)    "pgmRCBth32BitReal" #name
#define PGM_BTH_NAME_RC_32BIT_PROT_STR(name)    "pgmRCBth32BitProt" #name
#define PGM_BTH_NAME_RC_32BIT_32BIT_STR(name)   "pgmRCBth32Bit32Bit" #name
#define PGM_BTH_NAME_RC_PAE_REAL_STR(name)      "pgmRCBthPAEReal" #name
#define PGM_BTH_NAME_RC_PAE_PROT_STR(name)      "pgmRCBthPAEProt" #name
#define PGM_BTH_NAME_RC_PAE_32BIT_STR(name)     "pgmRCBthPAE32Bit" #name
#define PGM_BTH_NAME_RC_PAE_PAE_STR(name)       "pgmRCBthPAEPAE" #name
#define PGM_BTH_NAME_RC_AMD64_AMD64_STR(name)   "pgmRCBthAMD64AMD64" #name
#define PGM_BTH_NAME_RC_NESTED_REAL_STR(name)   "pgmRCBthNestedReal" #name
#define PGM_BTH_NAME_RC_NESTED_PROT_STR(name)   "pgmRCBthNestedProt" #name
#define PGM_BTH_NAME_RC_NESTED_32BIT_STR(name)  "pgmRCBthNested32Bit" #name
#define PGM_BTH_NAME_RC_NESTED_PAE_STR(name)    "pgmRCBthNestedPAE" #name
#define PGM_BTH_NAME_RC_NESTED_AMD64_STR(name)  "pgmRCBthNestedAMD64" #name
#define PGM_BTH_NAME_RC_EPT_REAL_STR(name)      "pgmRCBthEPTReal" #name
#define PGM_BTH_NAME_RC_EPT_PROT_STR(name)      "pgmRCBthEPTProt" #name
#define PGM_BTH_NAME_RC_EPT_32BIT_STR(name)     "pgmRCBthEPT32Bit" #name
#define PGM_BTH_NAME_RC_EPT_PAE_STR(name)       "pgmRCBthEPTPAE" #name
#define PGM_BTH_NAME_RC_EPT_AMD64_STR(name)     "pgmRCBthEPTAMD64" #name
#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name)    "pgmR0Bth32BitReal" #name
#define PGM_BTH_NAME_R0_32BIT_PROT_STR(name)    "pgmR0Bth32BitProt" #name
#define PGM_BTH_NAME_R0_32BIT_32BIT_STR(name)   "pgmR0Bth32Bit32Bit" #name
#define PGM_BTH_NAME_R0_PAE_REAL_STR(name)      "pgmR0BthPAEReal" #name
#define PGM_BTH_NAME_R0_PAE_PROT_STR(name)      "pgmR0BthPAEProt" #name
#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name)     "pgmR0BthPAE32Bit" #name
#define PGM_BTH_NAME_R0_PAE_PAE_STR(name)       "pgmR0BthPAEPAE" #name
#define PGM_BTH_NAME_R0_AMD64_PROT_STR(name)    "pgmR0BthAMD64Prot" #name
#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)   "pgmR0BthAMD64AMD64" #name
#define PGM_BTH_NAME_R0_NESTED_REAL_STR(name)   "pgmR0BthNestedReal" #name
#define PGM_BTH_NAME_R0_NESTED_PROT_STR(name)   "pgmR0BthNestedProt" #name
#define PGM_BTH_NAME_R0_NESTED_32BIT_STR(name)  "pgmR0BthNested32Bit" #name
#define PGM_BTH_NAME_R0_NESTED_PAE_STR(name)    "pgmR0BthNestedPAE" #name
#define PGM_BTH_NAME_R0_NESTED_AMD64_STR(name)  "pgmR0BthNestedAMD64" #name
#define PGM_BTH_NAME_R0_EPT_REAL_STR(name)      "pgmR0BthEPTReal" #name
#define PGM_BTH_NAME_R0_EPT_PROT_STR(name)      "pgmR0BthEPTProt" #name
#define PGM_BTH_NAME_R0_EPT_32BIT_STR(name)     "pgmR0BthEPT32Bit" #name
#define PGM_BTH_NAME_R0_EPT_PAE_STR(name)       "pgmR0BthEPTPAE" #name
#define PGM_BTH_NAME_R0_EPT_AMD64_STR(name)     "pgmR0BthEPTAMD64" #name

#define PGM_BTH_DECL(type, name)        PGM_CTX_DECL(type) PGM_BTH_NAME(name)
#define PGM_BTH_PFN(name, pVCpu)        ((pVCpu)->pgm.s.PGM_CTX(pfn,Bth##name))
/** @} */
2333
/**
 * Data for each paging mode.
 *
 * Holds the per-mode function pointer sets (shadow, guest, and combined
 * "both") for all three contexts (R3, R0, RC).
 */
typedef struct PGMMODEDATA
{
    /** The guest mode type. */
    uint32_t                        uGstType;
    /** The shadow mode type. */
    uint32_t                        uShwType;

    /** @name Function pointers for Shadow paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int,       pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    DECLR3CALLBACKMEMBER(int,       pfnR3ShwExit,(PVMCPU pVCpu));
    DECLR3CALLBACKMEMBER(int,       pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR3CALLBACKMEMBER(int,       pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));

    DECLRCCALLBACKMEMBER(int,       pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLRCCALLBACKMEMBER(int,       pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));

    DECLR0CALLBACKMEMBER(int,       pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
    DECLR0CALLBACKMEMBER(int,       pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    /** @} */

    /** @name Function pointers for Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int,       pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    DECLR3CALLBACKMEMBER(int,       pfnR3GstExit,(PVMCPU pVCpu));
    DECLR3CALLBACKMEMBER(int,       pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR3CALLBACKMEMBER(int,       pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR3CALLBACKMEMBER(int,       pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    DECLRCCALLBACKMEMBER(int,       pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLRCCALLBACKMEMBER(int,       pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLRCCALLBACKMEMBER(int,       pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    DECLR0CALLBACKMEMBER(int,       pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    DECLR0CALLBACKMEMBER(int,       pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    DECLR0CALLBACKMEMBER(int,       pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
    /** @} */

    /** @name Function pointers for Both Shadow and Guest paging.
     * @{
     */
    DECLR3CALLBACKMEMBER(int,       pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
    /* no pfnR3BthTrap0eHandler */
    DECLR3CALLBACKMEMBER(int,       pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR3CALLBACKMEMBER(int,       pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR3CALLBACKMEMBER(int,       pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR3CALLBACKMEMBER(unsigned,  pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLR3CALLBACKMEMBER(int,       pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    DECLR3CALLBACKMEMBER(int,       pfnR3BthUnmapCR3,(PVMCPU pVCpu));

    DECLRCCALLBACKMEMBER(int,       pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
    DECLRCCALLBACKMEMBER(int,       pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLRCCALLBACKMEMBER(int,       pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLRCCALLBACKMEMBER(int,       pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLRCCALLBACKMEMBER(unsigned,  pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLRCCALLBACKMEMBER(int,       pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    DECLRCCALLBACKMEMBER(int,       pfnRCBthUnmapCR3,(PVMCPU pVCpu));

    DECLR0CALLBACKMEMBER(int,       pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
    DECLR0CALLBACKMEMBER(int,       pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
    DECLR0CALLBACKMEMBER(int,       pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
    DECLR0CALLBACKMEMBER(int,       pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
#ifdef VBOX_STRICT
    DECLR0CALLBACKMEMBER(unsigned,  pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
#endif
    DECLR0CALLBACKMEMBER(int,       pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
    DECLR0CALLBACKMEMBER(int,       pfnR0BthUnmapCR3,(PVMCPU pVCpu));
    /** @} */
} PGMMODEDATA, *PPGMMODEDATA;
2416
2417
2418
2419/**
2420 * Converts a PGM pointer into a VM pointer.
2421 * @returns Pointer to the VM structure the PGM is part of.
2422 * @param pPGM Pointer to PGM instance data.
2423 */
2424#define PGM2VM(pPGM) ( (PVM)((char*)pPGM - pPGM->offVM) )
2425
2426/**
2427 * PGM Data (part of VM)
2428 */
2429typedef struct PGM
2430{
2431 /** Offset to the VM structure. */
2432 RTINT offVM;
2433 /** Offset of the PGMCPU structure relative to VMCPU. */
2434 RTINT offVCpuPGM;
2435
2436 /** @cfgm{RamPreAlloc, boolean, false}
2437 * Indicates whether the base RAM should all be allocated before starting
2438 * the VM (default), or if it should be allocated when first written to.
2439 */
2440 bool fRamPreAlloc;
2441 /** Indicates whether write monitoring is currently in use.
2442 * This is used to prevent conflicts between live saving and page sharing
2443 * detection. */
2444 bool fPhysWriteMonitoringEngaged;
2445 /** Alignment padding. */
2446 bool afAlignment0[2];
2447
2448 /*
2449 * This will be redefined at least two more times before we're done, I'm sure.
2450 * The current code is only to get on with the coding.
2451 * - 2004-06-10: initial version, bird.
2452 * - 2004-07-02: 1st time, bird.
2453 * - 2004-10-18: 2nd time, bird.
2454 * - 2005-07-xx: 3rd time, bird.
2455 */
2456
2457 /** The host paging mode. (This is what SUPLib reports.) */
2458 SUPPAGINGMODE enmHostMode;
2459
2460 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
2461 RCPTRTYPE(PX86PTE) paDynPageMap32BitPTEsGC;
2462 /** Pointer to the page table entries for the dynamic page mapping area - GCPtr. */
2463 RCPTRTYPE(PX86PTEPAE) paDynPageMapPaePTEsGC;
2464
2465 /** 4 MB page mask; 32 or 36 bits depending on PSE-36 (identical for all VCPUs) */
2466 RTGCPHYS GCPhys4MBPSEMask;
2467
2468 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3.
2469 * This is sorted by physical address and contains no overlapping ranges. */
2470 R3PTRTYPE(PPGMRAMRANGE) pRamRangesR3;
2471 /** R0 pointer corresponding to PGM::pRamRangesR3. */
2472 R0PTRTYPE(PPGMRAMRANGE) pRamRangesR0;
2473 /** RC pointer corresponding to PGM::pRamRangesR3. */
2474 RCPTRTYPE(PPGMRAMRANGE) pRamRangesRC;
2475 /** Generation ID for the RAM ranges. This member is incremented everytime a RAM
2476 * range is linked or unlinked. */
2477 uint32_t volatile idRamRangesGen;
2478
2479 /** Pointer to the list of ROM ranges - for R3.
2480 * This is sorted by physical address and contains no overlapping ranges. */
2481 R3PTRTYPE(PPGMROMRANGE) pRomRangesR3;
2482 /** R0 pointer corresponding to PGM::pRomRangesR3. */
2483 R0PTRTYPE(PPGMROMRANGE) pRomRangesR0;
2484 /** RC pointer corresponding to PGM::pRomRangesR3. */
2485 RCPTRTYPE(PPGMROMRANGE) pRomRangesRC;
2486#if HC_ARCH_BITS == 64
2487 /** Alignment padding. */
2488 RTRCPTR GCPtrPadding2;
2489#endif
2490
2491 /** Pointer to the list of MMIO2 ranges - for R3.
2492 * Registration order. */
2493 R3PTRTYPE(PPGMMMIO2RANGE) pMmio2RangesR3;
2494
2495 /** PGM offset based trees - R3 Ptr. */
2496 R3PTRTYPE(PPGMTREES) pTreesR3;
2497 /** PGM offset based trees - R0 Ptr. */
2498 R0PTRTYPE(PPGMTREES) pTreesR0;
2499 /** PGM offset based trees - RC Ptr. */
2500 RCPTRTYPE(PPGMTREES) pTreesRC;
2501
2502 /** Linked list of GC mappings - for RC.
2503 * The list is sorted ascending on address.
2504 */
2505 RCPTRTYPE(PPGMMAPPING) pMappingsRC;
2506 /** Linked list of GC mappings - for HC.
2507 * The list is sorted ascending on address.
2508 */
2509 R3PTRTYPE(PPGMMAPPING) pMappingsR3;
2510 /** Linked list of GC mappings - for R0.
2511 * The list is sorted ascending on address.
2512 */
2513 R0PTRTYPE(PPGMMAPPING) pMappingsR0;
2514
2515 /** Pointer to the 5 page CR3 content mapping.
2516 * The first page is always the CR3 (in some form) while the 4 other pages
2517 * are used of the PDs in PAE mode. */
2518 RTGCPTR GCPtrCR3Mapping;
2519#if HC_ARCH_BITS == 64 && GC_ARCH_BITS == 32
2520 uint32_t u32Alignment1;
2521#endif
2522
2523 /** Indicates that PGMR3FinalizeMappings has been called and that further
2524 * PGMR3MapIntermediate calls will be rejected. */
2525 bool fFinalizedMappings;
2526 /** If set no conflict checks are required. */
2527 bool fMappingsFixed;
2528 /** If set if restored as fixed but we were unable to re-fixate at the old
2529 * location because of room or address incompatibilities. */
2530 bool fMappingsFixedRestored;
2531 /** If set, then no mappings are put into the shadow page table.
2532 * Use pgmMapAreMappingsEnabled() instead of direct access. */
2533 bool fMappingsDisabled;
2534 /** Size of fixed mapping.
2535 * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */
2536 uint32_t cbMappingFixed;
2537 /** Base address (GC) of fixed mapping.
2538 * This is valid if either fMappingsFixed or fMappingsFixedRestored is set. */
2539 RTGCPTR GCPtrMappingFixed;
2540 /** The address of the previous RAM range mapping. */
2541 RTGCPTR GCPtrPrevRamRangeMapping;
2542
2543 /** @name Intermediate Context
2544 * @{ */
2545 /** Pointer to the intermediate page directory - Normal. */
2546 R3PTRTYPE(PX86PD) pInterPD;
2547 /** Pointer to the intermedate page tables - Normal.
2548 * There are two page tables, one for the identity mapping and one for
2549 * the host context mapping (of the core code). */
2550 R3PTRTYPE(PX86PT) apInterPTs[2];
2551 /** Pointer to the intermedate page tables - PAE. */
2552 R3PTRTYPE(PX86PTPAE) apInterPaePTs[2];
2553 /** Pointer to the intermedate page directory - PAE. */
2554 R3PTRTYPE(PX86PDPAE) apInterPaePDs[4];
2555 /** Pointer to the intermedate page directory - PAE. */
2556 R3PTRTYPE(PX86PDPT) pInterPaePDPT;
2557 /** Pointer to the intermedate page-map level 4 - AMD64. */
2558 R3PTRTYPE(PX86PML4) pInterPaePML4;
2559 /** Pointer to the intermedate page directory - AMD64. */
2560 R3PTRTYPE(PX86PDPT) pInterPaePDPT64;
2561 /** The Physical Address (HC) of the intermediate Page Directory - Normal. */
2562 RTHCPHYS HCPhysInterPD;
2563 /** The Physical Address (HC) of the intermediate Page Directory Pointer Table - PAE. */
2564 RTHCPHYS HCPhysInterPaePDPT;
2565 /** The Physical Address (HC) of the intermediate Page Map Level 4 table - AMD64. */
2566 RTHCPHYS HCPhysInterPaePML4;
2567 /** @} */
2568
2569 /** Base address of the dynamic page mapping area.
2570 * The array is MM_HYPER_DYNAMIC_SIZE bytes big.
2571 */
2572 RCPTRTYPE(uint8_t *) pbDynPageMapBaseGC;
2573 /** The index of the last entry used in the dynamic page mapping area. */
2574 RTUINT iDynPageMapLast;
2575 /** Cache containing the last entries in the dynamic page mapping area.
2576 * The cache size is covering half of the mapping area. */
2577 RTHCPHYS aHCPhysDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT + 1)];
2578 /** Keep a lock counter for the full (!) mapping area. */
2579 uint32_t aLockedDynPageMapCache[MM_HYPER_DYNAMIC_SIZE >> (PAGE_SHIFT)];
2580
2581 /** The address of the ring-0 mapping cache if we're making use of it. */
2582 RTR0PTR pvR0DynMapUsed;
2583#if HC_ARCH_BITS == 32
2584 /** Alignment padding that makes the next member start on a 8 byte boundrary. */
2585 uint32_t u32Alignment2;
2586#endif
2587
2588 /** PGM critical section.
2589 * This protects the physical & virtual access handlers, ram ranges,
2590 * and the page flag updating (some of it anyway).
2591 */
2592 PDMCRITSECT CritSect;
2593
2594 /** Pointer to SHW+GST mode data (function pointers).
2595 * The index into this table is made up from */
2596 R3PTRTYPE(PPGMMODEDATA) paModeData;
2597
2598 /** Shadow Page Pool - R3 Ptr. */
2599 R3PTRTYPE(PPGMPOOL) pPoolR3;
2600 /** Shadow Page Pool - R0 Ptr. */
2601 R0PTRTYPE(PPGMPOOL) pPoolR0;
2602 /** Shadow Page Pool - RC Ptr. */
2603 RCPTRTYPE(PPGMPOOL) pPoolRC;
2604
2605 /** We're not in a state which permits writes to guest memory.
2606 * (Only used in strict builds.) */
2607 bool fNoMorePhysWrites;
2608 /** Alignment padding that makes the next member start on a 8 byte boundrary. */
2609 bool afAlignment3[HC_ARCH_BITS == 32 ? 7: 3];
2610
2611 /**
2612 * Data associated with managing the ring-3 mappings of the allocation chunks.
2613 */
2614 struct
2615 {
2616 /** The chunk tree, ordered by chunk id. */
2617#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2618 R3PTRTYPE(PAVLU32NODECORE) pTree;
2619#else
2620 R3R0PTRTYPE(PAVLU32NODECORE) pTree;
2621#endif
2622 /** The chunk age tree, ordered by ageing sequence number. */
2623 R3PTRTYPE(PAVLLU32NODECORE) pAgeTree;
2624 /** The chunk mapping TLB. */
2625 PGMCHUNKR3MAPTLB Tlb;
2626 /** The number of mapped chunks. */
2627 uint32_t c;
2628 /** The maximum number of mapped chunks.
2629 * @cfgm PGM/MaxRing3Chunks */
2630 uint32_t cMax;
2631 /** The current time. */
2632 uint32_t iNow;
2633 /** Number of pgmR3PhysChunkFindUnmapCandidate calls left to the next ageing. */
2634 uint32_t AgeingCountdown;
2635 } ChunkR3Map;
2636
2637 /**
2638 * The page mapping TLB for ring-3 and (for the time being) ring-0.
2639 */
2640 PGMPAGER3MAPTLB PhysTlbHC;
2641
2642 /** @name The zero page.
2643 * @{ */
2644 /** The host physical address of the zero page. */
2645 RTHCPHYS HCPhysZeroPg;
2646 /** The ring-3 mapping of the zero page. */
2647 RTR3PTR pvZeroPgR3;
2648 /** The ring-0 mapping of the zero page. */
2649 RTR0PTR pvZeroPgR0;
2650 /** The GC mapping of the zero page. */
2651 RTGCPTR pvZeroPgRC;
2652#if GC_ARCH_BITS != 32
2653 uint32_t u32ZeroAlignment; /**< Alignment padding. */
2654#endif
2655 /** @}*/
2656
2657 /** The number of handy pages. */
2658 uint32_t cHandyPages;
2659 /**
2660 * Array of handy pages.
2661 *
2662 * This array is used in a two way communication between pgmPhysAllocPage
2663 * and GMMR0AllocateHandyPages, with PGMR3PhysAllocateHandyPages serving as
2664 * an intermediary.
2665 *
2666 * The size of this array is important, see pgmPhysEnsureHandyPage for details.
2667 * (The current size of 32 pages, means 128 KB of handy memory.)
2668 */
2669 GMMPAGEDESC aHandyPages[PGM_HANDY_PAGES];
2670
2671 /**
2672 * Live save data.
2673 */
2674 struct
2675 {
2676 /** Per type statistics. */
2677 struct
2678 {
2679 /** The number of ready pages. */
2680 uint32_t cReadyPages;
2681 /** The number of dirty pages. */
2682 uint32_t cDirtyPages;
2683 /** The number of ready zero pages. */
2684 uint32_t cZeroPages;
2685 /** The number of write monitored pages. */
2686 uint32_t cMonitoredPages;
2687 } Rom,
2688 Mmio2,
2689 Ram;
2690 /** The number of ignored pages in the RAM ranges (i.e. MMIO, MMIO2 and ROM). */
2691 uint32_t cIgnoredPages;
2692 /** Indicates that a live save operation is active. */
2693 bool fActive;
2694 /** Padding. */
2695 bool afReserved[2];
2696 /** The next history index. */
2697 uint8_t iDirtyPagesHistory;
2698 /** History of the total amount of dirty pages. */
2699 uint32_t acDirtyPagesHistory[64];
2700 /** Short term dirty page average. */
2701 uint32_t cDirtyPagesShort;
2702 /** Long term dirty page average. */
2703 uint32_t cDirtyPagesLong;
2704 /** The number of saved pages. This is used to get some kind of estimate of the
2705 * link speed so we can decide when we're done. It is reset after the first
2706 * 7 passes so the speed estimate doesn't get inflated by the initial set of
2707 * zero pages. */
2708 uint64_t cSavedPages;
2709 /** The nanosecond timestamp when cSavedPages was 0. */
2710 uint64_t uSaveStartNS;
2711 /** Pages per second (for statistics). */
2712 uint32_t cPagesPerSecond;
2713 uint32_t cAlignment;
2714 } LiveSave;
2715
2716 /** @name Error injection.
2717 * @{ */
2718 /** Inject handy page allocation errors pretending we're completely out of
2719 * memory. */
2720 bool volatile fErrInjHandyPages;
2721 /** Padding. */
2722 bool afReserved[3];
2723 /** @} */
2724
2725 /** @name Release Statistics
2726 * @{ */
2727 uint32_t cAllPages; /**< The total number of pages. (Should be Private + Shared + Zero + Pure MMIO.) */
2728 uint32_t cPrivatePages; /**< The number of private pages. */
2729 uint32_t cSharedPages; /**< The number of shared pages. */
2730 uint32_t cZeroPages; /**< The number of zero backed pages. */
2731 uint32_t cPureMmioPages; /**< The number of pure MMIO pages. */
2732 uint32_t cMonitoredPages; /**< The number of write monitored pages. */
2733 uint32_t cWrittenToPages; /**< The number of previously write monitored pages. */
2734 uint32_t cWriteLockedPages; /**< The number of write locked pages. */
2735 uint32_t cReadLockedPages; /**< The number of read locked pages. */
2736
2737 /** The number of times we were forced to change the hypervisor region location. */
2738 STAMCOUNTER cRelocations;
2739 /** @} */
2740
2741#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
2742 /* R3 only: */
2743 STAMCOUNTER StatR3DetectedConflicts; /**< R3: Number of times PGMR3MapHasConflicts() detected a conflict. */
2744 STAMPROFILE StatR3ResolveConflict; /**< R3: pgmR3SyncPTResolveConflict() profiling (includes the entire relocation). */
2745
2746 STAMCOUNTER StatRZChunkR3MapTlbHits; /**< RC/R0: Ring-3/0 chunk mapper TLB hits. */
2747 STAMCOUNTER StatRZChunkR3MapTlbMisses; /**< RC/R0: Ring-3/0 chunk mapper TLB misses. */
2748 STAMCOUNTER StatRZPageMapTlbHits; /**< RC/R0: Ring-3/0 page mapper TLB hits. */
2749 STAMCOUNTER StatRZPageMapTlbMisses; /**< RC/R0: Ring-3/0 page mapper TLB misses. */
2750 STAMCOUNTER StatPageMapTlbFlushes; /**< ALL: Ring-3/0 page mapper TLB flushes. */
2751 STAMCOUNTER StatPageMapTlbFlushEntry; /**< ALL: Ring-3/0 page mapper TLB flushes. */
2752 STAMCOUNTER StatR3ChunkR3MapTlbHits; /**< R3: Ring-3/0 chunk mapper TLB hits. */
2753 STAMCOUNTER StatR3ChunkR3MapTlbMisses; /**< R3: Ring-3/0 chunk mapper TLB misses. */
2754 STAMCOUNTER StatR3PageMapTlbHits; /**< R3: Ring-3/0 page mapper TLB hits. */
2755 STAMCOUNTER StatR3PageMapTlbMisses; /**< R3: Ring-3/0 page mapper TLB misses. */
2756 STAMPROFILE StatRZSyncCR3HandlerVirtualReset; /**< RC/R0: Profiling of the virtual handler resets. */
2757 STAMPROFILE StatRZSyncCR3HandlerVirtualUpdate; /**< RC/R0: Profiling of the virtual handler updates. */
2758 STAMPROFILE StatR3SyncCR3HandlerVirtualReset; /**< R3: Profiling of the virtual handler resets. */
2759 STAMPROFILE StatR3SyncCR3HandlerVirtualUpdate; /**< R3: Profiling of the virtual handler updates. */
2760 STAMCOUNTER StatR3PhysHandlerReset; /**< R3: The number of times PGMHandlerPhysicalReset is called. */
2761 STAMCOUNTER StatRZPhysHandlerReset; /**< RC/R0: The number of times PGMHandlerPhysicalReset is called. */
2762 STAMPROFILE StatRZVirtHandlerSearchByPhys; /**< RC/R0: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2763 STAMPROFILE StatR3VirtHandlerSearchByPhys; /**< R3: Profiling of pgmHandlerVirtualFindByPhysAddr. */
2764 STAMCOUNTER StatRZPageReplaceShared; /**< RC/R0: Times a shared page has been replaced by a private one. */
2765 STAMCOUNTER StatRZPageReplaceZero; /**< RC/R0: Times the zero page has been replaced by a private one. */
2766/// @todo STAMCOUNTER StatRZPageHandyAllocs; /**< RC/R0: The number of times we've executed GMMR3AllocateHandyPages. */
2767 STAMCOUNTER StatR3PageReplaceShared; /**< R3: Times a shared page has been replaced by a private one. */
2768 STAMCOUNTER StatR3PageReplaceZero; /**< R3: Times the zero page has been replaced by a private one. */
2769/// @todo STAMCOUNTER StatR3PageHandyAllocs; /**< R3: The number of times we've executed GMMR3AllocateHandyPages. */
2770
2771 /* RC only: */
2772 STAMCOUNTER StatRCDynMapCacheMisses; /**< RC: The number of dynamic page mapping cache misses */
2773 STAMCOUNTER StatRCDynMapCacheHits; /**< RC: The number of dynamic page mapping cache hits */
2774 STAMCOUNTER StatRCInvlPgConflict; /**< RC: Number of times PGMInvalidatePage() detected a mapping conflict. */
2775 STAMCOUNTER StatRCInvlPgSyncMonCR3; /**< RC: Number of times PGMInvalidatePage() ran into PGM_SYNC_MONITOR_CR3. */
2776
2777 STAMCOUNTER StatRZPhysRead;
2778 STAMCOUNTER StatRZPhysReadBytes;
2779 STAMCOUNTER StatRZPhysWrite;
2780 STAMCOUNTER StatRZPhysWriteBytes;
2781 STAMCOUNTER StatR3PhysRead;
2782 STAMCOUNTER StatR3PhysReadBytes;
2783 STAMCOUNTER StatR3PhysWrite;
2784 STAMCOUNTER StatR3PhysWriteBytes;
2785 STAMCOUNTER StatRCPhysRead;
2786 STAMCOUNTER StatRCPhysReadBytes;
2787 STAMCOUNTER StatRCPhysWrite;
2788 STAMCOUNTER StatRCPhysWriteBytes;
2789
2790 STAMCOUNTER StatRZPhysSimpleRead;
2791 STAMCOUNTER StatRZPhysSimpleReadBytes;
2792 STAMCOUNTER StatRZPhysSimpleWrite;
2793 STAMCOUNTER StatRZPhysSimpleWriteBytes;
2794 STAMCOUNTER StatR3PhysSimpleRead;
2795 STAMCOUNTER StatR3PhysSimpleReadBytes;
2796 STAMCOUNTER StatR3PhysSimpleWrite;
2797 STAMCOUNTER StatR3PhysSimpleWriteBytes;
2798 STAMCOUNTER StatRCPhysSimpleRead;
2799 STAMCOUNTER StatRCPhysSimpleReadBytes;
2800 STAMCOUNTER StatRCPhysSimpleWrite;
2801 STAMCOUNTER StatRCPhysSimpleWriteBytes;
2802
2803 STAMCOUNTER StatTrackVirgin; /**< The number of first time shadowings. */
2804 STAMCOUNTER StatTrackAliased; /**< The number of times switching to cRef2, i.e. the page is being shadowed by two PTs. */
2805 STAMCOUNTER StatTrackAliasedMany; /**< The number of times we're tracking using cRef2. */
2806 STAMCOUNTER StatTrackAliasedLots; /**< The number of times we're hitting pages which has overflowed cRef2. */
2807 STAMCOUNTER StatTrackOverflows; /**< The number of times the extent list grows to long. */
2808 STAMPROFILE StatTrackDeref; /**< Profiling of SyncPageWorkerTrackDeref (expensive). */
2809#endif
2810} PGM;
/* Compile-time layout checks: these members of PGM must be 8-byte (or
 * RTGCPTR-size) aligned.  Skipped for the struct-test GC build. */
#ifndef IN_TSTVMSTRUCTGC /* HACK */
AssertCompileMemberAlignment(PGM, paDynPageMap32BitPTEsGC, 8);
AssertCompileMemberAlignment(PGM, GCPtrMappingFixed, sizeof(RTGCPTR));
AssertCompileMemberAlignment(PGM, HCPhysInterPD, 8);
AssertCompileMemberAlignment(PGM, aHCPhysDynPageMapCache, 8);
AssertCompileMemberAlignment(PGM, CritSect, 8);
AssertCompileMemberAlignment(PGM, ChunkR3Map, 8);
AssertCompileMemberAlignment(PGM, PhysTlbHC, 8);
AssertCompileMemberAlignment(PGM, HCPhysZeroPg, 8);
AssertCompileMemberAlignment(PGM, aHandyPages, 8);
AssertCompileMemberAlignment(PGM, cRelocations, 8);
#endif /* !IN_TSTVMSTRUCTGC */
/** Pointer to the PGM instance data. */
typedef PGM *PPGM;
2825
2826
2827/**
2828 * Converts a PGMCPU pointer into a VM pointer.
2829 * @returns Pointer to the VM structure the PGM is part of.
2830 * @param pPGM Pointer to PGMCPU instance data.
2831 */
2832#define PGMCPU2VM(pPGM) ( (PVM)((char*)pPGM - pPGM->offVM) )
2833
2834/**
2835 * Converts a PGMCPU pointer into a PGM pointer.
2836 * @returns Pointer to the VM structure the PGM is part of.
2837 * @param pPGM Pointer to PGMCPU instance data.
2838 */
2839#define PGMCPU2PGM(pPGMCpu) ( (PPGM)((char*)pPGMCpu - pPGMCpu->offPGM) )
2840
2841/**
2842 * PGMCPU Data (part of VMCPU).
2843 */
2844typedef struct PGMCPU
2845{
2846 /** Offset to the VM structure. */
2847 RTINT offVM;
2848 /** Offset to the VMCPU structure. */
2849 RTINT offVCpu;
2850 /** Offset of the PGM structure relative to VMCPU. */
2851 RTINT offPGM;
2852 RTINT uPadding0; /**< structure size alignment. */
2853
2854#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
2855 /** Automatically tracked physical memory mapping set.
2856 * Ring-0 and strict raw-mode builds. */
2857 PGMMAPSET AutoSet;
2858#endif
2859
2860 /** A20 gate mask.
2861 * Our current approach to A20 emulation is to let REM do it and don't bother
2862 * anywhere else. The interesting Guests will be operating with it enabled anyway.
2863 * But whould need arrise, we'll subject physical addresses to this mask. */
2864 RTGCPHYS GCPhysA20Mask;
2865 /** A20 gate state - boolean! */
2866 bool fA20Enabled;
2867
2868 /** What needs syncing (PGM_SYNC_*).
2869 * This is used to queue operations for PGMSyncCR3, PGMInvalidatePage,
2870 * PGMFlushTLB, and PGMR3Load. */
2871 RTUINT fSyncFlags;
2872
2873 /** The shadow paging mode. */
2874 PGMMODE enmShadowMode;
2875 /** The guest paging mode. */
2876 PGMMODE enmGuestMode;
2877
2878 /** The current physical address representing in the guest CR3 register. */
2879 RTGCPHYS GCPhysCR3;
2880
2881 /** @name 32-bit Guest Paging.
2882 * @{ */
2883 /** The guest's page directory, R3 pointer. */
2884 R3PTRTYPE(PX86PD) pGst32BitPdR3;
2885#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2886 /** The guest's page directory, R0 pointer. */
2887 R0PTRTYPE(PX86PD) pGst32BitPdR0;
2888#endif
2889 /** The guest's page directory, static RC mapping. */
2890 RCPTRTYPE(PX86PD) pGst32BitPdRC;
2891 /** @} */
2892
2893 /** @name PAE Guest Paging.
2894 * @{ */
2895 /** The guest's page directory pointer table, static RC mapping. */
2896 RCPTRTYPE(PX86PDPT) pGstPaePdptRC;
2897 /** The guest's page directory pointer table, R3 pointer. */
2898 R3PTRTYPE(PX86PDPT) pGstPaePdptR3;
2899#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2900 /** The guest's page directory pointer table, R0 pointer. */
2901 R0PTRTYPE(PX86PDPT) pGstPaePdptR0;
2902#endif
2903
2904 /** The guest's page directories, R3 pointers.
2905 * These are individual pointers and don't have to be adjecent.
2906 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
2907 R3PTRTYPE(PX86PDPAE) apGstPaePDsR3[4];
2908 /** The guest's page directories, R0 pointers.
2909 * Same restrictions as apGstPaePDsR3. */
2910#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2911 R0PTRTYPE(PX86PDPAE) apGstPaePDsR0[4];
2912#endif
2913 /** The guest's page directories, static GC mapping.
2914 * Unlike the R3/R0 array the first entry can be accessed as a 2048 entry PD.
2915 * These don't have to be up-to-date - use pgmGstGetPaePD() to access them. */
2916 RCPTRTYPE(PX86PDPAE) apGstPaePDsRC[4];
2917 /** The physical addresses of the guest page directories (PAE) pointed to by apGstPagePDsHC/GC. */
2918 RTGCPHYS aGCPhysGstPaePDs[4];
2919 /** The physical addresses of the monitored guest page directories (PAE). */
2920 RTGCPHYS aGCPhysGstPaePDsMonitored[4];
2921 /** @} */
2922
2923 /** @name AMD64 Guest Paging.
2924 * @{ */
2925 /** The guest's page directory pointer table, R3 pointer. */
2926 R3PTRTYPE(PX86PML4) pGstAmd64Pml4R3;
2927#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
2928 /** The guest's page directory pointer table, R0 pointer. */
2929 R0PTRTYPE(PX86PML4) pGstAmd64Pml4R0;
2930#else
2931 RTR0PTR alignment6b; /**< alignment equalizer. */
2932#endif
2933 /** @} */
2934
2935 /** Pointer to the page of the current active CR3 - R3 Ptr. */
2936 R3PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R3;
2937 /** Pointer to the page of the current active CR3 - R0 Ptr. */
2938 R0PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R0;
2939 /** Pointer to the page of the current active CR3 - RC Ptr. */
2940 RCPTRTYPE(PPGMPOOLPAGE) pShwPageCR3RC;
2941 /* The shadow page pool index of the user table as specified during allocation; useful for freeing root pages */
2942 uint32_t iShwUser;
2943 /* The index into the user table (shadowed) as specified during allocation; useful for freeing root pages. */
2944 uint32_t iShwUserTable;
2945# if HC_ARCH_BITS == 64
2946 RTRCPTR alignment6; /**< structure size alignment. */
2947# endif
2948 /** @} */
2949
2950 /** @name Function pointers for Shadow paging.
2951 * @{
2952 */
2953 DECLR3CALLBACKMEMBER(int, pfnR3ShwRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2954 DECLR3CALLBACKMEMBER(int, pfnR3ShwExit,(PVMCPU pVCpu));
2955 DECLR3CALLBACKMEMBER(int, pfnR3ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2956 DECLR3CALLBACKMEMBER(int, pfnR3ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2957
2958 DECLRCCALLBACKMEMBER(int, pfnRCShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2959 DECLRCCALLBACKMEMBER(int, pfnRCShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2960
2961 DECLR0CALLBACKMEMBER(int, pfnR0ShwGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys));
2962 DECLR0CALLBACKMEMBER(int, pfnR0ShwModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2963
2964 /** @} */
2965
2966 /** @name Function pointers for Guest paging.
2967 * @{
2968 */
2969 DECLR3CALLBACKMEMBER(int, pfnR3GstRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2970 DECLR3CALLBACKMEMBER(int, pfnR3GstExit,(PVMCPU pVCpu));
2971 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2972 DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2973 DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2974 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2975 DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2976 DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2977#if HC_ARCH_BITS == 64
2978 RTRCPTR alignment3; /**< structure size alignment. */
2979#endif
2980
2981 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
2982 DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVMCPU pVCpu, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
2983 DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDEPAE pPde));
2984 /** @} */
2985
2986 /** @name Function pointers for Both Shadow and Guest paging.
2987 * @{
2988 */
2989 DECLR3CALLBACKMEMBER(int, pfnR3BthRelocate,(PVMCPU pVCpu, RTGCPTR offDelta));
2990 /* no pfnR3BthTrap0eHandler */
2991 DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2992 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
2993 DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
2994 DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
2995 DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
2996 DECLR3CALLBACKMEMBER(unsigned, pfnR3BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
2997 DECLR3CALLBACKMEMBER(int, pfnR3BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
2998 DECLR3CALLBACKMEMBER(int, pfnR3BthUnmapCR3,(PVMCPU pVCpu));
2999
3000 DECLR0CALLBACKMEMBER(int, pfnR0BthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
3001 DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3002 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
3003 DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
3004 DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3005 DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
3006 DECLR0CALLBACKMEMBER(unsigned, pfnR0BthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
3007 DECLR0CALLBACKMEMBER(int, pfnR0BthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
3008 DECLR0CALLBACKMEMBER(int, pfnR0BthUnmapCR3,(PVMCPU pVCpu));
3009
3010 DECLRCCALLBACKMEMBER(int, pfnRCBthTrap0eHandler,(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken));
3011 DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3012 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
3013 DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
3014 DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
3015 DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
3016 DECLRCCALLBACKMEMBER(unsigned, pfnRCBthAssertCR3,(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr, RTGCPTR cb));
3017 DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
3018 DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVMCPU pVCpu));
3019 RTRCPTR alignment2; /**< structure size alignment. */
3020 /** @} */
3021
3022 /** For saving stack space, the disassembler state is allocated here instead of
3023 * on the stack.
3024 * @note The DISCPUSTATE structure is not R3/R0/RZ clean! */
3025 union
3026 {
3027 /** The disassembler scratch space. */
3028 DISCPUSTATE DisState;
3029 /** Padding. */
3030 uint8_t abDisStatePadding[DISCPUSTATE_PADDING_SIZE];
3031 };
3032
3033 /* Count the number of pgm pool access handler calls. */
3034 uint64_t cPoolAccessHandler;
3035
3036 /** @name Release Statistics
3037 * @{ */
3038 /** The number of times the guest has switched mode since last reset or statistics reset. */
3039 STAMCOUNTER cGuestModeChanges;
3040 /** @} */
3041
3042#ifdef VBOX_WITH_STATISTICS /** @todo move this chunk to the heap. */
3043 /** @name Statistics
3044 * @{ */
3045 /** RC: Which statistic this \#PF should be attributed to. */
3046 RCPTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionRC;
3047 RTRCPTR padding0;
3048 /** R0: Which statistic this \#PF should be attributed to. */
3049 R0PTRTYPE(PSTAMPROFILE) pStatTrap0eAttributionR0;
3050 RTR0PTR padding1;
3051
3052 /* Common */
3053 STAMCOUNTER StatSyncPtPD[X86_PG_ENTRIES]; /**< SyncPT - PD distribution. */
3054 STAMCOUNTER StatSyncPagePD[X86_PG_ENTRIES]; /**< SyncPage - PD distribution. */
3055
3056 /* R0 only: */
3057 STAMCOUNTER StatR0DynMapMigrateInvlPg; /**< R0: invlpg in PGMDynMapMigrateAutoSet. */
3058 STAMPROFILE StatR0DynMapGCPageInl; /**< R0: Calls to pgmR0DynMapGCPageInlined. */
3059 STAMCOUNTER StatR0DynMapGCPageInlHits; /**< R0: Hash table lookup hits. */
3060 STAMCOUNTER StatR0DynMapGCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
3061 STAMCOUNTER StatR0DynMapGCPageInlRamHits; /**< R0: 1st ram range hits. */
3062 STAMCOUNTER StatR0DynMapGCPageInlRamMisses; /**< R0: 1st ram range misses, takes slow path. */
3063 STAMPROFILE StatR0DynMapHCPageInl; /**< R0: Calls to pgmR0DynMapHCPageInlined. */
3064 STAMCOUNTER StatR0DynMapHCPageInlHits; /**< R0: Hash table lookup hits. */
3065 STAMCOUNTER StatR0DynMapHCPageInlMisses; /**< R0: Misses that falls back to code common with PGMDynMapHCPage. */
3066 STAMPROFILE StatR0DynMapHCPage; /**< R0: Calls to PGMDynMapHCPage. */
3067 STAMCOUNTER StatR0DynMapSetOptimize; /**< R0: Calls to pgmDynMapOptimizeAutoSet. */
3068 STAMCOUNTER StatR0DynMapSetSearchFlushes; /**< R0: Set search restorting to subset flushes. */
3069 STAMCOUNTER StatR0DynMapSetSearchHits; /**< R0: Set search hits. */
3070 STAMCOUNTER StatR0DynMapSetSearchMisses; /**< R0: Set search misses. */
3071 STAMCOUNTER StatR0DynMapPage; /**< R0: Calls to pgmR0DynMapPage. */
3072 STAMCOUNTER StatR0DynMapPageHits0; /**< R0: Hits at iPage+0. */
3073 STAMCOUNTER StatR0DynMapPageHits1; /**< R0: Hits at iPage+1. */
3074 STAMCOUNTER StatR0DynMapPageHits2; /**< R0: Hits at iPage+2. */
3075 STAMCOUNTER StatR0DynMapPageInvlPg; /**< R0: invlpg. */
3076 STAMCOUNTER StatR0DynMapPageSlow; /**< R0: Calls to pgmR0DynMapPageSlow. */
3077 STAMCOUNTER StatR0DynMapPageSlowLoopHits; /**< R0: Hits in the pgmR0DynMapPageSlow search loop. */
3078 STAMCOUNTER StatR0DynMapPageSlowLoopMisses; /**< R0: Misses in the pgmR0DynMapPageSlow search loop. */
3079 //STAMCOUNTER StatR0DynMapPageSlowLostHits; /**< R0: Lost hits. */
3080 STAMCOUNTER StatR0DynMapSubsets; /**< R0: Times PGMDynMapPushAutoSubset was called. */
3081 STAMCOUNTER StatR0DynMapPopFlushes; /**< R0: Times PGMDynMapPopAutoSubset flushes the subset. */
3082 STAMCOUNTER aStatR0DynMapSetSize[11]; /**< R0: Set size distribution. */
3083
3084 /* RZ only: */
3085 STAMPROFILE StatRZTrap0e; /**< RC/R0: PGMTrap0eHandler() profiling. */
3086 STAMPROFILE StatRZTrap0eTimeCheckPageFault;
3087 STAMPROFILE StatRZTrap0eTimeSyncPT;
3088 STAMPROFILE StatRZTrap0eTimeMapping;
3089 STAMPROFILE StatRZTrap0eTimeOutOfSync;
3090 STAMPROFILE StatRZTrap0eTimeHandlers;
3091 STAMPROFILE StatRZTrap0eTime2CSAM; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is CSAM. */
3092 STAMPROFILE StatRZTrap0eTime2DirtyAndAccessed; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is dirty and/or accessed bit emulation. */
3093 STAMPROFILE StatRZTrap0eTime2GuestTrap; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a guest trap. */
3094 STAMPROFILE StatRZTrap0eTime2HndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a physical handler. */
3095 STAMPROFILE StatRZTrap0eTime2HndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is a virtual handler. */
3096 STAMPROFILE StatRZTrap0eTime2HndUnhandled; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is access outside the monitored areas of a monitored page. */
3097 STAMPROFILE StatRZTrap0eTime2Misc; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is not known. */
3098 STAMPROFILE StatRZTrap0eTime2OutOfSync; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync page. */
3099 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndPhys; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync physical handler page. */
3100 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndVirt; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an out-of-sync virtual handler page. */
3101 STAMPROFILE StatRZTrap0eTime2OutOfSyncHndObs; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is an obsolete handler page. */
3102 STAMPROFILE StatRZTrap0eTime2SyncPT; /**< RC/R0: Profiling of the Trap0eHandler body when the cause is lazy syncing of a PT. */
3103 STAMCOUNTER StatRZTrap0eConflicts; /**< RC/R0: The number of times \#PF was caused by an undetected conflict. */
3104 STAMCOUNTER StatRZTrap0eHandlersMapping; /**< RC/R0: Number of traps due to access handlers in mappings. */
3105 STAMCOUNTER StatRZTrap0eHandlersOutOfSync; /**< RC/R0: Number of out-of-sync handled pages. */
3106 STAMCOUNTER StatRZTrap0eHandlersPhysical; /**< RC/R0: Number of traps due to physical access handlers. */
3107 STAMCOUNTER StatRZTrap0eHandlersVirtual; /**< RC/R0: Number of traps due to virtual access handlers. */
3108 STAMCOUNTER StatRZTrap0eHandlersVirtualByPhys; /**< RC/R0: Number of traps due to virtual access handlers found by physical address. */
3109 STAMCOUNTER StatRZTrap0eHandlersVirtualUnmarked;/**< RC/R0: Number of traps due to virtual access handlers found by virtual address (without proper physical flags). */
3110 STAMCOUNTER StatRZTrap0eHandlersUnhandled; /**< RC/R0: Number of traps due to access outside range of monitored page(s). */
3111 STAMCOUNTER StatRZTrap0eHandlersInvalid; /**< RC/R0: Number of traps due to access to invalid physical memory. */
3112 STAMCOUNTER StatRZTrap0eUSNotPresentRead; /**< RC/R0: \#PF err kind */
3113 STAMCOUNTER StatRZTrap0eUSNotPresentWrite; /**< RC/R0: \#PF err kind */
3114 STAMCOUNTER StatRZTrap0eUSWrite; /**< RC/R0: \#PF err kind */
3115 STAMCOUNTER StatRZTrap0eUSReserved; /**< RC/R0: \#PF err kind */
3116 STAMCOUNTER StatRZTrap0eUSNXE; /**< RC/R0: \#PF err kind */
3117 STAMCOUNTER StatRZTrap0eUSRead; /**< RC/R0: \#PF err kind */
3118 STAMCOUNTER StatRZTrap0eSVNotPresentRead; /**< RC/R0: \#PF err kind */
3119 STAMCOUNTER StatRZTrap0eSVNotPresentWrite; /**< RC/R0: \#PF err kind */
3120 STAMCOUNTER StatRZTrap0eSVWrite; /**< RC/R0: \#PF err kind */
3121 STAMCOUNTER StatRZTrap0eSVReserved; /**< RC/R0: \#PF err kind */
3122 STAMCOUNTER StatRZTrap0eSNXE; /**< RC/R0: \#PF err kind */
3123 STAMCOUNTER StatRZTrap0eGuestPF; /**< RC/R0: Real guest \#PFs. */
3124 STAMCOUNTER StatRZTrap0eGuestPFUnh; /**< RC/R0: Real guest \#PF ending up at the end of the \#PF code. */
3125 STAMCOUNTER StatRZTrap0eGuestPFMapping; /**< RC/R0: Real guest \#PF to HMA or other mapping. */
3126 STAMCOUNTER StatRZTrap0eWPEmulInRZ; /**< RC/R0: WP=0 virtualization trap, handled. */
3127 STAMCOUNTER StatRZTrap0eWPEmulToR3; /**< RC/R0: WP=0 virtualization trap, chickened out. */
3128 STAMCOUNTER StatRZTrap0ePD[X86_PG_ENTRIES]; /**< RC/R0: PD distribution of the \#PFs. */
3129 STAMCOUNTER StatRZGuestCR3WriteHandled; /**< RC/R0: The number of times WriteHandlerCR3() was successfully called. */
3130 STAMCOUNTER StatRZGuestCR3WriteUnhandled; /**< RC/R0: The number of times WriteHandlerCR3() was called and we had to fall back to the recompiler. */
3131 STAMCOUNTER StatRZGuestCR3WriteConflict; /**< RC/R0: The number of times WriteHandlerCR3() was called and a conflict was detected. */
3132 STAMCOUNTER StatRZGuestROMWriteHandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was successfully called. */
3133 STAMCOUNTER StatRZGuestROMWriteUnhandled; /**< RC/R0: The number of times pgmPhysRomWriteHandler() was called and we had to fall back to the recompiler */
3134
3135 /* HC - R3 and (maybe) R0: */
3136
3137 /* RZ & R3: */
3138 STAMPROFILE StatRZSyncCR3; /**< RC/R0: PGMSyncCR3() profiling. */
3139 STAMPROFILE StatRZSyncCR3Handlers; /**< RC/R0: Profiling of the PGMSyncCR3() update handler section. */
3140 STAMCOUNTER StatRZSyncCR3Global; /**< RC/R0: The number of global CR3 syncs. */
3141 STAMCOUNTER StatRZSyncCR3NotGlobal; /**< RC/R0: The number of non-global CR3 syncs. */
3142 STAMCOUNTER StatRZSyncCR3DstCacheHit; /**< RC/R0: The number of times we got some kind of cache hit on a page table. */
3143 STAMCOUNTER StatRZSyncCR3DstFreed; /**< RC/R0: The number of times we've had to free a shadow entry. */
3144 STAMCOUNTER StatRZSyncCR3DstFreedSrcNP; /**< RC/R0: The number of times we've had to free a shadow entry for which the source entry was not present. */
3145 STAMCOUNTER StatRZSyncCR3DstNotPresent; /**< RC/R0: The number of times we've encountered a not present shadow entry for a present guest entry. */
3146 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPD; /**< RC/R0: The number of times a global page directory wasn't flushed. */
3147 STAMCOUNTER StatRZSyncCR3DstSkippedGlobalPT; /**< RC/R0: The number of times a page table with only global entries wasn't flushed. */
3148 STAMPROFILE StatRZSyncPT; /**< RC/R0: PGMSyncPT() profiling. */
3149 STAMCOUNTER StatRZSyncPTFailed; /**< RC/R0: The number of times PGMSyncPT() failed. */
3150 STAMCOUNTER StatRZSyncPT4K; /**< RC/R0: Number of 4KB syncs. */
3151 STAMCOUNTER StatRZSyncPT4M; /**< RC/R0: Number of 4MB syncs. */
3152 STAMCOUNTER StatRZSyncPagePDNAs; /**< RC/R0: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
3153 STAMCOUNTER StatRZSyncPagePDOutOfSync; /**< RC/R0: The number of time we've encountered an out-of-sync PD in SyncPage. */
3154 STAMCOUNTER StatRZAccessedPage; /**< RC/R0: The number of pages marked not present for accessed bit emulation. */
3155 STAMPROFILE StatRZDirtyBitTracking; /**< RC/R0: Profiling the dirty bit tracking in CheckPageFault().. */
3156 STAMCOUNTER StatRZDirtyPage; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
3157 STAMCOUNTER StatRZDirtyPageBig; /**< RC/R0: The number of pages marked read-only for dirty bit tracking. */
3158 STAMCOUNTER StatRZDirtyPageSkipped; /**< RC/R0: The number of pages already dirty or readonly. */
3159 STAMCOUNTER StatRZDirtyPageTrap; /**< RC/R0: The number of traps generated for dirty bit tracking. */
3160 STAMCOUNTER StatRZDirtyPageStale; /**< RC/R0: The number of traps generated for dirty bit tracking. (stale tlb entries) */
3161 STAMCOUNTER StatRZDirtyTrackRealPF; /**< RC/R0: The number of real pages faults during dirty bit tracking. */
3162 STAMCOUNTER StatRZDirtiedPage; /**< RC/R0: The number of pages marked dirty because of write accesses. */
3163 STAMCOUNTER StatRZPageAlreadyDirty; /**< RC/R0: The number of pages already marked dirty because of write accesses. */
3164 STAMPROFILE StatRZInvalidatePage; /**< RC/R0: PGMInvalidatePage() profiling. */
3165 STAMCOUNTER StatRZInvalidatePage4KBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4KB page. */
3166 STAMCOUNTER StatRZInvalidatePage4MBPages; /**< RC/R0: The number of times PGMInvalidatePage() was called for a 4MB page. */
3167 STAMCOUNTER StatRZInvalidatePage4MBPagesSkip; /**< RC/R0: The number of times PGMInvalidatePage() skipped a 4MB page. */
3168 STAMCOUNTER StatRZInvalidatePagePDMappings; /**< RC/R0: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
3169 STAMCOUNTER StatRZInvalidatePagePDNAs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
3170 STAMCOUNTER StatRZInvalidatePagePDNPs; /**< RC/R0: The number of times PGMInvalidatePage() was called for a not present page directory. */
3171 STAMCOUNTER StatRZInvalidatePagePDOutOfSync; /**< RC/R0: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
3172 STAMCOUNTER StatRZInvalidatePageSkipped; /**< RC/R0: The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3. */
3173 STAMCOUNTER StatRZPageOutOfSyncUser; /**< RC/R0: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
3174 STAMCOUNTER StatRZPageOutOfSyncSupervisor; /**< RC/R0: The number of times supervisor page is out of sync was detected in in \#PF or VerifyAccessSyncPage. */
3175 STAMCOUNTER StatRZPageOutOfSyncUserWrite; /**< RC/R0: The number of times user page is out of sync was detected in \#PF. */
3176 STAMCOUNTER StatRZPageOutOfSyncSupervisorWrite; /**< RC/R0: The number of times supervisor page is out of sync was detected in in \#PF. */
3177 STAMPROFILE StatRZPrefetch; /**< RC/R0: PGMPrefetchPage. */
3178 STAMPROFILE StatRZFlushTLB; /**< RC/R0: Profiling of the PGMFlushTLB() body. */
3179 STAMCOUNTER StatRZFlushTLBNewCR3; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
3180 STAMCOUNTER StatRZFlushTLBNewCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
3181 STAMCOUNTER StatRZFlushTLBSameCR3; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
3182 STAMCOUNTER StatRZFlushTLBSameCR3Global; /**< RC/R0: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
3183 STAMPROFILE StatRZGstModifyPage; /**< RC/R0: Profiling of the PGMGstModifyPage() body */
3184
3185 STAMPROFILE StatR3SyncCR3; /**< R3: PGMSyncCR3() profiling. */
3186 STAMPROFILE StatR3SyncCR3Handlers; /**< R3: Profiling of the PGMSyncCR3() update handler section. */
3187 STAMCOUNTER StatR3SyncCR3Global; /**< R3: The number of global CR3 syncs. */
3188 STAMCOUNTER StatR3SyncCR3NotGlobal; /**< R3: The number of non-global CR3 syncs. */
3189 STAMCOUNTER StatR3SyncCR3DstFreed; /**< R3: The number of times we've had to free a shadow entry. */
3190 STAMCOUNTER StatR3SyncCR3DstFreedSrcNP; /**< R3: The number of times we've had to free a shadow entry for which the source entry was not present. */
3191 STAMCOUNTER StatR3SyncCR3DstNotPresent; /**< R3: The number of times we've encountered a not present shadow entry for a present guest entry. */
3192 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPD; /**< R3: The number of times a global page directory wasn't flushed. */
3193 STAMCOUNTER StatR3SyncCR3DstSkippedGlobalPT; /**< R3: The number of times a page table with only global entries wasn't flushed. */
3194 STAMCOUNTER StatR3SyncCR3DstCacheHit; /**< R3: The number of times we got some kind of cache hit on a page table. */
3195 STAMPROFILE StatR3SyncPT; /**< R3: PGMSyncPT() profiling. */
3196 STAMCOUNTER StatR3SyncPTFailed; /**< R3: The number of times PGMSyncPT() failed. */
3197 STAMCOUNTER StatR3SyncPT4K; /**< R3: Number of 4KB syncs. */
3198 STAMCOUNTER StatR3SyncPT4M; /**< R3: Number of 4MB syncs. */
3199 STAMCOUNTER StatR3SyncPagePDNAs; /**< R3: The number of time we've marked a PD not present from SyncPage to virtualize the accessed bit. */
3200 STAMCOUNTER StatR3SyncPagePDOutOfSync; /**< R3: The number of time we've encountered an out-of-sync PD in SyncPage. */
3201 STAMCOUNTER StatR3AccessedPage; /**< R3: The number of pages marked not present for accessed bit emulation. */
3202 STAMPROFILE StatR3DirtyBitTracking; /**< R3: Profiling the dirty bit tracking in CheckPageFault(). */
3203 STAMCOUNTER StatR3DirtyPage; /**< R3: The number of pages marked read-only for dirty bit tracking. */
3204 STAMCOUNTER StatR3DirtyPageBig; /**< R3: The number of pages marked read-only for dirty bit tracking. */
3205 STAMCOUNTER StatR3DirtyPageSkipped; /**< R3: The number of pages already dirty or readonly. */
3206 STAMCOUNTER StatR3DirtyPageTrap; /**< R3: The number of traps generated for dirty bit tracking. */
3207 STAMCOUNTER StatR3DirtyTrackRealPF; /**< R3: The number of real pages faults during dirty bit tracking. */
3208 STAMCOUNTER StatR3DirtiedPage; /**< R3: The number of pages marked dirty because of write accesses. */
3209 STAMCOUNTER StatR3PageAlreadyDirty; /**< R3: The number of pages already marked dirty because of write accesses. */
3210 STAMPROFILE StatR3InvalidatePage; /**< R3: PGMInvalidatePage() profiling. */
3211 STAMCOUNTER StatR3InvalidatePage4KBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4KB page. */
3212 STAMCOUNTER StatR3InvalidatePage4MBPages; /**< R3: The number of times PGMInvalidatePage() was called for a 4MB page. */
3213 STAMCOUNTER StatR3InvalidatePage4MBPagesSkip; /**< R3: The number of times PGMInvalidatePage() skipped a 4MB page. */
3214 STAMCOUNTER StatR3InvalidatePagePDNAs; /**< R3: The number of times PGMInvalidatePage() was called for a not accessed page directory. */
3215 STAMCOUNTER StatR3InvalidatePagePDNPs; /**< R3: The number of times PGMInvalidatePage() was called for a not present page directory. */
3216 STAMCOUNTER StatR3InvalidatePagePDMappings; /**< R3: The number of times PGMInvalidatePage() was called for a page directory containing mappings (no conflict). */
3217 STAMCOUNTER StatR3InvalidatePagePDOutOfSync; /**< R3: The number of times PGMInvalidatePage() was called for an out of sync page directory. */
3218 STAMCOUNTER StatR3InvalidatePageSkipped; /**< R3: The number of times PGMInvalidatePage() was skipped due to not present shw or pending pending SyncCR3. */
3219 STAMCOUNTER StatR3PageOutOfSyncUser; /**< R3: The number of times user page is out of sync was detected in \#PF or VerifyAccessSyncPage. */
3220 STAMCOUNTER StatR3PageOutOfSyncSupervisor; /**< R3: The number of times supervisor page is out of sync was detected in in \#PF or VerifyAccessSyncPage. */
3221 STAMCOUNTER StatR3PageOutOfSyncUserWrite; /**< R3: The number of times user page is out of sync was detected in \#PF. */
3222 STAMCOUNTER StatR3PageOutOfSyncSupervisorWrite; /**< R3: The number of times supervisor page is out of sync was detected in in \#PF. */
3223 STAMPROFILE StatR3Prefetch; /**< R3: PGMPrefetchPage. */
3224 STAMPROFILE StatR3FlushTLB; /**< R3: Profiling of the PGMFlushTLB() body. */
3225 STAMCOUNTER StatR3FlushTLBNewCR3; /**< R3: The number of times PGMFlushTLB was called with a new CR3, non-global. (switch) */
3226 STAMCOUNTER StatR3FlushTLBNewCR3Global; /**< R3: The number of times PGMFlushTLB was called with a new CR3, global. (switch) */
3227 STAMCOUNTER StatR3FlushTLBSameCR3; /**< R3: The number of times PGMFlushTLB was called with the same CR3, non-global. (flush) */
3228 STAMCOUNTER StatR3FlushTLBSameCR3Global; /**< R3: The number of times PGMFlushTLB was called with the same CR3, global. (flush) */
3229 STAMPROFILE StatR3GstModifyPage; /**< R3: Profiling of the PGMGstModifyPage() body */
3230 /** @} */
3231#endif /* VBOX_WITH_STATISTICS */
3232} PGMCPU;
3233/** Pointer to the per-cpu PGM data. */
3234typedef PGMCPU *PPGMCPU;
3235
3236
/** @name PGM::fSyncFlags Flags
 * Per-VCPU synchronization request flags examined by PGMSyncCR3 and friends.
 * @{
 */
/** Updates the virtual access handler state bit in PGMPAGE. */
#define PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL RT_BIT(0)
/** Always sync CR3. */
#define PGM_SYNC_ALWAYS RT_BIT(1)
/** Check monitoring on next CR3 (re)load and invalidate page.
 * @todo This is obsolete now. Remove after 2.2.0 is branched off. */
#define PGM_SYNC_MONITOR_CR3 RT_BIT(2)
/** Check guest mapping in SyncCR3. */
#define PGM_SYNC_MAP_CR3 RT_BIT(3)
/** Clear the page pool (a light weight flush). */
#define PGM_SYNC_CLEAR_PGM_POOL_BIT 8
/** Mask form of PGM_SYNC_CLEAR_PGM_POOL_BIT, for testing/setting in fSyncFlags. */
#define PGM_SYNC_CLEAR_PGM_POOL RT_BIT(PGM_SYNC_CLEAR_PGM_POOL_BIT)
/** @} */
3253
3254
RT_C_DECLS_BEGIN

/* The big PGM lock (serializes access to the PGM instance data). */
int pgmLock(PVM pVM);
void pgmUnlock(PVM pVM);

/* Guest mappings and mapping-conflict resolution. */
int pgmR3MappingsFixInternal(PVM pVM, RTGCPTR GCPtrBase, uint32_t cb);
int pgmR3SyncPTResolveConflict(PVM pVM, PPGMMAPPING pMapping, PX86PD pPDSrc, RTGCPTR GCPtrOldMapping);
int pgmR3SyncPTResolveConflictPAE(PVM pVM, PPGMMAPPING pMapping, RTGCPTR GCPtrOldMapping);
PPGMMAPPING pgmGetMapping(PVM pVM, RTGCPTR GCPtr);
int pgmMapResolveConflicts(PVM pVM);
DECLCALLBACK(void) pgmR3MapInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);

/* Physical and virtual access handlers. */
void pgmR3HandlerPhysicalUpdateAll(PVM pVM);
bool pgmHandlerPhysicalIsAll(PVM pVM, RTGCPHYS GCPhys);
void pgmHandlerPhysicalResetAliasedPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage);
int pgmHandlerVirtualFindByPhysAddr(PVM pVM, RTGCPHYS GCPhys, PPGMVIRTHANDLER *ppVirt, unsigned *piPage);
DECLCALLBACK(int) pgmHandlerVirtualResetOne(PAVLROGCPTRNODECORE pNode, void *pvUser);
#if defined(VBOX_STRICT) || defined(LOG_ENABLED)
void pgmHandlerVirtualDumpPhysPages(PVM pVM);
#else
/* No-op in builds without strict checks or logging. */
# define pgmHandlerVirtualDumpPhysPages(a) do { } while (0)
#endif
DECLCALLBACK(void) pgmR3InfoHandlers(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
int pgmR3InitSavedState(PVM pVM, uint64_t cbRam);

/* Physical page management: allocation, TLB loading, mapping and write access. */
int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);
void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage);
int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv);
int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv);
int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv);
int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv);
VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
#ifdef IN_RING3
/* Ring-3 only helpers: RAM/ROM reset, chunk mapping and pool lifecycle. */
void pgmR3PhysRelinkRamRanges(PVM pVM);
int pgmR3PhysRamPreAllocate(PVM pVM);
int pgmR3PhysRamReset(PVM pVM);
int pgmR3PhysRomReset(PVM pVM);
int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk);

int pgmR3PoolInit(PVM pVM);
void pgmR3PoolRelocate(PVM pVM);
void pgmR3PoolResetUnpluggedCpu(PVM pVM, PVMCPU pVCpu);
void pgmR3PoolReset(PVM pVM);
void pgmR3PoolClearAll(PVM pVM);

#endif /* IN_RING3 */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
int pgmR0DynMapHCPageCommon(PVM pVM, PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv);
#endif
/* Page pool allocation with explicit access type (C++ default argument: header is compiled as C++). */
int pgmPoolAllocEx(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false);
3312DECLINLINE(int) pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage, bool fLockPage = false)
3313{
3314 return pgmPoolAllocEx(pVM, GCPhys, enmKind, PGMPOOLACCESS_DONTCARE, iUser, iUserTable, ppPage, fLockPage);
3315}
3316
/* Shadow page pool: freeing, flushing, lookup and dirty-page tracking. */
void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable);
void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable);
int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush = true /* DO NOT USE false UNLESS YOU KNOWN WHAT YOU'RE DOING!! */);
void pgmPoolFlushPageByGCPhys(PVM pVM, RTGCPHYS GCPhys);
PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys);
int pgmPoolSyncCR3(PVMCPU pVCpu);
bool pgmPoolIsDirtyPage(PVM pVM, RTGCPHYS GCPhys);
int pgmPoolTrackUpdateGCPhys(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs);
void pgmPoolInvalidateDirtyPage(PVM pVM, RTGCPHYS GCPhysPT);
3326DECLINLINE(int) pgmPoolTrackFlushGCPhys(PVM pVM, PPGMPAGE pPhysPage, bool *pfFlushTLBs)
3327{
3328 return pgmPoolTrackUpdateGCPhys(pVM, pPhysPage, true /* flush PTEs */, pfFlushTLBs);
3329}
3330
/* Shadow page pool reference tracking and write monitoring. */
uint16_t pgmPoolTrackPhysExtAddref(PVM pVM, uint16_t u16, uint16_t iShwPT);
void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPoolPage, PPGMPAGE pPhysPage);
/* NOTE(review): 'Trac' looks like a typo for 'Track' - name kept as it is referenced elsewhere. */
void pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint);
void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, unsigned cbWrite);
int pgmPoolMonitorChainFlush(PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void pgmPoolMonitorModifiedInsert(PPGMPOOL pPool, PPGMPOOLPAGE pPage);

/* Dirty page tracking in the pool. */
void pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage);
void pgmPoolResetDirtyPages(PVM pVM);

/* Shadow mode transitions around a pool flush (ring-3). */
int pgmR3ExitShadowModeBeforePoolFlush(PVM pVM, PVMCPU pVCpu);
int pgmR3ReEnterShadowModeAfterPoolFlush(PVM pVM, PVMCPU pVCpu);

/* Shadow PDE handling for guest mappings and CR3 activation. */
void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3);
int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);

/* Shadow paging structure synchronization (PAE / long mode / EPT). */
int pgmShwSyncPaePDPtr(PVMCPU pVCpu, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
#ifndef IN_RC
int pgmShwSyncLongModePDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PX86PML4E pGstPml4e, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
#endif
int pgmShwGetEPTPDPtr(PVMCPU pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);

/* Lazy mapping of guest paging structures (per-CPU PGM data). */
PX86PD pgmGstLazyMap32BitPD(PPGMCPU pPGM);
PX86PDPT pgmGstLazyMapPaePDPT(PPGMCPU pPGM);
PX86PDPAE pgmGstLazyMapPaePD(PPGMCPU pPGM, uint32_t iPdpt);
PX86PML4 pgmGstLazyMapPml4(PPGMCPU pPGM);

RT_C_DECLS_END
3361
3362/** @} */
3363
3364#endif
3365
3366
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette