VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

Last change on this file was 109100, checked in by vboxsync, 12 days ago

VMM/PGMAll.cpp: Fix off by one error, bugref:10388

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 163.2 KB
1/* $Id: PGMAll.cpp 109100 2025-04-29 08:14:31Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor - All context code.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.215389.xyz.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_PGM
33#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
34#ifdef IN_RING0
35# define VBOX_VMM_TARGET_X86
36#endif
37#include <VBox/vmm/pgm.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/selm.h>
40#include <VBox/vmm/iem.h>
41#include <VBox/vmm/iom.h>
42#include <VBox/sup.h>
43#include <VBox/vmm/mm.h>
44#include <VBox/vmm/stam.h>
45#include <VBox/vmm/trpm.h>
46#include <VBox/vmm/em.h>
47#include <VBox/vmm/hm.h>
48#include <VBox/vmm/hm_vmx.h>
49#include "PGMInternal.h"
50#include <VBox/vmm/vmcc.h>
51#include "PGMInline.h"
52#include <iprt/assert.h>
53#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
54# include <iprt/asm-amd64-x86.h>
55#endif
56#include <iprt/string.h>
57#include <VBox/log.h>
58#include <VBox/param.h>
59#include <VBox/err.h>
60
61#if defined(VBOX_VMM_TARGET_ARMV8)
62# include <iprt/armv8.h>
63#endif
64
65
66/*********************************************************************************************************************************
67* Internal Functions *
68*********************************************************************************************************************************/
69#ifdef VBOX_VMM_TARGET_X86
70DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
71# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
72DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
73# endif
74DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3);
75# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
76static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested, PPGMPTWALK pWalk,
77 PPGMPTWALKGST pGstWalk);
78static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3);
79static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
80 PPGMPTWALKGST pGstWalkAll);
81# endif
82static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD);
83static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD);
84# ifdef PGM_WITH_PAGE_ZEROING_DETECTION
85static bool pgmHandlePageZeroingCode(PVMCPUCC pVCpu, PCPUMCTX pCtx);
86# endif
87#endif /* VBOX_VMM_TARGET_X86 */
88
89
90#ifdef VBOX_VMM_TARGET_X86
91
92/*
93 * Second level translation - EPT.
94 */
95# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
96# define PGM_SLAT_TYPE PGM_SLAT_TYPE_EPT
97# include "PGMSlatDefs.h"
98# include "PGMAllGstSlatEpt.cpp.h"
99# undef PGM_SLAT_TYPE
100# endif
101
102
103/*
104 * Shadow - 32-bit mode
105 */
106# define PGM_SHW_TYPE PGM_TYPE_32BIT
107# define PGM_SHW_NAME(name) PGM_SHW_NAME_32BIT(name)
108# include "PGMAllShw-x86.cpp.h"
109
110/* Guest - real mode */
111# define PGM_GST_TYPE PGM_TYPE_REAL
112# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
113# define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_REAL(name)
114# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
115# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
116# include "PGMGstDefs-x86.h"
117# include "PGMAllGst-x86.cpp.h"
118# include "PGMAllBth-x86.cpp.h"
119# undef BTH_PGMPOOLKIND_PT_FOR_PT
120# undef BTH_PGMPOOLKIND_ROOT
121# undef PGM_BTH_NAME
122# undef PGM_GST_TYPE
123# undef PGM_GST_NAME
124
125/* Guest - protected mode */
126# define PGM_GST_TYPE PGM_TYPE_PROT
127# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
128# define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_PROT(name)
129# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_PHYS
130# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD_PHYS
131# include "PGMGstDefs-x86.h"
132# include "PGMAllGst-x86.cpp.h"
133# include "PGMAllBth-x86.cpp.h"
134# undef BTH_PGMPOOLKIND_PT_FOR_PT
135# undef BTH_PGMPOOLKIND_ROOT
136# undef PGM_BTH_NAME
137# undef PGM_GST_TYPE
138# undef PGM_GST_NAME
139
140/* Guest - 32-bit mode */
141# define PGM_GST_TYPE PGM_TYPE_32BIT
142# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
143# define PGM_BTH_NAME(name) PGM_BTH_NAME_32BIT_32BIT(name)
144# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT
145# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB
146# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_32BIT_PD
147# include "PGMGstDefs-x86.h"
148# include "PGMAllGst-x86.cpp.h"
149# include "PGMAllBth-x86.cpp.h"
150# undef BTH_PGMPOOLKIND_PT_FOR_BIG
151# undef BTH_PGMPOOLKIND_PT_FOR_PT
152# undef BTH_PGMPOOLKIND_ROOT
153# undef PGM_BTH_NAME
154# undef PGM_GST_TYPE
155# undef PGM_GST_NAME
156
157# undef PGM_SHW_TYPE
158# undef PGM_SHW_NAME
159
160
161/*
162 * Shadow - PAE mode
163 */
164# define PGM_SHW_TYPE PGM_TYPE_PAE
165# define PGM_SHW_NAME(name) PGM_SHW_NAME_PAE(name)
166# define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
167# include "PGMAllShw-x86.cpp.h"
168
169/* Guest - real mode */
170# define PGM_GST_TYPE PGM_TYPE_REAL
171# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
172# define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_REAL(name)
173# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
174# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
175# include "PGMGstDefs-x86.h"
176# include "PGMAllBth-x86.cpp.h"
177# undef BTH_PGMPOOLKIND_PT_FOR_PT
178# undef BTH_PGMPOOLKIND_ROOT
179# undef PGM_BTH_NAME
180# undef PGM_GST_TYPE
181# undef PGM_GST_NAME
182
183/* Guest - protected mode */
184# define PGM_GST_TYPE PGM_TYPE_PROT
185# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
186# define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PROT(name)
187# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
188# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_PHYS
189# include "PGMGstDefs-x86.h"
190# include "PGMAllBth-x86.cpp.h"
191# undef BTH_PGMPOOLKIND_PT_FOR_PT
192# undef BTH_PGMPOOLKIND_ROOT
193# undef PGM_BTH_NAME
194# undef PGM_GST_TYPE
195# undef PGM_GST_NAME
196
197/* Guest - 32-bit mode */
198# define PGM_GST_TYPE PGM_TYPE_32BIT
199# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
200# define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_32BIT(name)
201# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_32BIT_PT
202# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB
203# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT_FOR_32BIT
204# include "PGMGstDefs-x86.h"
205# include "PGMAllBth-x86.cpp.h"
206# undef BTH_PGMPOOLKIND_PT_FOR_BIG
207# undef BTH_PGMPOOLKIND_PT_FOR_PT
208# undef BTH_PGMPOOLKIND_ROOT
209# undef PGM_BTH_NAME
210# undef PGM_GST_TYPE
211# undef PGM_GST_NAME
212
213
214/* Guest - PAE mode */
215# define PGM_GST_TYPE PGM_TYPE_PAE
216# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
217# define PGM_BTH_NAME(name) PGM_BTH_NAME_PAE_PAE(name)
218# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
219# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
220# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PDPT
221# include "PGMGstDefs-x86.h"
222# include "PGMAllGst-x86.cpp.h"
223# include "PGMAllBth-x86.cpp.h"
224# undef BTH_PGMPOOLKIND_PT_FOR_BIG
225# undef BTH_PGMPOOLKIND_PT_FOR_PT
226# undef BTH_PGMPOOLKIND_ROOT
227# undef PGM_BTH_NAME
228# undef PGM_GST_TYPE
229# undef PGM_GST_NAME
230
231# undef PGM_SHW_TYPE
232# undef PGM_SHW_NAME
233
234
235/*
236 * Shadow - AMD64 mode
237 */
238# define PGM_SHW_TYPE PGM_TYPE_AMD64
239# define PGM_SHW_NAME(name) PGM_SHW_NAME_AMD64(name)
240# include "PGMAllShw-x86.cpp.h"
241
242/* Guest - protected mode (only used for AMD-V nested paging in 64-bit mode) */
243/** @todo retire this hack. */
244# define PGM_GST_TYPE PGM_TYPE_PROT
245# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
246# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_PROT(name)
247# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PHYS
248# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_PAE_PD_PHYS
249# include "PGMGstDefs-x86.h"
250# include "PGMAllBth-x86.cpp.h"
251# undef BTH_PGMPOOLKIND_PT_FOR_PT
252# undef BTH_PGMPOOLKIND_ROOT
253# undef PGM_BTH_NAME
254# undef PGM_GST_TYPE
255# undef PGM_GST_NAME
256
257# ifdef VBOX_WITH_64_BITS_GUESTS
258/* Guest - AMD64 mode */
259# define PGM_GST_TYPE PGM_TYPE_AMD64
260# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
261# define PGM_BTH_NAME(name) PGM_BTH_NAME_AMD64_AMD64(name)
262# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_PAE_PT_FOR_PAE_PT
263# define BTH_PGMPOOLKIND_PT_FOR_BIG PGMPOOLKIND_PAE_PT_FOR_PAE_2MB
264# define BTH_PGMPOOLKIND_ROOT PGMPOOLKIND_64BIT_PML4
265# include "PGMGstDefs-x86.h"
266# include "PGMAllGst-x86.cpp.h"
267# include "PGMAllBth-x86.cpp.h"
268# undef BTH_PGMPOOLKIND_PT_FOR_BIG
269# undef BTH_PGMPOOLKIND_PT_FOR_PT
270# undef BTH_PGMPOOLKIND_ROOT
271# undef PGM_BTH_NAME
272# undef PGM_GST_TYPE
273# undef PGM_GST_NAME
274# endif /* VBOX_WITH_64_BITS_GUESTS */
275
276# undef PGM_SHW_TYPE
277# undef PGM_SHW_NAME
278
279
280/*
281 * Shadow - 32-bit nested paging mode.
282 */
283# define PGM_SHW_TYPE PGM_TYPE_NESTED_32BIT
284# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_32BIT(name)
285# include "PGMAllShw-x86.cpp.h"
286
287/* Guest - real mode */
288# define PGM_GST_TYPE PGM_TYPE_REAL
289# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
290# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_REAL(name)
291# include "PGMGstDefs-x86.h"
292# include "PGMAllBth-x86.cpp.h"
293# undef PGM_BTH_NAME
294# undef PGM_GST_TYPE
295# undef PGM_GST_NAME
296
297/* Guest - protected mode */
298# define PGM_GST_TYPE PGM_TYPE_PROT
299# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
300# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PROT(name)
301# include "PGMGstDefs-x86.h"
302# include "PGMAllBth-x86.cpp.h"
303# undef PGM_BTH_NAME
304# undef PGM_GST_TYPE
305# undef PGM_GST_NAME
306
307/* Guest - 32-bit mode */
308# define PGM_GST_TYPE PGM_TYPE_32BIT
309# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
310# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_32BIT(name)
311# include "PGMGstDefs-x86.h"
312# include "PGMAllBth-x86.cpp.h"
313# undef PGM_BTH_NAME
314# undef PGM_GST_TYPE
315# undef PGM_GST_NAME
316
317/* Guest - PAE mode */
318# define PGM_GST_TYPE PGM_TYPE_PAE
319# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
320# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_PAE(name)
321# include "PGMGstDefs-x86.h"
322# include "PGMAllBth-x86.cpp.h"
323# undef PGM_BTH_NAME
324# undef PGM_GST_TYPE
325# undef PGM_GST_NAME
326
327# ifdef VBOX_WITH_64_BITS_GUESTS
328/* Guest - AMD64 mode */
329# define PGM_GST_TYPE PGM_TYPE_AMD64
330# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
331# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_32BIT_AMD64(name)
332# include "PGMGstDefs-x86.h"
333# include "PGMAllBth-x86.cpp.h"
334# undef PGM_BTH_NAME
335# undef PGM_GST_TYPE
336# undef PGM_GST_NAME
337# endif /* VBOX_WITH_64_BITS_GUESTS */
338
339# undef PGM_SHW_TYPE
340# undef PGM_SHW_NAME
341
342
343/*
344 * Shadow - PAE nested paging mode.
345 */
346# define PGM_SHW_TYPE PGM_TYPE_NESTED_PAE
347# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_PAE(name)
348# include "PGMAllShw-x86.cpp.h"
349
350/* Guest - real mode */
351# define PGM_GST_TYPE PGM_TYPE_REAL
352# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
353# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_REAL(name)
354# include "PGMGstDefs-x86.h"
355# include "PGMAllBth-x86.cpp.h"
356# undef PGM_BTH_NAME
357# undef PGM_GST_TYPE
358# undef PGM_GST_NAME
359
360/* Guest - protected mode */
361# define PGM_GST_TYPE PGM_TYPE_PROT
362# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
363# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PROT(name)
364# include "PGMGstDefs-x86.h"
365# include "PGMAllBth-x86.cpp.h"
366# undef PGM_BTH_NAME
367# undef PGM_GST_TYPE
368# undef PGM_GST_NAME
369
370/* Guest - 32-bit mode */
371# define PGM_GST_TYPE PGM_TYPE_32BIT
372# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
373# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_32BIT(name)
374# include "PGMGstDefs-x86.h"
375# include "PGMAllBth-x86.cpp.h"
376# undef PGM_BTH_NAME
377# undef PGM_GST_TYPE
378# undef PGM_GST_NAME
379
380/* Guest - PAE mode */
381# define PGM_GST_TYPE PGM_TYPE_PAE
382# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
383# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_PAE(name)
384# include "PGMGstDefs-x86.h"
385# include "PGMAllBth-x86.cpp.h"
386# undef PGM_BTH_NAME
387# undef PGM_GST_TYPE
388# undef PGM_GST_NAME
389
390# ifdef VBOX_WITH_64_BITS_GUESTS
391/* Guest - AMD64 mode */
392# define PGM_GST_TYPE PGM_TYPE_AMD64
393# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
394# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_PAE_AMD64(name)
395# include "PGMGstDefs-x86.h"
396# include "PGMAllBth-x86.cpp.h"
397# undef PGM_BTH_NAME
398# undef PGM_GST_TYPE
399# undef PGM_GST_NAME
400# endif /* VBOX_WITH_64_BITS_GUESTS */
401
402# undef PGM_SHW_TYPE
403# undef PGM_SHW_NAME
404
405
406/*
407 * Shadow - AMD64 nested paging mode.
408 */
409# define PGM_SHW_TYPE PGM_TYPE_NESTED_AMD64
410# define PGM_SHW_NAME(name) PGM_SHW_NAME_NESTED_AMD64(name)
411# include "PGMAllShw-x86.cpp.h"
412
413/* Guest - real mode */
414# define PGM_GST_TYPE PGM_TYPE_REAL
415# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
416# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_REAL(name)
417# include "PGMGstDefs-x86.h"
418# include "PGMAllBth-x86.cpp.h"
419# undef PGM_BTH_NAME
420# undef PGM_GST_TYPE
421# undef PGM_GST_NAME
422
423/* Guest - protected mode */
424# define PGM_GST_TYPE PGM_TYPE_PROT
425# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
426# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PROT(name)
427# include "PGMGstDefs-x86.h"
428# include "PGMAllBth-x86.cpp.h"
429# undef PGM_BTH_NAME
430# undef PGM_GST_TYPE
431# undef PGM_GST_NAME
432
433/* Guest - 32-bit mode */
434# define PGM_GST_TYPE PGM_TYPE_32BIT
435# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
436# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_32BIT(name)
437# include "PGMGstDefs-x86.h"
438# include "PGMAllBth-x86.cpp.h"
439# undef PGM_BTH_NAME
440# undef PGM_GST_TYPE
441# undef PGM_GST_NAME
442
443/* Guest - PAE mode */
444# define PGM_GST_TYPE PGM_TYPE_PAE
445# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
446# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_PAE(name)
447# include "PGMGstDefs-x86.h"
448# include "PGMAllBth-x86.cpp.h"
449# undef PGM_BTH_NAME
450# undef PGM_GST_TYPE
451# undef PGM_GST_NAME
452
453# ifdef VBOX_WITH_64_BITS_GUESTS
454/* Guest - AMD64 mode */
455# define PGM_GST_TYPE PGM_TYPE_AMD64
456# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
457# define PGM_BTH_NAME(name) PGM_BTH_NAME_NESTED_AMD64_AMD64(name)
458# include "PGMGstDefs-x86.h"
459# include "PGMAllBth-x86.cpp.h"
460# undef PGM_BTH_NAME
461# undef PGM_GST_TYPE
462# undef PGM_GST_NAME
463# endif /* VBOX_WITH_64_BITS_GUESTS */
464
465# undef PGM_SHW_TYPE
466# undef PGM_SHW_NAME
467
468
469/*
470 * Shadow - EPT.
471 */
472# define PGM_SHW_TYPE PGM_TYPE_EPT
473# define PGM_SHW_NAME(name) PGM_SHW_NAME_EPT(name)
474# include "PGMAllShw-x86.cpp.h"
475
476/* Guest - real mode */
477# define PGM_GST_TYPE PGM_TYPE_REAL
478# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
479# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_REAL(name)
480# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
481# include "PGMGstDefs-x86.h"
482# include "PGMAllBth-x86.cpp.h"
483# undef BTH_PGMPOOLKIND_PT_FOR_PT
484# undef PGM_BTH_NAME
485# undef PGM_GST_TYPE
486# undef PGM_GST_NAME
487
488/* Guest - protected mode */
489# define PGM_GST_TYPE PGM_TYPE_PROT
490# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
491# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PROT(name)
492# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
493# include "PGMGstDefs-x86.h"
494# include "PGMAllBth-x86.cpp.h"
495# undef BTH_PGMPOOLKIND_PT_FOR_PT
496# undef PGM_BTH_NAME
497# undef PGM_GST_TYPE
498# undef PGM_GST_NAME
499
500/* Guest - 32-bit mode */
501# define PGM_GST_TYPE PGM_TYPE_32BIT
502# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
503# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_32BIT(name)
504# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
505# include "PGMGstDefs-x86.h"
506# include "PGMAllBth-x86.cpp.h"
507# undef BTH_PGMPOOLKIND_PT_FOR_PT
508# undef PGM_BTH_NAME
509# undef PGM_GST_TYPE
510# undef PGM_GST_NAME
511
512/* Guest - PAE mode */
513# define PGM_GST_TYPE PGM_TYPE_PAE
514# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
515# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_PAE(name)
516# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
517# include "PGMGstDefs-x86.h"
518# include "PGMAllBth-x86.cpp.h"
519# undef BTH_PGMPOOLKIND_PT_FOR_PT
520# undef PGM_BTH_NAME
521# undef PGM_GST_TYPE
522# undef PGM_GST_NAME
523
524# ifdef VBOX_WITH_64_BITS_GUESTS
525/* Guest - AMD64 mode */
526# define PGM_GST_TYPE PGM_TYPE_AMD64
527# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
528# define PGM_BTH_NAME(name) PGM_BTH_NAME_EPT_AMD64(name)
529# define BTH_PGMPOOLKIND_PT_FOR_PT PGMPOOLKIND_EPT_PT_FOR_PHYS
530# include "PGMGstDefs-x86.h"
531# include "PGMAllBth-x86.cpp.h"
532# undef BTH_PGMPOOLKIND_PT_FOR_PT
533# undef PGM_BTH_NAME
534# undef PGM_GST_TYPE
535# undef PGM_GST_NAME
536# endif /* VBOX_WITH_64_BITS_GUESTS */
537
538# undef PGM_SHW_TYPE
539# undef PGM_SHW_NAME
540
541
542/*
543 * Shadow - NEM / None.
544 */
545# define PGM_SHW_TYPE PGM_TYPE_NONE
546# define PGM_SHW_NAME(name) PGM_SHW_NAME_NONE(name)
547# include "PGMAllShw-x86.cpp.h"
548
549/* Guest - real mode */
550# define PGM_GST_TYPE PGM_TYPE_REAL
551# define PGM_GST_NAME(name) PGM_GST_NAME_REAL(name)
552# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_REAL(name)
553# include "PGMGstDefs-x86.h"
554# include "PGMAllBth-x86.cpp.h"
555# undef PGM_BTH_NAME
556# undef PGM_GST_TYPE
557# undef PGM_GST_NAME
558
559/* Guest - protected mode */
560# define PGM_GST_TYPE PGM_TYPE_PROT
561# define PGM_GST_NAME(name) PGM_GST_NAME_PROT(name)
562# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PROT(name)
563# include "PGMGstDefs-x86.h"
564# include "PGMAllBth-x86.cpp.h"
565# undef PGM_BTH_NAME
566# undef PGM_GST_TYPE
567# undef PGM_GST_NAME
568
569/* Guest - 32-bit mode */
570# define PGM_GST_TYPE PGM_TYPE_32BIT
571# define PGM_GST_NAME(name) PGM_GST_NAME_32BIT(name)
572# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_32BIT(name)
573# include "PGMGstDefs-x86.h"
574# include "PGMAllBth-x86.cpp.h"
575# undef PGM_BTH_NAME
576# undef PGM_GST_TYPE
577# undef PGM_GST_NAME
578
579/* Guest - PAE mode */
580# define PGM_GST_TYPE PGM_TYPE_PAE
581# define PGM_GST_NAME(name) PGM_GST_NAME_PAE(name)
582# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_PAE(name)
583# include "PGMGstDefs-x86.h"
584# include "PGMAllBth-x86.cpp.h"
585# undef PGM_BTH_NAME
586# undef PGM_GST_TYPE
587# undef PGM_GST_NAME
588
589# ifdef VBOX_WITH_64_BITS_GUESTS
590/* Guest - AMD64 mode */
591# define PGM_GST_TYPE PGM_TYPE_AMD64
592# define PGM_GST_NAME(name) PGM_GST_NAME_AMD64(name)
593# define PGM_BTH_NAME(name) PGM_BTH_NAME_NONE_AMD64(name)
594# include "PGMGstDefs-x86.h"
595# include "PGMAllBth-x86.cpp.h"
596# undef PGM_BTH_NAME
597# undef PGM_GST_TYPE
598# undef PGM_GST_NAME
599# endif /* VBOX_WITH_64_BITS_GUESTS */
600
601# undef PGM_SHW_TYPE
602# undef PGM_SHW_NAME
603
604
605
606/**
607 * Guest mode data array.
608 */
609PGMMODEDATAGST const g_aPgmGuestModeData[PGM_GUEST_MODE_DATA_ARRAY_SIZE] =
610{
611 { UINT32_MAX, NULL, NULL, NULL, NULL }, /* 0 */
612 {
613 PGM_TYPE_REAL,
614 PGM_GST_NAME_REAL(GetPage),
615 PGM_GST_NAME_REAL(QueryPageFast),
616 PGM_GST_NAME_REAL(ModifyPage),
617 PGM_GST_NAME_REAL(Enter),
618 PGM_GST_NAME_REAL(Exit),
619# ifdef IN_RING3
620 PGM_GST_NAME_REAL(Relocate),
621# endif
622 },
623 {
624 PGM_TYPE_PROT,
625 PGM_GST_NAME_PROT(GetPage),
626 PGM_GST_NAME_PROT(QueryPageFast),
627 PGM_GST_NAME_PROT(ModifyPage),
628 PGM_GST_NAME_PROT(Enter),
629 PGM_GST_NAME_PROT(Exit),
630# ifdef IN_RING3
631 PGM_GST_NAME_PROT(Relocate),
632# endif
633 },
634 {
635 PGM_TYPE_32BIT,
636 PGM_GST_NAME_32BIT(GetPage),
637 PGM_GST_NAME_32BIT(QueryPageFast),
638 PGM_GST_NAME_32BIT(ModifyPage),
639 PGM_GST_NAME_32BIT(Enter),
640 PGM_GST_NAME_32BIT(Exit),
641# ifdef IN_RING3
642 PGM_GST_NAME_32BIT(Relocate),
643# endif
644 },
645 {
646 PGM_TYPE_PAE,
647 PGM_GST_NAME_PAE(GetPage),
648 PGM_GST_NAME_PAE(QueryPageFast),
649 PGM_GST_NAME_PAE(ModifyPage),
650 PGM_GST_NAME_PAE(Enter),
651 PGM_GST_NAME_PAE(Exit),
652# ifdef IN_RING3
653 PGM_GST_NAME_PAE(Relocate),
654# endif
655 },
656# ifdef VBOX_WITH_64_BITS_GUESTS
657 {
658 PGM_TYPE_AMD64,
659 PGM_GST_NAME_AMD64(GetPage),
660 PGM_GST_NAME_AMD64(QueryPageFast),
661 PGM_GST_NAME_AMD64(ModifyPage),
662 PGM_GST_NAME_AMD64(Enter),
663 PGM_GST_NAME_AMD64(Exit),
664# ifdef IN_RING3
665 PGM_GST_NAME_AMD64(Relocate),
666# endif
667 },
668# endif
669};
670
671
672/**
673 * The shadow mode data array.
674 */
675PGMMODEDATASHW const g_aPgmShadowModeData[PGM_SHADOW_MODE_DATA_ARRAY_SIZE] =
676{
677 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* 0 */
678 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_REAL */
679 { UINT8_MAX, NULL, NULL, NULL, NULL }, /* PGM_TYPE_PROT */
680 {
681 PGM_TYPE_32BIT,
682 PGM_SHW_NAME_32BIT(GetPage),
683 PGM_SHW_NAME_32BIT(ModifyPage),
684 PGM_SHW_NAME_32BIT(Enter),
685 PGM_SHW_NAME_32BIT(Exit),
686# ifdef IN_RING3
687 PGM_SHW_NAME_32BIT(Relocate),
688# endif
689 },
690 {
691 PGM_TYPE_PAE,
692 PGM_SHW_NAME_PAE(GetPage),
693 PGM_SHW_NAME_PAE(ModifyPage),
694 PGM_SHW_NAME_PAE(Enter),
695 PGM_SHW_NAME_PAE(Exit),
696# ifdef IN_RING3
697 PGM_SHW_NAME_PAE(Relocate),
698# endif
699 },
700 {
701 PGM_TYPE_AMD64,
702 PGM_SHW_NAME_AMD64(GetPage),
703 PGM_SHW_NAME_AMD64(ModifyPage),
704 PGM_SHW_NAME_AMD64(Enter),
705 PGM_SHW_NAME_AMD64(Exit),
706# ifdef IN_RING3
707 PGM_SHW_NAME_AMD64(Relocate),
708# endif
709 },
710 {
711 PGM_TYPE_NESTED_32BIT,
712 PGM_SHW_NAME_NESTED_32BIT(GetPage),
713 PGM_SHW_NAME_NESTED_32BIT(ModifyPage),
714 PGM_SHW_NAME_NESTED_32BIT(Enter),
715 PGM_SHW_NAME_NESTED_32BIT(Exit),
716# ifdef IN_RING3
717 PGM_SHW_NAME_NESTED_32BIT(Relocate),
718# endif
719 },
720 {
721 PGM_TYPE_NESTED_PAE,
722 PGM_SHW_NAME_NESTED_PAE(GetPage),
723 PGM_SHW_NAME_NESTED_PAE(ModifyPage),
724 PGM_SHW_NAME_NESTED_PAE(Enter),
725 PGM_SHW_NAME_NESTED_PAE(Exit),
726# ifdef IN_RING3
727 PGM_SHW_NAME_NESTED_PAE(Relocate),
728# endif
729 },
730 {
731 PGM_TYPE_NESTED_AMD64,
732 PGM_SHW_NAME_NESTED_AMD64(GetPage),
733 PGM_SHW_NAME_NESTED_AMD64(ModifyPage),
734 PGM_SHW_NAME_NESTED_AMD64(Enter),
735 PGM_SHW_NAME_NESTED_AMD64(Exit),
736# ifdef IN_RING3
737 PGM_SHW_NAME_NESTED_AMD64(Relocate),
738# endif
739 },
740 {
741 PGM_TYPE_EPT,
742 PGM_SHW_NAME_EPT(GetPage),
743 PGM_SHW_NAME_EPT(ModifyPage),
744 PGM_SHW_NAME_EPT(Enter),
745 PGM_SHW_NAME_EPT(Exit),
746# ifdef IN_RING3
747 PGM_SHW_NAME_EPT(Relocate),
748# endif
749 },
750 {
751 PGM_TYPE_NONE,
752 PGM_SHW_NAME_NONE(GetPage),
753 PGM_SHW_NAME_NONE(ModifyPage),
754 PGM_SHW_NAME_NONE(Enter),
755 PGM_SHW_NAME_NONE(Exit),
756# ifdef IN_RING3
757 PGM_SHW_NAME_NONE(Relocate),
758# endif
759 },
760};
761
762
763/**
764 * The guest+shadow mode data array.
765 */
766PGMMODEDATABTH const g_aPgmBothModeData[PGM_BOTH_MODE_DATA_ARRAY_SIZE] =
767{
768# if !defined(IN_RING3) && !defined(VBOX_STRICT)
769# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
770# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
771 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) }
772
773# elif !defined(IN_RING3) && defined(VBOX_STRICT)
774# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
775# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
776 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) }
777
778# elif defined(IN_RING3) && !defined(VBOX_STRICT)
779# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
780# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
781 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
782
783# elif defined(IN_RING3) && defined(VBOX_STRICT)
784# define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
785# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
786 { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
787
788# else
789# error "Misconfig."
790# endif
791
792 /* 32-bit shadow paging mode: */
793 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
794 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_32BIT_REAL),
795 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_32BIT_PROT),
796 PGMMODEDATABTH_ENTRY(PGM_TYPE_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_32BIT_32BIT),
797 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_PAE - illegal */
798 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_AMD64 - illegal */
799 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
800 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_PAE - illegal */
801 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
802 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_EPT - illegal */
803 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_32BIT, PGM_TYPE_NONE - illegal */
804
805 /* PAE shadow paging mode: */
806 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
807 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_PAE_REAL),
808 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_PAE_PROT),
809 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_PAE_32BIT),
810 PGMMODEDATABTH_ENTRY(PGM_TYPE_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_PAE_PAE),
811 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_AMD64 - illegal */
812 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_32BIT - illegal */
813 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_PAE - illegal */
814 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
815 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_EPT - illegal */
816 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_PAE, PGM_TYPE_NONE - illegal */
817
818 /* AMD64 shadow paging mode: */
819 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
820 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_AMD64_REAL),
821 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_AMD64_PROT),
822 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_AMD64_32BIT),
823 PGMMODEDATABTH_NULL_ENTRY(), //PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_AMD64_PAE),
824# ifdef VBOX_WITH_64_BITS_GUESTS
825 PGMMODEDATABTH_ENTRY(PGM_TYPE_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_AMD64_AMD64),
826# else
827 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_AMD64 - illegal */
828# endif
829 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
830 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_PAE - illegal */
831 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
832 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_EPT - illegal */
833 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_AMD64, PGM_TYPE_NONE - illegal */
834
835 /* 32-bit nested paging mode: */
836 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
837 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_32BIT_REAL),
838 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_32BIT_PROT),
839 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_32BIT_32BIT),
840 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_32BIT_PAE),
841# ifdef VBOX_WITH_64_BITS_GUESTS
842 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_32BIT_AMD64),
843# else
844 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_AMD64 - illegal */
845# endif
846 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_32BIT - illegal */
847 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_PAE - illegal */
848 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NESTED_AMD64 - illegal */
849 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_EPT - illegal */
850 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_32BIT, PGM_TYPE_NONE - illegal */
851
852 /* PAE nested paging mode: */
853 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
854 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_PAE_REAL),
855 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_PAE_PROT),
856 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_PAE_32BIT),
857 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_PAE_PAE),
858# ifdef VBOX_WITH_64_BITS_GUESTS
859 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_PAE_AMD64),
860# else
861 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_AMD64 - illegal */
862# endif
863 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_32BIT - illegal */
864 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_PAE - illegal */
865 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NESTED_AMD64 - illegal */
866 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_EPT - illegal */
867 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_PAE, PGM_TYPE_NONE - illegal */
868
869 /* AMD64 nested paging mode: */
870 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
871 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_REAL, PGM_BTH_NAME_NESTED_AMD64_REAL),
872 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PROT, PGM_BTH_NAME_NESTED_AMD64_PROT),
873 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_32BIT, PGM_BTH_NAME_NESTED_AMD64_32BIT),
874 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_PAE, PGM_BTH_NAME_NESTED_AMD64_PAE),
875# ifdef VBOX_WITH_64_BITS_GUESTS
876 PGMMODEDATABTH_ENTRY(PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64, PGM_BTH_NAME_NESTED_AMD64_AMD64),
877# else
878 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_AMD64 - illegal */
879# endif
880 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_32BIT - illegal */
881 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_PAE - illegal */
882 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NESTED_AMD64 - illegal */
883 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_EPT - illegal */
884 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NESTED_AMD64, PGM_TYPE_NONE - illegal */
885
886 /* EPT nested paging mode: */
887 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
888 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
889 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
890 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
891 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
892# ifdef VBOX_WITH_64_BITS_GUESTS
893 PGMMODEDATABTH_ENTRY(PGM_TYPE_EPT, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
894# else
895 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_AMD64 - illegal */
896# endif
897 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_32BIT - illegal */
898 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_PAE - illegal */
899 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NESTED_AMD64 - illegal */
900 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_EPT - illegal */
901 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_EPT, PGM_TYPE_NONE - illegal */
902
903 /* NONE / NEM: */
904 PGMMODEDATABTH_NULL_ENTRY(), /* 0 */
905 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_REAL, PGM_BTH_NAME_EPT_REAL),
906 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PROT, PGM_BTH_NAME_EPT_PROT),
907 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_32BIT, PGM_BTH_NAME_EPT_32BIT),
908 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_PAE, PGM_BTH_NAME_EPT_PAE),
909# ifdef VBOX_WITH_64_BITS_GUESTS
910 PGMMODEDATABTH_ENTRY(PGM_TYPE_NONE, PGM_TYPE_AMD64, PGM_BTH_NAME_EPT_AMD64),
911# else
912 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_AMD64 - illegal */
913# endif
914 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_32BIT - illegal */
915 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_PAE - illegal */
916 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NESTED_AMD64 - illegal */
917 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_EPT - illegal */
918 PGMMODEDATABTH_NULL_ENTRY(), /* PGM_TYPE_NONE, PGM_TYPE_NONE - illegal */
919
920
921# undef PGMMODEDATABTH_ENTRY
922# undef PGMMODEDATABTH_NULL_ENTRY
923};
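/*
 * Usage sketch: the three mode data tables above are not searched at runtime.
 * The indexes kept in the per-VCPU PGM state (idxBothModeData, idxShadowModeData)
 * are assumed to be set up when the paging mode changes; the API wrappers further
 * down in this file simply index the table and call through the function pointer.
 * A minimal sketch of that dispatch pattern (mirroring PGMPrefetchPage below):
 *
 * @code
 *    uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
 *    AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
 *    AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
 *    int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
 * @endcode
 */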
924
925
926/** Mask array used by pgmGetCr3MaskForMode.
927 * X86_CR3_AMD64_PAGE_MASK is used for modes that don't have a CR3 or EPTP. */
928static uint64_t const g_auCr3MaskForMode[PGMMODE_MAX] =
929{
930 /* [PGMMODE_INVALID] = */ X86_CR3_AMD64_PAGE_MASK,
931 /* [PGMMODE_REAL] = */ X86_CR3_AMD64_PAGE_MASK,
932 /* [PGMMODE_PROTECTED] = */ X86_CR3_AMD64_PAGE_MASK,
933 /* [PGMMODE_32_BIT] = */ X86_CR3_PAGE_MASK,
934 /* [PGMMODE_PAE] = */ X86_CR3_PAE_PAGE_MASK,
935 /* [PGMMODE_PAE_NX] = */ X86_CR3_PAE_PAGE_MASK,
936 /* [PGMMODE_AMD64] = */ X86_CR3_AMD64_PAGE_MASK,
937 /* [PGMMODE_AMD64_NX] = */ X86_CR3_AMD64_PAGE_MASK,
938 /* [PGMMODE_NESTED_32BIT] = */ X86_CR3_PAGE_MASK,
939 /* [PGMMODE_NESTED_PAE] = */ X86_CR3_PAE_PAGE_MASK,
940 /* [PGMMODE_NESTED_AMD64] = */ X86_CR3_AMD64_PAGE_MASK,
941 /* [PGMMODE_EPT] = */ X86_CR3_EPT_PAGE_MASK,
942 /* [12] = */ 0,
943 /* [13] = */ 0,
944 /* [14] = */ 0,
945 /* [15] = */ 0,
946 /* [16] = */ 0,
947 /* [17] = */ 0,
948 /* [18] = */ 0,
949 /* [19] = */ 0,
950 /* [20] = */ 0,
951 /* [21] = */ 0,
952 /* [22] = */ 0,
953 /* [23] = */ 0,
954 /* [24] = */ 0,
955 /* [25] = */ 0,
956 /* [26] = */ 0,
957 /* [27] = */ 0,
958 /* [28] = */ 0,
959 /* [29] = */ 0,
960 /* [30] = */ 0,
961 /* [31] = */ 0,
962 /* [PGMMODE_NONE] = */ X86_CR3_AMD64_PAGE_MASK,
963};
964AssertCompile(PGMMODE_NONE == 32);
965
966
967#elif defined(VBOX_VMM_TARGET_ARMV8)
968# include "PGMAllGst-armv8.cpp.h"
969#else
970# error "port me"
971#endif
972
973
974#ifdef VBOX_VMM_TARGET_X86
975
976/**
977 * Gets the physical address mask for CR3 in the given paging mode.
978 *
979 * The mask is for eliminating flags and other non-address bits in CR3/EPTP when
980 * extracting the physical address. It is not for validating whether there are
981 * reserved bits set. PGM ASSUMES that whoever loaded the CR3 value and passed
982 * it to PGM checked for reserved bits, including reserved physical address
983 * bits.
984 *
985 * @returns The CR3 mask.
986 * @param enmMode The paging mode.
987 * @param enmSlatMode The second-level address translation mode.
988 */
989DECLINLINE(uint64_t) pgmGetCr3MaskForMode(PGMMODE enmMode, PGMSLAT enmSlatMode)
990{
991 if (enmSlatMode == PGMSLAT_DIRECT)
992 {
993 Assert(enmMode != PGMMODE_EPT);
994 return g_auCr3MaskForMode[(unsigned)enmMode < (unsigned)PGMMODE_MAX ? enmMode : 0];
995 }
996 Assert(enmSlatMode == PGMSLAT_EPT);
997 return X86_CR3_EPT_PAGE_MASK;
998}
999
1000
1001/**
1002 * Gets the masked CR3 value according to the current guest paging mode.
1003 *
1004 * See disclaimer in pgmGetCr3MaskForMode.
1005 *
1006 * @returns The masked PGM CR3 value.
1007 * @param pVCpu The cross context virtual CPU structure.
1008 * @param uCr3 The raw guest CR3 value.
1009 */
1010DECLINLINE(RTGCPHYS) pgmGetGuestMaskedCr3(PVMCPUCC pVCpu, uint64_t uCr3)
1011{
1012 uint64_t const fCr3Mask = pgmGetCr3MaskForMode(pVCpu->pgm.s.enmGuestMode, pVCpu->pgm.s.enmGuestSlatMode);
1013 RTGCPHYS GCPhysCR3 = (RTGCPHYS)(uCr3 & fCr3Mask);
1014 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
1015 return GCPhysCR3;
1016}
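/*
 * Worked example (sketch with a hypothetical CR3 value): for a guest in
 * PGMMODE_32_BIT with direct SLAT the mask selected above is X86_CR3_PAGE_MASK,
 * so a raw CR3 of 0x00000000abcd5018 (page directory base with the PWT and PCD
 * flag bits set) is reduced to the physical address 0x00000000abcd5000:
 *
 * @code
 *    RTGCPHYS const GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, UINT64_C(0x00000000abcd5018));
 *    // GCPhysCR3 == 0x00000000abcd5000 (assuming the A20 gate is enabled)
 * @endcode
 */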
1017
1018
1019# ifdef IN_RING0
1020/**
1021 * \#PF Handler.
1022 *
1023 * @returns VBox status code (appropriate for trap handling and GC return).
1024 * @param pVCpu The cross context virtual CPU structure.
1025 * @param uErr The trap error code.
1026 * @param pCtx Pointer to the register context for the CPU.
1027 * @param pvFault The fault address.
1028 */
1029VMMDECL(int) PGMTrap0eHandler(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTX pCtx, RTGCPTR pvFault)
1030{
1031 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1032
1033 Log(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv eip=%04x:%RGv cr3=%RGp\n", uErr, pvFault, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPHYS)CPUMGetGuestCR3(pVCpu)));
1034 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.StatRZTrap0e, a);
1035 STAM_STATS({ pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = NULL; } );
1036
1037
1038# ifdef VBOX_WITH_STATISTICS
1039 /*
1040 * Error code stats.
1041 */
1042 if (uErr & X86_TRAP_PF_US)
1043 {
1044 if (!(uErr & X86_TRAP_PF_P))
1045 {
1046 if (uErr & X86_TRAP_PF_RW)
1047 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentWrite);
1048 else
1049 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNotPresentRead);
1050 }
1051 else if (uErr & X86_TRAP_PF_RW)
1052 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSWrite);
1053 else if (uErr & X86_TRAP_PF_RSVD)
1054 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSReserved);
1055 else if (uErr & X86_TRAP_PF_ID)
1056 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSNXE);
1057 else
1058 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eUSRead);
1059 }
1060 else
1061 { /* Supervisor */
1062 if (!(uErr & X86_TRAP_PF_P))
1063 {
1064 if (uErr & X86_TRAP_PF_RW)
1065 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentWrite);
1066 else
1067 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVNotPresentRead);
1068 }
1069 else if (uErr & X86_TRAP_PF_RW)
1070 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVWrite);
1071 else if (uErr & X86_TRAP_PF_ID)
1072 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSNXE);
1073 else if (uErr & X86_TRAP_PF_RSVD)
1074 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eSVReserved);
1075 }
1076# endif /* VBOX_WITH_STATISTICS */
1077
1078 /*
1079 * Call the worker.
1080 */
1081 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1082 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1083 AssertReturn(g_aPgmBothModeData[idxBth].pfnTrap0eHandler, VERR_PGM_MODE_IPE);
1084 bool fLockTaken = false;
1085 int rc = g_aPgmBothModeData[idxBth].pfnTrap0eHandler(pVCpu, uErr, pCtx, pvFault, &fLockTaken);
1086 if (fLockTaken)
1087 {
1088 PGM_LOCK_ASSERT_OWNER(pVM);
1089 PGM_UNLOCK(pVM);
1090 }
1091 LogFlow(("PGMTrap0eHandler: uErr=%RGx pvFault=%RGv rc=%Rrc\n", uErr, pvFault, rc));
1092
1093 /*
1094 * Return code tweaks.
1095 */
1096 if (rc != VINF_SUCCESS)
1097 {
1098 if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
1099 rc = VINF_SUCCESS;
1100
1101 /* Note: hack alert for difficult to reproduce problem. */
1102 if ( rc == VERR_PAGE_NOT_PRESENT /* SMP only ; disassembly might fail. */
1103 || rc == VERR_PAGE_TABLE_NOT_PRESENT /* seen with UNI & SMP */
1104 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT /* seen with SMP */
1105 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT) /* precaution */
1106 {
1107 Log(("WARNING: Unexpected VERR_PAGE_TABLE_NOT_PRESENT (%d) for page fault at %RGv error code %x (rip=%RGv)\n", rc, pvFault, uErr, pCtx->rip));
1108 /* Some kind of inconsistency in the SMP case; it's safe to just execute the instruction again; not sure about single VCPU VMs though. */
1109 rc = VINF_SUCCESS;
1110 }
1111 }
1112
1113 STAM_STATS({ if (rc == VINF_EM_RAW_GUEST_TRAP) STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.StatRZTrap0eGuestPF); });
1114 STAM_STATS({ if (!pVCpu->pgmr0.s.pStatTrap0eAttributionR0)
1115 pVCpu->pgmr0.s.pStatTrap0eAttributionR0 = &pVCpu->pgm.s.Stats.StatRZTrap0eTime2Misc; });
1116 STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.Stats.StatRZTrap0e, pVCpu->pgmr0.s.pStatTrap0eAttributionR0, a);
1117 return rc;
1118}
1119# endif /* IN_RING0 */
1120
1121
1122/**
1123 * Prefetch a page
1124 *
1125 * Typically used to sync commonly used pages before entering raw mode
1126 * after a CR3 reload.
1127 *
1128 * @returns VBox status code suitable for scheduling.
1129 * @retval VINF_SUCCESS on success.
1130 * @retval VINF_PGM_SYNC_CR3 if we're out of shadow pages or something like that.
1131 * @param pVCpu The cross context virtual CPU structure.
1132 * @param GCPtrPage Page to invalidate.
1133 */
1134VMMDECL(int) PGMPrefetchPage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1135{
1136 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1137
1138 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1139 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1140 AssertReturn(g_aPgmBothModeData[idxBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
1141 int rc = g_aPgmBothModeData[idxBth].pfnPrefetchPage(pVCpu, GCPtrPage);
1142
1143 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,Prefetch), a);
1144 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("rc=%Rrc\n", rc));
1145 return rc;
1146}
1147
1148
1149/**
1150 * Emulation of the invlpg instruction (HC only actually).
1151 *
1152 * @returns Strict VBox status code, special care required.
1153 * @retval VINF_PGM_SYNC_CR3 - handled.
1154 * @retval VINF_EM_RAW_EMULATE_INSTR - not handled (RC only).
1155 *
1156 * @param pVCpu The cross context virtual CPU structure.
1157 * @param GCPtrPage Page to invalidate.
1158 *
1159 * @remark ASSUMES the page table entry or page directory is valid. Fairly
1160 * safe, but there could be edge cases!
1161 *
1162 * @todo Flush page or page directory only if necessary!
1163 * @todo VBOXSTRICTRC
1164 */
1165VMMDECL(int) PGMInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtrPage)
1166{
1167 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1168 int rc;
1169 Log3(("PGMInvalidatePage: GCPtrPage=%RGv\n", GCPtrPage));
1170
1171 IEMTlbInvalidatePage(pVCpu, GCPtrPage);
1172
1173 /*
1174 * Call paging mode specific worker.
1175 */
1176 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1177 PGM_LOCK_VOID(pVM);
1178
1179 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1180 AssertReturnStmt(idxBth < RT_ELEMENTS(g_aPgmBothModeData), PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1181 AssertReturnStmt(g_aPgmBothModeData[idxBth].pfnInvalidatePage, PGM_UNLOCK(pVM), VERR_PGM_MODE_IPE);
1182 rc = g_aPgmBothModeData[idxBth].pfnInvalidatePage(pVCpu, GCPtrPage);
1183
1184 PGM_UNLOCK(pVM);
1185 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,InvalidatePage), a);
1186
1187 /* Ignore all irrelevant error codes. */
1188 if ( rc == VERR_PAGE_NOT_PRESENT
1189 || rc == VERR_PAGE_TABLE_NOT_PRESENT
1190 || rc == VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT
1191 || rc == VERR_PAGE_MAP_LEVEL4_NOT_PRESENT)
1192 rc = VINF_SUCCESS;
1193
1194 return rc;
1195}
1196
1197
1198/**
1199 * Executes an instruction using the interpreter.
1200 *
1201 * @returns VBox status code (appropriate for trap handling and GC return).
1202 * @param pVCpu The cross context virtual CPU structure.
1203 * @param pvFault Fault address.
1204 */
1205VMMDECL(VBOXSTRICTRC) PGMInterpretInstruction(PVMCPUCC pVCpu, RTGCPTR pvFault)
1206{
1207 RT_NOREF(pvFault);
1208 VBOXSTRICTRC rc = EMInterpretInstruction(pVCpu);
1209 if (rc == VERR_EM_INTERPRETER)
1210 rc = VINF_EM_RAW_EMULATE_INSTR;
1211 if (rc != VINF_SUCCESS)
1212 Log(("PGMInterpretInstruction: returns %Rrc (pvFault=%RGv)\n", VBOXSTRICTRC_VAL(rc), pvFault));
1213 return rc;
1214}
1215
1216
1217/**
1218 * Gets effective page information (from the VMM page directory).
1219 *
1220 * @returns VBox status code.
1221 * @param pVCpu The cross context virtual CPU structure.
1222 * @param GCPtr Guest Context virtual address of the page.
1223 * @param pfFlags Where to store the flags. These are X86_PTE_*.
1224 * @param pHCPhys Where to store the HC physical address of the page.
1225 * This is page aligned.
1226 * @remark You should use PGMMapGetPage() for pages in a mapping.
1227 */
1228VMMDECL(int) PGMShwGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
1229{
1230 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1231 PGM_LOCK_VOID(pVM);
1232
1233 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1234 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1235 AssertReturn(g_aPgmShadowModeData[idxShw].pfnGetPage, VERR_PGM_MODE_IPE);
1236 int rc = g_aPgmShadowModeData[idxShw].pfnGetPage(pVCpu, GCPtr, pfFlags, pHCPhys);
1237
1238 PGM_UNLOCK(pVM);
1239 return rc;
1240}
1241
1242
1243/**
1244 * Modify page flags for a range of pages in the shadow context.
1245 *
1246 * The existing flags are ANDed with the fMask and ORed with the fFlags.
1247 *
1248 * @returns VBox status code.
1249 * @param pVCpu The cross context virtual CPU structure.
1250 * @param GCPtr Virtual address of the first page in the range.
1251 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
1252 * @param fMask The AND mask - page flags X86_PTE_*.
1253 * Be very CAREFUL when ~'ing constants which could be 32-bit!
1254 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1255 * @remark You must use PGMMapModifyPage() for pages in a mapping.
1256 */
1257DECLINLINE(int) pdmShwModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
1258{
1259 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
1260 Assert(!(fOpFlags & ~(PGM_MK_PG_IS_MMIO2 | PGM_MK_PG_IS_WRITE_FAULT)));
1261
1262 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK; /** @todo this ain't necessary, right... */
1263
1264 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1265 PGM_LOCK_VOID(pVM);
1266
1267 uintptr_t idxShw = pVCpu->pgm.s.idxShadowModeData;
1268 AssertReturn(idxShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
1269 AssertReturn(g_aPgmShadowModeData[idxShw].pfnModifyPage, VERR_PGM_MODE_IPE);
1270 int rc = g_aPgmShadowModeData[idxShw].pfnModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, fFlags, fMask, fOpFlags);
1271
1272 PGM_UNLOCK(pVM);
1273 return rc;
1274}
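/*
 * Worked example (sketch): per the description above, pdmShwModifyPage ANDs the
 * existing entry with fMask and ORs in fFlags, i.e. uPteNew = (uPteOld & fMask) | fFlags
 * (uPteOld/uPteNew are illustrative names only).  For a shadow PTE of 0x067
 * (P, RW, US, A and D set), the read-only wrapper below passes fFlags=0 and
 * fMask=~X86_PTE_RW, giving (0x067 & ~0x002) | 0 = 0x065, i.e. only the write
 * bit is cleared.  The wrappers that follow are the canonical callers:
 *
 * @code
 *    pdmShwModifyPage(pVCpu, GCPtr, 0,          ~(uint64_t)X86_PTE_RW, fOpFlags); // read-only
 *    pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0,          fOpFlags); // writable
 *    pdmShwModifyPage(pVCpu, GCPtr, 0,          0,                     fOpFlags); // not present
 * @endcode
 */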
1275
1276
1277/**
1278 * Changes the page flags for a single page in the shadow page tables to
1279 * make it read-only.
1280 *
1281 * @returns VBox status code.
1282 * @param pVCpu The cross context virtual CPU structure.
1283 * @param GCPtr Virtual address of the first page in the range.
1284 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1285 */
1286VMMDECL(int) PGMShwMakePageReadonly(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1287{
1288 return pdmShwModifyPage(pVCpu, GCPtr, 0, ~(uint64_t)X86_PTE_RW, fOpFlags);
1289}
1290
1291
1292/**
1293 * Changes the page flags for a single page in the shadow page tables to
1294 * make it writable.
1295 *
1296 * The caller must know with 101% certainty that the guest page tables map this
1297 * as writable too. This function will deal with shared, zero and write monitored
1298 * pages.
1299 *
1300 * @returns VBox status code.
1301 * @param pVCpu The cross context virtual CPU structure.
1302 * @param GCPtr Virtual address of the first page in the range.
1303 * @param fOpFlags A combination of the PGM_MK_PK_XXX flags.
1304 */
1305VMMDECL(int) PGMShwMakePageWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1306{
1307 if (pVCpu->pgm.s.enmShadowMode != PGMMODE_NONE) /* avoid assertions */
1308 return pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)0, fOpFlags);
1309 return VINF_SUCCESS;
1310}
1311
1312
1313/**
1314 * Changes the page flags for a single page in the shadow page tables to
1315 * make it not present.
1316 *
1317 * @returns VBox status code.
1318 * @param pVCpu The cross context virtual CPU structure.
1319 * @param GCPtr Virtual address of the first page in the range.
1320 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1321 */
1322VMMDECL(int) PGMShwMakePageNotPresent(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fOpFlags)
1323{
1324 return pdmShwModifyPage(pVCpu, GCPtr, 0, 0, fOpFlags);
1325}
1326
1327# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
1328
1329/**
1330 * Changes the page flags for a single page in the shadow page tables to
1331 * make it supervisor and writable.
1332 *
1333 * This is for dealing with CR0.WP=0 and read-only user pages.
1334 *
1335 * @returns VBox status code.
1336 * @param pVCpu The cross context virtual CPU structure.
1337 * @param GCPtr Virtual address of the first page in the range.
1338 * @param fBigPage Whether or not this is a big page. If it is, we have to
1339 * change the shadow PDE as well. If it isn't, the caller
1340 * has checked that the shadow PDE doesn't need changing.
1341 * We ASSUME 4KB pages backing the big page here!
1342 * @param fOpFlags A combination of the PGM_MK_PG_XXX flags.
1343 */
1344int pgmShwMakePageSupervisorAndWritable(PVMCPUCC pVCpu, RTGCPTR GCPtr, bool fBigPage, uint32_t fOpFlags)
1345{
1346 int rc = pdmShwModifyPage(pVCpu, GCPtr, X86_PTE_RW, ~(uint64_t)X86_PTE_US, fOpFlags);
1347 if (rc == VINF_SUCCESS && fBigPage)
1348 {
1349 /* this is a bit ugly... */
1350 switch (pVCpu->pgm.s.enmShadowMode)
1351 {
1352 case PGMMODE_32_BIT:
1353 {
1354 PX86PDE pPde = pgmShwGet32BitPDEPtr(pVCpu, GCPtr);
1355 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1356 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1357 pPde->u |= X86_PDE_RW;
1358 Log(("-> PDE=%#llx (32)\n", pPde->u));
1359 break;
1360 }
1361 case PGMMODE_PAE:
1362 case PGMMODE_PAE_NX:
1363 {
1364 PX86PDEPAE pPde = pgmShwGetPaePDEPtr(pVCpu, GCPtr);
1365 AssertReturn(pPde, VERR_INTERNAL_ERROR_3);
1366 Log(("pgmShwMakePageSupervisorAndWritable: PDE=%#llx", pPde->u));
1367 pPde->u |= X86_PDE_RW;
1368 Log(("-> PDE=%#llx (PAE)\n", pPde->u));
1369 break;
1370 }
1371 default:
1372 AssertFailedReturn(VERR_INTERNAL_ERROR_4);
1373 }
1374 }
1375 return rc;
1376}
1377
1378
1379/**
1380 * Gets the shadow page directory for the specified address, PAE.
1381 *
1382 * @returns Pointer to the shadow PD.
1383 * @param pVCpu The cross context virtual CPU structure.
1384 * @param GCPtr The address.
1385 * @param uGstPdpe Guest PDPT entry. Valid.
1386 * @param ppPD Receives address of page directory
1387 */
1388int pgmShwSyncPaePDPtr(PVMCPUCC pVCpu, RTGCPTR GCPtr, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1389{
1390 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1391 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1392 PPGMPOOLPAGE pShwPage;
1393 int rc;
1394 PGM_LOCK_ASSERT_OWNER(pVM);
1395
1396
1397 /* Allocate page directory if not present. */
1398 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1399 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1400 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1401 X86PGPAEUINT const uPdpe = pPdpe->u;
1402 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1403 {
1404 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1405 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1406 Assert((pPdpe->u & X86_PDPE_PG_MASK) == pShwPage->Core.Key);
1407
1408 pgmPoolCacheUsed(pPool, pShwPage);
1409
1410 /* Update the entry if necessary. */
1411 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS);
1412 if (uPdpeNew == uPdpe)
1413 { /* likely */ }
1414 else
1415 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1416 }
1417 else
1418 {
1419 RTGCPTR64 GCPdPt;
1420 PGMPOOLKIND enmKind;
1421 if (pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu))
1422 {
1423 /* AMD-V nested paging or real/protected mode without paging. */
1424 GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
1425 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1426 }
1427 else if (CPUMGetGuestCR4(pVCpu) & X86_CR4_PAE)
1428 {
1429 if (uGstPdpe & X86_PDPE_P)
1430 {
1431 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1432 enmKind = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
1433 }
1434 else
1435 {
1436 /* PD not present; guest must reload CR3 to change it.
1437 * No need to monitor anything in this case. */
1438 /** @todo r=bird: WTF is hit?!? */
1439 /*Assert(VM_IS_RAW_MODE_ENABLED(pVM)); - ??? */
1440 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1441 enmKind = PGMPOOLKIND_PAE_PD_PHYS;
1442 Assert(uGstPdpe & X86_PDPE_P); /* caller should do this already */
1443 }
1444 }
1445 else
1446 {
1447 GCPdPt = CPUMGetGuestCR3(pVCpu);
1448 enmKind = (PGMPOOLKIND)(PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD + iPdPt);
1449 }
1450
1451 /* Create a reference back to the PDPT by using the index in its shadow page. */
1452 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1453 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPdPt, false /*fLockPage*/,
1454 &pShwPage);
1455 AssertRCReturn(rc, rc);
1456
1457 /* Hook it up. */
1458 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | (uGstPdpe & (X86_PDPE_P | X86_PDPE_A)) | (uPdpe & PGM_PDPT_FLAGS));
1459 }
1460 PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdpe);
1461
1462 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1463 return VINF_SUCCESS;
1464}
1465
1466
1467/**
1468 * Gets the pointer to the shadow page directory entry for an address, PAE.
1469 *
1470 * @returns Pointer to the PDE.
1471 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1472 * @param GCPtr The address.
1473 * @param ppShwPde Receives the address of the pgm pool page for the shadow page directory
1474 */
1475DECLINLINE(int) pgmShwGetPaePoolPagePD(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde)
1476{
1477 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1478 PGM_LOCK_ASSERT_OWNER(pVM);
1479
1480 PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pVCpu);
1481 AssertReturn(pPdpt, VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT); /* can't happen */
1482 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
1483 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1484 if (!(uPdpe & X86_PDPE_P))
1485 {
1486 LogFlow(("pgmShwGetPaePoolPagePD: PD %d not present (%RX64)\n", iPdPt, uPdpe));
1487 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1488 }
1489 AssertMsg(uPdpe & X86_PDPE_PG_MASK, ("GCPtr=%RGv\n", GCPtr));
1490
1491 /* Fetch the pgm pool shadow descriptor. */
1492 PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pVM->pgm.s.CTX_SUFF(pPool), uPdpe & X86_PDPE_PG_MASK);
1493 AssertReturn(pShwPde, VERR_PGM_POOL_GET_PAGE_FAILED);
1494
1495 *ppShwPde = pShwPde;
1496 return VINF_SUCCESS;
1497}
1498
1499
1500/**
1501 * Syncs the SHADOW page directory pointer for the specified address.
1502 *
1503 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1504 *
1505 * The caller is responsible for making sure the guest has a valid PD before
1506 * calling this function.
1507 *
1508 * @returns VBox status code.
1509 * @param pVCpu The cross context virtual CPU structure.
1510 * @param GCPtr The address.
1511 * @param uGstPml4e Guest PML4 entry (valid).
1512 * @param uGstPdpe Guest PDPT entry (valid).
1513 * @param ppPD Receives address of page directory
1514 */
1515static int pgmShwSyncLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, X86PGPAEUINT uGstPml4e, X86PGPAEUINT uGstPdpe, PX86PDPAE *ppPD)
1516{
1517 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1518 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1519 bool const fNestedPagingOrNoGstPaging = pVM->pgm.s.fNestedPaging || !CPUMIsGuestPagingEnabled(pVCpu);
1520 int rc;
1521
1522 PGM_LOCK_ASSERT_OWNER(pVM);
1523
1524 /*
1525 * PML4.
1526 */
1527 PPGMPOOLPAGE pShwPage;
1528 {
1529 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1530 PX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1531 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1532 X86PGPAEUINT const uPml4e = pPml4e->u;
1533
1534 /* Allocate page directory pointer table if not present. */
1535 if (uPml4e & (X86_PML4E_P | X86_PML4E_PG_MASK))
1536 {
1537 pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1538 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1539
1540 pgmPoolCacheUsed(pPool, pShwPage);
1541
1542 /* Update the entry if needed. */
1543 X86PGPAEUINT const uPml4eNew = pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1544 | (uPml4e & PGM_PML4_FLAGS);
1545 if (uPml4e == uPml4eNew)
1546 { /* likely */ }
1547 else
1548 ASMAtomicWriteU64(&pPml4e->u, uPml4eNew);
1549 }
1550 else
1551 {
1552 Assert(pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1553
1554 RTGCPTR64 GCPml4;
1555 PGMPOOLKIND enmKind;
1556 if (fNestedPagingOrNoGstPaging)
1557 {
1558 /* AMD-V nested paging or real/protected mode without paging */
1559 GCPml4 = (RTGCPTR64)iPml4 << X86_PML4_SHIFT; /** @todo bogus calculation for PML5 */
1560 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_PHYS;
1561 }
1562 else
1563 {
1564 GCPml4 = uGstPml4e & X86_PML4E_PG_MASK;
1565 enmKind = PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT;
1566 }
1567
1568 /* Create a reference back to the PDPT by using the index in its shadow page. */
1569 rc = pgmPoolAlloc(pVM, GCPml4, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1570 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1571 &pShwPage);
1572 AssertRCReturn(rc, rc);
1573
1574 /* Hook it up. */
1575 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | (uGstPml4e & pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask)
1576 | (uPml4e & PGM_PML4_FLAGS));
1577 }
1578 }
1579
1580 /*
1581 * PDPT.
1582 */
1583 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1584 PX86PDPT pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1585 PX86PDPE pPdpe = &pPdpt->a[iPdPt];
1586 X86PGPAEUINT const uPdpe = pPdpe->u;
1587
1588 /* Allocate page directory if not present. */
1589 if (uPdpe & (X86_PDPE_P | X86_PDPE_PG_MASK))
1590 {
1591 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1592 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1593
1594 pgmPoolCacheUsed(pPool, pShwPage);
1595
1596 /* Update the entry if needed. */
1597 X86PGPAEUINT const uPdpeNew = pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask)
1598 | (uPdpe & PGM_PDPT_FLAGS);
1599 if (uPdpe == uPdpeNew)
1600 { /* likely */ }
1601 else
1602 ASMAtomicWriteU64(&pPdpe->u, uPdpeNew);
1603 }
1604 else
1605 {
1606 RTGCPTR64 GCPdPt;
1607 PGMPOOLKIND enmKind;
1608 if (fNestedPagingOrNoGstPaging)
1609 {
1610 /* AMD-V nested paging or real/protected mode without paging */
1611 GCPdPt = GCPtr & ~(RT_BIT_64(X86_PDPT_SHIFT) - 1);
1612 enmKind = PGMPOOLKIND_64BIT_PD_FOR_PHYS;
1613 }
1614 else
1615 {
1616 GCPdPt = uGstPdpe & X86_PDPE_PG_MASK;
1617 enmKind = PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD;
1618 }
1619
1620 /* Create a reference back to the PDPT by using the index in its shadow page. */
1621 rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1622 pShwPage->idx, iPdPt, false /*fLockPage*/,
1623 &pShwPage);
1624 AssertRCReturn(rc, rc);
1625
1626 /* Hook it up. */
1627 ASMAtomicWriteU64(&pPdpe->u,
1628 pShwPage->Core.Key | (uGstPdpe & pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask) | (uPdpe & PGM_PDPT_FLAGS));
1629 }
1630
1631 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1632 return VINF_SUCCESS;
1633}
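/* Illustrative sketch (not part of the original source): after a successful call
 * the caller picks the PDE for GCPtr out of the returned page directory, e.g.:
 *
 *     PX86PDPAE pPD;
 *     rc = pgmShwSyncLongModePDPtr(pVCpu, GCPtr, uGstPml4e, uGstPdpe, &pPD);
 *     if (RT_SUCCESS(rc))
 *         pPde = &pPD->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK];
 */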
1634
1635
1636/**
1637 * Gets the SHADOW page directory pointer for the specified address (long mode).
1638 *
1639 * @returns VBox status code.
1640 * @param pVCpu The cross context virtual CPU structure.
1641 * @param GCPtr The address.
1642 * @param ppPml4e Receives the address of the page map level 4 entry.
1643 * @param ppPdpt Receives the address of the page directory pointer table.
1644 * @param ppPD Receives the address of the page directory.
1645 */
1646DECLINLINE(int) pgmShwGetLongModePDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD)
1647{
1648 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1649 PGM_LOCK_ASSERT_OWNER(pVM);
1650
1651 /*
1652 * PML4
1653 */
1654 const unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
1655 PCX86PML4E pPml4e = pgmShwGetLongModePML4EPtr(pVCpu, iPml4);
1656 AssertReturn(pPml4e, VERR_PGM_PML4_MAPPING);
1657 if (ppPml4e)
1658 *ppPml4e = (PX86PML4E)pPml4e;
1659 X86PGPAEUINT const uPml4e = pPml4e->u;
1660 Log4(("pgmShwGetLongModePDPtr %RGv (%RHv) %RX64\n", GCPtr, pPml4e, uPml4e));
1661 if (!(uPml4e & X86_PML4E_P)) /** @todo other code checks for a NULL page frame number! */
1662 return VERR_PAGE_MAP_LEVEL4_NOT_PRESENT;
1663
1664 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1665 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, uPml4e & X86_PML4E_PG_MASK);
1666 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1667
1668 /*
1669 * PDPT
1670 */
1671 const unsigned iPdPt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
1672 PCX86PDPT pPdpt = *ppPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1673 X86PGPAEUINT const uPdpe = pPdpt->a[iPdPt].u;
1674 if (!(uPdpe & X86_PDPE_P)) /** @todo other code checks for a NULL page frame number! */
1675 return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
1676
1677 pShwPage = pgmPoolGetPage(pPool, uPdpe & X86_PDPE_PG_MASK);
1678 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1679
1680 *ppPD = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1681 Log4(("pgmShwGetLongModePDPtr %RGv -> *ppPD=%p PDE=%p/%RX64\n", GCPtr, *ppPD, &(*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK], (*ppPD)->a[(GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK].u));
1682 return VINF_SUCCESS;
1683}
1684
1685
1686/**
1687 * Syncs the SHADOW EPT page directory pointer for the specified address. Allocates
1688 * backing pages in case the PDPT or PML4 entry is missing.
1689 *
1690 * @returns VBox status code.
1691 * @param pVCpu The cross context virtual CPU structure.
1692 * @param GCPtr The address.
1693 * @param ppPdpt Receives address of pdpt
1694 * @param ppPD Receives address of page directory
1695 */
1696static int pgmShwGetEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPtr, PEPTPDPT *ppPdpt, PEPTPD *ppPD)
1697{
1698 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1699 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1700 int rc;
1701
1702 Assert(pVM->pgm.s.fNestedPaging);
1703 PGM_LOCK_ASSERT_OWNER(pVM);
1704
1705 /*
1706 * PML4 level.
1707 */
1708 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1709 Assert(pPml4);
1710
1711 /* Allocate page directory pointer table if not present. */
1712 PPGMPOOLPAGE pShwPage;
1713 {
1714 const unsigned iPml4 = (GCPtr >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1715 PEPTPML4E pPml4e = &pPml4->a[iPml4];
1716 EPTPML4E Pml4e;
1717 Pml4e.u = pPml4e->u;
1718 if (!(Pml4e.u & (EPT_E_PG_MASK | EPT_E_READ)))
1719 {
1720 RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
1721 rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1722 pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, false /*fLockPage*/,
1723 &pShwPage);
1724 AssertRCReturn(rc, rc);
1725
1726 /* Hook up the new PDPT now. */
1727 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1728 }
1729 else
1730 {
1731 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1732 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1733
1734 pgmPoolCacheUsed(pPool, pShwPage);
1735
1736 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1737 if (Pml4e.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1738 { }
1739 else
1740 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1741 }
1742 }
1743
1744 /*
1745 * PDPT level.
1746 */
1747 const unsigned iPdPt = (GCPtr >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1748 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1749 PEPTPDPTE pPdpe = &pPdpt->a[iPdPt];
1750
1751 if (ppPdpt)
1752 *ppPdpt = pPdpt;
1753
1754 /* Allocate page directory if not present. */
1755 EPTPDPTE Pdpe;
1756 Pdpe.u = pPdpe->u;
1757 if (!(Pdpe.u & (EPT_E_PG_MASK | EPT_E_READ)))
1758 {
1759 RTGCPTR64 const GCPdPt = GCPtr & ~(RT_BIT_64(EPT_PDPT_SHIFT) - 1);
1760 rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_EPT_PD_FOR_PHYS, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1761 pShwPage->idx, iPdPt, false /*fLockPage*/,
1762 &pShwPage);
1763 AssertRCReturn(rc, rc);
1764
1765 /* Hook up the new PD now. */
1766 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1767 }
1768 else
1769 {
1770 pShwPage = pgmPoolGetPage(pPool, pPdpe->u & EPT_PDPTE_PG_MASK);
1771 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1772
1773 pgmPoolCacheUsed(pPool, pShwPage);
1774
1775 /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1776 if (Pdpe.u == (pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE))
1777 { }
1778 else
1779 ASMAtomicWriteU64(&pPdpe->u, pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE);
1780 }
1781
1782 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1783 return VINF_SUCCESS;
1784}
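/* Illustrative sketch (not part of the original source): the returned EPT page
 * directory is indexed with the usual EPT directory bits of the address, e.g.:
 *
 *     PEPTPD pPD;
 *     rc = pgmShwGetEPTPDPtr(pVCpu, GCPtr, NULL /*ppPdpt*/, &pPD);
 *     if (RT_SUCCESS(rc))
 *         pPde = &pPD->a[(GCPtr >> EPT_PD_SHIFT) & EPT_PD_MASK];
 */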
1785
1786
1787# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1788/**
1789 * Syncs the SHADOW nested-guest page directory pointer for the specified address.
1790 * Allocates backing pages in case the PDPT or PML4 entry is missing.
1791 *
1792 * @returns VBox status code.
1793 * @param pVCpu The cross context virtual CPU structure.
1794 * @param GCPhysNested The nested-guest physical address.
1795 * @param ppPdpt Where to store the PDPT. Optional, can be NULL.
1796 * @param ppPD Where to store the PD. Optional, can be NULL.
1797 * @param pGstWalkAll The guest walk info.
1798 */
1799static int pgmShwGetNestedEPTPDPtr(PVMCPUCC pVCpu, RTGCPTR64 GCPhysNested, PEPTPDPT *ppPdpt, PEPTPD *ppPD,
1800 PPGMPTWALKGST pGstWalkAll)
1801{
1802 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1803 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1804 int rc;
1805
1806 PPGMPOOLPAGE pShwPage;
1807 Assert(pVM->pgm.s.fNestedPaging);
1808 Assert(pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT);
1809 PGM_LOCK_ASSERT_OWNER(pVM);
1810
1811 /*
1812 * PML4 level.
1813 */
1814 {
1815 PEPTPML4 pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pVCpu->pgm.s.CTX_SUFF(pShwPageCR3));
1816 Assert(pPml4);
1817
1818 /* Allocate page directory pointer table if not present. */
1819 {
1820 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pml4e.u & pVCpu->pgm.s.fGstEptShadowedPml4eMask;
1821 const unsigned iPml4e = (GCPhysNested >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
1822 PEPTPML4E pPml4e = &pPml4->a[iPml4e];
1823
1824 if (!(pPml4e->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
1825 {
1826 RTGCPHYS const GCPhysPdpt = pGstWalkAll->u.Ept.Pml4e.u & EPT_PML4E_PG_MASK;
1827 rc = pgmPoolAlloc(pVM, GCPhysPdpt, PGMPOOLKIND_EPT_PDPT_FOR_EPT_PDPT, PGMPOOLACCESS_DONTCARE,
1828 PGM_A20_IS_ENABLED(pVCpu), pVCpu->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4e, false /*fLockPage*/,
1829 &pShwPage);
1830 AssertRCReturn(rc, rc);
1831
1832 /* Hook up the new PDPT now. */
1833 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
1834 }
1835 else
1836 {
1837 pShwPage = pgmPoolGetPage(pPool, pPml4e->u & EPT_PML4E_PG_MASK);
1838 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1839
1840 pgmPoolCacheUsed(pPool, pShwPage);
1841
1842 /* Hook up the cached PDPT if needed (probably not given 512*512 PTs to sync). */
1843 if (pPml4e->u != (pShwPage->Core.Key | fShwFlags))
1844 ASMAtomicWriteU64(&pPml4e->u, pShwPage->Core.Key | fShwFlags);
1845 }
1846 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
1847 Log7Func(("GstPml4e=%RX64 ShwPml4e=%RX64 iPml4e=%u\n", pGstWalkAll->u.Ept.Pml4e.u, pPml4e->u, iPml4e));
1848 }
1849 }
1850
1851 /*
1852 * PDPT level.
1853 */
1854 {
1855 AssertReturn(!(pGstWalkAll->u.Ept.Pdpte.u & EPT_E_LEAF), VERR_NOT_SUPPORTED); /* shadowing 1GB pages not supported yet. */
1856
1857 PEPTPDPT pPdpt = (PEPTPDPT)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1858 if (ppPdpt)
1859 *ppPdpt = pPdpt;
1860
1861 uint64_t const fShwFlags = pGstWalkAll->u.Ept.Pdpte.u & pVCpu->pgm.s.fGstEptShadowedPdpteMask;
1862 const unsigned iPdPte = (GCPhysNested >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
1863 PEPTPDPTE pPdpte = &pPdpt->a[iPdPte];
1864
1865 if (!(pPdpte->u & (EPT_E_PG_MASK | EPT_PRESENT_MASK)))
1866 {
1867 RTGCPHYS const GCPhysPd = pGstWalkAll->u.Ept.Pdpte.u & EPT_PDPTE_PG_MASK;
1868 rc = pgmPoolAlloc(pVM, GCPhysPd, PGMPOOLKIND_EPT_PD_FOR_EPT_PD, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
1869 pShwPage->idx, iPdPte, false /*fLockPage*/, &pShwPage);
1870 AssertRCReturn(rc, rc);
1871
1872 /* Hook up the new PD now. */
1873 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
1874 }
1875 else
1876 {
1877 pShwPage = pgmPoolGetPage(pPool, pPdpte->u & EPT_PDPTE_PG_MASK);
1878 AssertReturn(pShwPage, VERR_PGM_POOL_GET_PAGE_FAILED);
1879
1880 pgmPoolCacheUsed(pPool, pShwPage);
1881
1882 /* Hook up the cached PD if needed (probably not, given there are 512 PTs we may need to sync). */
1883 if (pPdpte->u != (pShwPage->Core.Key | fShwFlags))
1884 ASMAtomicWriteU64(&pPdpte->u, pShwPage->Core.Key | fShwFlags);
1885 }
1886 Assert(PGMPOOL_PAGE_IS_NESTED(pShwPage));
1887 Log7Func(("GstPdpte=%RX64 ShwPdpte=%RX64 iPdPte=%u \n", pGstWalkAll->u.Ept.Pdpte.u, pPdpte->u, iPdPte));
1888
1889 *ppPD = (PEPTPD)PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pShwPage);
1890 }
1891
1892 return VINF_SUCCESS;
1893}
1894# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
1895
1896
1897# ifdef IN_RING0
1898/**
1899 * Synchronizes a range of nested page table entries.
1900 *
1901 * The caller must own the PGM lock.
1902 *
1903 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1904 * @param GCPhys Where to start.
1905 * @param cPages The number of pages whose entries should be synced.
1906 * @param enmShwPagingMode The shadow paging mode (PGMMODE_EPT for VT-x,
1907 * host paging mode for AMD-V).
1908 */
1909int pgmShwSyncNestedPageLocked(PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint32_t cPages, PGMMODE enmShwPagingMode)
1910{
1911 PGM_LOCK_ASSERT_OWNER(pVCpu->CTX_SUFF(pVM));
1912
1913/** @todo r=bird: Gotta love this nested paging hacking we're still carrying with us... (Split PGM_TYPE_NESTED.) */
1914 int rc;
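    /* The nested-paging SyncPage backends do not consult a real guest PDE at this
       level; the dummy PDE below (P/RW/US/A set) merely satisfies the template
       interface of the PGM_BTH_NAME_*_PROT(SyncPage) workers. */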
1915 switch (enmShwPagingMode)
1916 {
1917 case PGMMODE_32_BIT:
1918 {
1919 X86PDE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1920 rc = PGM_BTH_NAME_32BIT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1921 break;
1922 }
1923
1924 case PGMMODE_PAE:
1925 case PGMMODE_PAE_NX:
1926 {
1927 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1928 rc = PGM_BTH_NAME_PAE_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1929 break;
1930 }
1931
1932 case PGMMODE_AMD64:
1933 case PGMMODE_AMD64_NX:
1934 {
1935 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1936 rc = PGM_BTH_NAME_AMD64_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1937 break;
1938 }
1939
1940 case PGMMODE_EPT:
1941 {
1942 X86PDEPAE PdeDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A };
1943 rc = PGM_BTH_NAME_EPT_PROT(SyncPage)(pVCpu, PdeDummy, GCPhys, cPages, ~0U /*uErr*/);
1944 break;
1945 }
1946
1947 default:
1948 AssertMsgFailedReturn(("%d\n", enmShwPagingMode), VERR_IPE_NOT_REACHED_DEFAULT_CASE);
1949 }
1950 return rc;
1951}
1952# endif /* IN_RING0 */
1953
1954# endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
1955
1956/**
1957 * Maps the guest CR3.
1958 *
1959 * @returns VBox status code.
1960 * @param pVCpu The cross context virtual CPU structure.
1961 * @param GCPhysCr3 The guest CR3 value.
1962 * @param pHCPtrGuestCr3 Where to store the mapped memory.
1963 */
1964DECLINLINE(int) pgmGstMapCr3(PVMCPUCC pVCpu, RTGCPHYS GCPhysCr3, PRTHCPTR pHCPtrGuestCr3)
1965{
1966 /** @todo this needs some reworking wrt. locking? */
1967 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
1968 PGM_LOCK_VOID(pVM);
1969 PPGMPAGE pPageCr3 = pgmPhysGetPage(pVM, GCPhysCr3);
1970 AssertReturnStmt(pPageCr3, PGM_UNLOCK(pVM), VERR_PGM_INVALID_CR3_ADDR);
1971
1972 RTHCPTR HCPtrGuestCr3;
1973 int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCr3, GCPhysCr3, (void **)&HCPtrGuestCr3);
1974 PGM_UNLOCK(pVM);
1975
1976 *pHCPtrGuestCr3 = HCPtrGuestCr3;
1977 return rc;
1978}
1979
1980
1981# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1982/**
1983 * Unmaps the guest CR3.
1984 *
1985 * @returns VBox status code.
1986 * @param pVCpu The cross context virtual CPU structure.
1987 */
1988DECLINLINE(int) pgmGstUnmapCr3(PVMCPUCC pVCpu)
1989{
1990 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
1991 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
1992 AssertReturn(g_aPgmBothModeData[idxBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
1993 return g_aPgmBothModeData[idxBth].pfnUnmapCR3(pVCpu);
1994}
1995# endif
1996
1997#endif /* VBOX_VMM_TARGET_X86 */
1998
1999
2000/**
2001 * Gets effective Guest OS page information.
2002 *
2003 * @returns VBox status code.
2004 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2005 * @param GCPtr Guest Context virtual address of the page.
2006 * @param pWalk Where to store the page walk information.
2007 * @thread EMT(pVCpu)
2008 */
2009VMMDECL(int) PGMGstGetPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk)
2010{
2011 VMCPU_ASSERT_EMT(pVCpu);
2012 Assert(pWalk);
2013
2014#if defined(VBOX_VMM_TARGET_X86)
2015 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2016 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2017 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
2018 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
2019#elif defined(VBOX_VMM_TARGET_ARMV8)
2020 /** @todo Incorporate EL (for nested virt and EL3 later on). */
2021 uintptr_t idx = (GCPtr & RT_BIT_64(55))
2022 ? pVCpu->pgm.s.aidxGuestModeDataTtbr1[1]
2023 : pVCpu->pgm.s.aidxGuestModeDataTtbr0[1];
2024 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2025 AssertReturn(g_aPgmGuestModeData[idx].pfnGetPage, VERR_PGM_MODE_IPE);
2026 return g_aPgmGuestModeData[idx].pfnGetPage(pVCpu, GCPtr, pWalk);
2027#else
2028# error "Port me"
2029#endif
2030}
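/* Illustrative usage sketch (not part of the original source): translating a guest
 * virtual address to a guest physical address with this API looks roughly like:
 *
 *     PGMPTWALK Walk;
 *     int rc = PGMGstGetPage(pVCpu, GCPtr, &Walk);
 *     if (RT_SUCCESS(rc))
 *         GCPhys = Walk.GCPhys | (GCPtr & GUEST_PAGE_OFFSET_MASK);
 */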
2031
2032
2033/**
2034 * Gets effective Guest OS page information.
2035 *
2036 * @returns VBox status code.
2037 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2038 * @param GCPtr Guest Context virtual address of the page.
2039 * @param fFlags PGMQPAGE_F_XXX. If zero, no accessed or dirty bits will
2040 * be set.
2041 * @param pWalk Where to store the page walk information.
2042 * @thread EMT(pVCpu)
2043 */
2044VMM_INT_DECL(int) PGMGstQueryPageFast(PVMCPUCC pVCpu, RTGCPTR GCPtr, uint32_t fFlags, PPGMPTWALKFAST pWalk)
2045{
2046 VMCPU_ASSERT_EMT(pVCpu);
2047 Assert(pWalk);
2048 Assert(!(fFlags & ~(PGMQPAGE_F_VALID_MASK)));
2049 Assert(!(fFlags & PGMQPAGE_F_EXECUTE) || !(fFlags & PGMQPAGE_F_WRITE));
2050
2051#if defined(VBOX_VMM_TARGET_X86)
2052 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2053 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2054 AssertReturn(g_aPgmGuestModeData[idx].pfnQueryPageFast, VERR_PGM_MODE_IPE);
2055 return g_aPgmGuestModeData[idx].pfnQueryPageFast(pVCpu, GCPtr, fFlags, pWalk);
2056#elif defined(VBOX_VMM_TARGET_ARMV8)
2057 /** @todo Incorporate EL (for nested virt and EL3 later on). */
2058 uintptr_t idx = (GCPtr & RT_BIT_64(55))
2059 ? pVCpu->pgm.s.aidxGuestModeDataTtbr1[1]
2060 : pVCpu->pgm.s.aidxGuestModeDataTtbr0[1];
2061 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2062 AssertReturn(g_aPgmGuestModeData[idx].pfnQueryPageFast, VERR_PGM_MODE_IPE);
2063 return g_aPgmGuestModeData[idx].pfnQueryPageFast(pVCpu, GCPtr, fFlags, pWalk);
2064#else
2065# error "Port me"
2066#endif
2067}
2068
2069
2070/**
2071 * Performs a guest page table walk.
2072 *
2073 * The guest should be in paged protect mode or long mode when making a call to
2074 * this function.
2075 *
2076 * @returns VBox status code.
2077 * @retval VINF_SUCCESS on success.
2078 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
2079 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
2080 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
2081 *
2082 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2083 * @param GCPtr The guest virtual address to walk by.
2084 * @param pWalk Where to return the walk result. This is valid for some
2085 * error codes as well.
2086 * @param pGstWalk The guest mode specific page walk information.
2087 */
2088int pgmGstPtWalk(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
2089{
2090 VMCPU_ASSERT_EMT(pVCpu);
2091#ifdef VBOX_VMM_TARGET_X86
2092 switch (pVCpu->pgm.s.enmGuestMode)
2093 {
2094 case PGMMODE_32_BIT:
2095 pGstWalk->enmType = PGMPTWALKGSTTYPE_32BIT;
2096 return PGM_GST_NAME_32BIT(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Legacy);
2097
2098 case PGMMODE_PAE:
2099 case PGMMODE_PAE_NX:
2100 pGstWalk->enmType = PGMPTWALKGSTTYPE_PAE;
2101 return PGM_GST_NAME_PAE(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Pae);
2102
2103 case PGMMODE_AMD64:
2104 case PGMMODE_AMD64_NX:
2105 pGstWalk->enmType = PGMPTWALKGSTTYPE_AMD64;
2106 return PGM_GST_NAME_AMD64(Walk)(pVCpu, GCPtr, pWalk, &pGstWalk->u.Amd64);
2107
2108 case PGMMODE_REAL:
2109 case PGMMODE_PROTECTED:
2110 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2111 return VERR_PGM_NOT_USED_IN_MODE;
2112
2113 case PGMMODE_EPT:
2114 case PGMMODE_NESTED_32BIT:
2115 case PGMMODE_NESTED_PAE:
2116 case PGMMODE_NESTED_AMD64:
2117 default:
2118 AssertFailed();
2119 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2120 return VERR_PGM_NOT_USED_IN_MODE;
2121 }
2122
2123#elif defined(VBOX_VMM_TARGET_ARMV8)
2124 /** @todo Incorporate EL (for nested virt and EL3 later on). */
2125 uintptr_t idx = (GCPtr & RT_BIT_64(55))
2126 ? pVCpu->pgm.s.aidxGuestModeDataTtbr1[1]
2127 : pVCpu->pgm.s.aidxGuestModeDataTtbr0[1];
2128 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2129 AssertReturn(g_aPgmGuestModeData[idx].pfnWalk, VERR_PGM_MODE_IPE);
2130 return g_aPgmGuestModeData[idx].pfnWalk(pVCpu, GCPtr, pWalk, pGstWalk);
2131#else
2132# error "port me"
2133#endif
2134}
2135
2136
2137/**
2138 * Tries to continue the previous walk.
2139 *
2140 * @note Requires the caller to hold the PGM lock from the first
2141 * pgmGstPtWalk() call to the last pgmGstPtWalkNext() call. Otherwise
2142 * we cannot use the pointers.
2143 *
2144 * @returns VBox status code.
2145 * @retval VINF_SUCCESS on success.
2146 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
2147 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
2148 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
2149 *
2150 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2151 * @param GCPtr The guest virtual address to walk by.
2152 * @param pWalk Pointer to the previous walk result and where to return
2153 * the result of this walk. This is valid for some error
2154 * codes as well.
2155 * @param pGstWalk The guest-mode specific walk information.
2156 */
2157int pgmGstPtWalkNext(PVMCPUCC pVCpu, RTGCPTR GCPtr, PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
2158{
2159#ifdef VBOX_VMM_TARGET_X86 /** @todo optimize for ARMv8 */
2160 /*
2161 * We can only handle successful walks.
2162 * We also limit ourselves to the next page.
2163 */
2164 if ( pWalk->fSucceeded
2165 && GCPtr - pWalk->GCPtr == GUEST_PAGE_SIZE)
2166 {
2167 Assert(pWalk->uLevel == 0);
2168 if (pGstWalk->enmType == PGMPTWALKGSTTYPE_AMD64)
2169 {
2170 /*
2171 * AMD64
2172 */
2173 if (!pWalk->fGigantPage && !pWalk->fBigPage)
2174 {
2175 /*
2176 * We fall back to full walk if the PDE table changes, if any
2177 * reserved bits are set, or if the effective page access changes.
2178 */
2179 const uint64_t fPteSame = X86_PTE_P | X86_PTE_RW | X86_PTE_US | X86_PTE_PWT
2180 | X86_PTE_PCD | X86_PTE_A | X86_PTE_PAE_NX;
2181 const uint64_t fPdeSame = X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PWT
2182 | X86_PDE_PCD | X86_PDE_A | X86_PDE_PAE_NX | X86_PDE_PS;
2183
2184 if ((GCPtr >> X86_PD_PAE_SHIFT) == (pWalk->GCPtr >> X86_PD_PAE_SHIFT))
2185 {
2186 if (pGstWalk->u.Amd64.pPte)
2187 {
2188 X86PTEPAE Pte;
2189 Pte.u = pGstWalk->u.Amd64.pPte[1].u;
2190 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2191 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2192 {
2193 pWalk->GCPtr = GCPtr;
2194 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2195 pGstWalk->u.Amd64.Pte.u = Pte.u;
2196 pGstWalk->u.Amd64.pPte++;
2197 return VINF_SUCCESS;
2198 }
2199 }
2200 }
2201 else if ((GCPtr >> X86_PDPT_SHIFT) == (pWalk->GCPtr >> X86_PDPT_SHIFT))
2202 {
2203 Assert(!((GCPtr >> X86_PT_PAE_SHIFT) & X86_PT_PAE_MASK)); /* Must be first PT entry. */
2204 if (pGstWalk->u.Amd64.pPde)
2205 {
2206 X86PDEPAE Pde;
2207 Pde.u = pGstWalk->u.Amd64.pPde[1].u;
2208 if ( (Pde.u & fPdeSame) == (pGstWalk->u.Amd64.Pde.u & fPdeSame)
2209 && !(Pde.u & (pVCpu)->pgm.s.fGstAmd64MbzPdeMask))
2210 {
2211 /* Get the new PTE and check out the first entry. */
2212 int rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, PGM_A20_APPLY(pVCpu, (Pde.u & X86_PDE_PAE_PG_MASK)),
2213 &pGstWalk->u.Amd64.pPt);
2214 if (RT_SUCCESS(rc))
2215 {
2216 pGstWalk->u.Amd64.pPte = &pGstWalk->u.Amd64.pPt->a[0];
2217 X86PTEPAE Pte;
2218 Pte.u = pGstWalk->u.Amd64.pPte->u;
2219 if ( (Pte.u & fPteSame) == (pGstWalk->u.Amd64.Pte.u & fPteSame)
2220 && !(Pte.u & (pVCpu)->pgm.s.fGstAmd64MbzPteMask))
2221 {
2222 pWalk->GCPtr = GCPtr;
2223 pWalk->GCPhys = Pte.u & X86_PTE_PAE_PG_MASK;
2224 pGstWalk->u.Amd64.Pte.u = Pte.u;
2225 pGstWalk->u.Amd64.Pde.u = Pde.u;
2226 pGstWalk->u.Amd64.pPde++;
2227 return VINF_SUCCESS;
2228 }
2229 }
2230 }
2231 }
2232 }
2233 }
2234 else if (!pWalk->fGigantPage)
2235 {
2236 if ((GCPtr & X86_PAGE_2M_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_2M_BASE_MASK))
2237 {
2238 pWalk->GCPtr = GCPtr;
2239 pWalk->GCPhys += GUEST_PAGE_SIZE;
2240 return VINF_SUCCESS;
2241 }
2242 }
2243 else
2244 {
2245 if ((GCPtr & X86_PAGE_1G_BASE_MASK) == (pWalk->GCPtr & X86_PAGE_1G_BASE_MASK))
2246 {
2247 pWalk->GCPtr = GCPtr;
2248 pWalk->GCPhys += GUEST_PAGE_SIZE;
2249 return VINF_SUCCESS;
2250 }
2251 }
2252 }
2253 }
2254#endif /* VBOX_VMM_TARGET_X86 */
2255 /* Case we don't handle. Do full walk. */
2256 return pgmGstPtWalk(pVCpu, GCPtr, pWalk, pGstWalk);
2257}
2258
2259
2260#ifdef VBOX_VMM_TARGET_X86
2261/**
2262 * Modify page flags for a range of pages in the guest's tables
2263 *
2264 * The existing flags are ANDed with the fMask and ORed with the fFlags.
2265 *
2266 * @returns VBox status code.
2267 * @param pVCpu The cross context virtual CPU structure.
2268 * @param GCPtr Virtual address of the first page in the range.
2269 * @param cb Size (in bytes) of the range to apply the modification to.
2270 * @param fFlags The OR mask - page flags X86_PTE_*, excluding the page mask of course.
2271 * @param fMask The AND mask - page flags X86_PTE_*, excluding the page mask of course.
2272 * Be very CAREFUL when ~'ing constants which could be 32-bit!
2273 */
2274VMMDECL(int) PGMGstModifyPage(PVMCPUCC pVCpu, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask)
2275{
2276 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2277 VMCPU_ASSERT_EMT(pVCpu);
2278
2279 /*
2280 * Validate input.
2281 */
2282 AssertMsg(!(fFlags & X86_PTE_PAE_PG_MASK), ("fFlags=%#llx\n", fFlags));
2283 Assert(cb);
2284
2285 LogFlow(("PGMGstModifyPage %RGv %d bytes fFlags=%08llx fMask=%08llx\n", GCPtr, cb, fFlags, fMask));
2286
2287 /*
2288 * Adjust input.
2289 */
2290 cb += GCPtr & GUEST_PAGE_OFFSET_MASK;
2291 cb = RT_ALIGN_Z(cb, GUEST_PAGE_SIZE);
2292 GCPtr &= ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK;
2293
2294 /*
2295 * Call worker.
2296 */
2297 uintptr_t idx = pVCpu->pgm.s.idxGuestModeData;
2298 AssertReturn(idx < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
2299 AssertReturn(g_aPgmGuestModeData[idx].pfnModifyPage, VERR_PGM_MODE_IPE);
2300 int rc = g_aPgmGuestModeData[idx].pfnModifyPage(pVCpu, GCPtr, cb, fFlags, fMask);
2301
2302 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,GstModifyPage), a);
2303 return rc;
2304}
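/* Illustrative usage sketch (not part of the original source): the existing flags
 * are ANDed with fMask and then ORed with fFlags, so write-protecting one page is:
 *
 *     rc = PGMGstModifyPage(pVCpu, GCPtr, GUEST_PAGE_SIZE, 0, ~(uint64_t)X86_PTE_RW);
 */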
2305#endif /* VBOX_VMM_TARGET_X86 */
2306
2307#ifdef VBOX_VMM_TARGET_X86
2308
2309/**
2310 * Checks whether the given PAE PDPEs are potentially valid for the guest.
2311 *
2312 * @returns @c true if the PDPE is valid, @c false otherwise.
2313 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2314 * @param paPaePdpes The PAE PDPEs to validate.
2315 *
2316 * @remarks This function -only- checks the reserved bits in the PDPE entries.
2317 */
2318VMM_INT_DECL(bool) PGMGstArePaePdpesValid(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
2319{
2320 Assert(paPaePdpes);
2321 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
2322 {
2323 X86PDPE const PaePdpe = paPaePdpes[i];
2324 if ( !(PaePdpe.u & X86_PDPE_P)
2325 || !(PaePdpe.u & pVCpu->pgm.s.fGstPaeMbzPdpeMask))
2326 { /* likely */ }
2327 else
2328 return false;
2329 }
2330 return true;
2331}
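/* Clarifying note: an entry passes the check either because it is not present
 * (P clear, e.g. an all-zero PDPE) or because none of the must-be-zero bits in
 * fGstPaeMbzPdpeMask are set; the referenced physical address itself is not
 * validated here. */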
2332
2333
2334/**
2335 * Performs the lazy mapping of the 32-bit guest PD.
2336 *
2337 * @returns VBox status code.
2338 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2339 * @param ppPd Where to return the pointer to the mapping. This is
2340 * always set.
2341 */
2342int pgmGstLazyMap32BitPD(PVMCPUCC pVCpu, PX86PD *ppPd)
2343{
2344 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2345 PGM_LOCK_VOID(pVM);
2346
2347 Assert(!pVCpu->pgm.s.CTX_SUFF(pGst32BitPd));
2348
2349 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2350 PPGMPAGE pPage;
2351 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2352 if (RT_SUCCESS(rc))
2353 {
2354 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPd);
2355 if (RT_SUCCESS(rc))
2356 {
2357# ifdef IN_RING3
2358 pVCpu->pgm.s.pGst32BitPdR0 = NIL_RTR0PTR;
2359 pVCpu->pgm.s.pGst32BitPdR3 = *ppPd;
2360# else
2361 pVCpu->pgm.s.pGst32BitPdR3 = NIL_RTR3PTR;
2362 pVCpu->pgm.s.pGst32BitPdR0 = *ppPd;
2363# endif
2364 PGM_UNLOCK(pVM);
2365 return VINF_SUCCESS;
2366 }
2367 AssertRC(rc);
2368 }
2369 PGM_UNLOCK(pVM);
2370
2371 *ppPd = NULL;
2372 return rc;
2373}
2374
2375
2376/**
2377 * Performs the lazy mapping of the PAE guest PDPT.
2378 *
2379 * @returns VBox status code.
2380 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2381 * @param ppPdpt Where to return the pointer to the mapping. This is
2382 * always set.
2383 */
2384int pgmGstLazyMapPaePDPT(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
2385{
2386 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt));
2387 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2388 PGM_LOCK_VOID(pVM);
2389
2390 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2391 PPGMPAGE pPage;
2392 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2393 if (RT_SUCCESS(rc))
2394 {
2395 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPdpt);
2396 if (RT_SUCCESS(rc))
2397 {
2398# ifdef IN_RING3
2399 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
2400 pVCpu->pgm.s.pGstPaePdptR3 = *ppPdpt;
2401# else
2402 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
2403 pVCpu->pgm.s.pGstPaePdptR0 = *ppPdpt;
2404# endif
2405 PGM_UNLOCK(pVM);
2406 return VINF_SUCCESS;
2407 }
2408 AssertRC(rc);
2409 }
2410
2411 PGM_UNLOCK(pVM);
2412 *ppPdpt = NULL;
2413 return rc;
2414}
2415
2416
2417/**
2418 * Performs the lazy mapping / updating of a PAE guest PD.
2419 *
2420 * @returns VBox status code.
2421 * @retval VINF_SUCCESS on success.
2422 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2423 * @param iPdpt Which PD entry to map (0..3).
2424 * @param ppPd Where to return the pointer to the mapping. This is
2425 * always set.
2426 */
2427int pgmGstLazyMapPaePD(PVMCPUCC pVCpu, uint32_t iPdpt, PX86PDPAE *ppPd)
2428{
2429 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2430 PGM_LOCK_VOID(pVM);
2431
2432 PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
2433 Assert(pGuestPDPT);
2434 Assert(pGuestPDPT->a[iPdpt].u & X86_PDPE_P);
2435 RTGCPHYS GCPhys = pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK;
2436 bool const fChanged = pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] != GCPhys;
2437
2438 PPGMPAGE pPage;
2439 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
2440 if (RT_SUCCESS(rc))
2441 {
2442 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)ppPd);
2443 AssertRC(rc);
2444 if (RT_SUCCESS(rc))
2445 {
2446# ifdef IN_RING3
2447 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2448 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = *ppPd;
2449# else
2450 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2451 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = *ppPd;
2452# endif
2453 if (fChanged)
2454 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = GCPhys;
2455 PGM_UNLOCK(pVM);
2456 return VINF_SUCCESS;
2457 }
2458 }
2459
2460 /* Invalid page or some failure, invalidate the entry. */
2461 pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt] = NIL_RTGCPHYS;
2462 pVCpu->pgm.s.apGstPaePDsR3[iPdpt] = NIL_RTR3PTR;
2463 pVCpu->pgm.s.apGstPaePDsR0[iPdpt] = NIL_RTR0PTR;
2464
2465 PGM_UNLOCK(pVM);
2466 return rc;
2467}
2468
2469
2470/**
2471 * Performs the lazy mapping of the 32-bit guest PD.
2472 * Performs the lazy mapping of the AMD64 guest PML4 table.
2473 * @returns VBox status code.
2474 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2475 * @param ppPml4 Where to return the pointer to the mapping. This will
2476 * always be set.
2477 */
2478int pgmGstLazyMapPml4(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
2479{
2480 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4));
2481 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2482 PGM_LOCK_VOID(pVM);
2483
2484 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, pVCpu->pgm.s.GCPhysCR3);
2485 PPGMPAGE pPage;
2486 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage);
2487 if (RT_SUCCESS(rc))
2488 {
2489 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysCR3, (void **)ppPml4);
2490 if (RT_SUCCESS(rc))
2491 {
2492# ifdef IN_RING3
2493 pVCpu->pgm.s.pGstAmd64Pml4R0 = NIL_RTR0PTR;
2494 pVCpu->pgm.s.pGstAmd64Pml4R3 = *ppPml4;
2495# else
2496 pVCpu->pgm.s.pGstAmd64Pml4R3 = NIL_RTR3PTR;
2497 pVCpu->pgm.s.pGstAmd64Pml4R0 = *ppPml4;
2498# endif
2499 PGM_UNLOCK(pVM);
2500 return VINF_SUCCESS;
2501 }
2502 }
2503
2504 PGM_UNLOCK(pVM);
2505 *ppPml4 = NULL;
2506 return rc;
2507}
2508
2509
2510# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2511 /**
2512 * Performs the lazy mapping of the guest PML4 table when using EPT paging.
2513 *
2514 * @returns VBox status code.
2515 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2516 * @param ppEptPml4 Where to return the pointer to the mapping. This will
2517 * always be set.
2518 */
2519int pgmGstLazyMapEptPml4(PVMCPUCC pVCpu, PEPTPML4 *ppEptPml4)
2520{
2521 Assert(!pVCpu->pgm.s.CTX_SUFF(pGstEptPml4));
2522 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
2523 PGM_LOCK_VOID(pVM);
2524
2525 RTGCPHYS const GCPhysEpt = pVCpu->pgm.s.uEptPtr & EPT_EPTP_PG_MASK;
2526 PPGMPAGE pPage;
2527 int rc = pgmPhysGetPageEx(pVM, GCPhysEpt, &pPage);
2528 if (RT_SUCCESS(rc))
2529 {
2530 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhysEpt, (void **)ppEptPml4);
2531 if (RT_SUCCESS(rc))
2532 {
2533# ifdef IN_RING3
2534 pVCpu->pgm.s.pGstEptPml4R0 = NIL_RTR0PTR;
2535 pVCpu->pgm.s.pGstEptPml4R3 = *ppEptPml4;
2536# else
2537 pVCpu->pgm.s.pGstEptPml4R3 = NIL_RTR3PTR;
2538 pVCpu->pgm.s.pGstEptPml4R0 = *ppEptPml4;
2539# endif
2540 PGM_UNLOCK(pVM);
2541 return VINF_SUCCESS;
2542 }
2543 }
2544
2545 PGM_UNLOCK(pVM);
2546 *ppEptPml4 = NULL;
2547 return rc;
2548}
2549# endif
2550
2551
2552/**
2553 * Gets the current CR3 register value for the shadow memory context.
2554 * @returns CR3 value.
2555 * @param pVCpu The cross context virtual CPU structure.
2556 */
2557VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
2558{
2559# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
2560 PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
2561 AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
2562 return pPoolPage->Core.Key;
2563# else
2564 RT_NOREF(pVCpu);
2565 return NIL_RTHCPHYS;
2566# endif
2567}
2568
2569
2570/**
2571 * Forces lazy remapping of the guest's PAE page-directory structures.
2572 *
2573 * @param pVCpu The cross context virtual CPU structure.
2574 */
2575static void pgmGstFlushPaePdpes(PVMCPU pVCpu)
2576{
2577 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->pgm.s.aGCPhysGstPaePDs); i++)
2578 {
2579 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
2580 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
2581 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
2582 }
2583}
2584
2585
2586# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2587
2588/**
2589 * Performs a guest second-level address translation (SLAT).
2590 *
2591 * @returns VBox status code.
2592 * @retval VINF_SUCCESS on success.
2593 * @retval VERR_PAGE_TABLE_NOT_PRESENT on failure. Check pWalk for details.
2594 * @retval VERR_PGM_NOT_USED_IN_MODE if paging isn't enabled. @a pWalk is
2595 * not valid, except enmType is PGMPTWALKGSTTYPE_INVALID.
2596 *
2597 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2598 * @param GCPhysNested The nested-guest physical address being translated.
2599 * @param fIsLinearAddrValid Whether the linear address in @a GCPtrNested is valid
2600 * and is the cause of this translation.
2601 * @param GCPtrNested The nested-guest virtual address that initiated the
2602 * SLAT. If none, pass 0 (and not NIL_RTGCPTR).
2603 * @param pWalk Where to return the walk result. This is updated for
2604 * all error codes other than
2605 * VERR_PGM_NOT_USED_IN_MODE.
2606 * @param pGstWalk Where to store the second-level paging-mode specific
2607 * walk info.
2608 */
2609static int pgmGstSlatWalk(PVMCPUCC pVCpu, RTGCPHYS GCPhysNested, bool fIsLinearAddrValid, RTGCPTR GCPtrNested,
2610 PPGMPTWALK pWalk, PPGMPTWALKGST pGstWalk)
2611{
2612 /* SLAT mode must be valid at this point as this should only be used -after- we have determined SLAT mode. */
2613 Assert( pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_DIRECT
2614 && pVCpu->pgm.s.enmGuestSlatMode != PGMSLAT_INVALID);
2615 AssertPtr(pWalk);
2616 AssertPtr(pGstWalk);
2617 switch (pVCpu->pgm.s.enmGuestSlatMode)
2618 {
2619 case PGMSLAT_EPT:
2620 pGstWalk->enmType = PGMPTWALKGSTTYPE_EPT;
2621 return PGM_GST_SLAT_NAME_EPT(Walk)(pVCpu, GCPhysNested, fIsLinearAddrValid, GCPtrNested, pWalk, &pGstWalk->u.Ept);
2622
2623 default:
2624 AssertFailed();
2625 pGstWalk->enmType = PGMPTWALKGSTTYPE_INVALID;
2626 return VERR_PGM_NOT_USED_IN_MODE;
2627 }
2628}
2629
2630
2631/**
2632 * Performs second-level address translation for the given CR3 and updates the
2633 * nested-guest CR3 when successful.
2634 *
2635 * @returns VBox status code.
2636 * @param pVCpu The cross context virtual CPU structure.
2637 * @param uCr3 The masked nested-guest CR3 value.
2638 * @param pGCPhysCr3 Where to store the translated CR3.
2639 *
2640 * @warning This updates PGMCPU::GCPhysNstGstCR3 when the translation succeeds. Be
2641 * mindful of this in code that's hyper sensitive to the order of
2642 * operations.
2643 */
2644static int pgmGstSlatTranslateCr3(PVMCPUCC pVCpu, uint64_t uCr3, PRTGCPHYS pGCPhysCr3)
2645{
2646 if (uCr3 != pVCpu->pgm.s.GCPhysNstGstCR3)
2647 {
2648 PGMPTWALK Walk;
2649 PGMPTWALKGST GstWalk;
2650 int const rc = pgmGstSlatWalk(pVCpu, uCr3, false /* fIsLinearAddrValid */, 0 /* GCPtrNested */, &Walk, &GstWalk);
2651 if (RT_SUCCESS(rc))
2652 {
2653 /* Update nested-guest CR3. */
2654 pVCpu->pgm.s.GCPhysNstGstCR3 = uCr3;
2655
2656 /* Pass back the translated result. */
2657 *pGCPhysCr3 = Walk.GCPhys;
2658 return VINF_SUCCESS;
2659 }
2660
2661 /* Translation failed. */
2662 *pGCPhysCr3 = NIL_RTGCPHYS;
2663 return rc;
2664 }
2665
2666 /*
2667 * If the nested-guest CR3 has not changed, then the previously
2668 * translated CR3 result (i.e. GCPhysCR3) is passed back.
2669 */
2670 *pGCPhysCr3 = pVCpu->pgm.s.GCPhysCR3;
2671 return VINF_SUCCESS;
2672}
2673
2674# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
2675
2676/**
2677 * Performs and schedules necessary updates following a CR3 load or reload.
2678 *
2679 * This will normally involve mapping the guest PD or nPDPT
2680 *
2681 * @returns VBox status code.
2682 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync. This can
2683 * safely be ignored and overridden since the FF will be set too then.
2684 * @param pVCpu The cross context virtual CPU structure.
2685 * @param cr3 The new cr3.
2686 * @param fGlobal Indicates whether this is a global flush or not.
2687 */
2688VMM_INT_DECL(int) PGMFlushTLB(PVMCPUCC pVCpu, uint64_t cr3, bool fGlobal)
2689{
2690 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2691
2692 VMCPU_ASSERT_EMT(pVCpu);
2693
2694 /*
2695 * Always flag the necessary updates; necessary for hardware acceleration
2696 */
2697 /** @todo optimize this, it shouldn't always be necessary. */
2698 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2699 if (fGlobal)
2700 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2701
2702 /*
2703 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2704 */
2705 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2706 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2707#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2708 if ( pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT
2709 && PGMMODE_WITH_PAGING(pVCpu->pgm.s.enmGuestMode))
2710 {
2711 RTGCPHYS GCPhysOut;
2712 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2713 if (RT_SUCCESS(rc))
2714 GCPhysCR3 = GCPhysOut;
2715 else
2716 {
2717 /* CR3 SLAT translation failed but we try to pretend it
2718 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2719 AssertMsgFailed(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2720 int const rc2 = pgmGstUnmapCr3(pVCpu);
2721 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2722 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2723 return rc2;
2724 }
2725 }
2726#endif
2727
2728 LogFlowFunc(("cr3=%RX64 old=%RX64 fGlobal=%d\n", cr3, GCPhysOldCR3, fGlobal));
2729 int rc = VINF_SUCCESS;
2730 if (GCPhysOldCR3 != GCPhysCR3)
2731 {
2732 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2733 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2734 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2735
2736 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2737 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2738 if (RT_LIKELY(rc == VINF_SUCCESS))
2739 { }
2740 else
2741 {
2742 AssertMsg(rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc));
2743 Assert(VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_PGM_SYNC_CR3));
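            /* The mapping is deferred: keep the old CR3 value and flag PGM_SYNC_MAP_CR3
               so that PGMSyncCR3 can finish the aborted MapCR3 call later. */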
2744 pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped) = false;
2745 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
2746 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2747 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3;
2748 }
2749
2750 if (fGlobal)
2751 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3Global));
2752 else
2753 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBNewCR3));
2754 }
2755 else
2756 {
2757#ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
2758 PVMCC const pVM = pVCpu->CTX_SUFF(pVM);
2759 PPGMPOOL const pPool = pVM->pgm.s.CTX_SUFF(pPool);
2760 if (pPool->cDirtyPages)
2761 {
2762 PGM_LOCK_VOID(pVM);
2763 pgmPoolResetDirtyPages(pVM);
2764 PGM_UNLOCK(pVM);
2765 }
2766#endif
2767 if (fGlobal)
2768 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3Global));
2769 else
2770 STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLBSameCR3));
2771
2772 /*
2773 * Flush PAE PDPTEs.
2774 */
2775 if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2776 pgmGstFlushPaePdpes(pVCpu);
2777 }
2778
2779 if (!fGlobal)
2780 IEMTlbInvalidateAll(pVCpu);
2781 else
2782 IEMTlbInvalidateAllGlobal(pVCpu);
2783 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,FlushTLB), a);
2784 return rc;
2785}
2786
2787
2788/**
2789 * Performs and schedules necessary updates following a CR3 load or reload when
2790 * using nested or extended paging.
2791 *
2792 * This API is an alternative to PGMFlushTLB that avoids actually flushing the
2793 * TLB and triggering a SyncCR3.
2794 *
2795 * This will normally involve mapping the guest PD or nPDPT
2796 *
2797 * @returns VBox status code.
2798 * @retval VINF_SUCCESS.
2799 * @retval VINF_PGM_SYNC_CR3 if monitoring requires a CR3 sync (not for nested
2800 * paging modes). This can safely be ignored and overridden since the
2801 * FF will be set too then.
2802 * @param pVCpu The cross context virtual CPU structure.
2803 * @param cr3 The new CR3.
2804 */
2805VMM_INT_DECL(int) PGMUpdateCR3(PVMCPUCC pVCpu, uint64_t cr3)
2806{
2807 VMCPU_ASSERT_EMT(pVCpu);
2808
2809 /* We assume we're only called in nested paging mode. */
2810 Assert(pVCpu->CTX_SUFF(pVM)->pgm.s.fNestedPaging || pVCpu->pgm.s.enmShadowMode == PGMMODE_EPT);
2811
2812 /*
2813 * Remap the CR3 content and adjust the monitoring if CR3 was actually changed.
2814 */
2815 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2816 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2817#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2818 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2819 {
2820 RTGCPHYS GCPhysOut;
2821 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2822 if (RT_SUCCESS(rc))
2823 GCPhysCR3 = GCPhysOut;
2824 else
2825 {
2826 /* CR3 SLAT translation failed but we try to pretend it
2827 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2828 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", cr3, rc));
2829 int const rc2 = pgmGstUnmapCr3(pVCpu);
2830 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2831 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2832 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2833 return rc2;
2834 }
2835 }
2836#endif
2837
2838 LogFlowFunc(("cr3=%RX64 old=%RX64\n", cr3, GCPhysOldCR3));
2839 int rc = VINF_SUCCESS;
2840 if (GCPhysOldCR3 != GCPhysCR3)
2841 {
2842 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2843 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2844 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2845
2846 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2847 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2848
2849 AssertRCSuccess(rc); /* Assumes VINF_PGM_SYNC_CR3 doesn't apply to nested paging. */ /** @todo this isn't true for the mac, but we need hw to test/fix this. */
2850 }
2851 /*
2852 * Flush PAE PDPTEs.
2853 */
2854 else if (PGMMODE_IS_PAE(pVCpu->pgm.s.enmGuestMode))
2855 pgmGstFlushPaePdpes(pVCpu);
2856
2857 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
2858 return rc;
2859}
2860
2861
2862/**
2863 * Synchronize the paging structures.
2864 *
2865 * This function is called in response to the VMCPU_FF_PGM_SYNC_CR3 and
2866 * VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL force action flags. They are set
2867 * in several places, most importantly whenever the CR3 is loaded.
2868 *
2869 * @returns VBox status code. May return VINF_PGM_SYNC_CR3 in RC/R0.
2870 * @retval VERR_PGM_NO_HYPERVISOR_ADDRESS in raw-mode when we're unable to map
2871 * the VMM into guest context.
2872 * @param pVCpu The cross context virtual CPU structure.
2873 * @param cr0 Guest context CR0 register
2874 * @param cr3 Guest context CR3 register
2875 * @param cr4 Guest context CR4 register
2876 * @param fGlobal Including global page directories or not
2877 */
2878VMM_INT_DECL(int) PGMSyncCR3(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal)
2879{
2880 VMCPU_ASSERT_EMT(pVCpu);
2881
2882#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
2883 /*
2884 * The pool may have pending stuff and even require a return to ring-3 to
2885 * clear the whole thing.
2886 */
2887 int rcPool = pgmPoolSyncCR3(pVCpu);
2888 if (rcPool != VINF_SUCCESS)
2889 return rcPool;
2890#endif
2891
2892 /*
2893 * We might be called when we shouldn't.
2894 *
2895 * The mode switching will ensure that the PD is resynced after every mode
2896 * switch. So, if we find ourselves here when in protected or real mode
2897 * we can safely clear the FF and return immediately.
2898 */
2899 if (pVCpu->pgm.s.enmGuestMode <= PGMMODE_PROTECTED)
2900 {
2901 Assert((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE));
2902 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2903 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2904 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2905 return VINF_SUCCESS;
2906 }
2907
2908 /* If global pages are not supported, then all flushes are global. */
2909 if (!(cr4 & X86_CR4_PGE))
2910 fGlobal = true;
2911 LogFlow(("PGMSyncCR3: cr0=%RX64 cr3=%RX64 cr4=%RX64 fGlobal=%d[%d,%d]\n", cr0, cr3, cr4, fGlobal,
2912 VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)));
2913
2914 /*
2915 * Check if we need to finish an aborted MapCR3 call (see PGMFlushTLB).
2916 * This should be done before SyncCR3.
2917 */
2918 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_MAP_CR3)
2919 {
2920 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MAP_CR3;
2921
2922 RTGCPHYS const GCPhysOldCR3 = pVCpu->pgm.s.GCPhysCR3;
2923 RTGCPHYS GCPhysCR3 = pgmGetGuestMaskedCr3(pVCpu, cr3);
2924#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
2925 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
2926 {
2927 RTGCPHYS GCPhysOut;
2928 int rc2 = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
2929 if (RT_SUCCESS(rc2))
2930 GCPhysCR3 = GCPhysOut;
2931 else
2932 {
2933 /* CR3 SLAT translation failed but we try to pretend it
2934 succeeded for the reasons mentioned in PGMHCChangeMode(). */
2935 AssertMsgFailed(("Failed to translate CR3 %#RX64. rc2=%Rrc\n", cr3, rc2));
2936 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
2937 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
2938 return rc2;
2939 }
2940 }
2941#endif
2942 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
2943 int rc = VINF_SUCCESS;
2944 if (GCPhysOldCR3 != GCPhysCR3)
2945 {
2946 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2947 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2948 AssertReturn(g_aPgmBothModeData[idxBth].pfnMapCR3, VERR_PGM_MODE_IPE);
2949 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
2950 rc = g_aPgmBothModeData[idxBth].pfnMapCR3(pVCpu, GCPhysCR3);
2951 }
2952
2953 /* Make sure we check for pending pgm pool syncs as we clear VMCPU_FF_PGM_SYNC_CR3 later on! */
2954 if ( rc == VINF_PGM_SYNC_CR3
2955 || (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
2956 {
2957 Log(("PGMSyncCR3: pending pgm pool sync after MapCR3!\n"));
2958#ifdef IN_RING3
2959# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
2960 rc = pgmPoolSyncCR3(pVCpu);
2961# else
2962 rc = VINF_SUCCESS;
2963# endif
2964#else
2965 if (rc == VINF_PGM_SYNC_CR3)
2966 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3;
2967 return VINF_PGM_SYNC_CR3;
2968#endif
2969 }
2970 AssertRCReturn(rc, rc);
2971 AssertRCSuccessReturn(rc, VERR_IPE_UNEXPECTED_INFO_STATUS);
2972 }
2973
2974 /*
2975 * Let the 'Bth' function do the work and we'll just keep track of the flags.
2976 */
2977 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2978
2979 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
2980 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), VERR_PGM_MODE_IPE);
2981 AssertReturn(g_aPgmBothModeData[idxBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
2982 int rcSync = g_aPgmBothModeData[idxBth].pfnSyncCR3(pVCpu, cr0, cr3, cr4, fGlobal);
2983
2984 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
2985 AssertMsg(rcSync == VINF_SUCCESS || rcSync == VINF_PGM_SYNC_CR3 || RT_FAILURE(rcSync), ("rcSync=%Rrc\n", rcSync));
2986 if (rcSync == VINF_SUCCESS)
2987 {
2988 if (pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)
2989 {
2990 /* Go back to ring 3 if a pgm pool sync is again pending. */
2991 return VINF_PGM_SYNC_CR3;
2992 }
2993
2994 if (!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_ALWAYS))
2995 {
2996 Assert(!(pVCpu->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL));
2997 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
2998 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
2999 }
3000 }
3001
3002 /*
3003 * Now flush the CR3 (guest context).
3004 */
3005 if (rcSync == VINF_SUCCESS)
3006 PGM_INVL_VCPU_TLBS(pVCpu);
3007 return rcSync;
3008}
3009
3010
3011# ifdef VBOX_STRICT
3012/**
3013 * Asserts that everything related to the guest CR3 is correctly shadowed.
3014 *
3015 * This will call PGMAssertNoMappingConflicts() and PGMAssertHandlerAndFlagsInSync(),
3016 * and assert the correctness of the guest CR3 mapping before asserting that the
3017 * shadow page tables is in sync with the guest page tables.
3018 *
3019 * @returns Number of conflicts.
3020 * @param pVM The cross context VM structure.
3021 * @param pVCpu The cross context virtual CPU structure.
3022 * @param cr3 The current guest CR3 register value.
3023 * @param cr4 The current guest CR4 register value.
3024 */
3025VMMDECL(unsigned) PGMAssertCR3(PVMCC pVM, PVMCPUCC pVCpu, uint64_t cr3, uint64_t cr4)
3026{
3027 AssertReturn(pVM->enmTarget == VMTARGET_X86, 0);
3028 STAM_PROFILE_START(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3029
3030 uintptr_t const idxBth = pVCpu->pgm.s.idxBothModeData;
3031 AssertReturn(idxBth < RT_ELEMENTS(g_aPgmBothModeData), -VERR_PGM_MODE_IPE);
3032 AssertReturn(g_aPgmBothModeData[idxBth].pfnAssertCR3, -VERR_PGM_MODE_IPE);
3033
3034 PGM_LOCK_VOID(pVM);
3035 unsigned cErrors = g_aPgmBothModeData[idxBth].pfnAssertCR3(pVCpu, cr3, cr4, 0, ~(RTGCPTR)0);
3036 PGM_UNLOCK(pVM);
3037
3038 STAM_PROFILE_STOP(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncCR3), a);
3039 return cErrors;
3040}
3041# endif /* VBOX_STRICT */
3042
3043
3044/**
3045 * Called by CPUM or REM when CR0.WP changes to 1.
3046 *
3047 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3048 * @thread EMT
3049 */
3050VMMDECL(void) PGMCr0WpEnabled(PVMCPUCC pVCpu)
3051{
3052 /*
3053 * Netware WP0+RO+US hack cleanup when WP0 -> WP1.
3054 *
3055 * Use the counter to judge whether there might be pool pages with active
3056 * hacks in them. If there are, we will be running the risk of messing up
3057 * the guest by allowing it to write to read-only pages. Thus, we have to
3058 * clear the page pool ASAP if there is the slightest chance.
3059 */
3060 if (pVCpu->pgm.s.cNetwareWp0Hacks > 0)
3061 {
3062 Assert(pVCpu->CTX_SUFF(pVM)->cCpus == 1);
3063
3064 Log(("PGMCr0WpEnabled: %llu WP0 hacks active - clearing page pool\n", pVCpu->pgm.s.cNetwareWp0Hacks));
3065 pVCpu->pgm.s.cNetwareWp0Hacks = 0;
3066 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
3067 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3068 }
3069}
3070
3071
3072/**
3073 * Updates PGM's copy of the guest's EPT pointer.
3074 *
3075 * @param pVCpu The cross context virtual CPU structure.
3076 * @param uEptPtr The EPT pointer.
3077 *
3078 * @remarks This can be called as part of VM-entry so we might be in the midst of
3079 * switching to VMX non-root mode.
3080 */
3081VMM_INT_DECL(void) PGMSetGuestEptPtr(PVMCPUCC pVCpu, uint64_t uEptPtr)
3082{
3083 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3084 PGM_LOCK_VOID(pVM);
3085 pVCpu->pgm.s.uEptPtr = uEptPtr;
3086 pVCpu->pgm.s.pGstEptPml4R3 = 0;
3087 pVCpu->pgm.s.pGstEptPml4R0 = 0;
3088 PGM_UNLOCK(pVM);
3089}
3090
3091
3092/**
3093 * Maps all the PAE PDPE entries.
3094 *
3095 * @returns VBox status code.
3096 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3097 * @param paPaePdpes The new PAE PDPE values.
3098 *
3099 * @remarks This function may be invoked during the process of changing the guest
3100 * paging mode to PAE, hence the guest state (CR0, CR4 etc.) may not
3101 * reflect PAE paging just yet.
3102 */
3103VMM_INT_DECL(int) PGMGstMapPaePdpes(PVMCPUCC pVCpu, PCX86PDPE paPaePdpes)
3104{
3105 Assert(paPaePdpes);
3106 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
3107 {
3108 X86PDPE const PaePdpe = paPaePdpes[i];
3109
3110 /*
3111 * In some cases (e.g. in SVM with nested paging) the validation of the PAE PDPEs
3112 * is deferred.[1] Also, different situations require different handling of invalid
3113 * PDPE entries. Here we assume the caller has already validated or doesn't require
3114 * validation of the PDPEs.
3115 *
3116 * In the case of nested EPT (i.e. for nested-guests), the PAE PDPEs have been
3117 * validated by the VMX transition.
3118 *
3119 * [1] -- See AMD spec. 15.25.10 "Legacy PAE Mode".
3120 */
3121 if ((PaePdpe.u & (pVCpu->pgm.s.fGstPaeMbzPdpeMask | X86_PDPE_P)) == X86_PDPE_P)
3122 {
3123 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
3124 RTHCPTR HCPtr;
3125
3126 RTGCPHYS GCPhys;
3127#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3128 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
3129 {
3130 PGMPTWALK Walk;
3131 PGMPTWALKGST GstWalk;
3132 RTGCPHYS const GCPhysNested = PaePdpe.u & X86_PDPE_PG_MASK;
3133 int const rc = pgmGstSlatWalk(pVCpu, GCPhysNested, false /* fIsLinearAddrValid */, 0 /* GCPtrNested */,
3134 &Walk, &GstWalk);
3135 if (RT_SUCCESS(rc))
3136 GCPhys = Walk.GCPhys;
3137 else
3138 {
3139 /*
3140 * Second-level address translation of the PAE PDPE has failed but we must -NOT-
3141 * abort and return a failure now. This is because we're called from a Mov CRx
3142 * instruction (or similar operation). Let's just pretend success but flag that
3143 * we need to map this PDPE lazily later.
3144 *
3145 * See Intel spec. 25.3 "Changes to instruction behavior in VMX non-root operation".
3146 * See Intel spec. 28.3.1 "EPT Overview".
3147 */
3148 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
3149 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
3150 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
3151 continue;
3152 }
3153 }
3154 else
3155#endif
3156 {
3157 GCPhys = PGM_A20_APPLY(pVCpu, PaePdpe.u & X86_PDPE_PG_MASK);
3158 }
3159
3160 PGM_LOCK_VOID(pVM);
3161 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
3162 AssertReturnStmt(pPage, PGM_UNLOCK(pVM), VERR_PGM_INVALID_PDPE_ADDR);
3163 int const rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
3164 PGM_UNLOCK(pVM);
3165 if (RT_SUCCESS(rc))
3166 {
3167#ifdef IN_RING3
3168 pVCpu->pgm.s.apGstPaePDsR3[i] = (PX86PDPAE)HCPtr;
3169 pVCpu->pgm.s.apGstPaePDsR0[i] = NIL_RTR0PTR;
3170#else
3171 pVCpu->pgm.s.apGstPaePDsR3[i] = NIL_RTR3PTR;
3172 pVCpu->pgm.s.apGstPaePDsR0[i] = (PX86PDPAE)HCPtr;
3173#endif
3174 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = GCPhys;
3175 continue;
3176 }
 3177            AssertMsgFailed(("PGMGstMapPaePdpes: rc=%d GCPhys=%RGp i=%d\n", rc, GCPhys, i));
3178 }
3179 pVCpu->pgm.s.apGstPaePDsR3[i] = 0;
3180 pVCpu->pgm.s.apGstPaePDsR0[i] = 0;
3181 pVCpu->pgm.s.aGCPhysGstPaePDs[i] = NIL_RTGCPHYS;
3182 }
3183 return VINF_SUCCESS;
3184}
3185
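/*
 * Illustrative sketch (not part of the original source): a caller that reads
 * the four PDPEs from guest physical memory and hands them to
 * PGMGstMapPaePdpes.  The wrapper below is hypothetical; PGMPhysSimpleReadGCPhys
 * and PGMGstMapPaePdpes are existing APIs.
 */
#if 0
static int examplePgmMapPaePdpesFromGuest(PVMCPUCC pVCpu, RTGCPHYS GCPhysPdpt)
{
    /* The PDPT is 32 bytes: four 8-byte PDPEs. */
    X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &aPaePdpes[0], GCPhysPdpt, sizeof(aPaePdpes));
    if (RT_SUCCESS(rc))
        rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]); /* Invalid entries are simply left unmapped. */
    return rc;
}
#endif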
3186
3187/**
3188 * Validates and maps the PDPT and PAE PDPEs referenced by the given CR3.
3189 *
3190 * @returns VBox status code.
3191 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
3192 * @param cr3 The guest CR3 value.
3193 *
3194 * @remarks This function may be invoked during the process of changing the guest
3195 * paging mode to PAE but the guest state (CR0, CR4 etc.) may not reflect
3196 * PAE paging just yet.
3197 */
3198VMM_INT_DECL(int) PGMGstMapPaePdpesAtCr3(PVMCPUCC pVCpu, uint64_t cr3)
3199{
3200 /*
3201 * Read the page-directory-pointer table (PDPT) at CR3.
3202 */
3203 RTGCPHYS GCPhysCR3 = (cr3 & X86_CR3_PAE_PAGE_MASK);
3204 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhysCR3);
3205
3206#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3207 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
3208 {
3209 RTGCPHYS GCPhysOut;
3210 int const rc = pgmGstSlatTranslateCr3(pVCpu, GCPhysCR3, &GCPhysOut);
3211 if (RT_SUCCESS(rc))
3212 GCPhysCR3 = GCPhysOut;
3213 else
3214 {
3215 Log(("Failed to load CR3 at %#RX64. rc=%Rrc\n", GCPhysCR3, rc));
3216 return rc;
3217 }
3218 }
3219#endif
3220
3221 RTHCPTR HCPtrGuestCr3;
3222 int rc = pgmGstMapCr3(pVCpu, GCPhysCR3, &HCPtrGuestCr3);
3223 if (RT_SUCCESS(rc))
3224 {
3225 /*
3226 * Validate the page-directory-pointer table entries (PDPE).
3227 */
3228 X86PDPE aPaePdpes[X86_PG_PAE_PDPE_ENTRIES];
3229 memcpy(&aPaePdpes[0], HCPtrGuestCr3, sizeof(aPaePdpes));
3230 if (PGMGstArePaePdpesValid(pVCpu, &aPaePdpes[0]))
3231 {
3232 /*
3233 * Map the PDPT.
3234 * We deliberately don't update PGM's GCPhysCR3 here as it's expected
3235 * that PGMFlushTLB will be called soon and only a change to CR3 then
3236 * will cause the shadow page tables to be updated.
3237 */
3238#ifdef IN_RING3
3239 pVCpu->pgm.s.pGstPaePdptR3 = (PX86PDPT)HCPtrGuestCr3;
3240 pVCpu->pgm.s.pGstPaePdptR0 = NIL_RTR0PTR;
3241#else
3242 pVCpu->pgm.s.pGstPaePdptR3 = NIL_RTR3PTR;
3243 pVCpu->pgm.s.pGstPaePdptR0 = (PX86PDPT)HCPtrGuestCr3;
3244#endif
3245
3246 /*
3247 * Update CPUM and map the 4 PAE PDPEs.
3248 */
3249 CPUMSetGuestPaePdpes(pVCpu, &aPaePdpes[0]);
3250 rc = PGMGstMapPaePdpes(pVCpu, &aPaePdpes[0]);
3251 if (RT_SUCCESS(rc))
3252 {
3253#ifdef IN_RING3
3254 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = true;
3255 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = false;
3256#else
3257 pVCpu->pgm.s.fPaePdpesAndCr3MappedR3 = false;
3258 pVCpu->pgm.s.fPaePdpesAndCr3MappedR0 = true;
3259#endif
3260 pVCpu->pgm.s.GCPhysPaeCR3 = GCPhysCR3;
3261 }
3262 }
3263 else
3264 rc = VERR_PGM_PAE_PDPE_RSVD;
3265 }
3266 return rc;
3267}
3268
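/*
 * Illustrative sketch (not part of the original source): how an emulation path
 * handling a guest CR3 load with CR4.PAE set might use PGMGstMapPaePdpesAtCr3.
 * The surrounding handler is a hypothetical outline; the PGM call and the
 * VERR_PGM_PAE_PDPE_RSVD status come from the code above.
 */
#if 0
static VBOXSTRICTRC exampleEmulateMovCr3Pae(PVMCPUCC pVCpu, uint64_t uNewCr3)
{
    /* Validate and map the PDPT and the four PAE PDPEs at the new CR3. */
    int const rc = PGMGstMapPaePdpesAtCr3(pVCpu, uNewCr3);
    if (rc == VERR_PGM_PAE_PDPE_RSVD)
        return VINF_EM_RAW_GUEST_TRAP;  /* reserved bits set - a real handler would raise #GP(0) */
    AssertRCReturn(rc, rc);
    /* ... commit uNewCr3 to CPUM and let PGMFlushTLB update the shadow tables ... */
    return VINF_SUCCESS;
}
#endif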
3269
3270/**
 3271 * Called whenever CR0 or CR4 changes in a way which may affect the paging mode.
3272 *
3273 * @returns VBox status code, with the following informational code for
3274 * VM scheduling.
 3275 * @retval VINF_SUCCESS if there was no change, or it was successfully dealt with.
3276 * @retval VINF_EM_SUSPEND or VINF_EM_OFF on a fatal runtime error. (R3 only)
3277 *
3278 * @param pVCpu The cross context virtual CPU structure.
3279 * @param cr0 The new cr0.
3280 * @param cr4 The new cr4.
3281 * @param efer The new extended feature enable register.
3282 * @param fForce Whether to force a mode change.
3283 */
3284VMM_INT_DECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr4, uint64_t efer, bool fForce)
3285{
3286 VMCPU_ASSERT_EMT(pVCpu);
3287
3288 /*
3289 * Calc the new guest mode.
3290 *
3291 * Note! We check PG before PE and without requiring PE because of the
3292 * special AMD-V paged real mode (APM vol 2, rev 3.28, 15.9).
3293 */
3294 PGMMODE enmGuestMode;
3295 if (cr0 & X86_CR0_PG)
3296 {
3297 if (!(cr4 & X86_CR4_PAE))
3298 {
3299 bool const fPse = !!(cr4 & X86_CR4_PSE);
3300 if (pVCpu->pgm.s.fGst32BitPageSizeExtension != fPse)
3301 Log(("PGMChangeMode: CR4.PSE %d -> %d\n", pVCpu->pgm.s.fGst32BitPageSizeExtension, fPse));
3302 pVCpu->pgm.s.fGst32BitPageSizeExtension = fPse;
3303 enmGuestMode = PGMMODE_32_BIT;
3304 }
3305 else if (!(efer & MSR_K6_EFER_LME))
3306 {
3307 if (!(efer & MSR_K6_EFER_NXE))
3308 enmGuestMode = PGMMODE_PAE;
3309 else
3310 enmGuestMode = PGMMODE_PAE_NX;
3311 }
3312 else
3313 {
3314 if (!(efer & MSR_K6_EFER_NXE))
3315 enmGuestMode = PGMMODE_AMD64;
3316 else
3317 enmGuestMode = PGMMODE_AMD64_NX;
3318 }
3319 }
3320 else if (!(cr0 & X86_CR0_PE))
3321 enmGuestMode = PGMMODE_REAL;
3322 else
3323 enmGuestMode = PGMMODE_PROTECTED;
3324
3325 /*
3326 * Did it change?
3327 */
3328 if ( !fForce
3329 && pVCpu->pgm.s.enmGuestMode == enmGuestMode)
3330 return VINF_SUCCESS;
3331
3332 /* Flush the TLB */
3333 PGM_INVL_VCPU_TLBS(pVCpu);
3334 return PGMHCChangeMode(pVCpu->CTX_SUFF(pVM), pVCpu, enmGuestMode, fForce);
3335}
3336
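/*
 * Illustrative sketch (not part of the original source): a hypothetical CR4
 * write handler re-deriving the paging mode via PGMChangeMode.  The CPUM
 * getters are existing APIs; the handler itself is an assumption for
 * illustration only.
 */
#if 0
static int exampleOnGuestCr4Write(PVMCPUCC pVCpu, uint64_t uNewCr4)
{
    uint64_t const uCr0  = CPUMGetGuestCR0(pVCpu);
    uint64_t const uEfer = CPUMGetGuestEFER(pVCpu);
    /* ... store uNewCr4 into the guest CR4 via CPUM first ... */
    /* Recalculates the guest mode; returns VINF_SUCCESS when nothing changed. */
    return PGMChangeMode(pVCpu, uCr0, uNewCr4, uEfer, false /*fForce*/);
}
#endif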
3337
3338/**
3339 * Converts a PGMMODE value to a PGM_TYPE_* \#define.
3340 *
3341 * @returns PGM_TYPE_*.
3342 * @param pgmMode The mode value to convert.
3343 */
3344DECLINLINE(unsigned) pgmModeToType(PGMMODE pgmMode)
3345{
3346 switch (pgmMode)
3347 {
3348 case PGMMODE_REAL: return PGM_TYPE_REAL;
3349 case PGMMODE_PROTECTED: return PGM_TYPE_PROT;
3350 case PGMMODE_32_BIT: return PGM_TYPE_32BIT;
3351 case PGMMODE_PAE:
3352 case PGMMODE_PAE_NX: return PGM_TYPE_PAE;
3353 case PGMMODE_AMD64:
3354 case PGMMODE_AMD64_NX: return PGM_TYPE_AMD64;
3355 case PGMMODE_NESTED_32BIT: return PGM_TYPE_NESTED_32BIT;
3356 case PGMMODE_NESTED_PAE: return PGM_TYPE_NESTED_PAE;
3357 case PGMMODE_NESTED_AMD64: return PGM_TYPE_NESTED_AMD64;
3358 case PGMMODE_EPT: return PGM_TYPE_EPT;
3359 case PGMMODE_NONE: return PGM_TYPE_NONE;
3360 default:
3361 AssertFatalMsgFailed(("pgmMode=%d\n", pgmMode));
3362 }
3363}
3364
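/*
 * Illustrative sketch (not part of the original source): the PGM_TYPE_* value
 * returned above indexes the per-mode dispatch tables directly, and the
 * combined shadow+guest table index is derived from it the same way
 * PGMHCChangeMode does further down in this file.
 */
#if 0
static uintptr_t exampleCalcBothModeIndex(PGMMODE enmShadowMode, PGMMODE enmGuestMode)
{
    uintptr_t const idxShw = pgmModeToType(enmShadowMode);
    uintptr_t const idxGst = pgmModeToType(enmGuestMode);
    /* Index into g_aPgmBothModeData, cf. the idxNewBth calculation in PGMHCChangeMode. */
    return (idxShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxGst;
}
#endif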
3365
3366/**
3367 * Calculates the shadow paging mode.
3368 *
3369 * @returns The shadow paging mode.
3370 * @param pVM The cross context VM structure.
3371 * @param enmGuestMode The guest mode.
3372 * @param enmHostMode The host mode.
3373 * @param enmShadowMode The current shadow mode.
3374 */
3375static PGMMODE pgmCalcShadowMode(PVMCC pVM, PGMMODE enmGuestMode, SUPPAGINGMODE enmHostMode, PGMMODE enmShadowMode)
3376{
3377 switch (enmGuestMode)
3378 {
3379 case PGMMODE_REAL:
3380 case PGMMODE_PROTECTED:
3381 switch (enmHostMode)
3382 {
3383 case SUPPAGINGMODE_32_BIT:
3384 case SUPPAGINGMODE_32_BIT_GLOBAL:
3385 enmShadowMode = PGMMODE_32_BIT;
3386 break;
3387
3388 case SUPPAGINGMODE_PAE:
3389 case SUPPAGINGMODE_PAE_NX:
3390 case SUPPAGINGMODE_PAE_GLOBAL:
3391 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3392 enmShadowMode = PGMMODE_PAE;
3393 break;
3394
3395 case SUPPAGINGMODE_AMD64:
3396 case SUPPAGINGMODE_AMD64_GLOBAL:
3397 case SUPPAGINGMODE_AMD64_NX:
3398 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3399 enmShadowMode = PGMMODE_PAE;
3400 break;
3401
3402 default:
3403 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3404 }
3405 break;
3406
3407 case PGMMODE_32_BIT:
3408 switch (enmHostMode)
3409 {
3410 case SUPPAGINGMODE_32_BIT:
3411 case SUPPAGINGMODE_32_BIT_GLOBAL:
3412 enmShadowMode = PGMMODE_32_BIT;
3413 break;
3414
3415 case SUPPAGINGMODE_PAE:
3416 case SUPPAGINGMODE_PAE_NX:
3417 case SUPPAGINGMODE_PAE_GLOBAL:
3418 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3419 enmShadowMode = PGMMODE_PAE;
3420 break;
3421
3422 case SUPPAGINGMODE_AMD64:
3423 case SUPPAGINGMODE_AMD64_GLOBAL:
3424 case SUPPAGINGMODE_AMD64_NX:
3425 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3426 enmShadowMode = PGMMODE_PAE;
3427 break;
3428
3429 default:
3430 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3431 }
3432 break;
3433
3434 case PGMMODE_PAE:
3435 case PGMMODE_PAE_NX: /** @todo This might require more switchers and guest+both modes. */
3436 switch (enmHostMode)
3437 {
3438 case SUPPAGINGMODE_32_BIT:
3439 case SUPPAGINGMODE_32_BIT_GLOBAL:
3440 enmShadowMode = PGMMODE_PAE;
3441 break;
3442
3443 case SUPPAGINGMODE_PAE:
3444 case SUPPAGINGMODE_PAE_NX:
3445 case SUPPAGINGMODE_PAE_GLOBAL:
3446 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3447 enmShadowMode = PGMMODE_PAE;
3448 break;
3449
3450 case SUPPAGINGMODE_AMD64:
3451 case SUPPAGINGMODE_AMD64_GLOBAL:
3452 case SUPPAGINGMODE_AMD64_NX:
3453 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3454 enmShadowMode = PGMMODE_PAE;
3455 break;
3456
3457 default:
3458 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3459 }
3460 break;
3461
3462 case PGMMODE_AMD64:
3463 case PGMMODE_AMD64_NX:
3464 switch (enmHostMode)
3465 {
3466 case SUPPAGINGMODE_32_BIT:
3467 case SUPPAGINGMODE_32_BIT_GLOBAL:
3468 enmShadowMode = PGMMODE_AMD64;
3469 break;
3470
3471 case SUPPAGINGMODE_PAE:
3472 case SUPPAGINGMODE_PAE_NX:
3473 case SUPPAGINGMODE_PAE_GLOBAL:
3474 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3475 enmShadowMode = PGMMODE_AMD64;
3476 break;
3477
3478 case SUPPAGINGMODE_AMD64:
3479 case SUPPAGINGMODE_AMD64_GLOBAL:
3480 case SUPPAGINGMODE_AMD64_NX:
3481 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3482 enmShadowMode = PGMMODE_AMD64;
3483 break;
3484
3485 default:
3486 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", enmHostMode), PGMMODE_INVALID);
3487 }
3488 break;
3489
3490 default:
3491 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), PGMMODE_INVALID);
3492 }
3493
3494 /*
3495 * Override the shadow mode when NEM, IEM or nested paging is active.
3496 */
3497 if (!VM_IS_HM_ENABLED(pVM))
3498 {
3499 Assert(VM_IS_NEM_ENABLED(pVM) || VM_IS_EXEC_ENGINE_IEM(pVM));
3500 pVM->pgm.s.fNestedPaging = true;
3501 enmShadowMode = PGMMODE_NONE;
3502 }
3503 else
3504 {
3505 bool fNestedPaging = HMIsNestedPagingActive(pVM);
3506 pVM->pgm.s.fNestedPaging = fNestedPaging;
3507 if (fNestedPaging)
3508 {
3509 if (HMIsVmxActive(pVM))
3510 enmShadowMode = PGMMODE_EPT;
3511 else
3512 {
3513 /* The nested SVM paging depends on the host one. */
3514 Assert(HMIsSvmActive(pVM));
3515 if ( enmGuestMode == PGMMODE_AMD64
3516 || enmGuestMode == PGMMODE_AMD64_NX)
3517 enmShadowMode = PGMMODE_NESTED_AMD64;
3518 else
3519 switch (pVM->pgm.s.enmHostMode)
3520 {
3521 case SUPPAGINGMODE_32_BIT:
3522 case SUPPAGINGMODE_32_BIT_GLOBAL:
3523 enmShadowMode = PGMMODE_NESTED_32BIT;
3524 break;
3525
3526 case SUPPAGINGMODE_PAE:
3527 case SUPPAGINGMODE_PAE_GLOBAL:
3528 case SUPPAGINGMODE_PAE_NX:
3529 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3530 enmShadowMode = PGMMODE_NESTED_PAE;
3531 break;
3532
3533 case SUPPAGINGMODE_AMD64:
3534 case SUPPAGINGMODE_AMD64_GLOBAL:
3535 case SUPPAGINGMODE_AMD64_NX:
3536 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3537 enmShadowMode = PGMMODE_NESTED_AMD64;
3538 break;
3539
3540 default:
3541 AssertLogRelMsgFailedReturn(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3542 }
3543 }
3544 }
3545#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3546 else
3547 {
3548 /* Nested paging is a requirement for nested VT-x. */
3549 AssertLogRelMsgReturn(enmGuestMode != PGMMODE_EPT, ("enmHostMode=%d\n", pVM->pgm.s.enmHostMode), PGMMODE_INVALID);
3550 }
3551#endif
3552 }
3553
3554 return enmShadowMode;
3555}
3556
3557#elif defined(VBOX_VMM_TARGET_ARMV8)
3558
3559VMM_INT_DECL(int) PGMChangeMode(PVMCPUCC pVCpu, uint8_t bEl, uint64_t u64RegSctlr, uint64_t u64RegTcr)
3560{
3561 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
3562 Assert(bEl > 0 && bEl < 4);
3563
3564 /* Only go through the setup when something has changed. */
3565 int rc;
3566 if ( u64RegSctlr != pVCpu->pgm.s.au64RegSctlrEl[bEl - 1]
3567 || u64RegTcr != pVCpu->pgm.s.au64RegTcrEl[bEl - 1])
3568 {
3569 /* guest */
3570 uintptr_t const idxOldGst = pVCpu->pgm.s.aidxGuestModeDataTtbr0[bEl];
3571 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3572 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3573 {
3574 rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3575 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", idxOldGst, rc), rc);
3576 }
3577
3578 uintptr_t const idxNewGstTtbr0 = pgmR3DeduceTypeFromTcr<ARMV8_TCR_EL1_AARCH64_T0SZ_SHIFT, ARMV8_TCR_EL1_AARCH64_TG0_SHIFT,
3579 ARMV8_TCR_EL1_AARCH64_TBI0_BIT, ARMV8_TCR_EL1_AARCH64_EPD0_BIT, false>
3580 (u64RegSctlr, u64RegTcr, &pVCpu->pgm.s.afLookupMaskTtbr0[bEl]);
3581 uintptr_t const idxNewGstTtbr1 = pgmR3DeduceTypeFromTcr<ARMV8_TCR_EL1_AARCH64_T1SZ_SHIFT, ARMV8_TCR_EL1_AARCH64_TG1_SHIFT,
3582 ARMV8_TCR_EL1_AARCH64_TBI1_BIT, ARMV8_TCR_EL1_AARCH64_EPD1_BIT, true>
3583 (u64RegSctlr, u64RegTcr, &pVCpu->pgm.s.afLookupMaskTtbr1[bEl]);
3584 Assert(idxNewGstTtbr0 != 0 && idxNewGstTtbr1 != 0);
3585
3586 /*
3587 * Change the paging mode data indexes.
3588 */
3589 AssertReturn(idxNewGstTtbr0 < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3590 AssertReturn(g_aPgmGuestModeData[idxNewGstTtbr0].uType == idxNewGstTtbr0, VERR_PGM_MODE_IPE);
3591 AssertPtrReturn(g_aPgmGuestModeData[idxNewGstTtbr0].pfnGetPage, VERR_PGM_MODE_IPE);
3592 AssertPtrReturn(g_aPgmGuestModeData[idxNewGstTtbr0].pfnModifyPage, VERR_PGM_MODE_IPE);
3593 AssertPtrReturn(g_aPgmGuestModeData[idxNewGstTtbr0].pfnExit, VERR_PGM_MODE_IPE);
3594 AssertPtrReturn(g_aPgmGuestModeData[idxNewGstTtbr0].pfnEnter, VERR_PGM_MODE_IPE);
3595
3596 AssertReturn(idxNewGstTtbr1 < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3597 AssertReturn(g_aPgmGuestModeData[idxNewGstTtbr1].uType == idxNewGstTtbr1, VERR_PGM_MODE_IPE);
3598 AssertPtrReturn(g_aPgmGuestModeData[idxNewGstTtbr1].pfnGetPage, VERR_PGM_MODE_IPE);
3599 AssertPtrReturn(g_aPgmGuestModeData[idxNewGstTtbr1].pfnModifyPage, VERR_PGM_MODE_IPE);
3600 AssertPtrReturn(g_aPgmGuestModeData[idxNewGstTtbr1].pfnExit, VERR_PGM_MODE_IPE);
3601 AssertPtrReturn(g_aPgmGuestModeData[idxNewGstTtbr1].pfnEnter, VERR_PGM_MODE_IPE);
3602
3603 rc = g_aPgmGuestModeData[idxNewGstTtbr0].pfnEnter(pVCpu);
3604 int rc2 = g_aPgmGuestModeData[idxNewGstTtbr1].pfnEnter(pVCpu);
3605
3606 /* status codes. */
3607 AssertRC(rc);
3608 AssertRC(rc2);
3609 if (RT_SUCCESS(rc))
3610 {
3611 rc = rc2;
3612 if (RT_SUCCESS(rc)) /* no informational status codes. */
3613 rc = VINF_SUCCESS;
3614 }
3615
3616 pVCpu->pgm.s.aidxGuestModeDataTtbr0[bEl] = idxNewGstTtbr0;
3617 pVCpu->pgm.s.aidxGuestModeDataTtbr1[bEl] = idxNewGstTtbr1;
3618 if (bEl == 1)
3619 {
3620 /* Also set the value for EL0, saves us an if condition in the hot paths later on. */
3621 pVCpu->pgm.s.aidxGuestModeDataTtbr0[0] = idxNewGstTtbr0;
3622 pVCpu->pgm.s.aidxGuestModeDataTtbr1[0] = idxNewGstTtbr1;
3623
3624 pVCpu->pgm.s.afLookupMaskTtbr0[0] = pVCpu->pgm.s.afLookupMaskTtbr0[1];
3625 pVCpu->pgm.s.afLookupMaskTtbr1[0] = pVCpu->pgm.s.afLookupMaskTtbr1[1];
3626 }
3627
3628 pVCpu->pgm.s.aenmGuestMode[bEl - 1] = (u64RegSctlr & ARMV8_SCTLR_EL1_M) ? PGMMODE_VMSA_V8_64 : PGMMODE_NONE;
3629
3630 /* Cache values. */
3631 pVCpu->pgm.s.au64RegSctlrEl[bEl - 1] = u64RegSctlr;
3632 pVCpu->pgm.s.au64RegTcrEl[bEl - 1] = u64RegTcr;
3633 }
3634 else
3635 rc = VINF_SUCCESS;
3636
3637 return rc;
3638}
3639#else
3640# error "Port me"
3641#endif /* VBOX_VMM_TARGET_X86 */
3642
3643
3644/**
3645 * Performs the actual mode change.
3646 * This is called by PGMChangeMode and pgmR3InitPaging().
3647 *
3648 * @returns VBox status code. May suspend or power off the VM on error, but this
3649 * will trigger using FFs and not informational status codes.
3650 *
3651 * @param pVM The cross context VM structure.
3652 * @param pVCpu The cross context virtual CPU structure.
3653 * @param enmGuestMode The new guest mode. This is assumed to be different from
3654 * the current mode.
3655 * @param fForce Whether to force a shadow paging mode change.
3656 */
3657VMM_INT_DECL(int) PGMHCChangeMode(PVMCC pVM, PVMCPUCC pVCpu, PGMMODE enmGuestMode, bool fForce)
3658{
3659#ifdef VBOX_VMM_TARGET_X86
3660 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmGuestMode), PGMGetModeName(enmGuestMode)));
3661 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3662
3663 /*
3664 * Calc the shadow mode and switcher.
3665 */
3666 PGMMODE const enmShadowMode = pgmCalcShadowMode(pVM, enmGuestMode, pVM->pgm.s.enmHostMode, pVCpu->pgm.s.enmShadowMode);
3667 bool const fShadowModeChanged = enmShadowMode != pVCpu->pgm.s.enmShadowMode || fForce;
3668
3669 /*
3670 * Exit old mode(s).
3671 */
3672 /* shadow */
3673 if (fShadowModeChanged)
3674 {
3675 LogFlow(("PGMHCChangeMode: Shadow mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode), PGMGetModeName(enmShadowMode)));
3676 uintptr_t idxOldShw = pVCpu->pgm.s.idxShadowModeData;
3677 if ( idxOldShw < RT_ELEMENTS(g_aPgmShadowModeData)
3678 && g_aPgmShadowModeData[idxOldShw].pfnExit)
3679 {
3680 int rc = g_aPgmShadowModeData[idxOldShw].pfnExit(pVCpu);
3681 AssertMsgRCReturn(rc, ("Exit failed for shadow mode %d: %Rrc\n", pVCpu->pgm.s.enmShadowMode, rc), rc);
3682 }
3683 }
3684 else
3685 LogFlow(("PGMHCChangeMode: Shadow mode remains: %s\n", PGMGetModeName(pVCpu->pgm.s.enmShadowMode)));
3686
3687 /* guest */
3688 uintptr_t const idxOldGst = pVCpu->pgm.s.idxGuestModeData;
3689 if ( idxOldGst < RT_ELEMENTS(g_aPgmGuestModeData)
3690 && g_aPgmGuestModeData[idxOldGst].pfnExit)
3691 {
3692 int rc = g_aPgmGuestModeData[idxOldGst].pfnExit(pVCpu);
3693 AssertMsgReturn(RT_SUCCESS(rc), ("Exit failed for guest mode %d: %Rrc\n", pVCpu->pgm.s.enmGuestMode, rc), rc);
3694 }
3695 pVCpu->pgm.s.GCPhysCR3 = NIL_RTGCPHYS;
3696 pVCpu->pgm.s.GCPhysNstGstCR3 = NIL_RTGCPHYS;
3697 pVCpu->pgm.s.GCPhysPaeCR3 = NIL_RTGCPHYS;
3698 Assert(!pVCpu->pgm.s.CTX_SUFF(fPaePdpesAndCr3Mapped));
3699
3700 /*
3701 * Change the paging mode data indexes.
3702 */
3703 uintptr_t idxNewGst = pVCpu->pgm.s.idxGuestModeData = pgmModeToType(enmGuestMode);
3704 AssertReturn(idxNewGst < RT_ELEMENTS(g_aPgmGuestModeData), VERR_PGM_MODE_IPE);
3705 AssertReturn(g_aPgmGuestModeData[idxNewGst].uType == idxNewGst, VERR_PGM_MODE_IPE);
3706 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnGetPage, VERR_PGM_MODE_IPE);
3707 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnModifyPage, VERR_PGM_MODE_IPE);
3708 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnExit, VERR_PGM_MODE_IPE);
3709 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnEnter, VERR_PGM_MODE_IPE);
3710# ifdef IN_RING3
3711 AssertPtrReturn(g_aPgmGuestModeData[idxNewGst].pfnRelocate, VERR_PGM_MODE_IPE);
3712# endif
3713
3714 uintptr_t const idxNewShw = pVCpu->pgm.s.idxShadowModeData = pgmModeToType(enmShadowMode);
3715 AssertReturn(idxNewShw < RT_ELEMENTS(g_aPgmShadowModeData), VERR_PGM_MODE_IPE);
3716 AssertReturn(g_aPgmShadowModeData[idxNewShw].uType == idxNewShw, VERR_PGM_MODE_IPE);
3717 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnGetPage, VERR_PGM_MODE_IPE);
3718 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnModifyPage, VERR_PGM_MODE_IPE);
3719 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnExit, VERR_PGM_MODE_IPE);
3720 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnEnter, VERR_PGM_MODE_IPE);
3721# ifdef IN_RING3
3722 AssertPtrReturn(g_aPgmShadowModeData[idxNewShw].pfnRelocate, VERR_PGM_MODE_IPE);
3723# endif
3724
3725 uintptr_t const idxNewBth = pVCpu->pgm.s.idxBothModeData = (idxNewShw - PGM_TYPE_FIRST_SHADOW) * PGM_TYPE_END + idxNewGst;
3726 AssertReturn(g_aPgmBothModeData[idxNewBth].uShwType == idxNewShw, VERR_PGM_MODE_IPE);
3727 AssertReturn(g_aPgmBothModeData[idxNewBth].uGstType == idxNewGst, VERR_PGM_MODE_IPE);
3728 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnInvalidatePage, VERR_PGM_MODE_IPE);
3729 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
3730 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
3731 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
3732 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
3733 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnEnter, VERR_PGM_MODE_IPE);
3734# ifdef VBOX_STRICT
3735 AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnAssertCR3, VERR_PGM_MODE_IPE);
3736# endif
3737
3738 /*
3739 * Determine SLAT mode -before- entering the new shadow mode!
3740 */
3741 pVCpu->pgm.s.enmGuestSlatMode = !CPUMIsGuestVmxEptPagingEnabled(pVCpu) ? PGMSLAT_DIRECT : PGMSLAT_EPT;
3742
3743 /*
3744 * Enter new shadow mode (if changed).
3745 */
3746 if (fShadowModeChanged)
3747 {
3748 pVCpu->pgm.s.enmShadowMode = enmShadowMode;
3749 int rc = g_aPgmShadowModeData[idxNewShw].pfnEnter(pVCpu);
3750 AssertLogRelMsgRCReturnStmt(rc, ("Entering enmShadowMode=%s failed: %Rrc\n", PGMGetModeName(enmShadowMode), rc),
3751 pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID, rc);
3752 }
3753
3754 /*
3755 * Always flag the necessary updates
3756 */
3757 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
3758
3759 /*
3760 * Enter the new guest and shadow+guest modes.
3761 */
3762 /* Calc the new CR3 value. */
3763 RTGCPHYS GCPhysCR3;
3764 switch (enmGuestMode)
3765 {
3766 case PGMMODE_REAL:
3767 case PGMMODE_PROTECTED:
3768 GCPhysCR3 = NIL_RTGCPHYS;
3769 break;
3770
3771 case PGMMODE_32_BIT:
3772 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAGE_MASK;
3773 break;
3774
3775 case PGMMODE_PAE_NX:
3776 case PGMMODE_PAE:
3777 if (!pVM->cpum.ro.GuestFeatures.fPae)
3778# ifdef IN_RING3 /** @todo r=bird: wrong place, probably hasn't really worked for a while. */
3779 return VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_FATAL, "PAEmode",
3780 N_("The guest is trying to switch to the PAE mode which is currently disabled by default in VirtualBox. PAE support can be enabled using the VM settings (System/Processor)"));
3781# else
3782 AssertLogRelMsgFailedReturn(("enmGuestMode=%s - Try enable PAE for the guest!\n", PGMGetModeName(enmGuestMode)), VERR_PGM_MODE_IPE);
3783
3784# endif
3785 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_PAE_PAGE_MASK;
3786 break;
3787
3788# ifdef VBOX_WITH_64_BITS_GUESTS
3789 case PGMMODE_AMD64_NX:
3790 case PGMMODE_AMD64:
3791 GCPhysCR3 = CPUMGetGuestCR3(pVCpu) & X86_CR3_AMD64_PAGE_MASK;
3792 break;
3793# endif
3794 default:
3795 AssertLogRelMsgFailedReturn(("enmGuestMode=%d\n", enmGuestMode), VERR_PGM_MODE_IPE);
3796 }
3797
3798# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3799 /*
3800 * If a nested-guest is using EPT paging:
3801 * - Update the second-level address translation (SLAT) mode.
3802 * - Indicate that the CR3 is nested-guest physical address.
3803 */
3804 if (pVCpu->pgm.s.enmGuestSlatMode == PGMSLAT_EPT)
3805 {
3806 if (PGMMODE_WITH_PAGING(enmGuestMode))
3807 {
3808 /*
3809 * Translate CR3 to its guest-physical address.
3810 * We don't use pgmGstSlatTranslateCr3() here as we want to update GCPhysNstGstCR3 -after-
3811 * switching modes to keep it consistent with how GCPhysCR3 is updated.
3812 */
3813 PGMPTWALK Walk;
3814 PGMPTWALKGST GstWalk;
3815 int const rc = pgmGstSlatWalk(pVCpu, GCPhysCR3, false /* fIsLinearAddrValid */, 0 /* GCPtrNested */, &Walk,
3816 &GstWalk);
3817 if (RT_SUCCESS(rc))
3818 { /* likely */ }
3819 else
3820 {
3821 /*
3822 * SLAT failed but we avoid reporting this to the caller because the caller
3823 * is not supposed to fail. The only time the caller needs to indicate a
3824 * failure to software is when PAE paging is used by the nested-guest, but
3825 * we handle the PAE case separately (e.g., see VMX transition in IEM).
3826 * In all other cases, the failure will be indicated when CR3 tries to be
3827 * translated on the next linear-address memory access.
3828 * See Intel spec. 27.2.1 "EPT Overview".
3829 */
3830 Log(("SLAT failed for CR3 %#RX64 rc=%Rrc\n", GCPhysCR3, rc));
3831
3832 /* Trying to coax PGM to succeed for the time being... */
3833 Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
3834 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3835 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3836 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3837 return VINF_SUCCESS;
3838 }
3839 pVCpu->pgm.s.GCPhysNstGstCR3 = GCPhysCR3;
3840 GCPhysCR3 = Walk.GCPhys & X86_CR3_EPT_PAGE_MASK;
3841 }
3842 }
3843 else
3844 Assert(pVCpu->pgm.s.GCPhysNstGstCR3 == NIL_RTGCPHYS);
3845# endif
3846
3847 /*
3848 * Enter the new guest mode.
3849 */
3850 pVCpu->pgm.s.enmGuestMode = enmGuestMode;
3851 int rc = g_aPgmGuestModeData[idxNewGst].pfnEnter(pVCpu, GCPhysCR3);
3852 int rc2 = g_aPgmBothModeData[idxNewBth].pfnEnter(pVCpu, GCPhysCR3);
3853
3854 /* Set the new guest CR3 (and nested-guest CR3). */
3855 pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
3856
3857 /* status codes. */
3858 AssertRC(rc);
3859 AssertRC(rc2);
3860 if (RT_SUCCESS(rc))
3861 {
3862 rc = rc2;
3863 if (RT_SUCCESS(rc)) /* no informational status codes. */
3864 rc = VINF_SUCCESS;
3865 }
3866
3867 /*
3868 * Notify HM.
3869 */
3870 HMHCChangedPagingMode(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);
3871 return rc;
3872
3873#elif defined(VBOX_VMM_TARGET_ARMV8)
3874 Log(("PGMHCChangeMode: Guest mode: %s -> %s\n", PGMGetModeName(pVCpu->pgm.s.aenmGuestMode[1]), PGMGetModeName(enmGuestMode)));
3875 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cGuestModeChanges);
3876
3877 //AssertReleaseFailed(); /** @todo Called by the PGM saved state code. */
3878 RT_NOREF(pVM, pVCpu, enmGuestMode, fForce);
3879 return VINF_SUCCESS;
3880
3881#else
3882# error "port me"
3883#endif
3884}
3885
3886
3887/**
3888 * Get mode name.
3889 *
3890 * @returns read-only name string.
3891 * @param enmMode The mode which name is desired.
3892 */
3893VMMDECL(const char *) PGMGetModeName(PGMMODE enmMode)
3894{
3895 switch (enmMode)
3896 {
3897 case PGMMODE_REAL: return "Real";
3898 case PGMMODE_PROTECTED: return "Protected";
3899 case PGMMODE_32_BIT: return "32-bit";
3900 case PGMMODE_PAE: return "PAE";
3901 case PGMMODE_PAE_NX: return "PAE+NX";
3902 case PGMMODE_AMD64: return "AMD64";
3903 case PGMMODE_AMD64_NX: return "AMD64+NX";
3904 case PGMMODE_NESTED_32BIT: return "Nested-32";
3905 case PGMMODE_NESTED_PAE: return "Nested-PAE";
3906 case PGMMODE_NESTED_AMD64: return "Nested-AMD64";
3907 case PGMMODE_EPT: return "EPT";
3908 case PGMMODE_NONE: return "None";
3909 case PGMMODE_VMSA_V8_32: return "VMSAv8-32";
3910 case PGMMODE_VMSA_V8_64: return "VMSAv8-64";
3911
3912 case PGMMODE_INVALID:
3913 case PGMMODE_MAX:
3914 case PGMMODE_32BIT_HACK:
3915 break;
3916 }
3917 return "unknown mode value";
3918}
3919
3920
3921/**
3922 * Gets the current guest paging mode.
3923 *
3924 * If you just need the CPU mode (real/protected/long), use CPUMGetGuestMode().
3925 *
3926 * @returns The current paging mode.
3927 * @param pVCpu The cross context virtual CPU structure.
3928 */
3929VMMDECL(PGMMODE) PGMGetGuestMode(PVMCPU pVCpu)
3930{
3931#if defined(VBOX_VMM_TARGET_X86)
3932 return pVCpu->pgm.s.enmGuestMode;
3933#elif defined(VBOX_VMM_TARGET_ARMV8)
3934 return pVCpu->pgm.s.aenmGuestMode[1]; /** @todo Add parameter to select exception level. */
3935#else
3936# error "Port me"
3937#endif
3938}
3939
3940
3941/**
3942 * Gets the current shadow paging mode.
3943 *
3944 * @returns The current paging mode.
3945 * @param pVCpu The cross context virtual CPU structure.
3946 */
3947VMMDECL(PGMMODE) PGMGetShadowMode(PVMCPU pVCpu)
3948{
3949#if !defined(VBOX_WITH_ONLY_PGM_NEM_MODE) && defined(VBOX_VMM_TARGET_X86)
3950 return pVCpu->pgm.s.enmShadowMode;
3951#else
3952 RT_NOREF(pVCpu);
3953 return PGMMODE_NONE;
3954#endif
3955}
3956
3957#ifdef VBOX_VMM_TARGET_X86
3958
3959/**
3960 * Gets the current host paging mode.
3961 *
3962 * @returns The current paging mode.
3963 * @param pVM The cross context VM structure.
3964 */
3965VMMDECL(PGMMODE) PGMGetHostMode(PVM pVM)
3966{
3967 switch (pVM->pgm.s.enmHostMode)
3968 {
3969 case SUPPAGINGMODE_32_BIT:
3970 case SUPPAGINGMODE_32_BIT_GLOBAL:
3971 return PGMMODE_32_BIT;
3972
3973 case SUPPAGINGMODE_PAE:
3974 case SUPPAGINGMODE_PAE_GLOBAL:
3975 return PGMMODE_PAE;
3976
3977 case SUPPAGINGMODE_PAE_NX:
3978 case SUPPAGINGMODE_PAE_GLOBAL_NX:
3979 return PGMMODE_PAE_NX;
3980
3981 case SUPPAGINGMODE_AMD64:
3982 case SUPPAGINGMODE_AMD64_GLOBAL:
3983 return PGMMODE_AMD64;
3984
3985 case SUPPAGINGMODE_AMD64_NX:
3986 case SUPPAGINGMODE_AMD64_GLOBAL_NX:
3987 return PGMMODE_AMD64_NX;
3988
3989 default: AssertMsgFailed(("enmHostMode=%d\n", pVM->pgm.s.enmHostMode)); break;
3990 }
3991
3992 return PGMMODE_INVALID;
3993}
3994
3995
3996# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
3997/**
3998 * Gets the SLAT mode name.
3999 *
4000 * @returns The read-only SLAT mode descriptive string.
4001 * @param enmSlatMode The SLAT mode value.
4002 */
4003VMM_INT_DECL(const char *) PGMGetSlatModeName(PGMSLAT enmSlatMode)
4004{
4005 switch (enmSlatMode)
4006 {
4007 case PGMSLAT_DIRECT: return "Direct";
4008 case PGMSLAT_EPT: return "EPT";
4009 case PGMSLAT_32BIT: return "32-bit";
4010 case PGMSLAT_PAE: return "PAE";
4011 case PGMSLAT_AMD64: return "AMD64";
4012 default: return "Unknown";
4013 }
4014}
4015# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
4016
4017
4018/**
4019 * Notification from CPUM that the EFER.NXE bit has changed.
4020 *
4021 * @param pVCpu The cross context virtual CPU structure of the CPU for
4022 * which EFER changed.
4023 * @param fNxe The new NXE state.
4024 */
4025VMM_INT_DECL(void) PGMNotifyNxeChanged(PVMCPU pVCpu, bool fNxe)
4026{
4027/** @todo VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu); */
4028 Log(("PGMNotifyNxeChanged: fNxe=%RTbool\n", fNxe));
4029
4030 pVCpu->pgm.s.fNoExecuteEnabled = fNxe;
4031 if (fNxe)
4032 {
4033 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
4034 pVCpu->pgm.s.fGstPaeMbzPteMask &= ~X86_PTE_PAE_NX;
4035 pVCpu->pgm.s.fGstPaeMbzPdeMask &= ~X86_PDE_PAE_NX;
4036 pVCpu->pgm.s.fGstPaeMbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
4037 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask - N/A */
4038 pVCpu->pgm.s.fGstAmd64MbzPteMask &= ~X86_PTE_PAE_NX;
4039 pVCpu->pgm.s.fGstAmd64MbzPdeMask &= ~X86_PDE_PAE_NX;
4040 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask &= ~X86_PDE2M_PAE_NX;
4041 pVCpu->pgm.s.fGstAmd64MbzPdpeMask &= ~X86_PDPE_LM_NX;
4042 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask &= ~X86_PDPE_LM_NX;
4043 pVCpu->pgm.s.fGstAmd64MbzPml4eMask &= ~X86_PML4E_NX;
4044
4045 pVCpu->pgm.s.fGst64ShadowedPteMask |= X86_PTE_PAE_NX;
4046 pVCpu->pgm.s.fGst64ShadowedPdeMask |= X86_PDE_PAE_NX;
4047 pVCpu->pgm.s.fGst64ShadowedBigPdeMask |= X86_PDE2M_PAE_NX;
4048 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask |= X86_PDE2M_PAE_NX;
4049 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask |= X86_PDPE_LM_NX;
4050 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask |= X86_PML4E_NX;
4051 }
4052 else
4053 {
4054 /*pVCpu->pgm.s.fGst32BitMbzBigPdeMask - N/A */
4055 pVCpu->pgm.s.fGstPaeMbzPteMask |= X86_PTE_PAE_NX;
4056 pVCpu->pgm.s.fGstPaeMbzPdeMask |= X86_PDE_PAE_NX;
4057 pVCpu->pgm.s.fGstPaeMbzBigPdeMask |= X86_PDE2M_PAE_NX;
4058 /*pVCpu->pgm.s.fGstPaeMbzPdpeMask -N/A */
4059 pVCpu->pgm.s.fGstAmd64MbzPteMask |= X86_PTE_PAE_NX;
4060 pVCpu->pgm.s.fGstAmd64MbzPdeMask |= X86_PDE_PAE_NX;
4061 pVCpu->pgm.s.fGstAmd64MbzBigPdeMask |= X86_PDE2M_PAE_NX;
4062 pVCpu->pgm.s.fGstAmd64MbzPdpeMask |= X86_PDPE_LM_NX;
4063 pVCpu->pgm.s.fGstAmd64MbzBigPdpeMask |= X86_PDPE_LM_NX;
4064 pVCpu->pgm.s.fGstAmd64MbzPml4eMask |= X86_PML4E_NX;
4065
4066 pVCpu->pgm.s.fGst64ShadowedPteMask &= ~X86_PTE_PAE_NX;
4067 pVCpu->pgm.s.fGst64ShadowedPdeMask &= ~X86_PDE_PAE_NX;
4068 pVCpu->pgm.s.fGst64ShadowedBigPdeMask &= ~X86_PDE2M_PAE_NX;
4069 pVCpu->pgm.s.fGst64ShadowedBigPde4PteMask &= ~X86_PDE2M_PAE_NX;
4070 pVCpu->pgm.s.fGstAmd64ShadowedPdpeMask &= ~X86_PDPE_LM_NX;
4071 pVCpu->pgm.s.fGstAmd64ShadowedPml4eMask &= ~X86_PML4E_NX;
4072 }
4073}
4074
4075#endif /* VBOX_VMM_TARGET_X86 */
4076
4077/**
 4078 * Check if any PGM pool pages are marked dirty (not monitored).
 4079 *
 4080 * @returns bool - true if any pool pages are marked dirty, false if not.
4081 * @param pVM The cross context VM structure.
4082 */
4083VMMDECL(bool) PGMHasDirtyPages(PVM pVM)
4084{
4085#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
4086 return pVM->pgm.s.CTX_SUFF(pPool)->cDirtyPages != 0;
4087#else
4088 RT_NOREF(pVM);
4089 return false;
4090#endif
4091}
4092
4093
4094/**
 4095 * Enable or disable large page usage.
4096 *
4097 * @returns VBox status code.
4098 * @param pVM The cross context VM structure.
 4099 * @param fUseLargePages Whether to use large pages.
4100 */
4101VMMDECL(int) PGMSetLargePageUsage(PVMCC pVM, bool fUseLargePages)
4102{
4103 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
4104
4105 pVM->pgm.s.fUseLargePages = fUseLargePages;
4106 return VINF_SUCCESS;
4107}
4108
4109
4110/**
4111 * Check if this VCPU currently owns the PGM lock.
4112 *
4113 * @returns bool owner/not owner
4114 * @param pVM The cross context VM structure.
4115 */
4116VMMDECL(bool) PGMIsLockOwner(PVMCC pVM)
4117{
4118 return PDMCritSectIsOwner(pVM, &pVM->pgm.s.CritSectX);
4119}
4120
4121
4122/**
4123 * Acquire the PGM lock.
4124 *
4125 * @returns VBox status code
4126 * @param pVM The cross context VM structure.
4127 * @param fVoid Set if the caller cannot handle failure returns.
4128 * @param SRC_POS The source position of the caller (RT_SRC_POS).
4129 */
4130#if defined(VBOX_STRICT) || defined(DOXYGEN_RUNNING)
4131int pgmLockDebug(PVMCC pVM, bool fVoid, RT_SRC_POS_DECL)
4132#else
4133int pgmLock(PVMCC pVM, bool fVoid)
4134#endif
4135{
4136#if defined(VBOX_STRICT)
4137 int rc = PDMCritSectEnterDebug(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS, (uintptr_t)ASMReturnAddress(), RT_SRC_POS_ARGS);
4138#else
4139 int rc = PDMCritSectEnter(pVM, &pVM->pgm.s.CritSectX, VINF_SUCCESS);
4140#endif
4141 if (RT_SUCCESS(rc))
4142 return rc;
4143 if (fVoid)
4144 PDM_CRITSECT_RELEASE_ASSERT_RC(pVM, &pVM->pgm.s.CritSectX, rc);
4145 else
4146 AssertRC(rc);
4147 return rc;
4148}
4149
4150
4151/**
4152 * Release the PGM lock.
4153 *
4154 * @param pVM The cross context VM structure.
4155 */
4156void pgmUnlock(PVMCC pVM)
4157{
4158 uint32_t cDeprecatedPageLocks = pVM->pgm.s.cDeprecatedPageLocks;
4159 pVM->pgm.s.cDeprecatedPageLocks = 0;
4160 int rc = PDMCritSectLeave(pVM, &pVM->pgm.s.CritSectX);
4161 if (rc == VINF_SEM_NESTED)
4162 pVM->pgm.s.cDeprecatedPageLocks = cDeprecatedPageLocks;
4163}
4164
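/*
 * Illustrative sketch (not part of the original source): the usual pattern
 * built on top of these helpers, as used throughout this file via the
 * PGM_LOCK_VOID / PGM_UNLOCK wrapper macros.  The worker function is
 * hypothetical.
 */
#if 0
static void exampleLockedWorker(PVMCC pVM, RTGCPHYS GCPhys)
{
    PGM_LOCK_VOID(pVM);                             /* asserts instead of returning on failure */
    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);   /* PGM state may only be touched while holding the lock */
    RT_NOREF(pPage);
    PGM_UNLOCK(pVM);
}
#endif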
4165
4166#if !defined(IN_R0) || defined(LOG_ENABLED)
4167
4168/** Format handler for PGMPAGE.
4169 * @copydoc FNRTSTRFORMATTYPE */
4170static DECLCALLBACK(size_t) pgmFormatTypeHandlerPage(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
4171 const char *pszType, void const *pvValue,
4172 int cchWidth, int cchPrecision, unsigned fFlags,
4173 void *pvUser)
4174{
4175 size_t cch;
4176 PCPGMPAGE pPage = (PCPGMPAGE)pvValue;
4177 if (RT_VALID_PTR(pPage))
4178 {
4179 char szTmp[64+80];
4180
4181 cch = 0;
4182
4183 /* The single char state stuff. */
4184 static const char s_achPageStates[4] = { 'Z', 'A', 'W', 'S' };
4185 szTmp[cch++] = s_achPageStates[PGM_PAGE_GET_STATE_NA(pPage)];
4186
4187# define IS_PART_INCLUDED(lvl) ( !(fFlags & RTSTR_F_PRECISION) || cchPrecision == (lvl) || cchPrecision >= (lvl)+10 )
4188 if (IS_PART_INCLUDED(5))
4189 {
4190 static const char s_achHandlerStates[4*2] = { '-', 't', 'w', 'a' , '_', 'T', 'W', 'A' };
4191 szTmp[cch++] = s_achHandlerStates[ PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)
4192 | ((uint8_t)PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) << 2)];
4193 }
4194
4195 /* The type. */
4196 if (IS_PART_INCLUDED(4))
4197 {
4198 szTmp[cch++] = ':';
4199 static const char s_achPageTypes[8][4] = { "INV", "RAM", "MI2", "M2A", "SHA", "ROM", "MIO", "BAD" };
4200 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][0];
4201 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][1];
4202 szTmp[cch++] = s_achPageTypes[PGM_PAGE_GET_TYPE_NA(pPage)][2];
4203 }
4204
4205 /* The numbers. */
4206 if (IS_PART_INCLUDED(3))
4207 {
4208 szTmp[cch++] = ':';
4209 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_HCPHYS_NA(pPage), 16, 12, 0, RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
4210 }
4211
4212 if (IS_PART_INCLUDED(2))
4213 {
4214 szTmp[cch++] = ':';
4215 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_PAGEID(pPage), 16, 7, 0, RTSTR_F_ZEROPAD | RTSTR_F_32BIT);
4216 }
4217
4218# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
4219 if (IS_PART_INCLUDED(6))
4220 {
4221 szTmp[cch++] = ':';
4222 static const char s_achRefs[4] = { '-', 'U', '!', 'L' };
4223 szTmp[cch++] = s_achRefs[PGM_PAGE_GET_TD_CREFS_NA(pPage)];
4224 cch += RTStrFormatNumber(&szTmp[cch], PGM_PAGE_GET_TD_IDX_NA(pPage), 16, 4, 0, RTSTR_F_ZEROPAD | RTSTR_F_16BIT);
4225 }
4226# endif
4227# undef IS_PART_INCLUDED
4228
4229 cch = pfnOutput(pvArgOutput, szTmp, cch);
4230#if 0
4231 size_t cch2 = 0;
4232 szTmp[cch2++] = '(';
4233 cch2 += RTStrFormatNumber(&szTmp[cch2], (uintptr_t)pPage, 16, 18, 0, RTSTR_F_SPECIAL | RTSTR_F_ZEROPAD | RTSTR_F_64BIT);
4234 szTmp[cch2++] = ')';
4235 szTmp[cch2] = '\0';
4236 cch += pfnOutput(pvArgOutput, szTmp, cch2);
4237#endif
4238 }
4239 else
4240 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmpage-ptr>"));
4241 NOREF(pszType); NOREF(cchWidth); NOREF(pvUser);
4242 return cch;
4243}
4244
4245
4246/** Format handler for PGMRAMRANGE.
4247 * @copydoc FNRTSTRFORMATTYPE */
4248static DECLCALLBACK(size_t) pgmFormatTypeHandlerRamRange(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
4249 const char *pszType, void const *pvValue,
4250 int cchWidth, int cchPrecision, unsigned fFlags,
4251 void *pvUser)
4252{
4253 size_t cch;
4254 PGMRAMRANGE const *pRam = (PGMRAMRANGE const *)pvValue;
4255 if (RT_VALID_PTR(pRam))
4256 {
4257 char szTmp[80];
4258 cch = RTStrPrintf(szTmp, sizeof(szTmp), "%RGp-%RGp", pRam->GCPhys, pRam->GCPhysLast);
4259 cch = pfnOutput(pvArgOutput, szTmp, cch);
4260 }
4261 else
4262 cch = pfnOutput(pvArgOutput, RT_STR_TUPLE("<bad-pgmramrange-ptr>"));
4263 NOREF(pszType); NOREF(cchWidth); NOREF(cchPrecision); NOREF(pvUser); NOREF(fFlags);
4264 return cch;
4265}
4266
4267/** Format type handlers to be registered/deregistered. */
4268static const struct
4269{
4270 char szType[24];
4271 PFNRTSTRFORMATTYPE pfnHandler;
4272} g_aPgmFormatTypes[] =
4273{
4274 { "pgmpage", pgmFormatTypeHandlerPage },
4275 { "pgmramrange", pgmFormatTypeHandlerRamRange }
4276};
4277
4278#endif /* !IN_R0 || LOG_ENABLED */
4279
4280/**
4281 * Registers the global string format types.
4282 *
 4283 * This should be called at module load time or in some other manner that ensures
4284 * that it's called exactly one time.
4285 *
4286 * @returns IPRT status code on RTStrFormatTypeRegister failure.
4287 */
4288VMMDECL(int) PGMRegisterStringFormatTypes(void)
4289{
4290#if !defined(IN_R0) || defined(LOG_ENABLED)
4291 int rc = VINF_SUCCESS;
4292 unsigned i;
4293 for (i = 0; RT_SUCCESS(rc) && i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
4294 {
4295 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
4296# ifdef IN_RING0
4297 if (rc == VERR_ALREADY_EXISTS)
4298 {
4299 /* in case of cleanup failure in ring-0 */
4300 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4301 rc = RTStrFormatTypeRegister(g_aPgmFormatTypes[i].szType, g_aPgmFormatTypes[i].pfnHandler, NULL);
4302 }
4303# endif
4304 }
4305 if (RT_FAILURE(rc))
4306 while (i-- > 0)
4307 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4308
4309 return rc;
4310#else
4311 return VINF_SUCCESS;
4312#endif
4313}
4314
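/*
 * Illustrative sketch (not part of the original source): once registered, the
 * custom types are consumed through IPRT's %R[type] format extension, for
 * instance in logging.  The helper below is hypothetical.
 */
#if 0
static void exampleLogPgmPage(PVMCC pVM, RTGCPHYS GCPhys)
{
    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);   /* caller is assumed to hold the PGM lock */
    Log(("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage));
}
#endif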
4315
4316/**
4317 * Deregisters the global string format types.
4318 *
4319 * This should be called at module unload time or in some other manner that
 4320 * ensures that it's called exactly one time.
4321 */
4322VMMDECL(void) PGMDeregisterStringFormatTypes(void)
4323{
4324#if !defined(IN_R0) || defined(LOG_ENABLED)
4325 for (unsigned i = 0; i < RT_ELEMENTS(g_aPgmFormatTypes); i++)
4326 RTStrFormatTypeDeregister(g_aPgmFormatTypes[i].szType);
4327#endif
4328}
4329
4330#ifdef PGM_WITH_PAGE_ZEROING_DETECTION
4331# ifndef VBOX_VMM_TARGET_X86
4332# error "misconfig: PGM_WITH_PAGE_ZEROING_DETECTION not implemented for ARM guests"
4333# endif
4334
4335/**
 4336 * Helper for checking whether XMM0 is zero, possibly retrieving external state.
4337 */
4338static bool pgmHandlePageZeroingIsXmm0Zero(PVMCPUCC pVCpu, PCPUMCTX pCtx)
4339{
4340 if (pCtx->fExtrn & CPUMCTX_EXTRN_SSE_AVX)
4341 {
4342 int rc = CPUMImportGuestStateOnDemand(pVCpu, CPUMCTX_EXTRN_SSE_AVX);
4343 AssertRCReturn(rc, false);
4344 }
4345 return pCtx->XState.x87.aXMM[0].au64[0] == 0
4346 && pCtx->XState.x87.aXMM[0].au64[1] == 0
4347 && pCtx->XState.x87.aXMM[0].au64[2] == 0
4348 && pCtx->XState.x87.aXMM[0].au64[3] == 0;
4349}
4350
4351
4352/**
4353 * Helper for comparing opcode bytes.
4354 */
4355static bool pgmHandlePageZeroingMatchOpcodes(PVMCPUCC pVCpu, PCPUMCTX pCtx, uint8_t const *pbOpcodes, uint32_t cbOpcodes)
4356{
4357 uint8_t abTmp[64];
4358 AssertMsgReturn(cbOpcodes <= sizeof(abTmp), ("cbOpcodes=%#x\n", cbOpcodes), false);
4359 int rc = PGMPhysSimpleReadGCPtr(pVCpu, abTmp, pCtx->rip + pCtx->cs.u64Base, cbOpcodes);
4360 if (RT_SUCCESS(rc))
4361 return memcmp(abTmp, pbOpcodes, cbOpcodes) == 0;
4362 return false;
4363}
4364
4365
4366/**
4367 * Called on faults on ZERO pages to check if the guest is trying to zero it.
4368 *
4369 * Since it's a waste of time to zero a ZERO page and it will cause an
4370 * unnecessary page allocation, we'd like to detect and avoid this.
4371 * If any known page zeroing code is detected, this function will update the CPU
4372 * state to pretend the page was zeroed by the code.
4373 *
4374 * @returns true if page zeroing code was detected and CPU state updated to skip
4375 * the code.
4376 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
4377 * @param pCtx The guest register context.
4378 */
4379static bool pgmHandlePageZeroingCode(PVMCPUCC pVCpu, PCPUMCTX pCtx)
4380{
4381 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER);
4382
4383 /*
4384 * Sort by mode first.
4385 */
4386 if (CPUMIsGuestInLongModeEx(pCtx))
4387 {
4388 if (CPUMIsGuestIn64BitCodeEx(pCtx))
4389 {
4390 /*
4391 * 64-bit code.
4392 */
4393 Log9(("pgmHandlePageZeroingCode: not page zeroing - 64-bit\n"));
4394 }
4395 else if (pCtx->cs.Attr.n.u1DefBig)
4396 Log9(("pgmHandlePageZeroingCode: not page zeroing - 32-bit lm\n"));
4397 else
4398 Log9(("pgmHandlePageZeroingCode: not page zeroing - 16-bit lm\n"));
4399 }
4400 else if (CPUMIsGuestInPagedProtectedModeEx(pCtx))
4401 {
4402 if (pCtx->cs.Attr.n.u1DefBig)
4403 {
4404 /*
4405 * 32-bit paged protected mode code.
4406 */
4407 CPUMCTX_ASSERT_NOT_EXTRN(pCtx, CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_RBX
4408 | CPUMCTX_EXTRN_RSP | CPUMCTX_EXTRN_RBP | CPUMCTX_EXTRN_RSI | CPUMCTX_EXTRN_RDI
4409 | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
4410
4411 /* 1. Generic 'rep stosd' detection. */
4412 static uint8_t const s_abRepStosD[] = { 0xf3, 0xab };
4413 if ( pCtx->eax == 0
4414 && pCtx->ecx == X86_PAGE_SIZE / 4
4415 && !(pCtx->edi & X86_PAGE_OFFSET_MASK)
4416 && pgmHandlePageZeroingMatchOpcodes(pVCpu, pCtx, s_abRepStosD, sizeof(s_abRepStosD)))
4417 {
4418 pCtx->ecx = 0;
4419 pCtx->edi += X86_PAGE_SIZE;
4420 Log9(("pgmHandlePageZeroingCode: REP STOSD: eip=%RX32 -> %RX32\n", pCtx->eip, pCtx->eip + sizeof(s_abRepStosD)));
4421 pCtx->eip += sizeof(s_abRepStosD);
4422 return true;
4423 }
4424
4425 /* 2. Windows 2000 sp4 KiXMMIZeroPageNoSave loop code: */
4426 static uint8_t const s_abW2kSp4XmmZero[] =
4427 {
4428 0x0f, 0x2b, 0x01,
4429 0x0f, 0x2b, 0x41, 0x10,
4430 0x0f, 0x2b, 0x41, 0x20,
4431 0x0f, 0x2b, 0x41, 0x30,
4432 0x83, 0xc1, 0x40,
4433 0x48,
4434 0x75, 0xeb,
4435 };
4436 if ( pCtx->eax == 64
4437 && !(pCtx->ecx & X86_PAGE_OFFSET_MASK)
4438 && pgmHandlePageZeroingMatchOpcodes(pVCpu, pCtx, s_abW2kSp4XmmZero, sizeof(s_abW2kSp4XmmZero))
4439 && pgmHandlePageZeroingIsXmm0Zero(pVCpu, pCtx))
4440 {
4441 pCtx->eax = 1;
4442 pCtx->ecx += X86_PAGE_SIZE;
4443 Log9(("pgmHandlePageZeroingCode: w2k sp4 xmm: eip=%RX32 -> %RX32\n",
4444 pCtx->eip, pCtx->eip + sizeof(s_abW2kSp4XmmZero) - 3));
4445 pCtx->eip += sizeof(s_abW2kSp4XmmZero) - 3;
4446 return true;
4447 }
4448 Log9(("pgmHandlePageZeroingCode: not page zeroing - 32-bit\n"));
4449 }
4450 else if (!pCtx->eflags.Bits.u1VM)
4451 Log9(("pgmHandlePageZeroingCode: not page zeroing - 16-bit\n"));
4452 else
4453 Log9(("pgmHandlePageZeroingCode: not page zeroing - v86\n"));
4454 }
4455 return false;
4456}
4457
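/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * write-fault path consulting the detector before allocating a real page for
 * a ZERO page.  Only pgmHandlePageZeroingCode and the PGM_PAGE_* state macros
 * are existing PGM facilities; the rest is an outline.
 */
# if 0
static VBOXSTRICTRC exampleZeroPageWriteFault(PVMCPUCC pVCpu, PCPUMCTX pCtx, PPGMPAGE pPage, RTGCPHYS GCPhys)
{
    if (   PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO
        && pgmHandlePageZeroingCode(pVCpu, pCtx))
        return VINF_SUCCESS;            /* the guest was zeroing an already zero page; RIP was advanced, skip the allocation */
    /* ... otherwise allocate a real page for GCPhys and resume or emulate the write ... */
    RT_NOREF(GCPhys);
    return VINF_EM_RAW_EMULATE_INSTR;   /* placeholder for the normal fault handling continuation */
}
# endif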
4458#endif /* PGM_WITH_PAGE_ZEROING_DETECTION */