VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp@ 99316

Last change on this file since 99316 was 99316, checked in by vboxsync, 2 years ago

VMM/PGM: Nested VMX: bugref:10318 Added PGMHandlerPhysicalRegisterVmxApicAccessPage which holds the PGM lock to fix registration of the same VMX APIC-access page by multiple VCPUs.

/* $Id: PGMAllHandler.cpp 99316 2023-04-06 15:19:22Z vboxsync $ */
/** @file
 * PGM - Page Manager / Monitor, Access Handlers.
 */

/*
 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#define VBOX_WITHOUT_PAGING_BIT_FIELDS /* 64-bit bitfields are just asking for trouble. See @bugref{9841} and others. */
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/stam.h>
#ifdef IN_RING0
# include <VBox/vmm/pdmdev.h>
#endif
#include "PGMInternal.h"
#include <VBox/vmm/vmcc.h>
#include "PGMInline.h"

#include <VBox/log.h>
#include <iprt/assert.h>
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
# include <iprt/asm-amd64-x86.h>
#endif
#include <iprt/string.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <VBox/vmm/selm.h>


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
/** Dummy physical access handler type record. */
CTX_SUFF(PGMPHYSHANDLERTYPEINT) const g_pgmHandlerPhysicalDummyType =
{
    /* .hType = */              UINT64_C(0x93b7557e1937aaff),
    /* .enmKind = */            PGMPHYSHANDLERKIND_INVALID,
    /* .uState = */             PGM_PAGE_HNDL_PHYS_STATE_ALL,
    /* .fKeepPgmLock = */       true,
    /* .fRing0DevInsIdx = */    false,
#ifdef IN_RING0
    /* .fNotInHm = */           false,
    /* .pfnHandler = */         pgmR0HandlerPhysicalHandlerToRing3,
    /* .pfnPfHandler = */       pgmR0HandlerPhysicalPfHandlerToRing3,
#elif defined(IN_RING3)
    /* .fRing0Enabled = */      false,
    /* .fNotInHm = */           false,
    /* .pfnHandler = */         pgmR3HandlerPhysicalHandlerInvalid,
#else
# error "unsupported context"
#endif
    /* .pszDesc = */            "dummy"
};


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static int  pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
                                                           void *pvBitmap, uint32_t offBitmap);
static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur);
static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur);


#ifndef IN_RING3

/**
 * @callback_method_impl{FNPGMPHYSHANDLER,
 *      Dummy for forcing ring-3 handling of the access.}
 */
DECLCALLBACK(VBOXSTRICTRC)
pgmR0HandlerPhysicalHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
                                   PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, GCPhys, pvPhys, pvBuf, cbBuf, enmAccessType, enmOrigin, uUser);
    return VINF_EM_RAW_EMULATE_INSTR;
}


/**
 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
 *      Dummy for forcing ring-3 handling of the access.}
 */
DECLCALLBACK(VBOXSTRICTRC)
pgmR0HandlerPhysicalPfHandlerToRing3(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTX pCtx,
                                     RTGCPTR pvFault, RTGCPHYS GCPhysFault, uint64_t uUser)
{
    RT_NOREF(pVM, pVCpu, uErrorCode, pCtx, pvFault, GCPhysFault, uUser);
    return VINF_EM_RAW_EMULATE_INSTR;
}

#endif /* !IN_RING3 */


/**
 * Worker for pgmHandlerPhysicalExCreate.
 *
 * @returns A new physical handler on success or NULL on failure.
 * @param   pVM         The cross context VM structure.
 * @param   pType       The physical handler type.
 * @param   hType       The physical handler type registration handle.
 * @param   uUser       User argument to the handlers (not a pointer).
 * @param   pszDesc     Description of this handler. If NULL, the type
 *                      description will be used instead.
 */
DECL_FORCE_INLINE(PPGMPHYSHANDLER) pgmHandlerPhysicalExCreateWorker(PVMCC pVM, PCPGMPHYSHANDLERTYPEINT pType,
                                                                    PGMPHYSHANDLERTYPE hType, uint64_t uUser,
                                                                    R3PTRTYPE(const char *) pszDesc)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    PPGMPHYSHANDLER pNew = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.allocateNode();
    if (pNew)
    {
        pNew->Key           = NIL_RTGCPHYS;
        pNew->KeyLast       = NIL_RTGCPHYS;
        pNew->cPages        = 0;
        pNew->cAliasedPages = 0;
        pNew->cTmpOffPages  = 0;
        pNew->uUser         = uUser;
        pNew->hType         = hType;
        pNew->pszDesc       = pszDesc != NIL_RTR3PTR ? pszDesc
#ifdef IN_RING3
                            : pType->pszDesc;
#else
                            : pVM->pgm.s.aPhysHandlerTypes[hType & PGMPHYSHANDLERTYPE_IDX_MASK].pszDesc;
#endif
    }
    return pNew;
}


/**
 * Creates a physical access handler, allocation part.
 *
 * @returns VBox status code.
 * @retval  VERR_OUT_OF_RESOURCES if no more handlers are available.
 *
 * @param   pVM             The cross context VM structure.
 * @param   hType           The handler type registration handle.
 * @param   uUser           User argument to the handlers (not a pointer).
 * @param   pszDesc         Description of this handler. If NULL, the type
 *                          description will be used instead.
 * @param   ppPhysHandler   Where to return the access handler structure on
 *                          success.
 */
int pgmHandlerPhysicalExCreate(PVMCC pVM, PGMPHYSHANDLERTYPE hType, uint64_t uUser,
                               R3PTRTYPE(const char *) pszDesc, PPGMPHYSHANDLER *ppPhysHandler)
{
    /*
     * Validate input.
     */
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);
    AssertPtr(ppPhysHandler);

    Log(("pgmHandlerPhysicalExCreate: uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
         uUser, hType, pType->enmKind, pType->pszDesc, pszDesc, R3STRING(pszDesc)));

    /*
     * Allocate and initialize the new entry.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);
    *ppPhysHandler = pgmHandlerPhysicalExCreateWorker(pVM, pType, hType, uUser, pszDesc);
    PGM_UNLOCK(pVM);
    if (*ppPhysHandler)
        return VINF_SUCCESS;
    return VERR_OUT_OF_RESOURCES;
}


/**
 * Duplicates a physical access handler.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandlerSrc The source handler to duplicate.
 * @param   ppPhysHandler   Where to return the access handler structure on
 *                          success.
 */
int pgmHandlerPhysicalExDup(PVMCC pVM, PPGMPHYSHANDLER pPhysHandlerSrc, PPGMPHYSHANDLER *ppPhysHandler)
{
    return pgmHandlerPhysicalExCreate(pVM, pPhysHandlerSrc->hType, pPhysHandlerSrc->uUser,
                                      pPhysHandlerSrc->pszDesc, ppPhysHandler);
}


/**
 * Register an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED may also be returned.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The physical handler.
 * @param   GCPhys          Start physical address.
 * @param   GCPhysLast      Last physical address (inclusive).
 */
int pgmHandlerPhysicalExRegister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Validate input.
     */
    AssertReturn(pPhysHandler, VERR_INVALID_POINTER);
    PGMPHYSHANDLERTYPE const      hType = pPhysHandler->hType;
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertReturn(pType->enmKind > PGMPHYSHANDLERKIND_INVALID && pType->enmKind < PGMPHYSHANDLERKIND_END, VERR_INVALID_HANDLE);

    AssertPtr(pPhysHandler);

    Log(("pgmHandlerPhysicalExRegister: GCPhys=%RGp GCPhysLast=%RGp hType=%#x (%d, %s) pszDesc=%RHv:%s\n", GCPhys, GCPhysLast,
         hType, pType->enmKind, pType->pszDesc, pPhysHandler->pszDesc, R3STRING(pPhysHandler->pszDesc)));
    AssertReturn(pPhysHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);

    AssertMsgReturn(GCPhys < GCPhysLast, ("GCPhys >= GCPhysLast (%#x >= %#x)\n", GCPhys, GCPhysLast), VERR_INVALID_PARAMETER);
    Assert(GCPhysLast - GCPhys < _4G); /* ASSUMPTION in PGMAllPhys.cpp */

    switch (pType->enmKind)
    {
        case PGMPHYSHANDLERKIND_WRITE:
            if (!pType->fNotInHm)
                break;
            RT_FALL_THRU(); /* Simplification: fNotInHm can only be used with full pages. */
        case PGMPHYSHANDLERKIND_MMIO:
        case PGMPHYSHANDLERKIND_ALL:
            /* Simplification for PGMPhysRead, PGMR0Trap0eHandlerNPMisconfig and others: Full pages. */
            AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);
            AssertMsgReturn((GCPhysLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK, ("%RGp\n", GCPhysLast), VERR_INVALID_PARAMETER);
            break;
        default:
            AssertMsgFailed(("Invalid input enmKind=%d!\n", pType->enmKind));
            return VERR_INVALID_PARAMETER;
    }

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
    if (   !pRam
        || GCPhysLast > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
#endif
        AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
        return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    }
    Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
    Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);

    /*
     * Try insert into list.
     */
    pPhysHandler->Key     = GCPhys;
    pPhysHandler->KeyLast = GCPhysLast;
    pPhysHandler->cPages  = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

    int rc = PGM_LOCK(pVM);
    if (RT_SUCCESS(rc))
    {
        rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
        if (RT_SUCCESS(rc))
        {
            rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
            if (rc == VINF_PGM_SYNC_CR3)
                rc = VINF_PGM_GCPHYS_ALIASED;

#if defined(IN_RING3) || defined(IN_RING0)
            NEMHCNotifyHandlerPhysicalRegister(pVM, pType->enmKind, GCPhys, GCPhysLast - GCPhys + 1);
#endif
            PGM_UNLOCK(pVM);

            if (rc != VINF_SUCCESS)
                Log(("PGMHandlerPhysicalRegisterEx: returns %Rrc (%RGp-%RGp)\n", rc, GCPhys, GCPhysLast));
            return rc;
        }
        PGM_UNLOCK(pVM);
    }

    pPhysHandler->Key     = NIL_RTGCPHYS;
    pPhysHandler->KeyLast = NIL_RTGCPHYS;

    AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);

#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp pszDesc=%s/%s\n",
                     GCPhys, GCPhysLast, R3STRING(pPhysHandler->pszDesc), R3STRING(pType->pszDesc)));
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}


/**
 * Worker for PGMHandlerPhysicalRegisterVmxApicAccessPage.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED may also be returned.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The physical handler.
 * @param   GCPhys          The address of the virtual VMX APIC-access page.
 */
static int pgmHandlerPhysicalRegisterVmxApicAccessPage(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler, RTGCPHYS GCPhys)
{
    PGM_LOCK_ASSERT_OWNER(pVM);
    LogFunc(("GCPhys=%RGp\n", GCPhys));

    /*
     * We require the range to be within registered ram.
     * There is no apparent need to support ranges which cover more than one ram range.
     */
    PPGMRAMRANGE   pRam       = pgmPhysGetRange(pVM, GCPhys);
    RTGCPHYS const GCPhysLast = GCPhys | X86_PAGE_4K_OFFSET_MASK;
    if (   !pRam
        || GCPhysLast > pRam->GCPhysLast)
    {
#ifdef IN_RING3
        DBGFR3Info(pVM->pUVM, "phys", NULL, NULL);
#endif
        AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
        return VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
    }
    Assert(GCPhys >= pRam->GCPhys && GCPhys < pRam->GCPhysLast);
    Assert(GCPhysLast <= pRam->GCPhysLast && GCPhysLast >= pRam->GCPhys);

    /*
     * Try insert into list.
     */
    pPhysHandler->Key     = GCPhys;
    pPhysHandler->KeyLast = GCPhysLast;
    pPhysHandler->cPages  = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

    int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pPhysHandler);
    if (RT_SUCCESS(rc))
    {
        rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pPhysHandler, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
        if (rc == VINF_PGM_SYNC_CR3)
            rc = VINF_PGM_GCPHYS_ALIASED;

#if defined(IN_RING3) || defined(IN_RING0)
        NEMHCNotifyHandlerPhysicalRegister(pVM, PGMPHYSHANDLERKIND_ALL, GCPhys, GCPhysLast - GCPhys + 1);
#endif
        return rc;
    }

    pPhysHandler->Key     = NIL_RTGCPHYS;
    pPhysHandler->KeyLast = NIL_RTGCPHYS;

    AssertMsgReturn(rc == VERR_ALREADY_EXISTS, ("%Rrc GCPhys=%RGp GCPhysLast=%RGp\n", rc, GCPhys, GCPhysLast), rc);
#if defined(IN_RING3) && defined(VBOX_STRICT)
    DBGFR3Info(pVM->pUVM, "handlers", "phys nostats", NULL);
#endif
    AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
    return VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
}


/**
 * Register an access handler for a physical range.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be fully
 *          updated because the guest page is aliased and/or mapped by multiple
 *          PTs. A CR3 sync has been flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an
 *          existing one. A debug assertion is raised.
 *
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address.
 * @param   GCPhysLast  Last physical address (inclusive).
 * @param   hType       The handler type registration handle.
 * @param   uUser       User argument to the handler.
 * @param   pszDesc     Description of this handler. If NULL, the type
 *                      description will be used instead.
 */
VMMDECL(int) PGMHandlerPhysicalRegister(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast, PGMPHYSHANDLERTYPE hType,
                                        uint64_t uUser, R3PTRTYPE(const char *) pszDesc)
{
#ifdef LOG_ENABLED
    PCPGMPHYSHANDLERTYPEINT pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    Log(("PGMHandlerPhysicalRegister: GCPhys=%RGp GCPhysLast=%RGp uUser=%#RX64 hType=%#x (%d, %s) pszDesc=%RHv:%s\n",
         GCPhys, GCPhysLast, uUser, hType, pType->enmKind, R3STRING(pType->pszDesc), pszDesc, R3STRING(pszDesc)));
#endif

    PPGMPHYSHANDLER pNew;
    int rc = pgmHandlerPhysicalExCreate(pVM, hType, uUser, pszDesc, &pNew);
    if (RT_SUCCESS(rc))
    {
        rc = pgmHandlerPhysicalExRegister(pVM, pNew, GCPhys, GCPhysLast);
        if (RT_SUCCESS(rc))
            return rc;
        pgmHandlerPhysicalExDestroy(pVM, pNew);
    }
    return rc;
}
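

/*
 * Usage sketch (illustrative only, not part of this file's API surface):
 * registering a write handler over a single full guest page. The handler type
 * 'hMyType' is assumed to have been created beforehand, and 'pDevIns' is a
 * placeholder user argument.
 *
 * @code
 *     RTGCPHYS const GCPhysFirst = GCPhysMmio;                         // page aligned
 *     RTGCPHYS const GCPhysLast  = GCPhysFirst + GUEST_PAGE_SIZE - 1;  // inclusive
 *     int rc = PGMHandlerPhysicalRegister(pVM, GCPhysFirst, GCPhysLast, hMyType,
 *                                         (uint64_t)(uintptr_t)pDevIns, "Example device");
 *     AssertRCReturn(rc, rc);
 * @endcode
 */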


/**
 * Register an access handler for a virtual VMX APIC-access page.
 *
 * This holds the PGM lock across the whole operation to resolve races between
 * VCPUs registering the same page simultaneously. It's also a slightly slimmer
 * version of the regular registration function as it's specific to the VMX
 * APIC-access page.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when successfully installed.
 * @retval  VINF_PGM_GCPHYS_ALIASED when the shadow PTs could not be fully
 *          updated because the guest page is aliased and/or mapped by multiple
 *          PTs. A CR3 sync has been flagged together with a pool clearing.
 * @retval  VERR_PGM_HANDLER_PHYSICAL_CONFLICT if the range conflicts with an
 *          existing one. A debug assertion is raised.
 *
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  Start physical address.
 * @param   hType   The handler type registration handle.
 */
VMMDECL(int) PGMHandlerPhysicalRegisterVmxApicAccessPage(PVMCC pVM, RTGCPHYS GCPhys, PGMPHYSHANDLERTYPE hType)
{
    PCPGMPHYSHANDLERTYPEINT const pType = pgmHandlerPhysicalTypeHandleToPtr(pVM, hType);
    AssertReturn(pType, VERR_INVALID_HANDLE);
    AssertReturn(pType->enmKind == PGMPHYSHANDLERKIND_ALL, VERR_INVALID_HANDLE);
    AssertMsgReturn(!(GCPhys & GUEST_PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_INVALID_PARAMETER);

    /*
     * Check whether the VMX APIC-access page has already been registered at this address.
     */
    int rc = PGM_LOCK_VOID(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pHandler;
    rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pHandler);
    if (RT_SUCCESS(rc))
    {
        PCPGMPHYSHANDLERTYPEINT const pHandlerType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pHandler);
        Assert(GCPhys >= pHandler->Key && GCPhys <= pHandler->KeyLast);
        Assert(   pHandlerType->enmKind == PGMPHYSHANDLERKIND_WRITE
               || pHandlerType->enmKind == PGMPHYSHANDLERKIND_ALL
               || pHandlerType->enmKind == PGMPHYSHANDLERKIND_MMIO);

        /* Check that it's the virtual VMX APIC-access page. */
        if (   pHandlerType->fNotInHm
            && pHandlerType->enmKind == PGMPHYSHANDLERKIND_ALL)
            rc = VINF_SUCCESS;
        else
        {
            rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
            AssertMsgFailed(("Conflict! GCPhys=%RGp enmKind=%#x fNotInHm=%RTbool\n", GCPhys, pHandlerType->enmKind,
                             pHandlerType->fNotInHm));
        }

        PGM_UNLOCK(pVM);
        return rc;
    }

    /*
     * Create and register a physical handler for the virtual VMX APIC-access page.
     */
    pHandler = pgmHandlerPhysicalExCreateWorker(pVM, pType, hType, 0 /*uUser*/, NULL /*pszDesc*/);
    if (pHandler)
    {
        rc = pgmHandlerPhysicalRegisterVmxApicAccessPage(pVM, pHandler, GCPhys);
        if (RT_SUCCESS(rc))
        { /* likely */ }
        else
            pgmHandlerPhysicalExDestroy(pVM, pHandler);
    }
    else
        rc = VERR_OUT_OF_RESOURCES;

    PGM_UNLOCK(pVM);
    return rc;
}
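

/*
 * Usage sketch (illustrative only): every VCPU may attempt this registration
 * when it loads the same APIC-access address into its VMCS; later callers find
 * the existing handler under the same PGM lock and simply get VINF_SUCCESS
 * back. 'hApicAccessType' is a placeholder for an ALL-kind handler type
 * created with the not-in-HM attribute.
 *
 * @code
 *     int rc = PGMHandlerPhysicalRegisterVmxApicAccessPage(pVM, GCPhysApicAccess, hApicAccessType);
 *     AssertRCReturn(rc, rc);
 * @endcode
 */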


/**
 * Sets ram range flags and attempts updating shadow PTs.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS when the shadow PTs were successfully updated.
 * @retval  VINF_PGM_SYNC_CR3 when the shadow PTs could not be fully updated
 *          because the guest page is aliased and/or mapped by multiple PTs.
 *          FFs set.
 * @param   pVM         The cross context VM structure.
 * @param   pCur        The physical handler.
 * @param   pRam        The RAM range.
 * @param   pvBitmap    Dirty bitmap. Optional.
 * @param   offBitmap   Dirty bitmap offset.
 */
static int pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(PVMCC pVM, PPGMPHYSHANDLER pCur, PPGMRAMRANGE pRam,
                                                          void *pvBitmap, uint32_t offBitmap)
{
    /*
     * Iterate the guest ram pages updating the flags and flushing PT entries
     * mapping the page.
     */
    bool                    fFlushTLBs = false;
    int                     rc         = VINF_SUCCESS;
    PCPGMPHYSHANDLERTYPEINT pCurType   = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    const unsigned          uState     = pCurType->uState;
    uint32_t                cPages     = pCur->cPages;
    uint32_t                i          = (pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
    for (;;)
    {
        PPGMPAGE pPage = &pRam->aPages[i];
        AssertMsg(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage),
                  ("%RGp %R[pgmpage]\n", pRam->GCPhys + (i << GUEST_PAGE_SHIFT), pPage));

        /* Only do upgrades. */
        if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState, pCurType->fNotInHm);

            const RTGCPHYS GCPhysPage = pRam->GCPhys + (i << GUEST_PAGE_SHIFT);
            int rc2 = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage,
                                               false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
            if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
                rc = rc2;

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            if (pvBitmap)
                ASMBitSet(pvBitmap, offBitmap);
        }

        /* next */
        if (--cPages == 0)
            break;
        i++;
        offBitmap++;
    }

    if (fFlushTLBs)
    {
        PGM_INVL_ALL_VCPU_TLBS(pVM);
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: flushing guest TLBs; rc=%d\n", rc));
    }
    else
        Log(("pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs: doesn't flush guest TLBs. rc=%Rrc; sync flags=%x VMCPU_FF_PGM_SYNC_CR3=%d\n",
             rc, VMMGetCpu(pVM)->pgm.s.fSyncFlags, VMCPU_FF_IS_SET(VMMGetCpu(pVM), VMCPU_FF_PGM_SYNC_CR3)));

    return rc;
}
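

/*
 * A worked example of the pCur->cPages value consumed by the loop above
 * (values are illustrative): with 4 KiB guest pages, a WRITE handler may cover
 * a partial first and last page, so the count runs from the start of the first
 * page to the end of the last one. For Key=0x10080 and KeyLast=0x111ff the
 * base page is 0x10000, giving (0x111ff - 0x10000 + 0x1000) >> 12 = 2 pages,
 * i.e. both partially covered pages are included.
 */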


/**
 * Deregister a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pPhysHandler    The handler to deregister (but not free).
 */
int pgmHandlerPhysicalExDeregister(PVMCC pVM, PPGMPHYSHANDLER pPhysHandler)
{
    LogFlow(("pgmHandlerPhysicalExDeregister: Removing Range %RGp-%RGp %s\n",
             pPhysHandler->Key, pPhysHandler->KeyLast, R3STRING(pPhysHandler->pszDesc)));

    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    RTGCPHYS const GCPhys = pPhysHandler->Key;
    AssertReturnStmt(GCPhys != NIL_RTGCPHYS, PGM_UNLOCK(pVM), VERR_PGM_HANDLER_NOT_FOUND);

    /*
     * Remove the handler from the tree.
     */

    PPGMPHYSHANDLER pRemoved;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
    if (RT_SUCCESS(rc))
    {
        if (pRemoved == pPhysHandler)
        {
            /*
             * Clear the page bits, notify the REM about this change and clear
             * the cache.
             */
            pgmHandlerPhysicalResetRamFlags(pVM, pPhysHandler);
            if (VM_IS_NEM_ENABLED(pVM))
                pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pPhysHandler);
            pVM->pgm.s.idxLastPhysHandler = 0;

            pPhysHandler->Key     = NIL_RTGCPHYS;
            pPhysHandler->KeyLast = NIL_RTGCPHYS;

            PGM_UNLOCK(pVM);

            return VINF_SUCCESS;
        }

        /*
         * Both of the failure conditions here are considered internal processing
         * errors because they can only be caused by race conditions or corruption.
         * If we ever need to handle concurrent deregistration, we have to move
         * the NIL_RTGCPHYS check inside the PGM lock.
         */
        pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->insert(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, pRemoved);
    }

    PGM_UNLOCK(pVM);

    if (RT_FAILURE(rc))
        AssertMsgFailed(("Didn't find range starting at %RGp in the tree! rc=%Rrc\n", GCPhys, rc));
    else
        AssertMsgFailed(("Found different handler at %RGp in the tree: got %p instead of %p\n",
                         GCPhys, pRemoved, pPhysHandler));
    return VERR_PGM_HANDLER_IPE_1;
}


/**
 * Destroys (frees) a physical handler.
 *
 * The caller must deregister it before destroying it!
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pHandler    The handler to free. NULL is ignored.
 */
int pgmHandlerPhysicalExDestroy(PVMCC pVM, PPGMPHYSHANDLER pHandler)
{
    if (pHandler)
    {
        AssertPtr(pHandler);
        AssertReturn(pHandler->Key == NIL_RTGCPHYS, VERR_WRONG_ORDER);

        int rc = PGM_LOCK(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pHandler);
            PGM_UNLOCK(pVM);
        }
        return rc;
    }
    return VINF_SUCCESS;
}
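

/*
 * Usage sketch (illustrative only): the pgmHandlerPhysicalEx* API splits node
 * allocation from tree insertion, so a handler can be created once and
 * attached, detached and re-attached without reallocating.
 *
 * @code
 *     PPGMPHYSHANDLER pHandler;
 *     int rc = pgmHandlerPhysicalExCreate(pVM, hMyType, uUser, pszDesc, &pHandler);
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = pgmHandlerPhysicalExRegister(pVM, pHandler, GCPhysFirst, GCPhysLast);
 *         if (RT_SUCCESS(rc))
 *             rc = pgmHandlerPhysicalExDeregister(pVM, pHandler); // detach, keep the node
 *         pgmHandlerPhysicalExDestroy(pVM, pHandler);             // free the node
 *     }
 * @endcode
 */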


/**
 * Deregister a physical page access handler.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  Start physical address.
 */
VMMDECL(int) PGMHandlerPhysicalDeregister(PVMCC pVM, RTGCPHYS GCPhys)
{
    AssertReturn(pVM->VMCC_CTX(pgm).s.pPhysHandlerTree, VERR_PGM_HANDLER_IPE_1);

    /*
     * Find the handler.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pRemoved;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->remove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pRemoved);
    if (RT_SUCCESS(rc))
    {
        Assert(pRemoved->Key == GCPhys);
        LogFlow(("PGMHandlerPhysicalDeregister: Removing Range %RGp-%RGp %s\n",
                 pRemoved->Key, pRemoved->KeyLast, R3STRING(pRemoved->pszDesc)));

        /*
         * Clear the page bits, notify the REM about this change and clear
         * the cache.
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pRemoved);
        if (VM_IS_NEM_ENABLED(pVM))
            pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pRemoved);
        pVM->pgm.s.idxLastPhysHandler = 0;

        pRemoved->Key = NIL_RTGCPHYS;
        rc = pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator.freeNode(pRemoved);

        PGM_UNLOCK(pVM);
        return rc;
    }

    PGM_UNLOCK(pVM);

    if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    return rc;
}
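

/*
 * Usage sketch (illustrative only): the public deregistration entry point
 * tears the handler down by start address alone and frees the node in the same
 * call, so it pairs directly with PGMHandlerPhysicalRegister().
 *
 * @code
 *     int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysFirst);
 *     AssertLogRelRC(rc);
 * @endcode
 */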


/**
 * Shared code with modify.
 */
static void pgmHandlerPhysicalDeregisterNotifyNEM(PVMCC pVM, PPGMPHYSHANDLER pCur)
{
#ifdef VBOX_WITH_NATIVE_NEM
    PCPGMPHYSHANDLERTYPEINT pCurType    = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
    RTGCPHYS                GCPhysStart = pCur->Key;
    RTGCPHYS                GCPhysLast  = pCur->KeyLast;

    /*
     * Page align the range.
     *
     * Since we've reset (recalculated) the physical handler state of all pages
     * we can make use of the page states to figure out whether a page should be
     * included in the REM notification or not.
     */
    if (   (pCur->Key & GUEST_PAGE_OFFSET_MASK)
        || ((pCur->KeyLast + 1) & GUEST_PAGE_OFFSET_MASK))
    {
        Assert(pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO);

        if (GCPhysStart & GUEST_PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart);
            if (   pPage
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysStart + (GUEST_PAGE_SIZE - 1)) & X86_PTE_PAE_PG_MASK;
                if (   GCPhys > GCPhysLast
                    || GCPhys < GCPhysStart)
                    return;
                GCPhysStart = GCPhys;
            }
            else
                GCPhysStart &= X86_PTE_PAE_PG_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }

        if (GCPhysLast & GUEST_PAGE_OFFSET_MASK)
        {
            PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast);
            if (   pPage
                && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
            {
                RTGCPHYS GCPhys = (GCPhysLast & X86_PTE_PAE_PG_MASK) - 1;
                if (   GCPhys < GCPhysStart
                    || GCPhys > GCPhysLast)
                    return;
                GCPhysLast = GCPhys;
            }
            else
                GCPhysLast |= GUEST_PAGE_OFFSET_MASK;
            Assert(!pPage || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO); /* these are page aligned atm! */
        }
    }

    /*
     * Tell NEM.
     */
    PPGMRAMRANGE const pRam    = pgmPhysGetRange(pVM, GCPhysStart);
    RTGCPHYS const     cb      = GCPhysLast - GCPhysStart + 1;
    uint8_t            u2State = UINT8_MAX;
    NEMHCNotifyHandlerPhysicalDeregister(pVM, pCurType->enmKind, GCPhysStart, cb,
                                         pRam ? PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysStart) : NULL, &u2State);
    if (u2State != UINT8_MAX && pRam)
        pgmPhysSetNemStateForPages(&pRam->aPages[(GCPhysStart - pRam->GCPhys) >> GUEST_PAGE_SHIFT],
                                   cb >> GUEST_PAGE_SHIFT, u2State);
#else
    RT_NOREF(pVM, pCur);
#endif
}


/**
 * pgmHandlerPhysicalResetRamFlags helper that checks for other handlers on
 * edge pages.
 */
DECLINLINE(void) pgmHandlerPhysicalRecalcPageState(PVMCC pVM, RTGCPHYS GCPhys, bool fAbove, PPGMRAMRANGE *ppRamHint)
{
    /*
     * Look for other handlers.
     */
    unsigned uState = PGM_PAGE_HNDL_PHYS_STATE_NONE;
    for (;;)
    {
        PPGMPHYSHANDLER pCur;
        int             rc;
        if (fAbove)
            rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
                                                                              GCPhys, &pCur);
        else
            rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookupMatchingOrBelow(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
                                                                              GCPhys, &pCur);
        if (rc == VERR_NOT_FOUND)
            break;
        AssertRCBreak(rc);
        if (((fAbove ? pCur->Key : pCur->KeyLast) >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
            break;
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        uState = RT_MAX(uState, pCurType->uState);

        /* next? */
        RTGCPHYS GCPhysNext = fAbove
                            ? pCur->KeyLast + 1
                            : pCur->Key - 1;
        if ((GCPhysNext >> GUEST_PAGE_SHIFT) != (GCPhys >> GUEST_PAGE_SHIFT))
            break;
        GCPhys = GCPhysNext;
    }

    /*
     * Update if we found something that is a higher priority state than the current.
     * Note! The PGMPHYSHANDLER_F_NOT_IN_HM can be ignored here as it requires whole pages.
     */
    if (uState != PGM_PAGE_HNDL_PHYS_STATE_NONE)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint);
        if (   RT_SUCCESS(rc)
            && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState)
        {
            /* This should normally not be necessary. */
            PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, uState);
            bool fFlushTLBs;
            rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, false /*fFlushPTEs*/, &fFlushTLBs);
            if (RT_SUCCESS(rc) && fFlushTLBs)
                PGM_INVL_ALL_VCPU_TLBS(pVM);
            else
                AssertRC(rc);

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection update. */
            if (VM_IS_NEM_ENABLED(pVM))
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(*ppRamHint, GCPhys),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
        }
        else
            AssertRC(rc);
    }
}


/**
 * Resets an aliased page.
 *
 * @param   pVM             The cross context VM structure.
 * @param   pPage           The page.
 * @param   GCPhysPage      The page address in case it comes in handy.
 * @param   pRam            The RAM range the page is associated with (for NEM
 *                          notifications).
 * @param   fDoAccounting   Whether to perform accounting. (Only set during
 *                          reset where pgmR3PhysRamReset doesn't have the
 *                          handler structure handy.)
 * @param   fFlushIemTlbs   Whether to perform IEM TLB flushing or not. This
 *                          can be cleared only if the caller does the flushing
 *                          after calling this function.
 */
void pgmHandlerPhysicalResetAliasedPage(PVMCC pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage, PPGMRAMRANGE pRam,
                                        bool fDoAccounting, bool fFlushIemTlbs)
{
    Assert(   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
           || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
    Assert(PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
#ifdef VBOX_WITH_NATIVE_NEM
    RTHCPHYS const HCPhysPrev = PGM_PAGE_GET_HCPHYS(pPage);
#endif

    /*
     * Flush any shadow page table references *first*.
     */
    bool fFlushTLBs = false;
    int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPage, true /*fFlushPTEs*/, &fFlushTLBs);
    AssertLogRelRCReturnVoid(rc);
#if defined(VBOX_VMM_TARGET_ARMV8)
    AssertReleaseFailed();
#else
    HMFlushTlbOnAllVCpus(pVM);
#endif

    /*
     * Make it an MMIO/Zero page.
     */
    PGM_PAGE_SET_HCPHYS(pVM, pPage, pVM->pgm.s.HCPhysZeroPg);
    PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO);
    PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ZERO);
    PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
    PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_ALL);

    /*
     * Flush its TLB entry.
     */
    pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
    if (fFlushIemTlbs)
        IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);

    /*
     * Do accounting for pgmR3PhysRamReset.
     */
    if (fDoAccounting)
    {
        PPGMPHYSHANDLER pHandler;
        rc = pgmHandlerPhysicalLookup(pVM, GCPhysPage, &pHandler);
        if (RT_SUCCESS(rc))
        {
            Assert(pHandler->cAliasedPages > 0);
            pHandler->cAliasedPages--;
        }
        else
            AssertMsgFailed(("rc=%Rrc GCPhysPage=%RGp\n", rc, GCPhysPage));
    }

#ifdef VBOX_WITH_NATIVE_NEM
    /*
     * Tell NEM about the protection change.
     */
    if (VM_IS_NEM_ENABLED(pVM))
    {
        uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
        NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                   NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
        PGM_PAGE_SET_NEM_STATE(pPage, u2State);
    }
#else
    RT_NOREF(pRam);
#endif
}


/**
 * Resets ram range flags.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pCur    The physical handler.
 *
 * @remark  We don't start messing with the shadow page tables, as we've
 *          already got code in Trap0e which deals with out of sync handler
 *          flags (originally conceived for global pages).
 */
static void pgmHandlerPhysicalResetRamFlags(PVMCC pVM, PPGMPHYSHANDLER pCur)
{
    /*
     * Iterate the guest ram pages updating the state.
     */
    RTUINT       cPages   = pCur->cPages;
    RTGCPHYS     GCPhys   = pCur->Key;
    PPGMRAMRANGE pRamHint = NULL;
    for (;;)
    {
        PPGMPAGE pPage;
        int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint);
        if (RT_SUCCESS(rc))
        {
            /* Reset aliased MMIO pages to MMIO, since this aliasing is our business.
               (We don't flip MMIO to RAM though, that's PGMPhys.cpp's job.) */
            bool fNemNotifiedAlready = false;
            if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
            {
                Assert(pCur->cAliasedPages > 0);
                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhys, pRamHint, false /*fDoAccounting*/, true /*fFlushIemTlbs*/);
                pCur->cAliasedPages--;
                fNemNotifiedAlready = true;
            }
#ifdef VBOX_STRICT
            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
            AssertMsg(pCurType && (pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO || PGM_PAGE_IS_MMIO(pPage)),
                      ("%RGp %R[pgmpage]\n", GCPhys, pPage));
#endif
            PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE, false);

#ifdef VBOX_WITH_NATIVE_NEM
            /* Tell NEM about the protection change. */
            if (VM_IS_NEM_ENABLED(pVM) && !fNemNotifiedAlready)
            {
                uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                NEMHCNotifyPhysPageProtChanged(pVM, GCPhys, PGM_PAGE_GET_HCPHYS(pPage),
                                               PGM_RAMRANGE_CALC_PAGE_R3PTR(pRamHint, GCPhys),
                                               pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
            }
#endif
            RT_NOREF(fNemNotifiedAlready);
        }
        else
            AssertRC(rc);

        /* next */
        if (--cPages == 0)
            break;
        GCPhys += GUEST_PAGE_SIZE;
    }

    pCur->cAliasedPages = 0;
    pCur->cTmpOffPages  = 0;

    /*
     * Check for partial start and end pages.
     */
    if (pCur->Key & GUEST_PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pVM, pCur->Key - 1, false /* fAbove */, &pRamHint);
    if ((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) != GUEST_PAGE_OFFSET_MASK)
        pgmHandlerPhysicalRecalcPageState(pVM, pCur->KeyLast + 1, true /* fAbove */, &pRamHint);
}


#if 0 /* unused */
/**
 * Modify a physical page access handler.
 *
 * Modification can only be done to the range itself, not the type or anything else.
 *
 * @returns VBox status code.
 *          For all return codes other than VERR_PGM_HANDLER_NOT_FOUND and VINF_SUCCESS the range is deregistered
 *          and a new registration must be performed!
 * @param   pVM             The cross context VM structure.
 * @param   GCPhysCurrent   Current location.
 * @param   GCPhys          New location.
 * @param   GCPhysLast      New last location.
 */
VMMDECL(int) PGMHandlerPhysicalModify(PVMCC pVM, RTGCPHYS GCPhysCurrent, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast)
{
    /*
     * Remove it.
     */
    int rc;
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysCurrent);
    if (pCur)
    {
        /*
         * Clear the ram flags. (We're gonna move or free it!)
         */
        pgmHandlerPhysicalResetRamFlags(pVM, pCur);
        PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        /** @todo pCurType validation. */
        bool const fRestoreAsRAM = pCurType->pfnHandlerR3 /** @todo this isn't entirely correct. */
                                && pCurType->enmKind != PGMPHYSHANDLERKIND_MMIO;

        /*
         * Validate the new range, modify and reinsert.
         */
        if (GCPhysLast >= GCPhys)
        {
            /*
             * We require the range to be within registered ram.
             * There is no apparent need to support ranges which cover more than one ram range.
             */
            PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
            if (   pRam
                && GCPhys <= pRam->GCPhysLast
                && GCPhysLast >= pRam->GCPhys)
            {
                pCur->Core.Key     = GCPhys;
                pCur->Core.KeyLast = GCPhysLast;
                pCur->cPages       = (GCPhysLast - (GCPhys & X86_PTE_PAE_PG_MASK) + 1) >> GUEST_PAGE_SHIFT;

                if (RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pCur->Core))
                {
                    RTGCPHYS const           cb      = GCPhysLast - GCPhys + 1;
                    PGMPHYSHANDLERKIND const enmKind = pCurType->enmKind;

                    /*
                     * Set ram flags, flush shadow PT entries and finally tell REM about this.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL, 0);

                    /** @todo NEM: not sure we need this notification... */
                    NEMHCNotifyHandlerPhysicalModify(pVM, enmKind, GCPhysCurrent, GCPhys, cb, fRestoreAsRAM);

                    PGM_UNLOCK(pVM);

                    PGM_INVL_ALL_VCPU_TLBS(pVM);
                    Log(("PGMHandlerPhysicalModify: GCPhysCurrent=%RGp -> GCPhys=%RGp GCPhysLast=%RGp\n",
                         GCPhysCurrent, GCPhys, GCPhysLast));
                    return VINF_SUCCESS;
                }

                AssertMsgFailed(("Conflict! GCPhys=%RGp GCPhysLast=%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_CONFLICT;
            }
            else
            {
                AssertMsgFailed(("No RAM range for %RGp-%RGp\n", GCPhys, GCPhysLast));
                rc = VERR_PGM_HANDLER_PHYSICAL_NO_RAM_RANGE;
            }
        }
        else
        {
            AssertMsgFailed(("Invalid range %RGp-%RGp\n", GCPhys, GCPhysLast));
            rc = VERR_INVALID_PARAMETER;
        }

        /*
         * Invalid new location, flush the cache and free it.
         * We've only gotta notify REM and free the memory.
         */
        if (VM_IS_NEM_ENABLED(pVM))
            pgmHandlerPhysicalDeregisterNotifyNEM(pVM, pCur);
        pVM->pgm.s.pLastPhysHandlerR0 = 0;
        pVM->pgm.s.pLastPhysHandlerR3 = 0;
        PGMHandlerPhysicalTypeRelease(pVM, pCur->hType);
        MMHyperFree(pVM, pCur);
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhysCurrent));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}
#endif /* unused */


/**
 * Changes the user callback arguments associated with a physical access handler.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  Start physical address of the handler.
 * @param   uUser   User argument to the handlers.
 */
VMMDECL(int) PGMHandlerPhysicalChangeUserArg(PVMCC pVM, RTGCPHYS GCPhys, uint64_t uUser)
{
    /*
     * Find the handler and make the change.
     */
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);
        pCur->uUser = uUser;
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}
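

/*
 * Usage sketch (illustrative only): swapping the opaque user argument in
 * place, e.g. when the object it refers to has been recreated; 'uNewUser' is
 * a placeholder.
 *
 * @code
 *     int rc = PGMHandlerPhysicalChangeUserArg(pVM, GCPhysFirst, uNewUser);
 *     AssertRC(rc);
 * @endcode
 */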

#if 0 /* unused */

/**
 * Splits a physical access handler in two.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      Start physical address of the handler.
 * @param   GCPhysSplit The split address.
 */
VMMDECL(int) PGMHandlerPhysicalSplit(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysSplit)
{
    AssertReturn(GCPhys < GCPhysSplit, VERR_INVALID_PARAMETER);

    /*
     * Do the allocation without owning the lock.
     */
    PPGMPHYSHANDLER pNew;
    int rc = MMHyperAlloc(pVM, sizeof(*pNew), 0, MM_TAG_PGM_HANDLERS, (void **)&pNew);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Get the handler.
     */
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
    if (RT_LIKELY(pCur))
    {
        if (RT_LIKELY(GCPhysSplit <= pCur->Core.KeyLast))
        {
            /*
             * Create new handler node for the 2nd half.
             */
            *pNew = *pCur;
            pNew->Core.Key = GCPhysSplit;
            pNew->cPages   = (pNew->Core.KeyLast - (pNew->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

            pCur->Core.KeyLast = GCPhysSplit - 1;
            pCur->cPages       = (pCur->Core.KeyLast - (pCur->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;

            if (RT_LIKELY(RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, &pNew->Core)))
            {
                LogFlow(("PGMHandlerPhysicalSplit: %RGp-%RGp and %RGp-%RGp\n",
                         pCur->Core.Key, pCur->Core.KeyLast, pNew->Core.Key, pNew->Core.KeyLast));
                PGM_UNLOCK(pVM);
                return VINF_SUCCESS;
            }
            AssertMsgFailed(("whu?\n"));
            rc = VERR_PGM_PHYS_HANDLER_IPE;
        }
        else
        {
            AssertMsgFailed(("outside range: %RGp-%RGp split %RGp\n", pCur->Core.Key, pCur->Core.KeyLast, GCPhysSplit));
            rc = VERR_INVALID_PARAMETER;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    PGM_UNLOCK(pVM);
    MMHyperFree(pVM, pNew);
    return rc;
}


/**
 * Joins up two adjacent physical access handlers which have the same callbacks.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys1 Start physical address of the first handler.
 * @param   GCPhys2 Start physical address of the second handler.
 */
VMMDECL(int) PGMHandlerPhysicalJoin(PVMCC pVM, RTGCPHYS GCPhys1, RTGCPHYS GCPhys2)
{
    /*
     * Get the handlers.
     */
    int rc;
    PGM_LOCK_VOID(pVM);
    PPGMPHYSHANDLER pCur1 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys1);
    if (RT_LIKELY(pCur1))
    {
        PPGMPHYSHANDLER pCur2 = (PPGMPHYSHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
        if (RT_LIKELY(pCur2))
        {
            /*
             * Make sure that they are adjacent, and that they've got the same callbacks.
             */
            if (RT_LIKELY(pCur1->Core.KeyLast + 1 == pCur2->Core.Key))
            {
                if (RT_LIKELY(pCur1->hType == pCur2->hType))
                {
                    PPGMPHYSHANDLER pCur3 = (PPGMPHYSHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys2);
                    if (RT_LIKELY(pCur3 == pCur2))
                    {
                        pCur1->Core.KeyLast = pCur2->Core.KeyLast;
                        pCur1->cPages       = (pCur1->Core.KeyLast - (pCur1->Core.Key & X86_PTE_PAE_PG_MASK) + GUEST_PAGE_SIZE) >> GUEST_PAGE_SHIFT;
                        LogFlow(("PGMHandlerPhysicalJoin: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                        pVM->pgm.s.pLastPhysHandlerR0 = 0;
                        pVM->pgm.s.pLastPhysHandlerR3 = 0;
                        PGMHandlerPhysicalTypeRelease(pVM, pCur2->hType);
                        MMHyperFree(pVM, pCur2);
                        PGM_UNLOCK(pVM);
                        return VINF_SUCCESS;
                    }

                    Assert(pCur3 == pCur2);
                    rc = VERR_PGM_PHYS_HANDLER_IPE;
                }
                else
                {
                    AssertMsgFailed(("mismatching handlers\n"));
                    rc = VERR_ACCESS_DENIED;
                }
            }
            else
            {
                AssertMsgFailed(("not adjacent: %RGp-%RGp %RGp-%RGp\n",
                                 pCur1->Core.Key, pCur1->Core.KeyLast, pCur2->Core.Key, pCur2->Core.KeyLast));
                rc = VERR_INVALID_PARAMETER;
            }
        }
        else
        {
            AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys2));
            rc = VERR_PGM_HANDLER_NOT_FOUND;
        }
    }
    else
    {
        AssertMsgFailed(("Didn't find range starting at %RGp\n", GCPhys1));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }
    PGM_UNLOCK(pVM);
    return rc;
}

#endif /* unused */

/**
 * Resets any modifications to individual pages in a physical page access
 * handler region.
 *
 * This is used in pair with PGMHandlerPhysicalPageTempOff(),
 * PGMHandlerPhysicalPageAliasMmio2() or PGMHandlerPhysicalPageAliasHC().
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   GCPhys  The start address of the handler region, i.e. what you
 *                  passed to PGMR3HandlerPhysicalRegister(),
 *                  PGMHandlerPhysicalRegisterEx() or
 *                  PGMHandlerPhysicalModify().
 */
VMMDECL(int) PGMHandlerPhysicalReset(PVMCC pVM, RTGCPHYS GCPhys)
{
    LogFlow(("PGMHandlerPhysicalReset GCPhys=%RGp\n", GCPhys));
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Find the handler.
     */
    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);

        /*
         * Validate kind.
         */
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
        switch (pCurType->enmKind)
        {
            case PGMPHYSHANDLERKIND_WRITE:
            case PGMPHYSHANDLERKIND_ALL:
            case PGMPHYSHANDLERKIND_MMIO: /* NOTE: Only use when clearing MMIO ranges with aliased MMIO2 pages! */
            {
                STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset)); /** @todo move out of switch */
                PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
                Assert(pRam);
                Assert(pRam->GCPhys     <= pCur->Key);
                Assert(pRam->GCPhysLast >= pCur->KeyLast);

                if (pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO)
                {
                    /*
                     * Reset all the PGMPAGETYPE_MMIO2_ALIAS_MMIO pages first and that's it.
                     * This could probably be optimized a bit wrt flushing, but I'm too lazy
                     * to do that now...
                     */
                    if (pCur->cAliasedPages)
                    {
                        PPGMPAGE pPage        = &pRam->aPages[(pCur->Key - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
                        RTGCPHYS GCPhysPage   = pCur->Key;
                        uint32_t cLeft        = pCur->cPages;
                        bool     fFlushIemTlb = false;
                        while (cLeft-- > 0)
                        {
                            if (   PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO
                                || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO)
                            {
                                fFlushIemTlb |= PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO;
                                Assert(pCur->cAliasedPages > 0);
                                pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
                                                                   false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
                                --pCur->cAliasedPages;
#ifndef VBOX_STRICT
                                if (pCur->cAliasedPages == 0)
                                    break;
#endif
                            }
                            Assert(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO);
                            GCPhysPage += GUEST_PAGE_SIZE;
                            pPage++;
                        }
                        Assert(pCur->cAliasedPages == 0);

                        /*
                         * Flush IEM TLBs in case they contain any references to aliased pages.
                         * This is only necessary for MMIO2 aliases.
                         */
                        if (fFlushIemTlb)
                            IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
                    }
                }
                else if (pCur->cTmpOffPages > 0)
                {
                    /*
                     * Set the flags and flush shadow PT entries.
                     */
                    rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, NULL /*pvBitmap*/, 0 /*offBitmap*/);
                }

                pCur->cAliasedPages = 0;
                pCur->cTmpOffPages  = 0;

                rc = VINF_SUCCESS;
                break;
            }

            /*
             * Invalid.
             */
            default:
                AssertMsgFailed(("Invalid type %d/%#x! Corruption!\n", pCurType->enmKind, pCur->hType));
                rc = VERR_PGM_PHYS_HANDLER_IPE;
                break;
        }
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    PGM_UNLOCK(pVM);
    return rc;
}
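

/*
 * Usage sketch (illustrative only): re-arming write monitoring after a dirty
 * scan. All pages previously switched off with PGMHandlerPhysicalPageTempOff()
 * (see further down) are re-enabled in one call.
 *
 * @code
 *     int rc = PGMHandlerPhysicalReset(pVM, GCPhysFirst);
 *     AssertRC(rc);
 * @endcode
 */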


/**
 * Special version of PGMHandlerPhysicalReset used by MMIO2 w/ dirty page
 * tracking.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start address of the handler region.
 * @param   pvBitmap    Dirty bitmap. Caller has cleared this already, only
 *                      dirty bits will be set. Caller also made sure it's big
 *                      enough.
 * @param   offBitmap   Dirty bitmap offset.
 * @remarks Caller must own the PGM critical section.
 */
DECLHIDDEN(int) pgmHandlerPhysicalResetMmio2WithBitmap(PVMCC pVM, RTGCPHYS GCPhys, void *pvBitmap, uint32_t offBitmap)
{
    LogFlow(("pgmHandlerPhysicalResetMmio2WithBitmap GCPhys=%RGp\n", GCPhys));
    PGM_LOCK_ASSERT_OWNER(pVM);

    /*
     * Find the handler.
     */
    PPGMPHYSHANDLER pCur;
    int rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);

        /*
         * Validate kind.
         */
        PCPGMPHYSHANDLERTYPEINT pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
        if (   pCurType
            && pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE)
        {
            STAM_COUNTER_INC(&pVM->pgm.s.Stats.CTX_MID_Z(Stat,PhysHandlerReset));

            PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys);
            Assert(pRam);
            Assert(pRam->GCPhys     <= pCur->Key);
            Assert(pRam->GCPhysLast >= pCur->KeyLast);

            /*
             * Set the flags and flush shadow PT entries.
             */
            if (pCur->cTmpOffPages > 0)
            {
                rc = pgmHandlerPhysicalSetRamFlagsAndFlushShadowPTs(pVM, pCur, pRam, pvBitmap, offBitmap);
                pCur->cTmpOffPages = 0;
            }
            else
                rc = VINF_SUCCESS;
        }
        else
        {
            AssertFailed();
            rc = VERR_WRONG_TYPE;
        }
    }
    else if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Didn't find MMIO Range starting at %#x\n", GCPhys));
        rc = VERR_PGM_HANDLER_NOT_FOUND;
    }

    return rc;
}
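

/*
 * The bitmap filled in above holds one bit per guest page of the handler range
 * (set via ASMBitSet() for every page that was temporarily off, i.e. written).
 * A caller sketch (illustrative only), assuming the range has at most 256
 * pages and that the PGM lock is already owned:
 *
 * @code
 *     uint64_t abmDirty[4] = {0};
 *     int rc = pgmHandlerPhysicalResetMmio2WithBitmap(pVM, GCPhysFirst, abmDirty, 0);
 *     if (RT_SUCCESS(rc))
 *         for (uint32_t iPage = 0; iPage < cPages; iPage++)
 *             if (ASMBitTest(abmDirty, iPage))
 *                 copyOutDirtyPage(iPage); // placeholder, not a real API
 * @endcode
 */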


/**
 * Temporarily turns off the access monitoring of a page within a monitored
 * physical write/all page access handler region.
 *
 * Use this when no further \#PFs are required for that page. Be aware that
 * a page directory sync might reset the flags, and turn on access monitoring
 * for the page.
 *
 * The caller must do required page table modifications.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   GCPhys      The start address of the access handler. This
 *                      must be a fully page aligned range or we risk
 *                      messing up other handlers installed for the
 *                      start and end pages.
 * @param   GCPhysPage  The physical address of the page to turn off
 *                      access monitoring for.
 */
VMMDECL(int) PGMHandlerPhysicalPageTempOff(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage)
{
    LogFlow(("PGMHandlerPhysicalPageTempOff GCPhysPage=%RGp\n", GCPhysPage));
    int rc = PGM_LOCK(pVM);
    AssertRCReturn(rc, rc);

    /*
     * Validate the range.
     */
    PPGMPHYSHANDLER pCur;
    rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
    if (RT_SUCCESS(rc))
    {
        Assert(pCur->Key == GCPhys);
        if (RT_LIKELY(   GCPhysPage >= pCur->Key
                      && GCPhysPage <= pCur->KeyLast))
        {
            Assert(!(pCur->Key & GUEST_PAGE_OFFSET_MASK));
            Assert((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK);

            PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE(pVM, pCur);
            AssertReturnStmt(   pCurType
                             && (   pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
                                 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL),
                             PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);

            /*
             * Change the page status.
             */
            PPGMPAGE     pPage;
            PPGMRAMRANGE pRam;
            rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
            AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
            if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
            {
                PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
                pCur->cTmpOffPages++;

#ifdef VBOX_WITH_NATIVE_NEM
                /* Tell NEM about the protection change (VGA is using this to track dirty pages). */
                if (VM_IS_NEM_ENABLED(pVM))
                {
                    uint8_t     u2State = PGM_PAGE_GET_NEM_STATE(pPage);
                    PGMPAGETYPE enmType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage);
                    NEMHCNotifyPhysPageProtChanged(pVM, GCPhysPage, PGM_PAGE_GET_HCPHYS(pPage),
                                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                                   pgmPhysPageCalcNemProtection(pPage, enmType), enmType, &u2State);
                    PGM_PAGE_SET_NEM_STATE(pPage, u2State);
                }
#endif
            }
            PGM_UNLOCK(pVM);
            return VINF_SUCCESS;
        }
        PGM_UNLOCK(pVM);
        AssertMsgFailed(("The page %#x is outside the range %#x-%#x\n", GCPhysPage, pCur->Key, pCur->KeyLast));
        return VERR_INVALID_PARAMETER;
    }
    PGM_UNLOCK(pVM);

    if (rc == VERR_NOT_FOUND)
    {
        AssertMsgFailed(("Specified physical handler start address %#x is invalid.\n", GCPhys));
        return VERR_PGM_HANDLER_NOT_FOUND;
    }
    return rc;
}
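

/*
 * Usage sketch (illustrative only): the classic dirty-tracking pattern (VGA
 * VRAM uses this). The write handler switches the faulting page off so
 * subsequent writes run at full speed, and the device re-arms the whole range
 * with PGMHandlerPhysicalReset() when it next scans for dirty pages.
 *
 * @code
 *     // inside a PGMPHYSHANDLERKIND_WRITE callback:
 *     int rc = PGMHandlerPhysicalPageTempOff(pVM, GCPhysHandler,
 *                                            GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK);
 *     AssertRC(rc);
 * @endcode
 */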
1620
1621
1622/**
1623 * Resolves an MMIO2 page.
1624 *
1625 * Caller as taken the PGM lock.
1626 *
1627 * @returns Pointer to the page if valid, NULL otherwise
1628 * @param pVM The cross context VM structure.
1629 * @param pDevIns The device owning it.
1630 * @param hMmio2 The MMIO2 region.
1631 * @param offMmio2Page The offset into the region.
1632 */
1633static PPGMPAGE pgmPhysResolveMmio2PageLocked(PVMCC pVM, PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2Page)
1634{
1635 /* Only works if the handle is in the handle table! */
1636 AssertReturn(hMmio2 != 0, NULL);
1637 hMmio2--;
1638
1639 /* Must check the first one for PGMREGMMIO2RANGE_F_FIRST_CHUNK. */
1640 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1641 PPGMREGMMIO2RANGE pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1642 AssertReturn(pCur, NULL);
1643 AssertReturn(pCur->fFlags & PGMREGMMIO2RANGE_F_FIRST_CHUNK, NULL);
1644
1645    /* Loop thru the sub-ranges till we find the one covering offMmio2Page. */
1646 for (;;)
1647 {
1648#ifdef IN_RING3
1649 AssertReturn(pCur->pDevInsR3 == pDevIns, NULL);
1650#else
1651 AssertReturn(pCur->pDevInsR3 == pDevIns->pDevInsForR3, NULL);
1652#endif
1653
1654 /* Does it match the offset? */
1655 if (offMmio2Page < pCur->cbReal)
1656 return &pCur->RamRange.aPages[offMmio2Page >> GUEST_PAGE_SHIFT];
1657
1658 /* Advance if we can. */
1659 AssertReturn(!(pCur->fFlags & PGMREGMMIO2RANGE_F_LAST_CHUNK), NULL);
1660 offMmio2Page -= pCur->cbReal;
1661 hMmio2++;
1662 AssertReturn(hMmio2 < RT_ELEMENTS(pVM->pgm.s.apMmio2RangesR3), NULL);
1663 pCur = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[hMmio2];
1664 AssertReturn(pCur, NULL);
1665 }
1666}
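
/**
 * Worked example of the chunk walk above (the handle and chunk sizes are
 * hypothetical): hMmio2 is a 1-based index into apMmio2Ranges, hence the
 * decrement before indexing.
 *
 * @code
 *      // hMmio2 = 3 (1-based) -> array index 2 after the decrement.
 *      // Chunk sizes: cbReal[2] = 16 MiB, cbReal[3] = 8 MiB.
 *      // Resolving offMmio2Page = 20 MiB:
 *      //   20 MiB >= 16 MiB -> offMmio2Page = 4 MiB, advance to index 3;
 *      //   4 MiB  <  8 MiB  -> return &aPages[_4M >> GUEST_PAGE_SHIFT].
 * @endcode
 */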
1667
1668
1669/**
1670 * Replaces an MMIO page with an MMIO2 page.
1671 *
1672 * This is a worker for IOMMMIOMapMMIO2Page that works in a similar way to
1673 * PGMHandlerPhysicalPageTempOff but for an MMIO page. Since an MMIO page has no
1674 * backing, the caller must provide a replacement page. For various reasons the
1675 * replacement page must be an MMIO2 page.
1676 *
1677 * The caller must do required page table modifications. You can get away
1678 * without making any modifications since it's an MMIO page; the cost is an extra
1679 * \#PF which will then resync the page.
1680 *
1681 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1682 *
1683 * The caller may still get handler callbacks even after this call and must be
1684 * able to deal correctly with such calls. The reason for these callbacks is
1685 * either that we're executing in the recompiler (which doesn't know about this
1686 * arrangement) or that we've been restored from saved state (where we won't
1687 * save the change).
1688 *
1689 * @returns VBox status code.
1690 * @param pVM The cross context VM structure.
1691 * @param GCPhys The start address of the access handler. This
1692 * must be a fully page aligned range or we risk
1693 * messing up other handlers installed for the
1694 * start and end pages.
1695 * @param GCPhysPage The physical address of the page to turn off
1696 * access monitoring for and replace with the MMIO2
1697 * page.
1698 * @param pDevIns The device instance owning @a hMmio2.
1699 * @param hMmio2 Handle to the MMIO2 region containing the page
1700 *                      to remap in the MMIO page at @a GCPhys.
1701 * @param offMmio2PageRemap The offset into @a hMmio2 of the MMIO2 page that
1702 * should serve as backing memory.
1703 *
1704 * @remark May cause a page pool flush if used on a page that is already
1705 * aliased.
1706 *
1707 * @note    This trick only works reliably if the two pages are never ever
1708 * mapped in the same page table. If they are the page pool code will
1709 * be confused should either of them be flushed. See the special case
1710 * of zero page aliasing mentioned in #3170.
1711 *
1712 */
1713VMMDECL(int) PGMHandlerPhysicalPageAliasMmio2(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage,
1714 PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
1715{
1716#ifdef VBOX_WITH_PGM_NEM_MODE
1717 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1718#endif
1719 int rc = PGM_LOCK(pVM);
1720 AssertRCReturn(rc, rc);
1721
1722 /*
1723 * Resolve the MMIO2 reference.
1724 */
1725 PPGMPAGE pPageRemap = pgmPhysResolveMmio2PageLocked(pVM, pDevIns, hMmio2, offMmio2PageRemap);
1726 if (RT_LIKELY(pPageRemap))
1727 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2,
1728 ("hMmio2=%RU64 offMmio2PageRemap=%RGp %R[pgmpage]\n", hMmio2, offMmio2PageRemap, pPageRemap),
1729 PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2);
1730 else
1731 {
1732 PGM_UNLOCK(pVM);
1733 return VERR_OUT_OF_RANGE;
1734 }
1735
1736 /*
1737 * Lookup and validate the range.
1738 */
1739 PPGMPHYSHANDLER pCur;
1740 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1741 if (RT_SUCCESS(rc))
1742 {
1743 Assert(pCur->Key == GCPhys);
1744 if (RT_LIKELY( GCPhysPage >= pCur->Key
1745 && GCPhysPage <= pCur->KeyLast))
1746 {
1747 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1748 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1749 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1750 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1751 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1752
1753 /*
1754 * Validate the page.
1755 */
1756 PPGMPAGE pPage;
1757 PPGMRAMRANGE pRam;
1758 rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
1759 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1760 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1761 {
1762                AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO,
1763                                    ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1764                                    PGM_UNLOCK(pVM), VERR_PGM_PHYS_NOT_MMIO2); /* don't leak the PGM lock */
1765 if (PGM_PAGE_GET_HCPHYS(pPage) == PGM_PAGE_GET_HCPHYS(pPageRemap))
1766 {
1767 PGM_UNLOCK(pVM);
1768 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1769 }
1770
1771 /*
1772 * The page is already mapped as some other page, reset it
1773 * to an MMIO/ZERO page before doing the new mapping.
1774 */
1775                Log(("PGMHandlerPhysicalPageAliasMmio2: GCPhysPage=%RGp (%R[pgmpage]); %RHp -> %RHp\n",
1776 GCPhysPage, pPage, PGM_PAGE_GET_HCPHYS(pPage), PGM_PAGE_GET_HCPHYS(pPageRemap)));
1777 pgmHandlerPhysicalResetAliasedPage(pVM, pPage, GCPhysPage, pRam,
1778 false /*fDoAccounting*/, false /*fFlushIemTlbs*/);
1779 pCur->cAliasedPages--;
1780
1781                /* Since this may be present in the TLB and now be wrong, invalidate
1782                   the guest physical address part of the IEM TLBs.  Note that we do
1783                   this here because the remapping code below deliberately does not. */
1784 IEMTlbInvalidateAllPhysicalAllCpus(pVM, NIL_VMCPUID);
1785 }
1786 Assert(PGM_PAGE_IS_ZERO(pPage));
1787
1788 /*
1789 * Do the actual remapping here.
1790 * This page now serves as an alias for the backing memory specified.
1791 */
1792 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: %RGp (%R[pgmpage]) alias for %RU64/%RGp (%R[pgmpage])\n",
1793 GCPhysPage, pPage, hMmio2, offMmio2PageRemap, pPageRemap ));
1794 PGM_PAGE_SET_HCPHYS(pVM, pPage, PGM_PAGE_GET_HCPHYS(pPageRemap));
1795 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO);
1796 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1797 PGM_PAGE_SET_PAGEID(pVM, pPage, PGM_PAGE_GET_PAGEID(pPageRemap));
1798 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1799 pCur->cAliasedPages++;
1800 Assert(pCur->cAliasedPages <= pCur->cPages);
1801
1802 /*
1803 * Flush its TLB entry.
1804 *
1805 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here to conserve
1806             * all the other IEM TLB entries. When this one is kicked out and
1807 * reloaded, it will be using the MMIO2 alias, but till then we'll
1808 * continue doing MMIO.
1809 */
1810 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1811            /** @todo Do some performance checks of calling
1812             *        IEMTlbInvalidateAllPhysicalAllCpus when in IEM mode, to see if it
1813             *        actually makes sense or not. Screen updates are typically massive
1814             *        and important when this kind of aliasing is used, so it may pay off... */
1815
1816#ifdef VBOX_WITH_NATIVE_NEM
1817 /* Tell NEM about the backing and protection change. */
1818 if (VM_IS_NEM_ENABLED(pVM))
1819 {
1820 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1821 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1822 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1823 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_MMIO2_ALIAS_MMIO),
1824 PGMPAGETYPE_MMIO2_ALIAS_MMIO, &u2State);
1825 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1826 }
1827#endif
1828 LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
1829 PGM_UNLOCK(pVM);
1830 return VINF_SUCCESS;
1831 }
1832
1833 PGM_UNLOCK(pVM);
1834        AssertMsgFailed(("The page %RGp is outside the range %RGp-%RGp\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1835 return VERR_INVALID_PARAMETER;
1836 }
1837
1838 PGM_UNLOCK(pVM);
1839 if (rc == VERR_NOT_FOUND)
1840 {
1841        AssertMsgFailed(("Specified physical handler start address %RGp is invalid.\n", GCPhys));
1842 return VERR_PGM_HANDLER_NOT_FOUND;
1843 }
1844 return rc;
1845}
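
/**
 * Usage sketch for PGMHandlerPhysicalPageAliasMmio2, loosely in the spirit of
 * a caller like IOMMMIOMapMMIO2Page. All addresses, handles and offsets below
 * are hypothetical:
 *
 * @code
 *      // Alias the first page of an MMIO range onto page 2 of an MMIO2
 *      // region owned by the same device instance.
 *      int rc = PGMHandlerPhysicalPageAliasMmio2(pVM,
 *                                                GCPhysMmioStart,      // handler start (page aligned)
 *                                                GCPhysMmioStart,      // the page to replace
 *                                                pDevIns,              // owner of hMmio2
 *                                                hMmio2,               // MMIO2 region handle
 *                                                2 * GUEST_PAGE_SIZE); // offset of the backing page
 *      AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_ALREADY_ALIASED, ("%Rrc\n", rc));
 *      // PGMHandlerPhysicalReset(pVM, GCPhysMmioStart) undoes the aliasing.
 * @endcode
 */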
1846
1847
1848/**
1849 * Replaces an MMIO page with an arbitrary HC page in the shadow page tables.
1850 *
1851 * This differs from PGMHandlerPhysicalPageAliasMmio2 in that the page doesn't
1852 * need to be a known MMIO2 page and that only shadow paging may access the
1853 * page. The latter distinction is important because the only use for this
1854 * feature is for mapping the special APIC access page that VT-x uses to detect
1855 * APIC MMIO operations; the page is shared between all guest CPUs and is
1856 * not actually written to. At least at the moment.
1857 *
1858 * The caller must do required page table modifications. You can get away
1859 * without making any modifications since it's an MMIO page; the cost is an extra
1860 * \#PF which will then resync the page.
1861 *
1862 * Call PGMHandlerPhysicalReset() to restore the MMIO page.
1863 *
1864 *
1865 * @returns VBox status code.
1866 * @param pVM The cross context VM structure.
1867 * @param GCPhys The start address of the access handler. This
1868 * must be a fully page aligned range or we risk
1869 * messing up other handlers installed for the
1870 * start and end pages.
1871 * @param GCPhysPage The physical address of the page to turn off
1872 * access monitoring for.
1873 * @param HCPhysPageRemap The physical address of the HC page that
1874 * serves as backing memory.
1875 *
1876 * @remark May cause a page pool flush if used on a page that is already
1877 * aliased.
1878 */
1879VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
1880{
1881/// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
1882#ifdef VBOX_WITH_PGM_NEM_MODE
1883 AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
1884#endif
1885 int rc = PGM_LOCK(pVM);
1886 AssertRCReturn(rc, rc);
1887
1888 /*
1889 * Lookup and validate the range.
1890 */
1891 PPGMPHYSHANDLER pCur;
1892 rc = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, GCPhys, &pCur);
1893 if (RT_SUCCESS(rc))
1894 {
1895 Assert(pCur->Key == GCPhys);
1896 if (RT_LIKELY( GCPhysPage >= pCur->Key
1897 && GCPhysPage <= pCur->KeyLast))
1898 {
1899 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1900 AssertReturnStmt(pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO, PGM_UNLOCK(pVM), VERR_ACCESS_DENIED);
1901 AssertReturnStmt(!(pCur->Key & GUEST_PAGE_OFFSET_MASK), PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1902 AssertReturnStmt((pCur->KeyLast & GUEST_PAGE_OFFSET_MASK) == GUEST_PAGE_OFFSET_MASK,
1903 PGM_UNLOCK(pVM), VERR_INVALID_PARAMETER);
1904
1905 /*
1906 * Get and validate the pages.
1907 */
1908 PPGMPAGE pPage;
1909 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
1910 AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
1911 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
1912 {
1913 PGM_UNLOCK(pVM);
1914 AssertMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_SPECIAL_ALIAS_MMIO,
1915 ("GCPhysPage=%RGp %R[pgmpage]\n", GCPhysPage, pPage),
1916 VERR_PGM_PHYS_NOT_MMIO2);
1917 return VINF_PGM_HANDLER_ALREADY_ALIASED;
1918 }
1919 Assert(PGM_PAGE_IS_ZERO(pPage));
1920
1921 /*
1922 * Do the actual remapping here.
1923 * This page now serves as an alias for the backing memory
1924 * specified as far as shadow paging is concerned.
1925 */
1926 LogFlow(("PGMHandlerPhysicalPageAliasHC: %RGp (%R[pgmpage]) alias for %RHp\n",
1927 GCPhysPage, pPage, HCPhysPageRemap));
1928 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhysPageRemap);
1929 PGM_PAGE_SET_TYPE(pVM, pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO);
1930 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1931 PGM_PAGE_SET_PAGEID(pVM, pPage, NIL_GMM_PAGEID);
1932 PGM_PAGE_SET_HNDL_PHYS_STATE_ONLY(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED);
1933 pCur->cAliasedPages++;
1934 Assert(pCur->cAliasedPages <= pCur->cPages);
1935
1936 /*
1937 * Flush its TLB entry.
1938 *
1939 * Not calling IEMTlbInvalidateAllPhysicalAllCpus here as special
1940 * aliased MMIO pages are handled like MMIO by the IEM TLB.
1941 */
1942 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
1943
1944#ifdef VBOX_WITH_NATIVE_NEM
1945 /* Tell NEM about the backing and protection change. */
1946 if (VM_IS_NEM_ENABLED(pVM))
1947 {
1948 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhysPage);
1949 uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
1950 NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, pVM->pgm.s.HCPhysZeroPg, PGM_PAGE_GET_HCPHYS(pPage),
1951 PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
1952 pgmPhysPageCalcNemProtection(pPage, PGMPAGETYPE_SPECIAL_ALIAS_MMIO),
1953 PGMPAGETYPE_SPECIAL_ALIAS_MMIO, &u2State);
1954 PGM_PAGE_SET_NEM_STATE(pPage, u2State);
1955 }
1956#endif
1957 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
1958 PGM_UNLOCK(pVM);
1959 return VINF_SUCCESS;
1960 }
1961 PGM_UNLOCK(pVM);
1962        AssertMsgFailed(("The page %RGp is outside the range %RGp-%RGp\n", GCPhysPage, pCur->Key, pCur->KeyLast));
1963 return VERR_INVALID_PARAMETER;
1964 }
1965 PGM_UNLOCK(pVM);
1966
1967 if (rc == VERR_NOT_FOUND)
1968 {
1969        AssertMsgFailed(("Specified physical handler start address %RGp is invalid.\n", GCPhys));
1970 return VERR_PGM_HANDLER_NOT_FOUND;
1971 }
1972 return rc;
1973}
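
/**
 * Usage sketch for PGMHandlerPhysicalPageAliasHC, mirroring the APIC-access
 * page scenario described above (variable names are hypothetical):
 *
 * @code
 *      // GCPhysApicAccess: guest address covered by a PGMPHYSHANDLERKIND_MMIO
 *      // handler; HCPhysApicAccess: host page that should back it as far as
 *      // shadow paging is concerned.
 *      int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhysApicAccess,
 *                                             GCPhysApicAccess, HCPhysApicAccess);
 *      AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_ALREADY_ALIASED, ("%Rrc\n", rc));
 * @endcode
 */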
1974
1975
1976/**
1977 * Checks if a physical range is handled.
1978 *
1979 * @returns boolean
1980 * @param pVM The cross context VM structure.
1981 * @param GCPhys Start physical address earlier passed to PGMR3HandlerPhysicalRegister().
1982 * @remarks Caller must take the PGM lock...
1983 * @thread EMT.
1984 */
1985VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVMCC pVM, RTGCPHYS GCPhys)
1986{
1987 /*
1988 * Find the handler.
1989 */
1990 PGM_LOCK_VOID(pVM);
1991 PPGMPHYSHANDLER pCur;
1992 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
1993 if (RT_SUCCESS(rc))
1994 {
1995#ifdef VBOX_STRICT
1996 Assert(GCPhys >= pCur->Key && GCPhys <= pCur->KeyLast);
1997 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
1998 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
1999 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
2000 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO);
2001#endif
2002 PGM_UNLOCK(pVM);
2003 return true;
2004 }
2005 PGM_UNLOCK(pVM);
2006 return false;
2007}
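
/**
 * Usage sketch for PGMHandlerPhysicalIsRegistered (hypothetical call site):
 *
 * @code
 *      if (PGMHandlerPhysicalIsRegistered(pVM, GCPhys))
 *      {
 *          // Some WRITE/ALL/MMIO handler covers GCPhys; take the slow path.
 *      }
 * @endcode
 */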
2008
2009
2010/**
2011 * Checks if it's a disabled all access handler or write access handler at the
2012 * given address.
2013 *
2014 * @returns true if it's an all access handler, false if it's a write access
2015 * handler.
2016 * @param pVM The cross context VM structure.
2017 * @param GCPhys The address of the page with a disabled handler.
2018 *
2019 * @remarks The caller, PGMR3PhysTlbGCPhys2Ptr, must hold the PGM lock.
2020 */
2021bool pgmHandlerPhysicalIsAll(PVMCC pVM, RTGCPHYS GCPhys)
2022{
2023 PGM_LOCK_VOID(pVM);
2024 PPGMPHYSHANDLER pCur;
2025 int rc = pgmHandlerPhysicalLookup(pVM, GCPhys, &pCur);
2026 AssertRCReturnStmt(rc, PGM_UNLOCK(pVM), true);
2027
2028 /* Only whole pages can be disabled. */
2029 Assert( pCur->Key <= (GCPhys & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK)
2030 && pCur->KeyLast >= (GCPhys | GUEST_PAGE_OFFSET_MASK));
2031
2032 PCPGMPHYSHANDLERTYPEINT const pCurType = PGMPHYSHANDLER_GET_TYPE_NO_NULL(pVM, pCur);
2033 Assert( pCurType->enmKind == PGMPHYSHANDLERKIND_WRITE
2034 || pCurType->enmKind == PGMPHYSHANDLERKIND_ALL
2035 || pCurType->enmKind == PGMPHYSHANDLERKIND_MMIO); /* sanity */
2036 bool const fRet = pCurType->enmKind != PGMPHYSHANDLERKIND_WRITE;
2037 PGM_UNLOCK(pVM);
2038 return fRet;
2039}
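
/**
 * Semantics sketch for pgmHandlerPhysicalIsAll (hypothetical call site): for
 * a page whose handler is currently disabled, the result tells whether the
 * handler would normally intercept all accesses or only writes.
 *
 * @code
 *      bool const fAll = pgmHandlerPhysicalIsAll(pVM, GCPhys);
 *      // fAll == false: PGMPHYSHANDLERKIND_WRITE handler - reads are never
 *      // intercepted, even when the handler is enabled.
 * @endcode
 */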
2040
2041#ifdef VBOX_STRICT
2042
2043/**
2044 * State structure used by the PGMAssertHandlerAndFlagsInSync() function
2045 * and its AVL enumerators.
2046 */
2047typedef struct PGMAHAFIS
2048{
2049 /** The current physical address. */
2050 RTGCPHYS GCPhys;
2051 /** Number of errors. */
2052 unsigned cErrors;
2053 /** Pointer to the VM. */
2054 PVM pVM;
2055} PGMAHAFIS, *PPGMAHAFIS;
2056
2057
2058/**
2059 * Asserts that the handlers+guest-page-tables == ramrange-flags and
2060 * that the physical addresses associated with virtual handlers are correct.
2061 *
2062 * @returns Number of mismatches.
2063 * @param pVM The cross context VM structure.
2064 */
2065VMMDECL(unsigned) PGMAssertHandlerAndFlagsInSync(PVMCC pVM)
2066{
2067 PPGM pPGM = &pVM->pgm.s;
2068 PGMAHAFIS State;
2069 State.GCPhys = 0;
2070 State.cErrors = 0;
2071 State.pVM = pVM;
2072
2073 PGM_LOCK_ASSERT_OWNER(pVM);
2074
2075 /*
2076 * Check the RAM flags against the handlers.
2077 */
2078 PPGMPHYSHANDLERTREE const pPhysHandlerTree = pVM->VMCC_CTX(pgm).s.pPhysHandlerTree;
2079 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext))
2080 {
2081 const uint32_t cPages = pRam->cb >> GUEST_PAGE_SHIFT;
2082 for (uint32_t iPage = 0; iPage < cPages; iPage++)
2083 {
2084 PGMPAGE const *pPage = &pRam->aPages[iPage];
2085 if (PGM_PAGE_HAS_ANY_HANDLERS(pPage))
2086 {
2087 State.GCPhys = pRam->GCPhys + (iPage << GUEST_PAGE_SHIFT);
2088
2089 /*
2090 * Physical first - calculate the state based on the handlers
2091 * active on the page, then compare.
2092 */
2093 if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
2094 {
2095 /* the first */
2096 PPGMPHYSHANDLER pPhys;
2097 int rc = pPhysHandlerTree->lookup(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator, State.GCPhys, &pPhys);
2098 if (rc == VERR_NOT_FOUND)
2099 {
2100 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2101 State.GCPhys, &pPhys);
2102 if (RT_SUCCESS(rc))
2103 {
2104 Assert(pPhys->Key >= State.GCPhys);
2105 if (pPhys->Key > (State.GCPhys + GUEST_PAGE_SIZE - 1))
2106 pPhys = NULL;
2107 }
2108 else
2109 AssertLogRelMsgReturn(rc == VERR_NOT_FOUND, ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
2110 }
2111 else
2112 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc GCPhys=%RGp\n", rc, State.GCPhys), 999);
2113
2114 if (pPhys)
2115 {
2116 PCPGMPHYSHANDLERTYPEINT pPhysType = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys->hType);
2117 unsigned uState = pPhysType->uState;
2118 bool const fNotInHm = pPhysType->fNotInHm; /* whole pages, so no need to accumulate sub-page configs. */
2119
2120 /* more? */
2121 while (pPhys->KeyLast < (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
2122 {
2123 PPGMPHYSHANDLER pPhys2;
2124 rc = pPhysHandlerTree->lookupMatchingOrAbove(&pVM->VMCC_CTX(pgm).s.PhysHandlerAllocator,
2125 pPhys->KeyLast + 1, &pPhys2);
2126 if (rc == VERR_NOT_FOUND)
2127 break;
2128 AssertLogRelMsgReturn(RT_SUCCESS(rc), ("rc=%Rrc KeyLast+1=%RGp\n", rc, pPhys->KeyLast + 1), 999);
2129 if (pPhys2->Key > (State.GCPhys | GUEST_PAGE_OFFSET_MASK))
2130 break;
2131 PCPGMPHYSHANDLERTYPEINT pPhysType2 = pgmHandlerPhysicalTypeHandleToPtr(pVM, pPhys2->hType);
2132 uState = RT_MAX(uState, pPhysType2->uState);
2133 pPhys = pPhys2;
2134 }
2135
2136                    /* Compare. */
2137 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != uState
2138 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED)
2139 {
2140 AssertMsgFailed(("ram range vs phys handler flags mismatch. GCPhys=%RGp state=%d expected=%d %s\n",
2141 State.GCPhys, PGM_PAGE_GET_HNDL_PHYS_STATE(pPage), uState, pPhysType->pszDesc));
2142 State.cErrors++;
2143 }
2144 AssertMsgStmt(PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage) == fNotInHm,
2145 ("ram range vs phys handler flags mismatch. GCPhys=%RGp fNotInHm=%d, %d %s\n",
2146 State.GCPhys, PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage), fNotInHm, pPhysType->pszDesc),
2147 State.cErrors++);
2148 }
2149 else
2150 {
2151 AssertMsgFailed(("ram range vs phys handler mismatch. no handler for GCPhys=%RGp\n", State.GCPhys));
2152 State.cErrors++;
2153 }
2154 }
2155 }
2156 } /* foreach page in ram range. */
2157 } /* foreach ram range. */
2158
2159 /*
2160 * Do the reverse check for physical handlers.
2161 */
2162 /** @todo */
2163
2164 return State.cErrors;
2165}
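
/**
 * Usage sketch for PGMAssertHandlerAndFlagsInSync (strict builds only; the
 * call site is hypothetical):
 *
 * @code
 *      PGM_LOCK_VOID(pVM);
 *      unsigned const cErrors = PGMAssertHandlerAndFlagsInSync(pVM);
 *      PGM_UNLOCK(pVM);
 *      AssertMsg(cErrors == 0, ("%u handler/flag mismatches\n", cErrors));
 * @endcode
 */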
2166
2167#endif /* VBOX_STRICT */
2168