VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 55899

Last change on this file since 55899 was 55899, checked in by vboxsync, 10 years ago

PGM: Added an access origin to memory read & write calls that respects handlers. This will later be passed to the access handler, so that things like the page pool (and potentially others) can query IEM about instruction details when needed.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 150.2 KB
1/* $Id: PGMAllPhys.cpp 55899 2015-05-18 09:47:57Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/trpm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/em.h>
27#ifdef VBOX_WITH_REM
28# include <VBox/vmm/rem.h>
29#endif
30#include "PGMInternal.h"
31#include <VBox/vmm/vm.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50
51
52#ifndef IN_RING3
53
54/**
55 * \#PF Handler callback for physical memory accesses without a RC/R0 handler.
56 * This simply pushes everything to the HC handler.
57 *
58 * @returns VBox status code (appropriate for trap handling and GC return).
59 * @param pVM Pointer to the VM.
60 * @param uErrorCode CPU Error code.
61 * @param pRegFrame Trap register frame.
62 * @param pvFault The fault address (cr2).
63 * @param GCPhysFault The GC physical address corresponding to pvFault.
64 * @param pvUser User argument.
65 */
66VMMDECL(int) pgmPhysPfHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
67 RTGCPHYS GCPhysFault, void *pvUser)
68{
69 NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
70 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
71}
72
73
74/**
75 * \#PF Handler callback for Guest ROM range write access.
76 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
77 *
78 * @returns VBox status code (appropriate for trap handling and GC return).
79 * @param pVM Pointer to the VM.
80 * @param uErrorCode CPU Error code.
81 * @param pRegFrame Trap register frame.
82 * @param pvFault The fault address (cr2).
83 * @param GCPhysFault The GC physical address corresponding to pvFault.
84 * @param pvUser User argument. Pointer to the ROM range structure.
85 */
86DECLEXPORT(int) pgmPhysRomWritePfHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
87 RTGCPHYS GCPhysFault, void *pvUser)
88{
89 int rc;
90 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
91 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
92 PVMCPU pVCpu = VMMGetCpu(pVM);
93 NOREF(uErrorCode); NOREF(pvFault);
94
95 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
96
97 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
98 switch (pRom->aPages[iPage].enmProt)
99 {
100 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
101 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
102 {
103 /*
104 * If it's a simple instruction which doesn't change the cpu state
105 * we will simply skip it. Otherwise we'll have to defer it to REM.
106 */
107 uint32_t cbOp;
108 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
109 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
110 if ( RT_SUCCESS(rc)
111 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
112 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
113 {
114 switch (pDis->bOpCode)
115 {
116 /** @todo Find other instructions we can safely skip, possibly
117 * adding this kind of detection to DIS or EM. */
118 case OP_MOV:
119 pRegFrame->rip += cbOp;
120 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
121 return VINF_SUCCESS;
122 }
123 }
124 break;
125 }
126
127 case PGMROMPROT_READ_RAM_WRITE_RAM:
128 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
129 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
130 AssertRC(rc);
131 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
132
133 case PGMROMPROT_READ_ROM_WRITE_RAM:
134 /* Handle it in ring-3 because it's *way* easier there. */
135 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
136 break;
137
138 default:
139 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
140 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
141 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
142 }
143
144 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
145 return VINF_EM_RAW_EMULATE_INSTR;
146}
147
148#endif /* IN_RING3 */
149
150/**
151 * Invalidates the RAM range TLBs.
152 *
153 * @param pVM Pointer to the VM.
154 */
155void pgmPhysInvalidRamRangeTlbs(PVM pVM)
156{
157 pgmLock(pVM);
158 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
159 {
160 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR;
161 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR;
162 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
163 }
164 pgmUnlock(pVM);
165}
166
167
168/**
169 * Tests if a value of type RTGCPHYS is negative if the type had been signed
170 * instead of unsigned.
171 *
172 * @returns @c true if negative, @c false if positive or zero.
173 * @param a_GCPhys The value to test.
174 * @todo Move me to iprt/types.h.
175 */
176#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
177
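/*
 * Illustrative note, not part of the original source: the slow RAM range
 * tree walkers below combine unsigned wrap-around with this macro.  E.g.
 * with GCPhys = 0x0000000000001000 and pRam->GCPhys = 0x00000000000e0000,
 * the difference "GCPhys - pRam->GCPhys" wraps to 0xfffffffffff21000; its
 * most significant bit is set, so RTGCPHYS_IS_NEGATIVE() returns true and
 * the lookup descends into the left subtree (ranges at lower addresses).
 */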
178
179/**
180 * Slow worker for pgmPhysGetRange.
181 *
182 * @copydoc pgmPhysGetRange
183 */
184PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
185{
186 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
187
188 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
189 while (pRam)
190 {
191 RTGCPHYS off = GCPhys - pRam->GCPhys;
192 if (off < pRam->cb)
193 {
194 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
195 return pRam;
196 }
197 if (RTGCPHYS_IS_NEGATIVE(off))
198 pRam = pRam->CTX_SUFF(pLeft);
199 else
200 pRam = pRam->CTX_SUFF(pRight);
201 }
202 return NULL;
203}
204
205
206/**
207 * Slow worker for pgmPhysGetRangeAtOrAbove.
208 *
209 * @copydoc pgmPhysGetRangeAtOrAbove
210 */
211PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
212{
213 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
214
215 PPGMRAMRANGE pLastLeft = NULL;
216 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
217 while (pRam)
218 {
219 RTGCPHYS off = GCPhys - pRam->GCPhys;
220 if (off < pRam->cb)
221 {
222 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
223 return pRam;
224 }
225 if (RTGCPHYS_IS_NEGATIVE(off))
226 {
227 pLastLeft = pRam;
228 pRam = pRam->CTX_SUFF(pLeft);
229 }
230 else
231 pRam = pRam->CTX_SUFF(pRight);
232 }
233 return pLastLeft;
234}
235
236
237/**
238 * Slow worker for pgmPhysGetPage.
239 *
240 * @copydoc pgmPhysGetPage
241 */
242PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
243{
244 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
245
246 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
247 while (pRam)
248 {
249 RTGCPHYS off = GCPhys - pRam->GCPhys;
250 if (off < pRam->cb)
251 {
252 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
253 return &pRam->aPages[off >> PAGE_SHIFT];
254 }
255
256 if (RTGCPHYS_IS_NEGATIVE(off))
257 pRam = pRam->CTX_SUFF(pLeft);
258 else
259 pRam = pRam->CTX_SUFF(pRight);
260 }
261 return NULL;
262}
263
264
265/**
266 * Slow worker for pgmPhysGetPageEx.
267 *
268 * @copydoc pgmPhysGetPageEx
269 */
270int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
271{
272 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
273
274 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
275 while (pRam)
276 {
277 RTGCPHYS off = GCPhys - pRam->GCPhys;
278 if (off < pRam->cb)
279 {
280 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
281 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
282 return VINF_SUCCESS;
283 }
284
285 if (RTGCPHYS_IS_NEGATIVE(off))
286 pRam = pRam->CTX_SUFF(pLeft);
287 else
288 pRam = pRam->CTX_SUFF(pRight);
289 }
290
291 *ppPage = NULL;
292 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
293}
294
295
296/**
297 * Slow worker for pgmPhysGetPageAndRangeEx.
298 *
299 * @copydoc pgmPhysGetPageAndRangeEx
300 */
301int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
302{
303 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
304
305 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
306 while (pRam)
307 {
308 RTGCPHYS off = GCPhys - pRam->GCPhys;
309 if (off < pRam->cb)
310 {
311 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
312 *ppRam = pRam;
313 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
314 return VINF_SUCCESS;
315 }
316
317 if (RTGCPHYS_IS_NEGATIVE(off))
318 pRam = pRam->CTX_SUFF(pLeft);
319 else
320 pRam = pRam->CTX_SUFF(pRight);
321 }
322
323 *ppRam = NULL;
324 *ppPage = NULL;
325 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
326}
327
328
329/**
330 * Checks if Address Gate 20 is enabled or not.
331 *
332 * @returns true if enabled.
333 * @returns false if disabled.
334 * @param pVCpu Pointer to the VMCPU.
335 */
336VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
337{
338 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
339 return pVCpu->pgm.s.fA20Enabled;
340}
341
342
343/**
344 * Validates a GC physical address.
345 *
346 * @returns true if valid.
347 * @returns false if invalid.
348 * @param pVM Pointer to the VM.
349 * @param GCPhys The physical address to validate.
350 */
351VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
352{
353 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
354 return pPage != NULL;
355}
356
357
358/**
359 * Checks if a GC physical address is a normal page,
360 * i.e. not ROM, MMIO or reserved.
361 *
362 * @returns true if normal.
363 * @returns false if invalid, ROM, MMIO or reserved page.
364 * @param pVM Pointer to the VM.
365 * @param GCPhys The physical address to check.
366 */
367VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
368{
369 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
370 return pPage
371 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
372}
373
374
375/**
376 * Converts a GC physical address to a HC physical address.
377 *
378 * @returns VINF_SUCCESS on success.
379 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
380 * page but has no physical backing.
381 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
382 * GC physical address.
383 *
384 * @param pVM Pointer to the VM.
385 * @param GCPhys The GC physical address to convert.
386 * @param pHCPhys Where to store the HC physical address on success.
387 */
388VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
389{
390 pgmLock(pVM);
391 PPGMPAGE pPage;
392 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
393 if (RT_SUCCESS(rc))
394 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
395 pgmUnlock(pVM);
396 return rc;
397}
398
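/*
 * Illustrative usage sketch for PGMPhysGCPhys2HCPhys, not part of the
 * original source.  Guarded out so it cannot affect the build; the helper
 * name and log statements are made up for the example.
 */
#if 0
static void pgmPhysExampleGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("%RGp is backed by host physical address %RHp\n", GCPhys, HCPhys));
    else
        Log(("%RGp has no physical backing (%Rrc)\n", GCPhys, rc)); /* reserved or invalid */
}
#endif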
399
400/**
401 * Invalidates all page mapping TLBs.
402 *
403 * @param pVM Pointer to the VM.
404 */
405void pgmPhysInvalidatePageMapTLB(PVM pVM)
406{
407 pgmLock(pVM);
408 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
409
410 /* Clear the shared R0/R3 TLB completely. */
411 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
412 {
413 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
414 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
415 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
416 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
417 }
418
419 /** @todo clear the RC TLB whenever we add it. */
420
421 pgmUnlock(pVM);
422}
423
424
425/**
426 * Invalidates a page mapping TLB entry
427 *
428 * @param pVM Pointer to the VM.
429 * @param GCPhys GCPhys entry to flush
430 */
431void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
432{
433 PGM_LOCK_ASSERT_OWNER(pVM);
434
435 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
436
437#ifdef IN_RC
438 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
439 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
440 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
441 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
442 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
443#else
444 /* Clear the shared R0/R3 TLB entry. */
445 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
446 pTlbe->GCPhys = NIL_RTGCPHYS;
447 pTlbe->pPage = 0;
448 pTlbe->pMap = 0;
449 pTlbe->pv = 0;
450#endif
451
452 /** @todo clear the RC TLB whenever we add it. */
453}
454
455/**
456 * Makes sure that there is at least one handy page ready for use.
457 *
458 * This will also take the appropriate actions when reaching water-marks.
459 *
460 * @returns VBox status code.
461 * @retval VINF_SUCCESS on success.
462 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
463 *
464 * @param pVM Pointer to the VM.
465 *
466 * @remarks Must be called from within the PGM critical section. It may
467 * nip back to ring-3/0 in some cases.
468 */
469static int pgmPhysEnsureHandyPage(PVM pVM)
470{
471 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
472
473 /*
474 * Do we need to do anything special?
475 */
476#ifdef IN_RING3
477 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
478#else
479 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
480#endif
481 {
482 /*
483 * Allocate pages only if we're out of them, or in ring-3, almost out.
484 */
485#ifdef IN_RING3
486 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
487#else
488 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
489#endif
490 {
491 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
492 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
493#ifdef IN_RING3
494 int rc = PGMR3PhysAllocateHandyPages(pVM);
495#else
496 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
497#endif
498 if (RT_UNLIKELY(rc != VINF_SUCCESS))
499 {
500 if (RT_FAILURE(rc))
501 return rc;
502 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
503 if (!pVM->pgm.s.cHandyPages)
504 {
505 LogRel(("PGM: no more handy pages!\n"));
506 return VERR_EM_NO_MEMORY;
507 }
508 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
509 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
510#ifdef IN_RING3
511# ifdef VBOX_WITH_REM
512 REMR3NotifyFF(pVM);
513# endif
514#else
515 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
516#endif
517 }
518 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
519 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
520 ("%u\n", pVM->pgm.s.cHandyPages),
521 VERR_PGM_HANDY_PAGE_IPE);
522 }
523 else
524 {
525 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
526 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
527#ifndef IN_RING3
528 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
529 {
530 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
531 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
532 }
533#endif
534 }
535 }
536
537 return VINF_SUCCESS;
538}
539
540
541/**
542 * Replace a zero or shared page with a new page that we can write to.
543 *
544 * @returns The following VBox status codes.
545 * @retval VINF_SUCCESS on success, pPage is modified.
546 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
547 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
548 *
549 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
550 *
551 * @param pVM Pointer to the VM.
552 * @param pPage The physical page tracking structure. This will
553 * be modified on success.
554 * @param GCPhys The address of the page.
555 *
556 * @remarks Must be called from within the PGM critical section. It may
557 * nip back to ring-3/0 in some cases.
558 *
559 * @remarks This function shouldn't really fail; however, if it does
560 * it probably means we've screwed up the size of handy pages and/or
561 * the low-water mark. Or, that some device I/O is causing a lot of
562 * pages to be allocated while the host is in a low-memory
563 * condition. This latter should be handled elsewhere and in a more
564 * controlled manner, it's on the @bugref{3170} todo list...
565 */
566int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
567{
568 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
569
570 /*
571 * Prereqs.
572 */
573 PGM_LOCK_ASSERT_OWNER(pVM);
574 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
575 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
576
577# ifdef PGM_WITH_LARGE_PAGES
578 /*
579 * Try allocate a large page if applicable.
580 */
581 if ( PGMIsUsingLargePages(pVM)
582 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
583 {
584 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
585 PPGMPAGE pBasePage;
586
587 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
588 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
589 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
590 {
591 rc = pgmPhysAllocLargePage(pVM, GCPhys);
592 if (rc == VINF_SUCCESS)
593 return rc;
594 }
595 /* Mark the base as type page table, so we don't check over and over again. */
596 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
597
598 /* fall back to 4KB pages. */
599 }
600# endif
601
602 /*
603 * Flush any shadow page table mappings of the page.
604 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
605 */
606 bool fFlushTLBs = false;
607 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
608 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
609
610 /*
611 * Ensure that we've got a page handy, take it and use it.
612 */
613 int rc2 = pgmPhysEnsureHandyPage(pVM);
614 if (RT_FAILURE(rc2))
615 {
616 if (fFlushTLBs)
617 PGM_INVL_ALL_VCPU_TLBS(pVM);
618 Assert(rc2 == VERR_EM_NO_MEMORY);
619 return rc2;
620 }
621 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
622 PGM_LOCK_ASSERT_OWNER(pVM);
623 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
624 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
625
626 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
627 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
628 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
629 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
630 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
631 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
632
633 /*
634 * There are one or two actions to be taken the next time we allocate handy pages:
635 * - Tell the GMM (global memory manager) what the page is being used for.
636 * (Speeds up replacement operations - sharing and defragmenting.)
637 * - If the current backing is shared, it must be freed.
638 */
639 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
640 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
641
642 void const *pvSharedPage = NULL;
643 if (PGM_PAGE_IS_SHARED(pPage))
644 {
645 /* Mark this shared page for freeing/dereferencing. */
646 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
647 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
648
649 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
650 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
651 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
652 pVM->pgm.s.cSharedPages--;
653
654 /* Grab the address of the page so we can make a copy later on. (safe) */
655 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
656 AssertRC(rc);
657 }
658 else
659 {
660 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
661 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
662 pVM->pgm.s.cZeroPages--;
663 }
664
665 /*
666 * Do the PGMPAGE modifications.
667 */
668 pVM->pgm.s.cPrivatePages++;
669 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
670 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
671 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
672 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
673 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
674
675 /* Copy the shared page contents to the replacement page. */
676 if (pvSharedPage)
677 {
678 /* Get the virtual address of the new page. */
679 PGMPAGEMAPLOCK PgMpLck;
680 void *pvNewPage;
681 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
682 if (RT_SUCCESS(rc))
683 {
684 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
685 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
686 }
687 }
688
689 if ( fFlushTLBs
690 && rc != VINF_PGM_GCPHYS_ALIASED)
691 PGM_INVL_ALL_VCPU_TLBS(pVM);
692 return rc;
693}
694
695#ifdef PGM_WITH_LARGE_PAGES
696
697/**
698 * Replace a 2 MB range of zero pages with new pages that we can write to.
699 *
700 * @returns The following VBox status codes.
701 * @retval VINF_SUCCESS on success, pPage is modified.
702 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
703 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
704 *
705 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
706 *
707 * @param pVM Pointer to the VM.
708 * @param GCPhys The address of the page.
709 *
710 * @remarks Must be called from within the PGM critical section. It may
711 * nip back to ring-3/0 in some cases.
712 */
713int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
714{
715 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
716 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
717
718 /*
719 * Prereqs.
720 */
721 PGM_LOCK_ASSERT_OWNER(pVM);
722 Assert(PGMIsUsingLargePages(pVM));
723
724 PPGMPAGE pFirstPage;
725 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
726 if ( RT_SUCCESS(rc)
727 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
728 {
729 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
730
731 /* Don't call this function for already allocated pages. */
732 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
733
734 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
735 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
736 {
737 /* Lazy approach: check all pages in the 2 MB range.
738 * The whole range must be ram and unallocated. */
739 GCPhys = GCPhysBase;
740 unsigned iPage;
741 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
742 {
743 PPGMPAGE pSubPage;
744 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
745 if ( RT_FAILURE(rc)
746 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
747 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
748 {
749 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
750 break;
751 }
752 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
753 GCPhys += PAGE_SIZE;
754 }
755 if (iPage != _2M/PAGE_SIZE)
756 {
757 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
758 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
759 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
760 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
761 }
762
763 /*
764 * Do the allocation.
765 */
766# ifdef IN_RING3
767 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
768# else
769 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
770# endif
771 if (RT_SUCCESS(rc))
772 {
773 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
774 pVM->pgm.s.cLargePages++;
775 return VINF_SUCCESS;
776 }
777
778 /* If we fail once, it most likely means the host's memory is too
779 fragmented; don't bother trying again. */
780 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
781 PGMSetLargePageUsage(pVM, false);
782 return rc;
783 }
784 }
785 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
786}
787
788
789/**
790 * Recheck the entire 2 MB range to see if we can use it again as a large page.
791 *
792 * @returns The following VBox status codes.
793 * @retval VINF_SUCCESS on success, the large page can be used again
794 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
795 *
796 * @param pVM Pointer to the VM.
797 * @param GCPhys The address of the page.
798 * @param pLargePage Page structure of the base page
799 */
800int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
801{
802 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
803
804 GCPhys &= X86_PDE2M_PAE_PG_MASK;
805
806 /* Check the base page. */
807 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
808 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
809 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
810 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
811 {
812 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
813 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
814 }
815
816 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
817 /* Check all remaining pages in the 2 MB range. */
818 unsigned i;
819 GCPhys += PAGE_SIZE;
820 for (i = 1; i < _2M/PAGE_SIZE; i++)
821 {
822 PPGMPAGE pPage;
823 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
824 AssertRCBreak(rc);
825
826 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
827 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
828 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
829 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
830 {
831 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
832 break;
833 }
834
835 GCPhys += PAGE_SIZE;
836 }
837 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
838
839 if (i == _2M/PAGE_SIZE)
840 {
841 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
842 pVM->pgm.s.cLargePagesDisabled--;
843 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
844 return VINF_SUCCESS;
845 }
846
847 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
848}
849
850#endif /* PGM_WITH_LARGE_PAGES */
851
852/**
853 * Deal with a write monitored page.
854 *
855 * @returns VBox strict status code.
856 *
857 * @param pVM Pointer to the VM.
858 * @param pPage The physical page tracking structure.
859 *
860 * @remarks Called from within the PGM critical section.
861 */
862void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
863{
864 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
865 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
866 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
867 Assert(pVM->pgm.s.cMonitoredPages > 0);
868 pVM->pgm.s.cMonitoredPages--;
869 pVM->pgm.s.cWrittenToPages++;
870}
871
872
873/**
874 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
875 *
876 * @returns VBox strict status code.
877 * @retval VINF_SUCCESS on success.
878 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
879 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
880 *
881 * @param pVM Pointer to the VM.
882 * @param pPage The physical page tracking structure.
883 * @param GCPhys The address of the page.
884 *
885 * @remarks Called from within the PGM critical section.
886 */
887int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
888{
889 PGM_LOCK_ASSERT_OWNER(pVM);
890 switch (PGM_PAGE_GET_STATE(pPage))
891 {
892 case PGM_PAGE_STATE_WRITE_MONITORED:
893 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
894 /* fall thru */
895 default: /* to shut up GCC */
896 case PGM_PAGE_STATE_ALLOCATED:
897 return VINF_SUCCESS;
898
899 /*
900 * Zero pages can be dummy pages for MMIO or reserved memory,
901 * so we need to check the flags before joining cause with
902 * shared page replacement.
903 */
904 case PGM_PAGE_STATE_ZERO:
905 if (PGM_PAGE_IS_MMIO(pPage))
906 return VERR_PGM_PHYS_PAGE_RESERVED;
907 /* fall thru */
908 case PGM_PAGE_STATE_SHARED:
909 return pgmPhysAllocPage(pVM, pPage, GCPhys);
910
911 /* Not allowed to write to ballooned pages. */
912 case PGM_PAGE_STATE_BALLOONED:
913 return VERR_PGM_PHYS_PAGE_BALLOONED;
914 }
915}
916
917
918/**
919 * Internal usage: Map the page specified by its GMM ID.
920 *
921 * This is similar to pgmPhysPageMap
922 *
923 * @returns VBox status code.
924 *
925 * @param pVM Pointer to the VM.
926 * @param idPage The Page ID.
927 * @param HCPhys The physical address (for RC).
928 * @param ppv Where to store the mapping address.
929 *
930 * @remarks Called from within the PGM critical section. The mapping is only
931 * valid while you are inside this section.
932 */
933int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
934{
935 /*
936 * Validation.
937 */
938 PGM_LOCK_ASSERT_OWNER(pVM);
939 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
940 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
941 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
942
943#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
944 /*
945 * Map it by HCPhys.
946 */
947 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
948
949#else
950 /*
951 * Find/make Chunk TLB entry for the mapping chunk.
952 */
953 PPGMCHUNKR3MAP pMap;
954 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
955 if (pTlbe->idChunk == idChunk)
956 {
957 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
958 pMap = pTlbe->pChunk;
959 }
960 else
961 {
962 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
963
964 /*
965 * Find the chunk, map it if necessary.
966 */
967 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
968 if (pMap)
969 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
970 else
971 {
972# ifdef IN_RING0
973 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
974 AssertRCReturn(rc, rc);
975 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
976 Assert(pMap);
977# else
978 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
979 if (RT_FAILURE(rc))
980 return rc;
981# endif
982 }
983
984 /*
985 * Enter it into the Chunk TLB.
986 */
987 pTlbe->idChunk = idChunk;
988 pTlbe->pChunk = pMap;
989 }
990
991 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
992 return VINF_SUCCESS;
993#endif
994}
995
996
997/**
998 * Maps a page into the current virtual address space so it can be accessed.
999 *
1000 * @returns VBox status code.
1001 * @retval VINF_SUCCESS on success.
1002 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1003 *
1004 * @param pVM Pointer to the VM.
1005 * @param pPage The physical page tracking structure.
1006 * @param GCPhys The address of the page.
1007 * @param ppMap Where to store the address of the mapping tracking structure.
1008 * @param ppv Where to store the mapping address of the page. The page
1009 * offset is masked off!
1010 *
1011 * @remarks Called from within the PGM critical section.
1012 */
1013static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1014{
1015 PGM_LOCK_ASSERT_OWNER(pVM);
1016 NOREF(GCPhys);
1017
1018#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1019 /*
1020 * Just some sketchy GC/R0-darwin code.
1021 */
1022 *ppMap = NULL;
1023 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1024 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1025 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1026 return VINF_SUCCESS;
1027
1028#else /* IN_RING3 || IN_RING0 */
1029
1030
1031 /*
1032 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1033 */
1034 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1035 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1036 {
1037 /* Decode the page id to a page in a MMIO2 ram range. */
1038 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1039 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1040 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1041 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1042 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1043 pPage->s.idPage, pPage->s.uStateY),
1044 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1045 PPGMMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1046 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1047 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1048 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1049 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
1050 *ppMap = NULL;
1051 return VINF_SUCCESS;
1052 }
1053
1054 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1055 if (idChunk == NIL_GMM_CHUNKID)
1056 {
1057 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1058 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1059 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1060 {
1061 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1062 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1063 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage)== pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1064 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1065 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1066 }
1067 else
1068 {
1069 static uint8_t s_abPlayItSafe[0x1000*2]; /* I don't dare return the zero page at the moment. */
1070 *ppv = (uint8_t *)((uintptr_t)&s_abPlayItSafe[0x1000] & ~(uintptr_t)0xfff);
1071 }
1072 *ppMap = NULL;
1073 return VINF_SUCCESS;
1074 }
1075
1076 /*
1077 * Find/make Chunk TLB entry for the mapping chunk.
1078 */
1079 PPGMCHUNKR3MAP pMap;
1080 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1081 if (pTlbe->idChunk == idChunk)
1082 {
1083 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1084 pMap = pTlbe->pChunk;
1085 AssertPtr(pMap->pv);
1086 }
1087 else
1088 {
1089 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1090
1091 /*
1092 * Find the chunk, map it if necessary.
1093 */
1094 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1095 if (pMap)
1096 {
1097 AssertPtr(pMap->pv);
1098 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1099 }
1100 else
1101 {
1102#ifdef IN_RING0
1103 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1104 AssertRCReturn(rc, rc);
1105 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1106 Assert(pMap);
1107#else
1108 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1109 if (RT_FAILURE(rc))
1110 return rc;
1111#endif
1112 AssertPtr(pMap->pv);
1113 }
1114
1115 /*
1116 * Enter it into the Chunk TLB.
1117 */
1118 pTlbe->idChunk = idChunk;
1119 pTlbe->pChunk = pMap;
1120 }
1121
1122 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1123 *ppMap = pMap;
1124 return VINF_SUCCESS;
1125#endif /* IN_RING3 */
1126}
1127
1128
1129/**
1130 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1131 *
1132 * This is typically used in paths where we cannot use the TLB methods (like ROM
1133 * pages) or where there is no point in using them since we won't get many hits.
1134 *
1135 * @returns VBox strict status code.
1136 * @retval VINF_SUCCESS on success.
1137 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1138 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1139 *
1140 * @param pVM Pointer to the VM.
1141 * @param pPage The physical page tracking structure.
1142 * @param GCPhys The address of the page.
1143 * @param ppv Where to store the mapping address of the page. The page
1144 * offset is masked off!
1145 *
1146 * @remarks Called from within the PGM critical section. The mapping is only
1147 * valid while you are inside the section.
1148 */
1149int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1150{
1151 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1152 if (RT_SUCCESS(rc))
1153 {
1154 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1155 PPGMPAGEMAP pMapIgnore;
1156 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1157 if (RT_FAILURE(rc2)) /* preserve rc */
1158 rc = rc2;
1159 }
1160 return rc;
1161}
1162
1163
1164/**
1165 * Maps a page into the current virtual address space so it can be accessed for
1166 * both writing and reading.
1167 *
1168 * This is typically used in paths where we cannot use the TLB methods (like ROM
1169 * pages) or where there is no point in using them since we won't get many hits.
1170 *
1171 * @returns VBox status code.
1172 * @retval VINF_SUCCESS on success.
1173 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1174 *
1175 * @param pVM Pointer to the VM.
1176 * @param pPage The physical page tracking structure. Must be in the
1177 * allocated state.
1178 * @param GCPhys The address of the page.
1179 * @param ppv Where to store the mapping address of the page. The page
1180 * offset is masked off!
1181 *
1182 * @remarks Called from within the PGM critical section. The mapping is only
1183 * valid while you are inside the section.
1184 */
1185int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1186{
1187 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1188 PPGMPAGEMAP pMapIgnore;
1189 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1190}
1191
1192
1193/**
1194 * Maps a page into the current virtual address space so it can be accessed for
1195 * reading.
1196 *
1197 * This is typically used in paths where we cannot use the TLB methods (like ROM
1198 * pages) or where there is no point in using them since we won't get many hits.
1199 *
1200 * @returns VBox status code.
1201 * @retval VINF_SUCCESS on success.
1202 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1203 *
1204 * @param pVM Pointer to the VM.
1205 * @param pPage The physical page tracking structure.
1206 * @param GCPhys The address of the page.
1207 * @param ppv Where to store the mapping address of the page. The page
1208 * offset is masked off!
1209 *
1210 * @remarks Called from within the PGM critical section. The mapping is only
1211 * valid while you are inside this section.
1212 */
1213int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1214{
1215 PPGMPAGEMAP pMapIgnore;
1216 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1217}
1218
1219#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1220
1221/**
1222 * Load a guest page into the ring-3 physical TLB.
1223 *
1224 * @returns VBox status code.
1225 * @retval VINF_SUCCESS on success
1226 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1227 * @param pVM Pointer to the VM.
1228 * @param GCPhys The guest physical address in question.
1229 */
1230int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
1231{
1232 PGM_LOCK_ASSERT_OWNER(pVM);
1233
1234 /*
1235 * Find the ram range and page and hand it over to the with-page function.
1236 * 99.8% of requests are expected to be in the first range.
1237 */
1238 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1239 if (!pPage)
1240 {
1241 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1242 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1243 }
1244
1245 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1246}
1247
1248
1249/**
1250 * Load a guest page into the ring-3 physical TLB.
1251 *
1252 * @returns VBox status code.
1253 * @retval VINF_SUCCESS on success
1254 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1255 *
1256 * @param pVM Pointer to the VM.
1257 * @param pPage Pointer to the PGMPAGE structure corresponding to
1258 * GCPhys.
1259 * @param GCPhys The guest physical address in question.
1260 */
1261int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1262{
1263 PGM_LOCK_ASSERT_OWNER(pVM);
1264 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1265
1266 /*
1267 * Map the page.
1268 * Make a special case for the zero page as it is kind of special.
1269 */
1270 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1271 if ( !PGM_PAGE_IS_ZERO(pPage)
1272 && !PGM_PAGE_IS_BALLOONED(pPage))
1273 {
1274 void *pv;
1275 PPGMPAGEMAP pMap;
1276 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1277 if (RT_FAILURE(rc))
1278 return rc;
1279 pTlbe->pMap = pMap;
1280 pTlbe->pv = pv;
1281 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1282 }
1283 else
1284 {
1285 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1286 pTlbe->pMap = NULL;
1287 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1288 }
1289#ifdef PGM_WITH_PHYS_TLB
1290 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1291 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1292 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1293 else
1294 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1295#else
1296 pTlbe->GCPhys = NIL_RTGCPHYS;
1297#endif
1298 pTlbe->pPage = pPage;
1299 return VINF_SUCCESS;
1300}
1301
1302#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1303
1304/**
1305 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1306 * own the PGM lock and therefore not need to lock the mapped page.
1307 *
1308 * @returns VBox status code.
1309 * @retval VINF_SUCCESS on success.
1310 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1311 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1312 *
1313 * @param pVM Pointer to the VM.
1314 * @param GCPhys The guest physical address of the page that should be mapped.
1315 * @param pPage Pointer to the PGMPAGE structure for the page.
1316 * @param ppv Where to store the address corresponding to GCPhys.
1317 *
1318 * @internal
1319 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1320 */
1321int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1322{
1323 int rc;
1324 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1325 PGM_LOCK_ASSERT_OWNER(pVM);
1326 pVM->pgm.s.cDeprecatedPageLocks++;
1327
1328 /*
1329 * Make sure the page is writable.
1330 */
1331 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1332 {
1333 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1334 if (RT_FAILURE(rc))
1335 return rc;
1336 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1337 }
1338 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1339
1340 /*
1341 * Get the mapping address.
1342 */
1343#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1344 void *pv;
1345 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1346 PGM_PAGE_GET_HCPHYS(pPage),
1347 &pv
1348 RTLOG_COMMA_SRC_POS);
1349 if (RT_FAILURE(rc))
1350 return rc;
1351 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1352#else
1353 PPGMPAGEMAPTLBE pTlbe;
1354 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1355 if (RT_FAILURE(rc))
1356 return rc;
1357 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1358#endif
1359 return VINF_SUCCESS;
1360}
1361
1362#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1363
1364/**
1365 * Locks a page mapping for writing.
1366 *
1367 * @param pVM Pointer to the VM.
1368 * @param pPage The page.
1369 * @param pTlbe The mapping TLB entry for the page.
1370 * @param pLock The lock structure (output).
1371 */
1372DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1373{
1374 PPGMPAGEMAP pMap = pTlbe->pMap;
1375 if (pMap)
1376 pMap->cRefs++;
1377
1378 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1379 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1380 {
1381 if (cLocks == 0)
1382 pVM->pgm.s.cWriteLockedPages++;
1383 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1384 }
1385 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1386 {
1387 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1388 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1389 if (pMap)
1390 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1391 }
1392
1393 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1394 pLock->pvMap = pMap;
1395}
1396
1397/**
1398 * Locks a page mapping for reading.
1399 *
1400 * @param pVM Pointer to the VM.
1401 * @param pPage The page.
1402 * @param pTlbe The mapping TLB entry for the page.
1403 * @param pLock The lock structure (output).
1404 */
1405DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1406{
1407 PPGMPAGEMAP pMap = pTlbe->pMap;
1408 if (pMap)
1409 pMap->cRefs++;
1410
1411 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1412 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1413 {
1414 if (cLocks == 0)
1415 pVM->pgm.s.cReadLockedPages++;
1416 PGM_PAGE_INC_READ_LOCKS(pPage);
1417 }
1418 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1419 {
1420 PGM_PAGE_INC_READ_LOCKS(pPage);
1421 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1422 if (pMap)
1423 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1424 }
1425
1426 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1427 pLock->pvMap = pMap;
1428}
1429
1430#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1431
1432
1433/**
1434 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1435 * own the PGM lock and have access to the page structure.
1436 *
1437 * @returns VBox status code.
1438 * @retval VINF_SUCCESS on success.
1439 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1440 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1441 *
1442 * @param pVM Pointer to the VM.
1443 * @param GCPhys The guest physical address of the page that should be mapped.
1444 * @param pPage Pointer to the PGMPAGE structure for the page.
1445 * @param ppv Where to store the address corresponding to GCPhys.
1446 * @param pLock Where to store the lock information that
1447 * pgmPhysReleaseInternalPageMappingLock needs.
1448 *
1449 * @internal
1450 */
1451int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1452{
1453 int rc;
1454 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1455 PGM_LOCK_ASSERT_OWNER(pVM);
1456
1457 /*
1458 * Make sure the page is writable.
1459 */
1460 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1461 {
1462 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1463 if (RT_FAILURE(rc))
1464 return rc;
1465 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1466 }
1467 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1468
1469 /*
1470 * Do the job.
1471 */
1472#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1473 void *pv;
1474 PVMCPU pVCpu = VMMGetCpu(pVM);
1475 rc = pgmRZDynMapHCPageInlined(pVCpu,
1476 PGM_PAGE_GET_HCPHYS(pPage),
1477 &pv
1478 RTLOG_COMMA_SRC_POS);
1479 if (RT_FAILURE(rc))
1480 return rc;
1481 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1482 pLock->pvPage = pv;
1483 pLock->pVCpu = pVCpu;
1484
1485#else
1486 PPGMPAGEMAPTLBE pTlbe;
1487 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1488 if (RT_FAILURE(rc))
1489 return rc;
1490 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1491 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1492#endif
1493 return VINF_SUCCESS;
1494}
1495
1496
1497/**
1498 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1499 * own the PGM lock and have access to the page structure.
1500 *
1501 * @returns VBox status code.
1502 * @retval VINF_SUCCESS on success.
1503 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1504 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1505 *
1506 * @param pVM Pointer to the VM.
1507 * @param GCPhys The guest physical address of the page that should be mapped.
1508 * @param pPage Pointer to the PGMPAGE structure for the page.
1509 * @param ppv Where to store the address corresponding to GCPhys.
1510 * @param pLock Where to store the lock information that
1511 * pgmPhysReleaseInternalPageMappingLock needs.
1512 *
1513 * @internal
1514 */
1515int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1516{
1517 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1518 PGM_LOCK_ASSERT_OWNER(pVM);
1519 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1520
1521 /*
1522 * Do the job.
1523 */
1524#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1525 void *pv;
1526 PVMCPU pVCpu = VMMGetCpu(pVM);
1527 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1528 PGM_PAGE_GET_HCPHYS(pPage),
1529 &pv
1530 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1531 if (RT_FAILURE(rc))
1532 return rc;
1533 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1534 pLock->pvPage = pv;
1535 pLock->pVCpu = pVCpu;
1536
1537#else
1538 PPGMPAGEMAPTLBE pTlbe;
1539 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1540 if (RT_FAILURE(rc))
1541 return rc;
1542 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1543 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1544#endif
1545 return VINF_SUCCESS;
1546}
1547
1548
1549/**
1550 * Requests the mapping of a guest page into the current context.
1551 *
1552 * This API should only be used for very short term, as it will consume scarce
1553 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1554 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1555 *
1556 * This API will assume your intention is to write to the page, and will
1557 * therefore replace shared and zero pages. If you do not intend to modify
1558 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1559 *
1560 * @returns VBox status code.
1561 * @retval VINF_SUCCESS on success.
1562 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1563 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1564 *
1565 * @param pVM Pointer to the VM.
1566 * @param GCPhys The guest physical address of the page that should be
1567 * mapped.
1568 * @param ppv Where to store the address corresponding to GCPhys.
1569 * @param pLock Where to store the lock information that
1570 * PGMPhysReleasePageMappingLock needs.
1571 *
1572 * @remarks The caller is responsible for dealing with access handlers.
1573 * @todo Add an informational return code for pages with access handlers?
1574 *
1575 * @remark Avoid calling this API from within critical sections (other than
1576 * the PGM one) because of the deadlock risk. External threads may
1577 * need to delegate jobs to the EMTs.
1578 * @remarks Only one page is mapped! Make no assumption about what's after or
1579 * before the returned page!
1580 * @thread Any thread.
1581 */
1582VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1583{
1584 int rc = pgmLock(pVM);
1585 AssertRCReturn(rc, rc);
1586
1587#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1588 /*
1589 * Find the page and make sure it's writable.
1590 */
1591 PPGMPAGE pPage;
1592 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1593 if (RT_SUCCESS(rc))
1594 {
1595 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1596 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1597 if (RT_SUCCESS(rc))
1598 {
1599 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1600
1601 PVMCPU pVCpu = VMMGetCpu(pVM);
1602 void *pv;
1603 rc = pgmRZDynMapHCPageInlined(pVCpu,
1604 PGM_PAGE_GET_HCPHYS(pPage),
1605 &pv
1606 RTLOG_COMMA_SRC_POS);
1607 if (RT_SUCCESS(rc))
1608 {
1609 AssertRCSuccess(rc);
1610
1611 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1612 *ppv = pv;
1613 pLock->pvPage = pv;
1614 pLock->pVCpu = pVCpu;
1615 }
1616 }
1617 }
1618
1619#else /* IN_RING3 || IN_RING0 */
1620 /*
1621 * Query the Physical TLB entry for the page (may fail).
1622 */
1623 PPGMPAGEMAPTLBE pTlbe;
1624 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1625 if (RT_SUCCESS(rc))
1626 {
1627 /*
1628 * If the page is shared, the zero page, or being write monitored
1629 * it must be converted to a page that's writable if possible.
1630 */
1631 PPGMPAGE pPage = pTlbe->pPage;
1632 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1633 {
1634 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1635 if (RT_SUCCESS(rc))
1636 {
1637 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1638 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1639 }
1640 }
1641 if (RT_SUCCESS(rc))
1642 {
1643 /*
1644 * Now, just perform the locking and calculate the return address.
1645 */
1646 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1647 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1648 }
1649 }
1650
1651#endif /* IN_RING3 || IN_RING0 */
1652 pgmUnlock(pVM);
1653 return rc;
1654}
1655
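/*
 * Illustrative usage sketch for PGMPhysGCPhys2CCPtr, not part of the original
 * source: the map / modify / release pattern described above, with error
 * handling reduced to the bare minimum.  Guarded out so it cannot affect the
 * build; the helper name is made up for the example.
 */
#if 0
static void pgmPhysExampleZeroGuestPage(PVM pVM, RTGCPHYS GCPhys)
{
    PGMPAGEMAPLOCK Lock;
    void          *pv;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        /* Only one page is mapped and the returned pointer includes the page
           offset of GCPhys, so stay within the remainder of that page. */
        memset(pv, 0, PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK));
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP as the docs require */
    }
}
#endif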
1656
1657/**
1658 * Requests the mapping of a guest page into the current context.
1659 *
1660 * This API should only be used for very short term, as it will consume scarce
1661 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1662 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1663 *
1664 * @returns VBox status code.
1665 * @retval VINF_SUCCESS on success.
1666 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1667 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1668 *
1669 * @param pVM Pointer to the VM.
1670 * @param GCPhys The guest physical address of the page that should be
1671 * mapped.
1672 * @param ppv Where to store the address corresponding to GCPhys.
1673 * @param pLock Where to store the lock information that
1674 * PGMPhysReleasePageMappingLock needs.
1675 *
1676 * @remarks The caller is responsible for dealing with access handlers.
1677 * @todo Add an informational return code for pages with access handlers?
1678 *
1679 * @remarks Avoid calling this API from within critical sections (other than
1680 * the PGM one) because of the deadlock risk.
1681 * @remarks Only one page is mapped! Make no assumption about what's after or
1682 * before the returned page!
1683 * @thread Any thread.
1684 */
1685VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1686{
1687 int rc = pgmLock(pVM);
1688 AssertRCReturn(rc, rc);
1689
1690#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1691 /*
1692 * Find the page and make sure it's readable.
1693 */
1694 PPGMPAGE pPage;
1695 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1696 if (RT_SUCCESS(rc))
1697 {
1698 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1699 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1700 else
1701 {
1702 PVMCPU pVCpu = VMMGetCpu(pVM);
1703 void *pv;
1704 rc = pgmRZDynMapHCPageInlined(pVCpu,
1705 PGM_PAGE_GET_HCPHYS(pPage),
1706 &pv
1707 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1708 if (RT_SUCCESS(rc))
1709 {
1710 AssertRCSuccess(rc);
1711
1712 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1713 *ppv = pv;
1714 pLock->pvPage = pv;
1715 pLock->pVCpu = pVCpu;
1716 }
1717 }
1718 }
1719
1720#else /* IN_RING3 || IN_RING0 */
1721 /*
1722 * Query the Physical TLB entry for the page (may fail).
1723 */
1724 PPGMPAGEMAPTLBE pTlbe;
1725 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1726 if (RT_SUCCESS(rc))
1727 {
1728 /* MMIO pages don't have any readable backing. */
1729 PPGMPAGE pPage = pTlbe->pPage;
1730 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1731 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1732 else
1733 {
1734 /*
1735 * Now, just perform the locking and calculate the return address.
1736 */
1737 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1738 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1739 }
1740 }
1741
1742#endif /* IN_RING3 || IN_RING0 */
1743 pgmUnlock(pVM);
1744 return rc;
1745}
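
/*
 * Minimal usage sketch for the read-only variant (placeholder names); note the
 * const pointer and that the lock is released with the very same API:
 *
 *      void const    *pv;
 *      PGMPAGEMAPLOCK Lock;
 *      int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(pvDst, pv, cbToRead);               // read only, never write thru pv
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 */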
1746
1747
1748/**
1749 * Requests the mapping of a guest page given by virtual address into the current context.
1750 *
1751 * This API should only be used for very short term mappings, as it will consume
1752 * scarce resources (R0 and GC) in the mapping cache. When you're done
1753 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1754 *
1755 * This API will assume your intention is to write to the page, and will
1756 * therefore replace shared and zero pages. If you do not intend to modify
1757 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1758 *
1759 * @returns VBox status code.
1760 * @retval VINF_SUCCESS on success.
1761 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1762 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1763 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1764 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1765 *
1766 * @param pVCpu Pointer to the VMCPU.
1767 * @param GCPtr The guest virtual address of the page that should be mapped.
1768 * @param ppv Where to store the address corresponding to GCPtr.
1769 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1770 *
1771 * @remark Avoid calling this API from within critical sections (other than
1772 * the PGM one) because of the deadlock risk.
1773 * @thread EMT
1774 */
1775VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1776{
1777 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1778 RTGCPHYS GCPhys;
1779 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1780 if (RT_SUCCESS(rc))
1781 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1782 return rc;
1783}
1784
1785
1786/**
1787 * Requests the mapping of a guest page given by virtual address into the current context.
1788 *
1789 * This API should only be used for very short term mappings, as it will consume
1790 * scarce resources (R0 and GC) in the mapping cache. When you're done
1791 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1792 *
1793 * @returns VBox status code.
1794 * @retval VINF_SUCCESS on success.
1795 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1796 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1797 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1798 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1799 *
1800 * @param pVCpu Pointer to the VMCPU.
1801 * @param GCPtr The guest virtual address of the page that should be mapped.
1802 * @param ppv Where to store the address corresponding to GCPtr.
1803 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1804 *
1805 * @remark Avoid calling this API from within critical sections (other than
1806 * the PGM one) because of the deadlock risk.
1807 * @thread EMT
1808 */
1809VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1810{
1811 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1812 RTGCPHYS GCPhys;
1813 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1814 if (RT_SUCCESS(rc))
1815 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1816 return rc;
1817}
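
/*
 * Sketch for the two virtual-address variants above (EMT only, placeholder
 * names): the guest pointer is first translated with PGMPhysGCPtr2GCPhys, so
 * paging problems surface as VERR_PAGE_TABLE_NOT_PRESENT / VERR_PAGE_NOT_PRESENT:
 *
 *      void const    *pv;
 *      PGMPAGEMAPLOCK Lock;
 *      int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrGuest, &pv, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... inspect up to the end of the page ...
 *          PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
 *      }
 */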
1818
1819
1820/**
1821 * Release the mapping of a guest page.
1822 *
1823 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1824 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1825 *
1826 * @param pVM Pointer to the VM.
1827 * @param pLock The lock structure initialized by the mapping function.
1828 */
1829VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1830{
1831#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1832 Assert(pLock->pvPage != NULL);
1833 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1834 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1835 pLock->pVCpu = NULL;
1836 pLock->pvPage = NULL;
1837
1838#else
1839 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1840 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1841 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1842
1843 pLock->uPageAndType = 0;
1844 pLock->pvMap = NULL;
1845
1846 pgmLock(pVM);
1847 if (fWriteLock)
1848 {
1849 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1850 Assert(cLocks > 0);
1851 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1852 {
1853 if (cLocks == 1)
1854 {
1855 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1856 pVM->pgm.s.cWriteLockedPages--;
1857 }
1858 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1859 }
1860
1861 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1862 {
1863 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1864 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1865 Assert(pVM->pgm.s.cMonitoredPages > 0);
1866 pVM->pgm.s.cMonitoredPages--;
1867 pVM->pgm.s.cWrittenToPages++;
1868 }
1869 }
1870 else
1871 {
1872 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1873 Assert(cLocks > 0);
1874 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1875 {
1876 if (cLocks == 1)
1877 {
1878 Assert(pVM->pgm.s.cReadLockedPages > 0);
1879 pVM->pgm.s.cReadLockedPages--;
1880 }
1881 PGM_PAGE_DEC_READ_LOCKS(pPage);
1882 }
1883 }
1884
1885 if (pMap)
1886 {
1887 Assert(pMap->cRefs >= 1);
1888 pMap->cRefs--;
1889 }
1890 pgmUnlock(pVM);
1891#endif /* IN_RING3 || IN_RING0 */
1892}
1893
1894
1895/**
1896 * Release the internal mapping of a guest page.
1897 *
1898 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
1899 * pgmPhysGCPhys2CCPtrInternalReadOnly.
1900 *
1901 * @param pVM Pointer to the VM.
1902 * @param pLock The lock structure initialized by the mapping function.
1903 *
1904 * @remarks Caller must hold the PGM lock.
1905 */
1906void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1907{
1908 PGM_LOCK_ASSERT_OWNER(pVM);
1909 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
1910}
1911
1912
1913/**
1914 * Converts a GC physical address to a HC ring-3 pointer.
1915 *
1916 * @returns VINF_SUCCESS on success.
1917 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1918 * page but has no physical backing.
1919 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1920 * GC physical address.
1921 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1922 * a dynamic ram chunk boundary
1923 *
1924 * @param pVM Pointer to the VM.
1925 * @param GCPhys The GC physical address to convert.
1926 * @param pR3Ptr Where to store the R3 pointer on success.
1927 *
1928 * @deprecated Avoid when possible!
1929 */
1930int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1931{
1932/** @todo this is kind of hacky and needs some more work. */
1933#ifndef DEBUG_sandervl
1934 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1935#endif
1936
1937 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
1938#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1939 NOREF(pVM); NOREF(pR3Ptr);
1940 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1941#else
1942 pgmLock(pVM);
1943
1944 PPGMRAMRANGE pRam;
1945 PPGMPAGE pPage;
1946 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1947 if (RT_SUCCESS(rc))
1948 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1949
1950 pgmUnlock(pVM);
1951 Assert(rc <= VINF_SUCCESS);
1952 return rc;
1953#endif
1954}
1955
1956#if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
1957
1958/**
1959 * Maps and locks a guest CR3 or PD (PAE) page.
1960 *
1961 * @returns VINF_SUCCESS on success.
1962 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1963 * page but has no physical backing.
1964 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1965 * GC physical address.
1966 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1967 * a dynamic ram chunk boundary
1968 *
1969 * @param pVM Pointer to the VM.
1970 * @param GCPhys The GC physical address to convert.
1971 * @param pR3Ptr Where to store the R3 pointer on success. This may or
1972 * may not be valid in ring-0 depending on the
1973 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
1974 *
1975 * @remarks The caller must own the PGM lock.
1976 */
1977int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1978{
1979
1980 PPGMRAMRANGE pRam;
1981 PPGMPAGE pPage;
1982 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1983 if (RT_SUCCESS(rc))
1984 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1985 Assert(rc <= VINF_SUCCESS);
1986 return rc;
1987}
1988
1989
1994
1995#endif
1996
1997/**
1998 * Converts a guest pointer to a GC physical address.
1999 *
2000 * This uses the current CR3/CR0/CR4 of the guest.
2001 *
2002 * @returns VBox status code.
2003 * @param pVCpu Pointer to the VMCPU.
2004 * @param GCPtr The guest pointer to convert.
2005 * @param pGCPhys Where to store the GC physical address.
2006 */
2007VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2008{
2009 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
2010 if (pGCPhys && RT_SUCCESS(rc))
2011 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2012 return rc;
2013}
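
/*
 * Sketch (placeholder names): translating a guest flat address using the
 * current guest paging mode; the page offset bits of GCPtr are preserved in
 * the result:
 *
 *      RTGCPHYS GCPhys;
 *      int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtrGuest, &GCPhys);
 *      if (RT_SUCCESS(rc))
 *          Log(("%RGv maps to %RGp\n", GCPtrGuest, GCPhys));
 */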
2014
2015
2016/**
2017 * Converts a guest pointer to a HC physical address.
2018 *
2019 * This uses the current CR3/CR0/CR4 of the guest.
2020 *
2021 * @returns VBox status code.
2022 * @param pVCpu Pointer to the VMCPU.
2023 * @param GCPtr The guest pointer to convert.
2024 * @param pHCPhys Where to store the HC physical address.
2025 */
2026VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2027{
2028 PVM pVM = pVCpu->CTX_SUFF(pVM);
2029 RTGCPHYS GCPhys;
2030 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2031 if (RT_SUCCESS(rc))
2032 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2033 return rc;
2034}
2035
2036
2037
2038#undef LOG_GROUP
2039#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2040
2041
2042#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2043/**
2044 * Cache PGMPhys memory access
2045 *
2046 * @param pVM Pointer to the VM.
2047 * @param pCache Cache structure pointer
2048 * @param GCPhys GC physical address
2049 * @param pbR3 Ring-3 pointer corresponding to the physical page
2050 *
2051 * @thread EMT.
2052 */
2053static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2054{
2055 uint32_t iCacheIndex;
2056
2057 Assert(VM_IS_EMT(pVM));
2058
2059 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2060 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2061
2062 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2063
2064 ASMBitSet(&pCache->aEntries, iCacheIndex);
2065
2066 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2067 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2068}
2069#endif /* IN_RING3 */
2070
2071
2072/**
2073 * Deals with reading from a page with one or more ALL access handlers.
2074 *
2075 * @returns VBox status code. Can be ignored in ring-3.
2076 * @retval VINF_SUCCESS.
2077 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2078 *
2079 * @param pVM Pointer to the VM.
2080 * @param pPage The page descriptor.
2081 * @param GCPhys The physical address to start reading at.
2082 * @param pvBuf Where to put the bits we read.
2083 * @param cb How much to read - less or equal to a page.
2084 */
2085static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
2086{
2087 /*
2088 * The most frequent accesses here are MMIO and shadowed ROM.
2089 * The current code ASSUMES all these access handlers cover full pages!
2090 */
2091
2092 /*
2093 * Whatever we do we need the source page, map it first.
2094 */
2095 PGMPAGEMAPLOCK PgMpLck;
2096 const void *pvSrc = NULL;
2097 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2098 if (RT_FAILURE(rc))
2099 {
2100 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2101 GCPhys, pPage, rc));
2102 memset(pvBuf, 0xff, cb);
2103 return VINF_SUCCESS;
2104 }
2105 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2106
2107 /*
2108 * Deal with any physical handlers.
2109 */
2110#ifdef IN_RING3
2111 PPGMPHYSHANDLER pPhys = NULL;
2112#endif
2113 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2114 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2115 {
2116#ifdef IN_RING3
2117 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2118 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2119 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2120 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2121 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2122
2123 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
2124 void *pvUser = pPhys->CTX_SUFF(pvUser);
2125
2126 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2127 STAM_PROFILE_START(&pPhys->Stat, h);
2128 PGM_LOCK_ASSERT_OWNER(pVM);
2129 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2130 pgmUnlock(pVM);
2131 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
2132 pgmLock(pVM);
2133# ifdef VBOX_WITH_STATISTICS
2134 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2135 if (pPhys)
2136 STAM_PROFILE_STOP(&pPhys->Stat, h);
2137# else
2138 pPhys = NULL; /* might not be valid anymore. */
2139# endif
2140 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
2141#else
2142 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2143 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2144 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2145 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2146#endif
2147 }
2148
2149 /*
2150 * Deal with any virtual handlers.
2151 */
2152 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
2153 {
2154 unsigned iPage;
2155 PPGMVIRTHANDLER pVirt;
2156
2157 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
2158 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
2159 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
2160 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2161 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
2162
2163 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2164#ifdef IN_RING3
2165 if (pVirtType->pfnHandlerR3)
2166 {
2167 if (!pPhys)
2168 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2169 else
2170 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
2171 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2172 + (iPage << PAGE_SHIFT)
2173 + (GCPhys & PAGE_OFFSET_MASK);
2174
2175 STAM_PROFILE_START(&pVirt->Stat, h);
2176 rc2 = pVirtType->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ,
2177 pVirt->CTX_SUFF(pvUser));
2178 STAM_PROFILE_STOP(&pVirt->Stat, h);
2179 if (rc2 == VINF_SUCCESS)
2180 rc = VINF_SUCCESS;
2181 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2182 }
2183 else
2184 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2185#else
2186 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2187 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2188 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2189 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2190#endif
2191 }
2192
2193 /*
2194 * Take the default action.
2195 */
2196 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2197 memcpy(pvBuf, pvSrc, cb);
2198 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2199 return rc;
2200}
2201
2202
2203/**
2204 * Read physical memory.
2205 *
2206 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2207 * want to ignore those.
2208 *
2209 * @returns VBox status code. Can be ignored in ring-3.
2210 * @retval VINF_SUCCESS.
2211 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2212 *
2213 * @param pVM Pointer to the VM.
2214 * @param GCPhys Physical address start reading from.
2215 * @param pvBuf Where to put the read bits.
2216 * @param cbRead How many bytes to read.
2217 * @param enmOrigin The origin of this call.
2218 */
2219VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2220{
2221 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2222 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2223
2224 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2225 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2226
2227 pgmLock(pVM);
2228
2229 /*
2230 * Copy loop on ram ranges.
2231 */
2232 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2233 for (;;)
2234 {
2235 /* Inside range or not? */
2236 if (pRam && GCPhys >= pRam->GCPhys)
2237 {
2238 /*
2239 * Must work our way thru this page by page.
2240 */
2241 RTGCPHYS off = GCPhys - pRam->GCPhys;
2242 while (off < pRam->cb)
2243 {
2244 unsigned iPage = off >> PAGE_SHIFT;
2245 PPGMPAGE pPage = &pRam->aPages[iPage];
2246 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2247 if (cb > cbRead)
2248 cb = cbRead;
2249
2250 /*
2251 * Any ALL access handlers?
2252 */
2253 if (RT_UNLIKELY( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2254 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage)))
2255 {
2256 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2257 if (RT_FAILURE(rc))
2258 {
2259 pgmUnlock(pVM);
2260 return rc;
2261 }
2262 }
2263 else
2264 {
2265 /*
2266 * Get the pointer to the page.
2267 */
2268 PGMPAGEMAPLOCK PgMpLck;
2269 const void *pvSrc;
2270 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2271 if (RT_SUCCESS(rc))
2272 {
2273 memcpy(pvBuf, pvSrc, cb);
2274 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2275 }
2276 else
2277 {
2278 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2279 pRam->GCPhys + off, pPage, rc));
2280 memset(pvBuf, 0xff, cb);
2281 }
2282 }
2283
2284 /* next page */
2285 if (cb >= cbRead)
2286 {
2287 pgmUnlock(pVM);
2288 return VINF_SUCCESS;
2289 }
2290 cbRead -= cb;
2291 off += cb;
2292 pvBuf = (char *)pvBuf + cb;
2293 } /* walk pages in ram range. */
2294
2295 GCPhys = pRam->GCPhysLast + 1;
2296 }
2297 else
2298 {
2299 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2300
2301 /*
2302 * Unassigned address space.
2303 */
2304 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2305 if (cb >= cbRead)
2306 {
2307 memset(pvBuf, 0xff, cbRead);
2308 break;
2309 }
2310 memset(pvBuf, 0xff, cb);
2311
2312 cbRead -= cb;
2313 pvBuf = (char *)pvBuf + cb;
2314 GCPhys += cb;
2315 }
2316
2317 /* Advance range if necessary. */
2318 while (pRam && GCPhys > pRam->GCPhysLast)
2319 pRam = pRam->CTX_SUFF(pNext);
2320 } /* Ram range walk */
2321
2322 pgmUnlock(pVM);
2323 return VINF_SUCCESS;
2324}
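
/*
 * Sketch of a handler-respecting read (placeholder names; enmOrigin is
 * whatever PGMACCESSORIGIN value describes the caller):
 *
 *      uint32_t u32 = 0;
 *      int rc = PGMPhysRead(pVM, GCPhys, &u32, sizeof(u32), enmOrigin);
 *      if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)   // R0/RC only, never in ring-3
 *          return rc;                            // let the caller retry in ring-3
 *      AssertRC(rc);
 */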
2325
2326
2327/**
2328 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2329 *
2330 * @returns VBox status code. Can be ignored in ring-3.
2331 * @retval VINF_SUCCESS.
2332 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2333 *
2334 * @param pVM Pointer to the VM.
2335 * @param pPage The page descriptor.
2336 * @param GCPhys The physical address to start writing at.
2337 * @param pvBuf What to write.
2338 * @param cbWrite How much to write - less or equal to a page.
2339 */
2340static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
2341{
2342 PGMPAGEMAPLOCK PgMpLck;
2343 void *pvDst = NULL;
2344 int rc;
2345
2346 /*
2347 * Give priority to physical handlers (like #PF does).
2348 *
2349 * Hope for a lonely physical handler first that covers the whole
2350 * write area. This should be a pretty frequent case with MMIO and
2351 * the heavy usage of full page handlers in the page pool.
2352 */
2353 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2354 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage) /* screw virtual handlers on MMIO pages */)
2355 {
2356 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2357 if (pCur)
2358 {
2359 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2360
2361 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2362 if (cbRange > cbWrite)
2363 cbRange = cbWrite;
2364
2365#ifndef IN_RING3
2366 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2367 NOREF(cbRange);
2368 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2369 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2370
2371#else /* IN_RING3 */
2372 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler));
2373 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2374 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2375 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2376 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2377 else
2378 rc = VINF_SUCCESS;
2379 if (RT_SUCCESS(rc))
2380 {
2381 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
2382 void *pvUser = pCur->CTX_SUFF(pvUser);
2383
2384 STAM_PROFILE_START(&pCur->Stat, h);
2385 PGM_LOCK_ASSERT_OWNER(pVM);
2386 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2387 pgmUnlock(pVM);
2388 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2389 pgmLock(pVM);
2390# ifdef VBOX_WITH_STATISTICS
2391 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2392 if (pCur)
2393 STAM_PROFILE_STOP(&pCur->Stat, h);
2394# else
2395 pCur = NULL; /* might not be valid anymore. */
2396# endif
2397 if (rc == VINF_PGM_HANDLER_DO_DEFAULT && pvDst)
2398 {
2399 if (pvDst)
2400 memcpy(pvDst, pvBuf, cbRange);
2401 }
2402 else
2403 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT,
2404 ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur ? pCur->pszDesc : ""));
2405 }
2406 else
2407 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2408 GCPhys, pPage, rc), rc);
2409 if (RT_LIKELY(cbRange == cbWrite))
2410 {
2411 if (pvDst)
2412 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2413 return VINF_SUCCESS;
2414 }
2415
2416 /* more fun to be had below */
2417 cbWrite -= cbRange;
2418 GCPhys += cbRange;
2419 pvBuf = (uint8_t *)pvBuf + cbRange;
2420 pvDst = (uint8_t *)pvDst + cbRange;
2421#endif /* IN_RING3 */
2422 }
2423 /* else: the handler is somewhere else in the page, deal with it below. */
2424 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2425 }
2426 /*
2427 * A virtual handler without any interfering physical handlers.
2428 * Hopefully it'll cover the whole write.
2429 */
2430 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2431 {
2432 unsigned iPage;
2433 PPGMVIRTHANDLER pCur;
2434 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2435 if (RT_SUCCESS(rc))
2436 {
2437 PPGMVIRTHANDLERTYPEINT pCurType = PGMVIRTANDLER_GET_TYPE(pVM, pCur);
2438
2439 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2440 if (cbRange > cbWrite)
2441 cbRange = cbWrite;
2442
2443#ifndef IN_RING3
2444 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2445 NOREF(cbRange);
2446 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2447 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2448
2449#else /* IN_RING3 */
2450
2451 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2452 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2453 if (RT_SUCCESS(rc))
2454 {
2455 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2456 if (pCurType->pfnHandlerR3)
2457 {
2458 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2459 + (iPage << PAGE_SHIFT)
2460 + (GCPhys & PAGE_OFFSET_MASK);
2461
2462 STAM_PROFILE_START(&pCur->Stat, h);
2463 rc = pCurType->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2464 pCur->CTX_SUFF(pvUser));
2465 STAM_PROFILE_STOP(&pCur->Stat, h);
2466 }
2467 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2468 memcpy(pvDst, pvBuf, cbRange);
2469 else
2470 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2471 }
2472 else
2473 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2474 GCPhys, pPage, rc), rc);
2475 if (RT_LIKELY(cbRange == cbWrite))
2476 {
2477 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2478 return VINF_SUCCESS;
2479 }
2480
2481 /* more fun to be had below */
2482 cbWrite -= cbRange;
2483 GCPhys += cbRange;
2484 pvBuf = (uint8_t *)pvBuf + cbRange;
2485 pvDst = (uint8_t *)pvDst + cbRange;
2486#endif
2487 }
2488 /* else: the handler is somewhere else in the page, deal with it below. */
2489 }
2490
2491 /*
2492 * Deal with all the odd ends.
2493 */
2494
2495 /* We need a writable destination page. */
2496 if (!pvDst)
2497 {
2498 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2499 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2500 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2501 GCPhys, pPage, rc), rc);
2502 }
2503
2504 /* The loop state (big + ugly). */
2505 unsigned iVirtPage = 0;
2506 PPGMVIRTHANDLER pVirt = NULL;
2507 uint32_t offVirt = PAGE_SIZE;
2508 uint32_t offVirtLast = PAGE_SIZE;
2509 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2510
2511 PPGMPHYSHANDLER pPhys = NULL;
2512 uint32_t offPhys = PAGE_SIZE;
2513 uint32_t offPhysLast = PAGE_SIZE;
2514 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2515
2516 /* The loop. */
2517 for (;;)
2518 {
2519 /*
2520 * Find the closest handler at or above GCPhys.
2521 */
2522 if (fMoreVirt && !pVirt)
2523 {
2524 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2525 if (RT_SUCCESS(rc))
2526 {
2527 offVirt = 0;
2528 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2529 }
2530 else
2531 {
2532 PPGMPHYS2VIRTHANDLER pVirtPhys;
2533 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2534 GCPhys, true /* fAbove */);
2535 if ( pVirtPhys
2536 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2537 {
2538 /* ASSUME that pVirtPhys only covers one page. */
2539 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2540 Assert(pVirtPhys->Core.Key > GCPhys);
2541
2542 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2543 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2544 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2545 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2546 }
2547 else
2548 {
2549 pVirt = NULL;
2550 fMoreVirt = false;
2551 offVirt = offVirtLast = PAGE_SIZE;
2552 }
2553 }
2554 }
2555
2556 if (fMorePhys && !pPhys)
2557 {
2558 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2559 if (pPhys)
2560 {
2561 offPhys = 0;
2562 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2563 }
2564 else
2565 {
2566 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2567 GCPhys, true /* fAbove */);
2568 if ( pPhys
2569 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2570 {
2571 offPhys = pPhys->Core.Key - GCPhys;
2572 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2573 }
2574 else
2575 {
2576 pPhys = NULL;
2577 fMorePhys = false;
2578 offPhys = offPhysLast = PAGE_SIZE;
2579 }
2580 }
2581 }
2582
2583 /*
2584 * Handle access to space without handlers (that's easy).
2585 */
2586 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2587 uint32_t cbRange = (uint32_t)cbWrite;
2588 if (offPhys && offVirt)
2589 {
2590 if (cbRange > offPhys)
2591 cbRange = offPhys;
2592 if (cbRange > offVirt)
2593 cbRange = offVirt;
2594 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2595 }
2596 /*
2597 * Physical handler.
2598 */
2599 else if (!offPhys && offVirt)
2600 {
2601 if (cbRange > offPhysLast + 1)
2602 cbRange = offPhysLast + 1;
2603 if (cbRange > offVirt)
2604 cbRange = offVirt;
2605#ifdef IN_RING3
2606 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2607 void *pvUser = pPhys->CTX_SUFF(pvUser);
2608
2609 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2610 STAM_PROFILE_START(&pPhys->Stat, h);
2611 PGM_LOCK_ASSERT_OWNER(pVM);
2612 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2613 pgmUnlock(pVM);
2614 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2615 pgmLock(pVM);
2616# ifdef VBOX_WITH_STATISTICS
2617 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2618 if (pPhys)
2619 STAM_PROFILE_STOP(&pPhys->Stat, h);
2620# else
2621 pPhys = NULL; /* might not be valid anymore. */
2622# endif
2623 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2624#else
2625 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2626 NOREF(cbRange);
2627 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2628 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2629 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2630#endif
2631 }
2632 /*
2633 * Virtual handler.
2634 */
2635 else if (offPhys && !offVirt)
2636 {
2637 if (cbRange > offVirtLast + 1)
2638 cbRange = offVirtLast + 1;
2639 if (cbRange > offPhys)
2640 cbRange = offPhys;
2641
2642 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2643#ifdef IN_RING3
2644 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2645 if (pVirtType->pfnHandlerR3)
2646 {
2647 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2648 + (iVirtPage << PAGE_SHIFT)
2649 + (GCPhys & PAGE_OFFSET_MASK);
2650 STAM_PROFILE_START(&pVirt->Stat, h);
2651 rc = pVirtType->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2652 pVirt->CTX_SUFF(pvUser));
2653 STAM_PROFILE_STOP(&pVirt->Stat, h);
2654 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2655 }
2656 pVirt = NULL;
2657#else
2658 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2659 NOREF(cbRange);
2660 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2661 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2662 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2663#endif
2664 }
2665 /*
2666 * Both... give the physical one priority.
2667 */
2668 else
2669 {
2670 Assert(!offPhys && !offVirt);
2671 if (cbRange > offVirtLast + 1)
2672 cbRange = offVirtLast + 1;
2673 if (cbRange > offPhysLast + 1)
2674 cbRange = offPhysLast + 1;
2675
2676 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2677#ifdef IN_RING3
2678 if (pVirtType->pfnHandlerR3)
2679 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2680 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2681
2682 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2683 void *pvUser = pPhys->CTX_SUFF(pvUser);
2684
2685 STAM_PROFILE_START(&pPhys->Stat, h);
2686 PGM_LOCK_ASSERT_OWNER(pVM);
2687 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2688 pgmUnlock(pVM);
2689 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2690 pgmLock(pVM);
2691# ifdef VBOX_WITH_STATISTICS
2692 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2693 if (pPhys)
2694 STAM_PROFILE_STOP(&pPhys->Stat, h);
2695# else
2696 pPhys = NULL; /* might not be valid anymore. */
2697# endif
2698 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2699 if (pVirtType->pfnHandlerR3)
2700 {
2701
2702 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2703 + (iVirtPage << PAGE_SHIFT)
2704 + (GCPhys & PAGE_OFFSET_MASK);
2705 STAM_PROFILE_START(&pVirt->Stat, h2);
2706 int rc2 = pVirtType->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2707 pVirt->CTX_SUFF(pvUser));
2708 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2709 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2710 rc = VINF_SUCCESS;
2711 else
2712 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2713 }
2714 pPhys = NULL;
2715 pVirt = NULL;
2716#else
2717 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2718 NOREF(cbRange);
2719 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2720 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2721 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2722#endif
2723 }
2724 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2725 memcpy(pvDst, pvBuf, cbRange);
2726
2727 /*
2728 * Advance if we've got more stuff to do.
2729 */
2730 if (cbRange >= cbWrite)
2731 {
2732 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2733 return VINF_SUCCESS;
2734 }
2735
2736 cbWrite -= cbRange;
2737 GCPhys += cbRange;
2738 pvBuf = (uint8_t *)pvBuf + cbRange;
2739 pvDst = (uint8_t *)pvDst + cbRange;
2740
2741 offPhys -= cbRange;
2742 offPhysLast -= cbRange;
2743 offVirt -= cbRange;
2744 offVirtLast -= cbRange;
2745 }
2746}
2747
2748
2749/**
2750 * Write to physical memory.
2751 *
2752 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2753 * want to ignore those.
2754 *
2755 * @returns VBox status code. Can be ignored in ring-3.
2756 * @retval VINF_SUCCESS.
2757 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2758 *
2759 * @param pVM Pointer to the VM.
2760 * @param GCPhys Physical address to write to.
2761 * @param pvBuf What to write.
2762 * @param cbWrite How many bytes to write.
2763 * @param enmOrigin Who is calling.
2764 */
2765VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2766{
2767 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2768 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2769 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2770
2771 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2772 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2773
2774 pgmLock(pVM);
2775
2776 /*
2777 * Copy loop on ram ranges.
2778 */
2779 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2780 for (;;)
2781 {
2782 /* Inside range or not? */
2783 if (pRam && GCPhys >= pRam->GCPhys)
2784 {
2785 /*
2786 * Must work our way thru this page by page.
2787 */
2788 RTGCPTR off = GCPhys - pRam->GCPhys;
2789 while (off < pRam->cb)
2790 {
2791 RTGCPTR iPage = off >> PAGE_SHIFT;
2792 PPGMPAGE pPage = &pRam->aPages[iPage];
2793 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2794 if (cb > cbWrite)
2795 cb = cbWrite;
2796
2797 /*
2798 * Any active WRITE or ALL access handlers?
2799 */
2800 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
2801 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2802 {
2803 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2804 if (RT_FAILURE(rc))
2805 {
2806 pgmUnlock(pVM);
2807 return rc;
2808 }
2809 }
2810 else
2811 {
2812 /*
2813 * Get the pointer to the page.
2814 */
2815 PGMPAGEMAPLOCK PgMpLck;
2816 void *pvDst;
2817 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2818 if (RT_SUCCESS(rc))
2819 {
2820 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2821 memcpy(pvDst, pvBuf, cb);
2822 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2823 }
2824 /* Ignore writes to ballooned pages. */
2825 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2826 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2827 pRam->GCPhys + off, pPage, rc));
2828 }
2829
2830 /* next page */
2831 if (cb >= cbWrite)
2832 {
2833 pgmUnlock(pVM);
2834 return VINF_SUCCESS;
2835 }
2836
2837 cbWrite -= cb;
2838 off += cb;
2839 pvBuf = (const char *)pvBuf + cb;
2840 } /* walk pages in ram range */
2841
2842 GCPhys = pRam->GCPhysLast + 1;
2843 }
2844 else
2845 {
2846 /*
2847 * Unassigned address space, skip it.
2848 */
2849 if (!pRam)
2850 break;
2851 size_t cb = pRam->GCPhys - GCPhys;
2852 if (cb >= cbWrite)
2853 break;
2854 cbWrite -= cb;
2855 pvBuf = (const char *)pvBuf + cb;
2856 GCPhys += cb;
2857 }
2858
2859 /* Advance range if necessary. */
2860 while (pRam && GCPhys > pRam->GCPhysLast)
2861 pRam = pRam->CTX_SUFF(pNext);
2862 } /* Ram range walk */
2863
2864 pgmUnlock(pVM);
2865 return VINF_SUCCESS;
2866}
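
/*
 * Sketch of a handler-respecting write (placeholder names): same calling
 * convention as PGMPhysRead; in R0/RC a VERR_PGM_PHYS_WR_HIT_HANDLER status
 * means the access must be redone in ring-3:
 *
 *      int rc = PGMPhysWrite(pVM, GCPhys, &u32, sizeof(u32), enmOrigin);
 *      if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
 *          return rc;                            // retry the access in ring-3
 *      AssertRC(rc);
 */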
2867
2868
2869/**
2870 * Read from guest physical memory by GC physical address, bypassing
2871 * MMIO and access handlers.
2872 *
2873 * @returns VBox status.
2874 * @param pVM Pointer to the VM.
2875 * @param pvDst The destination address.
2876 * @param GCPhysSrc The source address (GC physical address).
2877 * @param cb The number of bytes to read.
2878 */
2879VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2880{
2881 /*
2882 * Treat the first page as a special case.
2883 */
2884 if (!cb)
2885 return VINF_SUCCESS;
2886
2887 /* map the 1st page */
2888 void const *pvSrc;
2889 PGMPAGEMAPLOCK Lock;
2890 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2891 if (RT_FAILURE(rc))
2892 return rc;
2893
2894 /* optimize for the case where access is completely within the first page. */
2895 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2896 if (RT_LIKELY(cb <= cbPage))
2897 {
2898 memcpy(pvDst, pvSrc, cb);
2899 PGMPhysReleasePageMappingLock(pVM, &Lock);
2900 return VINF_SUCCESS;
2901 }
2902
2903 /* copy to the end of the page. */
2904 memcpy(pvDst, pvSrc, cbPage);
2905 PGMPhysReleasePageMappingLock(pVM, &Lock);
2906 GCPhysSrc += cbPage;
2907 pvDst = (uint8_t *)pvDst + cbPage;
2908 cb -= cbPage;
2909
2910 /*
2911 * Page by page.
2912 */
2913 for (;;)
2914 {
2915 /* map the page */
2916 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2917 if (RT_FAILURE(rc))
2918 return rc;
2919
2920 /* last page? */
2921 if (cb <= PAGE_SIZE)
2922 {
2923 memcpy(pvDst, pvSrc, cb);
2924 PGMPhysReleasePageMappingLock(pVM, &Lock);
2925 return VINF_SUCCESS;
2926 }
2927
2928 /* copy the entire page and advance */
2929 memcpy(pvDst, pvSrc, PAGE_SIZE);
2930 PGMPhysReleasePageMappingLock(pVM, &Lock);
2931 GCPhysSrc += PAGE_SIZE;
2932 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2933 cb -= PAGE_SIZE;
2934 }
2935 /* won't ever get here. */
2936}
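
/*
 * Sketch (placeholder names): the Simple variants bypass access handlers
 * entirely, so they are only safe for memory the caller knows has no MMIO or
 * handler semantics (e.g. plain guest RAM it owns):
 *
 *      uint8_t abBuf[64];
 *      int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, GCPhysSrc, sizeof(abBuf));
 *      AssertRCReturn(rc, rc);
 */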
2937
2938
2939/**
2940 * Write memory to a GC physical address in guest physical memory.
2942 *
2943 * This will bypass MMIO and access handlers.
2944 *
2945 * @returns VBox status.
2946 * @param pVM Pointer to the VM.
2947 * @param GCPhysDst The GC physical address of the destination.
2948 * @param pvSrc The source buffer.
2949 * @param cb The number of bytes to write.
2950 */
2951VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2952{
2953 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2954
2955 /*
2956 * Treat the first page as a special case.
2957 */
2958 if (!cb)
2959 return VINF_SUCCESS;
2960
2961 /* map the 1st page */
2962 void *pvDst;
2963 PGMPAGEMAPLOCK Lock;
2964 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2965 if (RT_FAILURE(rc))
2966 return rc;
2967
2968 /* optimize for the case where access is completely within the first page. */
2969 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2970 if (RT_LIKELY(cb <= cbPage))
2971 {
2972 memcpy(pvDst, pvSrc, cb);
2973 PGMPhysReleasePageMappingLock(pVM, &Lock);
2974 return VINF_SUCCESS;
2975 }
2976
2977 /* copy to the end of the page. */
2978 memcpy(pvDst, pvSrc, cbPage);
2979 PGMPhysReleasePageMappingLock(pVM, &Lock);
2980 GCPhysDst += cbPage;
2981 pvSrc = (const uint8_t *)pvSrc + cbPage;
2982 cb -= cbPage;
2983
2984 /*
2985 * Page by page.
2986 */
2987 for (;;)
2988 {
2989 /* map the page */
2990 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2991 if (RT_FAILURE(rc))
2992 return rc;
2993
2994 /* last page? */
2995 if (cb <= PAGE_SIZE)
2996 {
2997 memcpy(pvDst, pvSrc, cb);
2998 PGMPhysReleasePageMappingLock(pVM, &Lock);
2999 return VINF_SUCCESS;
3000 }
3001
3002 /* copy the entire page and advance */
3003 memcpy(pvDst, pvSrc, PAGE_SIZE);
3004 PGMPhysReleasePageMappingLock(pVM, &Lock);
3005 GCPhysDst += PAGE_SIZE;
3006 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3007 cb -= PAGE_SIZE;
3008 }
3009 /* won't ever get here. */
3010}
3011
3012
3013/**
3014 * Read from guest physical memory referenced by GC pointer.
3015 *
3016 * This function uses the current CR3/CR0/CR4 of the guest and will
3017 * bypass access handlers and not set any accessed bits.
3018 *
3019 * @returns VBox status.
3020 * @param pVCpu Handle to the current virtual CPU.
3021 * @param pvDst The destination address.
3022 * @param GCPtrSrc The source address (GC pointer).
3023 * @param cb The number of bytes to read.
3024 */
3025VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3026{
3027 PVM pVM = pVCpu->CTX_SUFF(pVM);
3028/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3029
3030 /*
3031 * Treat the first page as a special case.
3032 */
3033 if (!cb)
3034 return VINF_SUCCESS;
3035
3036 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3037 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3038
3039 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3040 * when many VCPUs are fighting for the lock.
3041 */
3042 pgmLock(pVM);
3043
3044 /* map the 1st page */
3045 void const *pvSrc;
3046 PGMPAGEMAPLOCK Lock;
3047 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3048 if (RT_FAILURE(rc))
3049 {
3050 pgmUnlock(pVM);
3051 return rc;
3052 }
3053
3054 /* optimize for the case where access is completely within the first page. */
3055 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3056 if (RT_LIKELY(cb <= cbPage))
3057 {
3058 memcpy(pvDst, pvSrc, cb);
3059 PGMPhysReleasePageMappingLock(pVM, &Lock);
3060 pgmUnlock(pVM);
3061 return VINF_SUCCESS;
3062 }
3063
3064 /* copy to the end of the page. */
3065 memcpy(pvDst, pvSrc, cbPage);
3066 PGMPhysReleasePageMappingLock(pVM, &Lock);
3067 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3068 pvDst = (uint8_t *)pvDst + cbPage;
3069 cb -= cbPage;
3070
3071 /*
3072 * Page by page.
3073 */
3074 for (;;)
3075 {
3076 /* map the page */
3077 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3078 if (RT_FAILURE(rc))
3079 {
3080 pgmUnlock(pVM);
3081 return rc;
3082 }
3083
3084 /* last page? */
3085 if (cb <= PAGE_SIZE)
3086 {
3087 memcpy(pvDst, pvSrc, cb);
3088 PGMPhysReleasePageMappingLock(pVM, &Lock);
3089 pgmUnlock(pVM);
3090 return VINF_SUCCESS;
3091 }
3092
3093 /* copy the entire page and advance */
3094 memcpy(pvDst, pvSrc, PAGE_SIZE);
3095 PGMPhysReleasePageMappingLock(pVM, &Lock);
3096 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3097 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3098 cb -= PAGE_SIZE;
3099 }
3100 /* won't ever get here. */
3101}
3102
3103
3104/**
3105 * Write to guest physical memory referenced by GC pointer.
3106 *
3107 * This function uses the current CR3/CR0/CR4 of the guest and will
3108 * bypass access handlers and not set dirty or accessed bits.
3109 *
3110 * @returns VBox status.
3111 * @param pVCpu Handle to the current virtual CPU.
3112 * @param GCPtrDst The destination address (GC pointer).
3113 * @param pvSrc The source address.
3114 * @param cb The number of bytes to write.
3115 */
3116VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3117{
3118 PVM pVM = pVCpu->CTX_SUFF(pVM);
3119 VMCPU_ASSERT_EMT(pVCpu);
3120
3121 /*
3122 * Treat the first page as a special case.
3123 */
3124 if (!cb)
3125 return VINF_SUCCESS;
3126
3127 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3128 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3129
3130 /* map the 1st page */
3131 void *pvDst;
3132 PGMPAGEMAPLOCK Lock;
3133 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3134 if (RT_FAILURE(rc))
3135 return rc;
3136
3137 /* optimize for the case where access is completely within the first page. */
3138 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3139 if (RT_LIKELY(cb <= cbPage))
3140 {
3141 memcpy(pvDst, pvSrc, cb);
3142 PGMPhysReleasePageMappingLock(pVM, &Lock);
3143 return VINF_SUCCESS;
3144 }
3145
3146 /* copy to the end of the page. */
3147 memcpy(pvDst, pvSrc, cbPage);
3148 PGMPhysReleasePageMappingLock(pVM, &Lock);
3149 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3150 pvSrc = (const uint8_t *)pvSrc + cbPage;
3151 cb -= cbPage;
3152
3153 /*
3154 * Page by page.
3155 */
3156 for (;;)
3157 {
3158 /* map the page */
3159 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3160 if (RT_FAILURE(rc))
3161 return rc;
3162
3163 /* last page? */
3164 if (cb <= PAGE_SIZE)
3165 {
3166 memcpy(pvDst, pvSrc, cb);
3167 PGMPhysReleasePageMappingLock(pVM, &Lock);
3168 return VINF_SUCCESS;
3169 }
3170
3171 /* copy the entire page and advance */
3172 memcpy(pvDst, pvSrc, PAGE_SIZE);
3173 PGMPhysReleasePageMappingLock(pVM, &Lock);
3174 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3175 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3176 cb -= PAGE_SIZE;
3177 }
3178 /* won't ever get here. */
3179}
3180
3181
3182/**
3183 * Write to guest physical memory referenced by GC pointer and update the PTE.
3184 *
3185 * This function uses the current CR3/CR0/CR4 of the guest and will
3186 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3187 *
3188 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3189 *
3190 * @returns VBox status.
3191 * @param pVCpu Handle to the current virtual CPU.
3192 * @param GCPtrDst The destination address (GC pointer).
3193 * @param pvSrc The source address.
3194 * @param cb The number of bytes to write.
3195 */
3196VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3197{
3198 PVM pVM = pVCpu->CTX_SUFF(pVM);
3199 VMCPU_ASSERT_EMT(pVCpu);
3200
3201 /*
3202 * Treat the first page as a special case.
3203 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3204 */
3205 if (!cb)
3206 return VINF_SUCCESS;
3207
3208 /* map the 1st page */
3209 void *pvDst;
3210 PGMPAGEMAPLOCK Lock;
3211 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3212 if (RT_FAILURE(rc))
3213 return rc;
3214
3215 /* optimize for the case where access is completely within the first page. */
3216 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3217 if (RT_LIKELY(cb <= cbPage))
3218 {
3219 memcpy(pvDst, pvSrc, cb);
3220 PGMPhysReleasePageMappingLock(pVM, &Lock);
3221 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3222 return VINF_SUCCESS;
3223 }
3224
3225 /* copy to the end of the page. */
3226 memcpy(pvDst, pvSrc, cbPage);
3227 PGMPhysReleasePageMappingLock(pVM, &Lock);
3228 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3229 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3230 pvSrc = (const uint8_t *)pvSrc + cbPage;
3231 cb -= cbPage;
3232
3233 /*
3234 * Page by page.
3235 */
3236 for (;;)
3237 {
3238 /* map the page */
3239 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3240 if (RT_FAILURE(rc))
3241 return rc;
3242
3243 /* last page? */
3244 if (cb <= PAGE_SIZE)
3245 {
3246 memcpy(pvDst, pvSrc, cb);
3247 PGMPhysReleasePageMappingLock(pVM, &Lock);
3248 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3249 return VINF_SUCCESS;
3250 }
3251
3252 /* copy the entire page and advance */
3253 memcpy(pvDst, pvSrc, PAGE_SIZE);
3254 PGMPhysReleasePageMappingLock(pVM, &Lock);
3255 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3256 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3257 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3258 cb -= PAGE_SIZE;
3259 }
3260 /* won't ever get here. */
3261}
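
/*
 * Sketch (placeholder names): pick PGMPhysSimpleDirtyWriteGCPtr when the write
 * should also set the accessed and dirty bits in the guest PTE, and
 * PGMPhysSimpleWriteGCPtr when it should not:
 *
 *      uint64_t u64Value = 0;
 *      int rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &u64Value, sizeof(u64Value));
 *      AssertRCReturn(rc, rc);
 */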
3262
3263
3264/**
3265 * Read from guest physical memory referenced by GC pointer.
3266 *
3267 * This function uses the current CR3/CR0/CR4 of the guest and will
3268 * respect access handlers and set accessed bits.
3269 *
3270 * @returns VBox status.
3271 * @param pVCpu Handle to the current virtual CPU.
3272 * @param pvDst The destination address.
3273 * @param GCPtrSrc The source address (GC pointer).
3274 * @param cb The number of bytes to read.
3275 * @param enmOrigin Who is calling.
3276 * @thread EMT(pVCpu)
3277 */
3278VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3279{
3280 RTGCPHYS GCPhys;
3281 uint64_t fFlags;
3282 int rc;
3283 PVM pVM = pVCpu->CTX_SUFF(pVM);
3284 VMCPU_ASSERT_EMT(pVCpu);
3285
3286 /*
3287 * Anything to do?
3288 */
3289 if (!cb)
3290 return VINF_SUCCESS;
3291
3292 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3293
3294 /*
3295 * Optimize reads within a single page.
3296 */
3297 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3298 {
3299 /* Convert virtual to physical address + flags */
3300 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3301 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3302 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3303
3304 /* mark the guest page as accessed. */
3305 if (!(fFlags & X86_PTE_A))
3306 {
3307 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3308 AssertRC(rc);
3309 }
3310
3311 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3312 }
3313
3314 /*
3315 * Page by page.
3316 */
3317 for (;;)
3318 {
3319 /* Convert virtual to physical address + flags */
3320 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3321 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3322 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3323
3324 /* mark the guest page as accessed. */
3325 if (!(fFlags & X86_PTE_A))
3326 {
3327 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3328 AssertRC(rc);
3329 }
3330
3331 /* copy */
3332 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3333 if (cbRead < cb)
3334 {
3335 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3336 if (RT_FAILURE(rc))
3337 return rc;
3338 }
3339 else /* Last page (cbRead is PAGE_SIZE, we only need cb!) */
3340 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3341
3342 /* next */
3343 Assert(cb > cbRead);
3344 cb -= cbRead;
3345 pvDst = (uint8_t *)pvDst + cbRead;
3346 GCPtrSrc += cbRead;
3347 }
3348}
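
/*
 * Sketch (placeholder names): handler-respecting read through a guest virtual
 * address; unlike the Simple variants this also marks the guest PTE accessed:
 *
 *      uint64_t u64 = 0;
 *      int rc = PGMPhysReadGCPtr(pVCpu, &u64, GCPtrSrc, sizeof(u64), enmOrigin);
 *      AssertRCReturn(rc, rc);
 */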
3349
3350
3351/**
3352 * Write to guest physical memory referenced by GC pointer.
3353 *
3354 * This function uses the current CR3/CR0/CR4 of the guest and will
3355 * respect access handlers and set dirty and accessed bits.
3356 *
3357 * @returns VBox status.
3358 * @retval VINF_SUCCESS.
3359 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3360 *
3361 * @param pVCpu Handle to the current virtual CPU.
3362 * @param GCPtrDst The destination address (GC pointer).
3363 * @param pvSrc The source address.
3364 * @param cb The number of bytes to write.
3365 * @param enmOrigin Who is calling.
3366 */
3367VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3368{
3369 RTGCPHYS GCPhys;
3370 uint64_t fFlags;
3371 int rc;
3372 PVM pVM = pVCpu->CTX_SUFF(pVM);
3373 VMCPU_ASSERT_EMT(pVCpu);
3374
3375 /*
3376 * Anything to do?
3377 */
3378 if (!cb)
3379 return VINF_SUCCESS;
3380
3381 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3382
3383 /*
3384 * Optimize writes within a single page.
3385 */
3386 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3387 {
3388 /* Convert virtual to physical address + flags */
3389 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3390 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3391 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3392
3393 /* Mention when we ignore X86_PTE_RW... */
3394 if (!(fFlags & X86_PTE_RW))
3395            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3396
3397 /* Mark the guest page as accessed and dirty if necessary. */
3398 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3399 {
3400 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3401 AssertRC(rc);
3402 }
3403
3404 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3405 }
3406
3407 /*
3408 * Page by page.
3409 */
3410 for (;;)
3411 {
3412 /* Convert virtual to physical address + flags */
3413 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3414 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3415 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3416
3417 /* Mention when we ignore X86_PTE_RW... */
3418 if (!(fFlags & X86_PTE_RW))
3419            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3420
3421 /* Mark the guest page as accessed and dirty if necessary. */
3422 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3423 {
3424 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3425 AssertRC(rc);
3426 }
3427
3428 /* copy */
3429 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3430 if (cbWrite < cb)
3431 {
3432 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3433 if (RT_FAILURE(rc))
3434 return rc;
3435 }
3436 else /* Last page (cbWrite is PAGE_SIZE, we only need cb!) */
3437            return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3438
3439 /* next */
3440 Assert(cb > cbWrite);
3441 cb -= cbWrite;
3442 pvSrc = (uint8_t *)pvSrc + cbWrite;
3443 GCPtrDst += cbWrite;
3444 }
3445}
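
/*
 * A minimal usage sketch for PGMPhysWriteGCPtr (illustrative only and kept disabled;
 * the helper name is an assumption).  It highlights the documented status protocol:
 * in R0/RC the call may return VERR_PGM_PHYS_WR_HIT_HANDLER, which the caller
 * typically treats as "redo the write in ring-3", while in R3 that status never occurs.
 */
#if 0
static int pgmSampleWriteGuestBytes(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
{
    int rc = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, enmOrigin);
#ifndef IN_RING3
    /* A monitored page may force the write to be repeated in ring-3. */
    if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
        return rc;
#endif
    if (RT_FAILURE(rc))
        Log(("pgmSampleWriteGuestBytes: write of %zu bytes at %RGv failed: %Rrc\n", cb, GCPtrDst, rc));
    return rc;
}
#endif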
3446
3447
3448/**
3449 * Performs a read of guest virtual memory for instruction emulation.
3450 *
3451 * This will check permissions, raise exceptions and update the access bits.
3452 *
3453 * The current implementation will bypass all access handlers. It may later be
3454 * changed to at least respect MMIO.
3455 *
3456 *
3457 * @returns VBox status code suitable to scheduling.
3458 * @retval VINF_SUCCESS if the read was performed successfully.
3459 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3460 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3461 *
3462 * @param pVCpu Handle to the current virtual CPU.
3463 * @param pCtxCore The context core.
3464 * @param pvDst Where to put the bytes we've read.
3465 * @param GCPtrSrc The source address.
3466 * @param cb The number of bytes to read. Not more than a page.
3467 *
3468 * @remark This function will dynamically map physical pages in GC. This may unmap
3469 * mappings done by the caller. Be careful!
3470 */
3471VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3472{
3473 PVM pVM = pVCpu->CTX_SUFF(pVM);
3474 Assert(cb <= PAGE_SIZE);
3475 VMCPU_ASSERT_EMT(pVCpu);
3476
3477/** @todo r=bird: This isn't perfect!
3478 * -# It's not checking for reserved bits being 1.
3479 * -# It's not correctly dealing with the access bit.
3480 * -# It's not respecting MMIO memory or any other access handlers.
3481 */
3482 /*
3483 * 1. Translate virtual to physical. This may fault.
3484 * 2. Map the physical address.
3485 * 3. Do the read operation.
3486 * 4. Set access bits if required.
3487 */
3488 int rc;
3489 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3490 if (cb <= cb1)
3491 {
3492 /*
3493 * Not crossing pages.
3494 */
3495 RTGCPHYS GCPhys;
3496 uint64_t fFlags;
3497 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3498 if (RT_SUCCESS(rc))
3499 {
3500 /** @todo we should check reserved bits ... */
3501 PGMPAGEMAPLOCK PgMpLck;
3502 void const *pvSrc;
3503 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3504 switch (rc)
3505 {
3506 case VINF_SUCCESS:
3507 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3508 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3509 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3510 break;
3511 case VERR_PGM_PHYS_PAGE_RESERVED:
3512 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3513 memset(pvDst, 0xff, cb);
3514 break;
3515 default:
3516 Assert(RT_FAILURE_NP(rc));
3517 return rc;
3518 }
3519
3520 /** @todo access bit emulation isn't 100% correct. */
3521 if (!(fFlags & X86_PTE_A))
3522 {
3523 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3524 AssertRC(rc);
3525 }
3526 return VINF_SUCCESS;
3527 }
3528 }
3529 else
3530 {
3531 /*
3532 * Crosses pages.
3533 */
3534 size_t cb2 = cb - cb1;
3535 uint64_t fFlags1;
3536 RTGCPHYS GCPhys1;
3537 uint64_t fFlags2;
3538 RTGCPHYS GCPhys2;
3539 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3540 if (RT_SUCCESS(rc))
3541 {
3542 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3543 if (RT_SUCCESS(rc))
3544 {
3545 /** @todo we should check reserved bits ... */
3546 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3547 PGMPAGEMAPLOCK PgMpLck;
3548 void const *pvSrc1;
3549 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3550 switch (rc)
3551 {
3552 case VINF_SUCCESS:
3553 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3554 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3555 break;
3556 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3557 memset(pvDst, 0xff, cb1);
3558 break;
3559 default:
3560 Assert(RT_FAILURE_NP(rc));
3561 return rc;
3562 }
3563
3564 void const *pvSrc2;
3565 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3566 switch (rc)
3567 {
3568 case VINF_SUCCESS:
3569 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3570 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3571 break;
3572 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3573 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3574 break;
3575 default:
3576 Assert(RT_FAILURE_NP(rc));
3577 return rc;
3578 }
3579
3580 if (!(fFlags1 & X86_PTE_A))
3581 {
3582 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3583 AssertRC(rc);
3584 }
3585 if (!(fFlags2 & X86_PTE_A))
3586 {
3587 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3588 AssertRC(rc);
3589 }
3590 return VINF_SUCCESS;
3591 }
3592 }
3593 }
3594
3595 /*
3596 * Raise a #PF.
3597 */
3598 uint32_t uErr;
3599
3600 /* Get the current privilege level. */
3601 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3602 switch (rc)
3603 {
3604 case VINF_SUCCESS:
3605 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3606 break;
3607
3608 case VERR_PAGE_NOT_PRESENT:
3609 case VERR_PAGE_TABLE_NOT_PRESENT:
3610 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3611 break;
3612
3613 default:
3614 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3615 return rc;
3616 }
3617 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3618 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3619}
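
/*
 * A minimal usage sketch for PGMPhysInterpretedRead (illustrative only and kept
 * disabled; the helper name is an assumption).  The point is the return code
 * protocol: VINF_SUCCESS means pvDst has been filled, while the trap-related
 * statuses mean a #PF has been raised via TRPM and the caller should let the
 * guest resume.  The sketch assumes the read does not cross a page boundary.
 */
#if 0
static int pgmSampleInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc)
{
    uint64_t u64Value = 0;  /* assumed to be naturally aligned in guest memory */
    int rc = PGMPhysInterpretedRead(pVCpu, pCtxCore, &u64Value, GCPtrSrc, sizeof(u64Value));
    if (rc == VINF_SUCCESS)
        Log(("pgmSampleInterpretedRead: %RGv -> %#RX64\n", GCPtrSrc, u64Value));
    /* VINF_EM_RAW_GUEST_TRAP / VINF_TRPM_XCPT_DISPATCHED: a #PF was raised instead. */
    return rc;
}
#endif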
3620
3621
3622/**
3623 * Performs a read of guest virtual memory for instruction emulation.
3624 *
3625 * This will check permissions, raise exceptions and update the access bits.
3626 *
3627 * The current implementation will bypass all access handlers. It may later be
3628 * changed to at least respect MMIO.
3629 *
3630 *
3631 * @returns VBox status code suitable to scheduling.
3632 * @retval VINF_SUCCESS if the read was performed successfully.
3633 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3634 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3635 *
3636 * @param pVCpu Handle to the current virtual CPU.
3637 * @param pCtxCore The context core.
3638 * @param pvDst Where to put the bytes we've read.
3639 * @param GCPtrSrc The source address.
3640 * @param cb The number of bytes to read. Not more than a page.
3641 * @param   fRaiseTrap      If set, the trap will be raised as per spec; if clear,
3642 *                          an appropriate error status will be returned (no
3643 *                          informational statuses at all).
3644 *
3645 *
3646 * @remarks Takes the PGM lock.
3647 * @remarks A page fault on the 2nd page of the access will be raised without
3648 * writing the bits on the first page since we're ASSUMING that the
3649 * caller is emulating an instruction access.
3650 * @remarks This function will dynamically map physical pages in GC. This may
3651 * unmap mappings done by the caller. Be careful!
3652 */
3653VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3654 bool fRaiseTrap)
3655{
3656 PVM pVM = pVCpu->CTX_SUFF(pVM);
3657 Assert(cb <= PAGE_SIZE);
3658 VMCPU_ASSERT_EMT(pVCpu);
3659
3660 /*
3661 * 1. Translate virtual to physical. This may fault.
3662 * 2. Map the physical address.
3663 * 3. Do the read operation.
3664 * 4. Set access bits if required.
3665 */
3666 int rc;
3667 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3668 if (cb <= cb1)
3669 {
3670 /*
3671 * Not crossing pages.
3672 */
3673 RTGCPHYS GCPhys;
3674 uint64_t fFlags;
3675 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3676 if (RT_SUCCESS(rc))
3677 {
3678 if (1) /** @todo we should check reserved bits ... */
3679 {
3680 const void *pvSrc;
3681 PGMPAGEMAPLOCK Lock;
3682 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3683 switch (rc)
3684 {
3685 case VINF_SUCCESS:
3686 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3687 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3688 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3689 PGMPhysReleasePageMappingLock(pVM, &Lock);
3690 break;
3691 case VERR_PGM_PHYS_PAGE_RESERVED:
3692 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3693 memset(pvDst, 0xff, cb);
3694 break;
3695 default:
3696 AssertMsgFailed(("%Rrc\n", rc));
3697 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3698 return rc;
3699 }
3700
3701 if (!(fFlags & X86_PTE_A))
3702 {
3703 /** @todo access bit emulation isn't 100% correct. */
3704 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3705 AssertRC(rc);
3706 }
3707 return VINF_SUCCESS;
3708 }
3709 }
3710 }
3711 else
3712 {
3713 /*
3714 * Crosses pages.
3715 */
3716 size_t cb2 = cb - cb1;
3717 uint64_t fFlags1;
3718 RTGCPHYS GCPhys1;
3719 uint64_t fFlags2;
3720 RTGCPHYS GCPhys2;
3721 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3722 if (RT_SUCCESS(rc))
3723 {
3724 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3725 if (RT_SUCCESS(rc))
3726 {
3727 if (1) /** @todo we should check reserved bits ... */
3728 {
3729 const void *pvSrc;
3730 PGMPAGEMAPLOCK Lock;
3731 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3732 switch (rc)
3733 {
3734 case VINF_SUCCESS:
3735 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3736 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3737 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3738 PGMPhysReleasePageMappingLock(pVM, &Lock);
3739 break;
3740 case VERR_PGM_PHYS_PAGE_RESERVED:
3741 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3742 memset(pvDst, 0xff, cb1);
3743 break;
3744 default:
3745 AssertMsgFailed(("%Rrc\n", rc));
3746 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3747 return rc;
3748 }
3749
3750 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3751 switch (rc)
3752 {
3753 case VINF_SUCCESS:
3754 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3755 PGMPhysReleasePageMappingLock(pVM, &Lock);
3756 break;
3757 case VERR_PGM_PHYS_PAGE_RESERVED:
3758 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3759 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3760 break;
3761 default:
3762 AssertMsgFailed(("%Rrc\n", rc));
3763 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3764 return rc;
3765 }
3766
3767 if (!(fFlags1 & X86_PTE_A))
3768 {
3769 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3770 AssertRC(rc);
3771 }
3772 if (!(fFlags2 & X86_PTE_A))
3773 {
3774 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3775 AssertRC(rc);
3776 }
3777 return VINF_SUCCESS;
3778 }
3779 /* sort out which page */
3780 }
3781 else
3782 GCPtrSrc += cb1; /* fault on 2nd page */
3783 }
3784 }
3785
3786 /*
3787 * Raise a #PF if we're allowed to do that.
3788 */
3789 /* Calc the error bits. */
3790 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3791 uint32_t uErr;
3792 switch (rc)
3793 {
3794 case VINF_SUCCESS:
3795 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3796 rc = VERR_ACCESS_DENIED;
3797 break;
3798
3799 case VERR_PAGE_NOT_PRESENT:
3800 case VERR_PAGE_TABLE_NOT_PRESENT:
3801 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3802 break;
3803
3804 default:
3805 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3806 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3807 return rc;
3808 }
3809 if (fRaiseTrap)
3810 {
3811 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3812 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3813 }
3814 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3815 return rc;
3816}
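
/*
 * A minimal usage sketch for PGMPhysInterpretedReadNoHandlers (illustrative only
 * and kept disabled; the helper name is an assumption).  With fRaiseTrap=false the
 * function maps translation problems to plain error statuses (e.g.
 * VERR_PAGE_NOT_PRESENT, VERR_ACCESS_DENIED) instead of injecting a #PF, which
 * suits probing reads during instruction emulation.
 */
#if 0
static bool pgmSampleTryPeekGuestByte(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc, uint8_t *pbValue)
{
    int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pbValue, GCPtrSrc, 1, false /*fRaiseTrap*/);
    return rc == VINF_SUCCESS;
}
#endif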
3817
3818
3819/**
3820 * Performs a write to guest virtual memory for instruction emulation.
3821 *
3822 * This will check permissions, raise exceptions and update the dirty and access
3823 * bits.
3824 *
3825 * @returns VBox status code suitable to scheduling.
3826 * @retval  VINF_SUCCESS if the write was performed successfully.
3827 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3828 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3829 *
3830 * @param pVCpu Handle to the current virtual CPU.
3831 * @param pCtxCore The context core.
3832 * @param GCPtrDst The destination address.
3833 * @param pvSrc What to write.
3834 * @param cb The number of bytes to write. Not more than a page.
3835 * @param   fRaiseTrap      If set, the trap will be raised as per spec; if clear,
3836 *                          an appropriate error status will be returned (no
3837 *                          informational statuses at all).
3838 *
3839 * @remarks Takes the PGM lock.
3840 * @remarks A page fault on the 2nd page of the access will be raised without
3841 * writing the bits on the first page since we're ASSUMING that the
3842 * caller is emulating an instruction access.
3843 * @remarks This function will dynamically map physical pages in GC. This may
3844 * unmap mappings done by the caller. Be careful!
3845 */
3846VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
3847 size_t cb, bool fRaiseTrap)
3848{
3849 Assert(cb <= PAGE_SIZE);
3850 PVM pVM = pVCpu->CTX_SUFF(pVM);
3851 VMCPU_ASSERT_EMT(pVCpu);
3852
3853 /*
3854 * 1. Translate virtual to physical. This may fault.
3855 * 2. Map the physical address.
3856 * 3. Do the write operation.
3857 * 4. Set access bits if required.
3858 */
3859 /** @todo Since this method is frequently used by EMInterpret or IOM
3860     *        upon a write fault to a write access monitored page, we can
3861 * reuse the guest page table walking from the \#PF code. */
3862 int rc;
3863 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3864 if (cb <= cb1)
3865 {
3866 /*
3867 * Not crossing pages.
3868 */
3869 RTGCPHYS GCPhys;
3870 uint64_t fFlags;
3871 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3872 if (RT_SUCCESS(rc))
3873 {
3874 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3875 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3876 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3877 {
3878 void *pvDst;
3879 PGMPAGEMAPLOCK Lock;
3880 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3881 switch (rc)
3882 {
3883 case VINF_SUCCESS:
3884 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3885 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3886 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3887 PGMPhysReleasePageMappingLock(pVM, &Lock);
3888 break;
3889 case VERR_PGM_PHYS_PAGE_RESERVED:
3890 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3891 /* bit bucket */
3892 break;
3893 default:
3894 AssertMsgFailed(("%Rrc\n", rc));
3895 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3896 return rc;
3897 }
3898
3899 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3900 {
3901 /** @todo dirty & access bit emulation isn't 100% correct. */
3902 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3903 AssertRC(rc);
3904 }
3905 return VINF_SUCCESS;
3906 }
3907 rc = VERR_ACCESS_DENIED;
3908 }
3909 }
3910 else
3911 {
3912 /*
3913 * Crosses pages.
3914 */
3915 size_t cb2 = cb - cb1;
3916 uint64_t fFlags1;
3917 RTGCPHYS GCPhys1;
3918 uint64_t fFlags2;
3919 RTGCPHYS GCPhys2;
3920 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3921 if (RT_SUCCESS(rc))
3922 {
3923 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3924 if (RT_SUCCESS(rc))
3925 {
3926 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3927 && (fFlags2 & X86_PTE_RW))
3928 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3929 && CPUMGetGuestCPL(pVCpu) <= 2) )
3930 {
3931 void *pvDst;
3932 PGMPAGEMAPLOCK Lock;
3933 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3934 switch (rc)
3935 {
3936 case VINF_SUCCESS:
3937 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3938 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3939 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3940 PGMPhysReleasePageMappingLock(pVM, &Lock);
3941 break;
3942 case VERR_PGM_PHYS_PAGE_RESERVED:
3943 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3944 /* bit bucket */
3945 break;
3946 default:
3947 AssertMsgFailed(("%Rrc\n", rc));
3948 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3949 return rc;
3950 }
3951
3952 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3953 switch (rc)
3954 {
3955 case VINF_SUCCESS:
3956 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3957 PGMPhysReleasePageMappingLock(pVM, &Lock);
3958 break;
3959 case VERR_PGM_PHYS_PAGE_RESERVED:
3960 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3961 /* bit bucket */
3962 break;
3963 default:
3964 AssertMsgFailed(("%Rrc\n", rc));
3965 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3966 return rc;
3967 }
3968
3969 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3970 {
3971 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3972 AssertRC(rc);
3973 }
3974 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3975 {
3976 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3977 AssertRC(rc);
3978 }
3979 return VINF_SUCCESS;
3980 }
3981 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3982 GCPtrDst += cb1; /* fault on the 2nd page. */
3983 rc = VERR_ACCESS_DENIED;
3984 }
3985 else
3986 GCPtrDst += cb1; /* fault on the 2nd page. */
3987 }
3988 }
3989
3990 /*
3991 * Raise a #PF if we're allowed to do that.
3992 */
3993 /* Calc the error bits. */
3994 uint32_t uErr;
3995 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3996 switch (rc)
3997 {
3998 case VINF_SUCCESS:
3999 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
4000 rc = VERR_ACCESS_DENIED;
4001 break;
4002
4003 case VERR_ACCESS_DENIED:
4004 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
4005 break;
4006
4007 case VERR_PAGE_NOT_PRESENT:
4008 case VERR_PAGE_TABLE_NOT_PRESENT:
4009 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
4010 break;
4011
4012 default:
4013 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
4014 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4015 return rc;
4016 }
4017 if (fRaiseTrap)
4018 {
4019 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
4020 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
4021 }
4022 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
4023 return rc;
4024}
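
/*
 * A minimal usage sketch for PGMPhysInterpretedWriteNoHandlers (illustrative only
 * and kept disabled; the helper name is an assumption).  With fRaiseTrap=true a
 * failed permission or translation check is turned into a guest #PF via TRPM, so
 * the caller only has to propagate the status.  Both pages of a crossing access
 * are translated and permission-checked before any data is written.
 */
#if 0
static int pgmSampleEmulatedStore(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, uint32_t u32Value)
{
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, &u32Value, sizeof(u32Value), true /*fRaiseTrap*/);
}
#endif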
4025
4026
4027/**
4028 * Return the page type of the specified physical address.
4029 *
4030 * @returns The page type.
4031 * @param pVM Pointer to the VM.
4032 * @param GCPhys Guest physical address
4033 */
4034VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
4035{
4036 pgmLock(pVM);
4037 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
4038 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
4039 pgmUnlock(pVM);
4040
4041 return enmPgType;
4042}
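
/*
 * A minimal usage sketch for PGMPhysGetPageType (illustrative only and kept
 * disabled; the helper name is an assumption).  Addresses outside any registered
 * range come back as PGMPAGETYPE_INVALID, so the result doubles as a validity
 * check and a type query.
 */
#if 0
static bool pgmSampleHasPageAt(PVM pVM, RTGCPHYS GCPhys)
{
    return PGMPhysGetPageType(pVM, GCPhys) != PGMPAGETYPE_INVALID;
}
#endif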
4043
4044
4045
4046
4047/**
4048 * Converts a GC physical address to a HC ring-3 pointer, with some
4049 * additional checks.
4050 *
4051 * @returns VBox status code (no informational statuses).
4052 * @retval VINF_SUCCESS on success.
4053 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4054 * access handler of some kind.
4055 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4056 * accesses or is odd in any way.
4057 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4058 *
4059 * @param pVM Pointer to the cross context VM structure.
4060 * @param pVCpu Pointer to the cross context virtual CPU structure of
4061 * the calling EMT.
4062 * @param   GCPhys      The GC physical address to convert.  This API masks the
4063 * A20 line when necessary.
4064 * @param   fWritable   Whether write access is required.
4065 * @param   fByPassHandlers  Whether to bypass access handlers.
4066 * @param   ppv         Where to store the pointer corresponding to GCPhys on success.
4067 * @param   pLock       Where to store the lock information needed by
4068 *                      PGMPhysReleasePageMappingLock.
4068 *
4069 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4070 * @thread EMT(pVCpu).
4071 */
4072VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4073 void **ppv, PPGMPAGEMAPLOCK pLock)
4074{
4075 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4076
4077 pgmLock(pVM);
4078
4079 PPGMRAMRANGE pRam;
4080 PPGMPAGE pPage;
4081 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4082 if (RT_SUCCESS(rc))
4083 {
4084 if (PGM_PAGE_IS_BALLOONED(pPage))
4085 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4086 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4087 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4088 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4089 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4090 rc = VINF_SUCCESS;
4091 else
4092 {
4093 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4094 {
4095 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4096 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4097 }
4098 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4099 {
4100 Assert(!fByPassHandlers);
4101 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4102 }
4103 }
4104 if (RT_SUCCESS(rc))
4105 {
4106 int rc2;
4107
4108 /* Make sure what we return is writable. */
4109 if (fWritable)
4110 switch (PGM_PAGE_GET_STATE(pPage))
4111 {
4112 case PGM_PAGE_STATE_ALLOCATED:
4113 break;
4114 case PGM_PAGE_STATE_BALLOONED:
4115 AssertFailed();
4116 case PGM_PAGE_STATE_ZERO:
4117 case PGM_PAGE_STATE_SHARED:
4118 case PGM_PAGE_STATE_WRITE_MONITORED:
4119 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4120 AssertLogRelRCReturn(rc2, rc2);
4121 break;
4122 }
4123
4124#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
4125 void *pv;
4126 rc = pgmRZDynMapHCPageInlined(pVCpu,
4127 PGM_PAGE_GET_HCPHYS(pPage),
4128 &pv
4129 RTLOG_COMMA_SRC_POS);
4130 if (RT_FAILURE(rc))
4131 return rc;
4132 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4133 pLock->pvPage = pv;
4134 pLock->pVCpu = pVCpu;
4135
4136#else
4137 /* Get a ring-3 mapping of the address. */
4138 PPGMPAGER3MAPTLBE pTlbe;
4139 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4140 AssertLogRelRCReturn(rc2, rc2);
4141
4142 /* Lock it and calculate the address. */
4143 if (fWritable)
4144 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4145 else
4146 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4147 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4148#endif
4149
4150 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4151 }
4152 else
4153 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4154
4155 /* else: handler catching all access, no pointer returned. */
4156 }
4157 else
4158 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4159
4160 pgmUnlock(pVM);
4161 return rc;
4162}
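
/*
 * A minimal usage sketch for PGMPhysIemGCPhys2Ptr (illustrative only and kept
 * disabled; the helper name is an assumption).  On VINF_SUCCESS the caller gets a
 * locked mapping that must be released with PGMPhysReleasePageMappingLock; the
 * VERR_PGM_PHYS_TLB_* statuses tell the caller to fall back to the normal
 * read/write APIs because a handler, ballooned page or unassigned page is involved.
 */
#if 0
static int pgmSampleDirectWriteU32(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    /* Assumes GCPhys is at least 4-byte aligned so the value stays within one page. */
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
    if (rc == VINF_SUCCESS)
    {
        *(uint32_t *)pv = u32Value;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
    }
    /* else: VERR_PGM_PHYS_TLB_CATCH_WRITE / _CATCH_ALL / _UNASSIGNED -> use PGMPhysWrite instead. */
    return rc;
}
#endif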
4163
4164
4165/**
4166 * Checks if the given GCPhys page requires special handling for the given access
4167 * because it's MMIO or otherwise monitored.
4168 *
4169 * @returns VBox status code (no informational statuses).
4170 * @retval VINF_SUCCESS on success.
4171 * @retval  VERR_PGM_PHYS_TLB_CATCH_WRITE if the page has a write access handler
4172 *          of some kind.
4173 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4174 * accesses or is odd in any way.
4175 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4176 *
4177 * @param pVM Pointer to the VM.
4178 * @param GCPhys The GC physical address to convert. Since this is only
4179 * used for filling the REM TLB, the A20 mask must be
4180 * applied before calling this API.
4181 * @param   fWritable   Whether write access is required.
4182 * @param   fByPassHandlers  Whether to bypass access handlers.
4183 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really
4184 *          just a stop-gap thing that should be removed once there is a better TLB
4185 * for virtual address accesses.
4186 */
4187VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
4188{
4189 pgmLock(pVM);
4190 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4191
4192 PPGMRAMRANGE pRam;
4193 PPGMPAGE pPage;
4194 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4195 if (RT_SUCCESS(rc))
4196 {
4197 if (PGM_PAGE_IS_BALLOONED(pPage))
4198 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4199 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4200 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4201 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4202 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4203 rc = VINF_SUCCESS;
4204 else
4205 {
4206 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4207 {
4208 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4209 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4210 }
4211 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4212 {
4213 Assert(!fByPassHandlers);
4214 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4215 }
4216 }
4217 }
4218
4219 pgmUnlock(pVM);
4220 return rc;
4221}
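
/*
 * A minimal usage sketch for PGMPhysIemQueryAccess (illustrative only and kept
 * disabled; the helper name is an assumption).  It only classifies the page and
 * returns no pointer, which is why the remarks above describe it as a stop-gap
 * for filling the REM TLB.
 */
#if 0
static bool pgmSampleCanAccessDirectly(PVM pVM, RTGCPHYS GCPhysPageAligned, bool fWritable)
{
    /* The A20 mask must already have been applied by the caller. */
    return PGMPhysIemQueryAccess(pVM, GCPhysPageAligned, fWritable, false /*fByPassHandlers*/) == VINF_SUCCESS;
}
#endif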
4222