VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp @ 55903

Last change on this file since 55903 was 55903, checked in by vboxsync, 10 years ago

PGM: Added a pVCpu parameter to all physical handler callouts and also a PGMACCESSORIGIN parameter to the ring-3 one. Fixed virtual handler callout mix up from previous commit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 150.8 KB
1/* $Id: PGMAllPhys.cpp 55903 2015-05-18 12:02:58Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/trpm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/em.h>
27#ifdef VBOX_WITH_REM
28# include <VBox/vmm/rem.h>
29#endif
30#include "PGMInternal.h"
31#include <VBox/vmm/vm.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50
51
52#ifndef IN_RING3
53
54/**
55 * \#PF Handler callback for physical memory accesses without an RC/R0 handler.
56 * This simply pushes everything to the HC handler.
57 *
58 * @returns VBox status code (appropriate for trap handling and GC return).
59 * @param pVM Pointer to the VM.
60 * @param pVCpu Pointer to the cross context CPU context for the
61 * calling EMT.
62 * @param uErrorCode CPU Error code.
63 * @param pRegFrame Trap register frame.
64 * @param pvFault The fault address (cr2).
65 * @param GCPhysFault The GC physical address corresponding to pvFault.
66 * @param pvUser User argument.
67 */
68VMMDECL(int) pgmPhysPfHandlerRedirectToHC(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
69 RTGCPHYS GCPhysFault, void *pvUser)
70{
71 NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
72 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
73}
74
75
76/**
77 * \#PF Handler callback for Guest ROM range write access.
78 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
79 *
80 * @returns VBox status code (appropriate for trap handling and GC return).
81 * @param pVM Pointer to the VM.
82 * @param pVCpu Pointer to the cross context CPU context for the
83 * calling EMT.
84 * @param uErrorCode CPU Error code.
85 * @param pRegFrame Trap register frame.
86 * @param pvFault The fault address (cr2).
87 * @param GCPhysFault The GC physical address corresponding to pvFault.
88 * @param pvUser User argument. Pointer to the ROM range structure.
89 */
90DECLEXPORT(int) pgmPhysRomWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
91 RTGCPHYS GCPhysFault, void *pvUser)
92{
93 int rc;
94 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
95 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
96 NOREF(uErrorCode); NOREF(pvFault);
97
98 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
99
100 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
101 switch (pRom->aPages[iPage].enmProt)
102 {
103 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
104 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
105 {
106 /*
107 * If it's a simple instruction which doesn't change the cpu state
108 * we will simply skip it. Otherwise we'll have to defer it to REM.
109 */
110 uint32_t cbOp;
111 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
112 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
113 if ( RT_SUCCESS(rc)
114 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
115 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
116 {
117 switch (pDis->bOpCode)
118 {
119 /** @todo Find other instructions we can safely skip, possibly
120 * adding this kind of detection to DIS or EM. */
121 case OP_MOV:
122 pRegFrame->rip += cbOp;
123 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
124 return VINF_SUCCESS;
125 }
126 }
127 break;
128 }
129
130 case PGMROMPROT_READ_RAM_WRITE_RAM:
131 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
132 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
133 AssertRC(rc);
134 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
135
136 case PGMROMPROT_READ_ROM_WRITE_RAM:
137 /* Handle it in ring-3 because it's *way* easier there. */
138 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
139 break;
140
141 default:
142 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
143 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
144 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
145 }
146
147 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
148 return VINF_EM_RAW_EMULATE_INSTR;
149}
150
151#endif /* IN_RING3 */
152
153/**
154 * Invalidates the RAM range TLBs.
155 *
156 * @param pVM Pointer to the VM.
157 */
158void pgmPhysInvalidRamRangeTlbs(PVM pVM)
159{
160 pgmLock(pVM);
161 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
162 {
163 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR;
164 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR;
165 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
166 }
167 pgmUnlock(pVM);
168}
169
170
171/**
172 * Tests whether a value of type RTGCPHYS would be negative if the type were
173 * signed instead of unsigned.
174 *
175 * @returns @c true if negative, @c false if positive or zero.
176 * @param a_GCPhys The value to test.
177 * @todo Move me to iprt/types.h.
178 */
179#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
180
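/*
 * Illustration (not part of the original sources): RTGCPHYS_IS_NEGATIVE just tests
 * the most significant bit, which lets the slow tree walkers below treat the
 * unsigned difference "GCPhys - pRam->GCPhys" as if it were signed: a wrapped-around
 * value means the address lies below the range start, so the search must go left.
 * The addresses in this sketch are arbitrary examples.
 */
#if 0 /* illustrative sketch only */
static void pgmPhysExampleSignBitTrick(void)
{
    RTGCPHYS const GCPhysRangeStart = UINT64_C(0x00000000fee00000);
    RTGCPHYS const GCPhysBelow      = UINT64_C(0x00000000fed00000);
    RTGCPHYS const GCPhysInside     = GCPhysRangeStart + PAGE_SIZE;

    Assert( RTGCPHYS_IS_NEGATIVE(GCPhysBelow  - GCPhysRangeStart)); /* below the start -> go left */
    Assert(!RTGCPHYS_IS_NEGATIVE(GCPhysInside - GCPhysRangeStart)); /* at/above the start -> compare against cb or go right */
}
#endif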
181
182/**
183 * Slow worker for pgmPhysGetRange.
184 *
185 * @copydoc pgmPhysGetRange
186 */
187PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
188{
189 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
190
191 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
192 while (pRam)
193 {
194 RTGCPHYS off = GCPhys - pRam->GCPhys;
195 if (off < pRam->cb)
196 {
197 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
198 return pRam;
199 }
200 if (RTGCPHYS_IS_NEGATIVE(off))
201 pRam = pRam->CTX_SUFF(pLeft);
202 else
203 pRam = pRam->CTX_SUFF(pRight);
204 }
205 return NULL;
206}
207
208
209/**
210 * Slow worker for pgmPhysGetRangeAtOrAbove.
211 *
212 * @copydoc pgmPhysGetRangeAtOrAbove
213 */
214PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
215{
216 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
217
218 PPGMRAMRANGE pLastLeft = NULL;
219 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
220 while (pRam)
221 {
222 RTGCPHYS off = GCPhys - pRam->GCPhys;
223 if (off < pRam->cb)
224 {
225 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
226 return pRam;
227 }
228 if (RTGCPHYS_IS_NEGATIVE(off))
229 {
230 pLastLeft = pRam;
231 pRam = pRam->CTX_SUFF(pLeft);
232 }
233 else
234 pRam = pRam->CTX_SUFF(pRight);
235 }
236 return pLastLeft;
237}
238
239
240/**
241 * Slow worker for pgmPhysGetPage.
242 *
243 * @copydoc pgmPhysGetPage
244 */
245PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
246{
247 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
248
249 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
250 while (pRam)
251 {
252 RTGCPHYS off = GCPhys - pRam->GCPhys;
253 if (off < pRam->cb)
254 {
255 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
256 return &pRam->aPages[off >> PAGE_SHIFT];
257 }
258
259 if (RTGCPHYS_IS_NEGATIVE(off))
260 pRam = pRam->CTX_SUFF(pLeft);
261 else
262 pRam = pRam->CTX_SUFF(pRight);
263 }
264 return NULL;
265}
266
267
268/**
269 * Slow worker for pgmPhysGetPageEx.
270 *
271 * @copydoc pgmPhysGetPageEx
272 */
273int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
274{
275 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
276
277 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
278 while (pRam)
279 {
280 RTGCPHYS off = GCPhys - pRam->GCPhys;
281 if (off < pRam->cb)
282 {
283 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
284 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
285 return VINF_SUCCESS;
286 }
287
288 if (RTGCPHYS_IS_NEGATIVE(off))
289 pRam = pRam->CTX_SUFF(pLeft);
290 else
291 pRam = pRam->CTX_SUFF(pRight);
292 }
293
294 *ppPage = NULL;
295 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
296}
297
298
299/**
300 * Slow worker for pgmPhysGetPageAndRangeEx.
301 *
302 * @copydoc pgmPhysGetPageAndRangeEx
303 */
304int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
305{
306 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
307
308 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
309 while (pRam)
310 {
311 RTGCPHYS off = GCPhys - pRam->GCPhys;
312 if (off < pRam->cb)
313 {
314 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
315 *ppRam = pRam;
316 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
317 return VINF_SUCCESS;
318 }
319
320 if (RTGCPHYS_IS_NEGATIVE(off))
321 pRam = pRam->CTX_SUFF(pLeft);
322 else
323 pRam = pRam->CTX_SUFF(pRight);
324 }
325
326 *ppRam = NULL;
327 *ppPage = NULL;
328 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
329}
330
331
332/**
333 * Checks if Address Gate 20 is enabled or not.
334 *
335 * @returns true if enabled.
336 * @returns false if disabled.
337 * @param pVCpu Pointer to the VMCPU.
338 */
339VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
340{
341 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
342 return pVCpu->pgm.s.fA20Enabled;
343}
344
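/*
 * Illustration (assumptions labelled, not part of the original sources): with the
 * A20 gate disabled, physical address bit 20 is forced to zero before any lookup,
 * so 0x100000 aliases 0x000000 just like on an 8086. The helper below only shows
 * the masking idea; its name and the explicit test are made up for this sketch.
 */
#if 0 /* illustrative sketch only */
static RTGCPHYS pgmPhysExampleApplyA20(PVMCPU pVCpu, RTGCPHYS GCPhys)
{
    if (!PGMPhysIsA20Enabled(pVCpu))
        GCPhys &= ~(RTGCPHYS)RT_BIT_32(20);   /* clear address bit 20 */
    return GCPhys;
}
#endif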
345
346/**
347 * Validates a GC physical address.
348 *
349 * @returns true if valid.
350 * @returns false if invalid.
351 * @param pVM Pointer to the VM.
352 * @param GCPhys The physical address to validate.
353 */
354VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
355{
356 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
357 return pPage != NULL;
358}
359
360
361/**
362 * Checks if a GC physical address is a normal page,
363 * i.e. not ROM, MMIO or reserved.
364 *
365 * @returns true if normal.
366 * @returns false if invalid, ROM, MMIO or reserved page.
367 * @param pVM Pointer to the VM.
368 * @param GCPhys The physical address to check.
369 */
370VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
371{
372 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
373 return pPage
374 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
375}
376
377
378/**
379 * Converts a GC physical address to a HC physical address.
380 *
381 * @returns VINF_SUCCESS on success.
382 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
383 * page but has no physical backing.
384 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
385 * GC physical address.
386 *
387 * @param pVM Pointer to the VM.
388 * @param GCPhys The GC physical address to convert.
389 * @param pHCPhys Where to store the HC physical address on success.
390 */
391VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
392{
393 pgmLock(pVM);
394 PPGMPAGE pPage;
395 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
396 if (RT_SUCCESS(rc))
397 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
398 pgmUnlock(pVM);
399 return rc;
400}
401
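/*
 * Usage sketch (not from the original sources): translating a guest physical
 * address to its host physical counterpart while keeping the page offset. The
 * address 0x1234 is an arbitrary example and error handling is reduced to logging.
 */
#if 0 /* illustrative sketch only */
static void pgmPhysExampleGCPhys2HCPhys(PVM pVM)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, UINT64_C(0x1234), &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("GCPhys %#x -> HCPhys %RHp (low 12 bits, here 0x234, are carried over)\n", 0x1234, HCPhys));
    else
        Log(("no backing: %Rrc\n", rc)); /* invalid address or reserved page */
}
#endif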
402
403/**
404 * Invalidates all page mapping TLBs.
405 *
406 * @param pVM Pointer to the VM.
407 */
408void pgmPhysInvalidatePageMapTLB(PVM pVM)
409{
410 pgmLock(pVM);
411 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
412
413 /* Clear the shared R0/R3 TLB completely. */
414 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
415 {
416 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
417 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
418 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
419 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
420 }
421
422 /** @todo clear the RC TLB whenever we add it. */
423
424 pgmUnlock(pVM);
425}
426
427
428/**
429 * Invalidates a page mapping TLB entry
430 *
431 * @param pVM Pointer to the VM.
432 * @param GCPhys GCPhys entry to flush
433 */
434void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
435{
436 PGM_LOCK_ASSERT_OWNER(pVM);
437
438 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
439
440#ifdef IN_RC
441 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
442 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
443 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
444 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
445 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
446#else
447 /* Clear the shared R0/R3 TLB entry. */
448 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
449 pTlbe->GCPhys = NIL_RTGCPHYS;
450 pTlbe->pPage = 0;
451 pTlbe->pMap = 0;
452 pTlbe->pv = 0;
453#endif
454
455 /** @todo clear the RC TLB whenever we add it. */
456}
457
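/*
 * Illustration (generic sketch with made-up names, not VirtualBox code): the page
 * mapping TLB above is a small direct-mapped cache, so flushing a single entry is
 * merely resetting the one slot the address hashes to, exactly what the routine
 * above does via PGM_PAGEMAPTLB_IDX.
 */
#if 0 /* illustrative sketch only */
typedef struct EXAMPLETLBE { RTGCPHYS GCPhys; void *pv; } EXAMPLETLBE;
#define EXAMPLE_TLB_ENTRIES 64  /* must be a power of two */

static void exampleTlbInvalidateEntry(EXAMPLETLBE *paEntries, RTGCPHYS GCPhys)
{
    unsigned const idx = (unsigned)(GCPhys >> PAGE_SHIFT) & (EXAMPLE_TLB_ENTRIES - 1);
    paEntries[idx].GCPhys = NIL_RTGCPHYS;  /* will never match a lookup again */
    paEntries[idx].pv     = NULL;
}
#endif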
458/**
459 * Makes sure that there is at least one handy page ready for use.
460 *
461 * This will also take the appropriate actions when reaching water-marks.
462 *
463 * @returns VBox status code.
464 * @retval VINF_SUCCESS on success.
465 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
466 *
467 * @param pVM Pointer to the VM.
468 *
469 * @remarks Must be called from within the PGM critical section. It may
470 * nip back to ring-3/0 in some cases.
471 */
472static int pgmPhysEnsureHandyPage(PVM pVM)
473{
474 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
475
476 /*
477 * Do we need to do anything special?
478 */
479#ifdef IN_RING3
480 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
481#else
482 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
483#endif
484 {
485 /*
486 * Allocate pages only if we're out of them, or in ring-3, almost out.
487 */
488#ifdef IN_RING3
489 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
490#else
491 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
492#endif
493 {
494 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
495 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
496#ifdef IN_RING3
497 int rc = PGMR3PhysAllocateHandyPages(pVM);
498#else
499 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
500#endif
501 if (RT_UNLIKELY(rc != VINF_SUCCESS))
502 {
503 if (RT_FAILURE(rc))
504 return rc;
505 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
506 if (!pVM->pgm.s.cHandyPages)
507 {
508 LogRel(("PGM: no more handy pages!\n"));
509 return VERR_EM_NO_MEMORY;
510 }
511 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
512 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
513#ifdef IN_RING3
514# ifdef VBOX_WITH_REM
515 REMR3NotifyFF(pVM);
516# endif
517#else
518 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
519#endif
520 }
521 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
522 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
523 ("%u\n", pVM->pgm.s.cHandyPages),
524 VERR_PGM_HANDY_PAGE_IPE);
525 }
526 else
527 {
528 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
529 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
530#ifndef IN_RING3
531 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
532 {
533 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
534 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
535 }
536#endif
537 }
538 }
539
540 return VINF_SUCCESS;
541}
542
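/*
 * Illustration (generic sketch, every name and threshold below is made up): the
 * watermark pattern used above - request more pages well before the pool is empty
 * and refill once a lower mark is hit, so ring-0/raw-mode code can still bail out
 * to ring-3 gracefully instead of failing hard.
 */
#if 0 /* illustrative sketch only */
typedef struct EXAMPLEPOOL
{
    uint32_t cFree;             /* pages currently available */
    uint32_t cAllocWater;       /* refill when at or below this */
    uint32_t cSetFlagWater;     /* ask for a refill when at or below this */
    bool     fNeedRefill;       /* mirrors the role of VM_FF_PGM_NEED_HANDY_PAGES */
} EXAMPLEPOOL;

static int examplePoolEnsureOne(EXAMPLEPOOL *pPool, int (*pfnRefill)(EXAMPLEPOOL *))
{
    if (pPool->cFree <= pPool->cSetFlagWater)
        pPool->fNeedRefill = true;
    if (pPool->cFree <= pPool->cAllocWater)
    {
        int rc = pfnRefill(pPool);      /* may fail when the host is short on memory */
        if (rc != 0 && pPool->cFree == 0)
            return rc;                  /* comparable to VERR_EM_NO_MEMORY above */
    }
    return 0;                           /* the caller may now take one page */
}
#endif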
543
544/**
545 * Replace a zero or shared page with a new page that we can write to.
546 *
547 * @returns The following VBox status codes.
548 * @retval VINF_SUCCESS on success, pPage is modified.
549 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
550 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
551 *
552 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
553 *
554 * @param pVM Pointer to the VM.
555 * @param pPage The physical page tracking structure. This will
556 * be modified on success.
557 * @param GCPhys The address of the page.
558 *
559 * @remarks Must be called from within the PGM critical section. It may
560 * nip back to ring-3/0 in some cases.
561 *
562 * @remarks This function shouldn't really fail, however if it does
563 * it probably means we've screwed up the size of handy pages and/or
564 * the low-water mark. Or, that some device I/O is causing a lot of
565 * pages to be allocated while the host is in a low-memory
566 * condition. This latter should be handled elsewhere and in a more
567 * controlled manner, it's on the @bugref{3170} todo list...
568 */
569int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
570{
571 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
572
573 /*
574 * Prereqs.
575 */
576 PGM_LOCK_ASSERT_OWNER(pVM);
577 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
578 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
579
580# ifdef PGM_WITH_LARGE_PAGES
581 /*
582 * Try allocate a large page if applicable.
583 */
584 if ( PGMIsUsingLargePages(pVM)
585 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
586 {
587 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
588 PPGMPAGE pBasePage;
589
590 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
591 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
592 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
593 {
594 rc = pgmPhysAllocLargePage(pVM, GCPhys);
595 if (rc == VINF_SUCCESS)
596 return rc;
597 }
598 /* Mark the base as type page table, so we don't check over and over again. */
599 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
600
601 /* fall back to 4KB pages. */
602 }
603# endif
604
605 /*
606 * Flush any shadow page table mappings of the page.
607 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
608 */
609 bool fFlushTLBs = false;
610 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
611 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
612
613 /*
614 * Ensure that we've got a page handy, take it and use it.
615 */
616 int rc2 = pgmPhysEnsureHandyPage(pVM);
617 if (RT_FAILURE(rc2))
618 {
619 if (fFlushTLBs)
620 PGM_INVL_ALL_VCPU_TLBS(pVM);
621 Assert(rc2 == VERR_EM_NO_MEMORY);
622 return rc2;
623 }
624 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
625 PGM_LOCK_ASSERT_OWNER(pVM);
626 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
627 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
628
629 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
630 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
631 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
632 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
633 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
634 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
635
636 /*
637 * There are one or two actions to be taken the next time we allocate handy pages:
638 * - Tell the GMM (global memory manager) what the page is being used for.
639 * (Speeds up replacement operations - sharing and defragmenting.)
640 * - If the current backing is shared, it must be freed.
641 */
642 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
643 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
644
645 void const *pvSharedPage = NULL;
646 if (PGM_PAGE_IS_SHARED(pPage))
647 {
648 /* Mark this shared page for freeing/dereferencing. */
649 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
650 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
651
652 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
653 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
654 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
655 pVM->pgm.s.cSharedPages--;
656
657 /* Grab the address of the page so we can make a copy later on. (safe) */
658 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
659 AssertRC(rc);
660 }
661 else
662 {
663 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
664 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
665 pVM->pgm.s.cZeroPages--;
666 }
667
668 /*
669 * Do the PGMPAGE modifications.
670 */
671 pVM->pgm.s.cPrivatePages++;
672 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
673 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
674 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
675 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
676 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
677
678 /* Copy the shared page contents to the replacement page. */
679 if (pvSharedPage)
680 {
681 /* Get the virtual address of the new page. */
682 PGMPAGEMAPLOCK PgMpLck;
683 void *pvNewPage;
684 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
685 if (RT_SUCCESS(rc))
686 {
687 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
688 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
689 }
690 }
691
692 if ( fFlushTLBs
693 && rc != VINF_PGM_GCPHYS_ALIASED)
694 PGM_INVL_ALL_VCPU_TLBS(pVM);
695 return rc;
696}
697
698#ifdef PGM_WITH_LARGE_PAGES
699
700/**
701 * Replace a 2 MB range of zero pages with new pages that we can write to.
702 *
703 * @returns The following VBox status codes.
704 * @retval VINF_SUCCESS on success, pPage is modified.
705 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
706 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
707 *
708 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
709 *
710 * @param pVM Pointer to the VM.
711 * @param GCPhys The address of the page.
712 *
713 * @remarks Must be called from within the PGM critical section. It may
714 * nip back to ring-3/0 in some cases.
715 */
716int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
717{
718 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
719 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
720
721 /*
722 * Prereqs.
723 */
724 PGM_LOCK_ASSERT_OWNER(pVM);
725 Assert(PGMIsUsingLargePages(pVM));
726
727 PPGMPAGE pFirstPage;
728 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
729 if ( RT_SUCCESS(rc)
730 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
731 {
732 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
733
734 /* Don't call this function for already allocated pages. */
735 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
736
737 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
738 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
739 {
740 /* Lazy approach: check all pages in the 2 MB range.
741 * The whole range must be ram and unallocated. */
742 GCPhys = GCPhysBase;
743 unsigned iPage;
744 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
745 {
746 PPGMPAGE pSubPage;
747 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
748 if ( RT_FAILURE(rc)
749 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
750 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
751 {
752 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
753 break;
754 }
755 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
756 GCPhys += PAGE_SIZE;
757 }
758 if (iPage != _2M/PAGE_SIZE)
759 {
760 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
761 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
762 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
763 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
764 }
765
766 /*
767 * Do the allocation.
768 */
769# ifdef IN_RING3
770 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
771# else
772 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
773# endif
774 if (RT_SUCCESS(rc))
775 {
776 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
777 pVM->pgm.s.cLargePages++;
778 return VINF_SUCCESS;
779 }
780
781 /* If we fail once, it most likely means the host's memory is too
782 fragmented; don't bother trying again. */
783 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
784 PGMSetLargePageUsage(pVM, false);
785 return rc;
786 }
787 }
788 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
789}
790
791
792/**
793 * Recheck the entire 2 MB range to see if we can use it again as a large page.
794 *
795 * @returns The following VBox status codes.
796 * @retval VINF_SUCCESS on success, the large page can be used again
797 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
798 *
799 * @param pVM Pointer to the VM.
800 * @param GCPhys The address of the page.
801 * @param pLargePage Page structure of the base page
802 */
803int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
804{
805 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
806
807 GCPhys &= X86_PDE2M_PAE_PG_MASK;
808
809 /* Check the base page. */
810 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
811 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
812 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
813 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
814 {
815 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
816 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
817 }
818
819 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
820 /* Check all remaining pages in the 2 MB range. */
821 unsigned i;
822 GCPhys += PAGE_SIZE;
823 for (i = 1; i < _2M/PAGE_SIZE; i++)
824 {
825 PPGMPAGE pPage;
826 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
827 AssertRCBreak(rc);
828
829 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
830 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
831 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
832 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
833 {
834 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
835 break;
836 }
837
838 GCPhys += PAGE_SIZE;
839 }
840 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
841
842 if (i == _2M/PAGE_SIZE)
843 {
844 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
845 pVM->pgm.s.cLargePagesDisabled--;
846 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
847 return VINF_SUCCESS;
848 }
849
850 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
851}
852
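/*
 * Arithmetic sketch (illustration only, the function name is made up): how a 2 MB
 * large-page candidate splits into its aligned base and 512 constituent 4 KB pages,
 * mirroring the loops in pgmPhysAllocLargePage and pgmPhysRecheckLargePage above.
 */
# if 0 /* illustrative sketch only */
static void pgmPhysExampleLargePageMath(RTGCPHYS GCPhys)
{
    RTGCPHYS const GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;     /* 2 MB aligned base */
    uint32_t const cSubPages  = _2M / PAGE_SIZE;                    /* 512 small pages */
    uint32_t const iSubPage   = (uint32_t)((GCPhys - GCPhysBase) >> PAGE_SHIFT);
    Assert(iSubPage < cSubPages);
    Log(("GCPhys=%RGp -> base %RGp, page %u of %u\n", GCPhys, GCPhysBase, iSubPage, cSubPages));
}
# endif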
853#endif /* PGM_WITH_LARGE_PAGES */
854
855/**
856 * Deal with a write monitored page.
857 *
860 * @param pVM Pointer to the VM.
861 * @param pPage The physical page tracking structure.
862 *
863 * @remarks Called from within the PGM critical section.
864 */
865void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
866{
867 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
868 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
869 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
870 Assert(pVM->pgm.s.cMonitoredPages > 0);
871 pVM->pgm.s.cMonitoredPages--;
872 pVM->pgm.s.cWrittenToPages++;
873}
874
875
876/**
877 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
878 *
879 * @returns VBox strict status code.
880 * @retval VINF_SUCCESS on success.
881 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
882 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
883 *
884 * @param pVM Pointer to the VM.
885 * @param pPage The physical page tracking structure.
886 * @param GCPhys The address of the page.
887 *
888 * @remarks Called from within the PGM critical section.
889 */
890int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
891{
892 PGM_LOCK_ASSERT_OWNER(pVM);
893 switch (PGM_PAGE_GET_STATE(pPage))
894 {
895 case PGM_PAGE_STATE_WRITE_MONITORED:
896 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
897 /* fall thru */
898 default: /* to shut up GCC */
899 case PGM_PAGE_STATE_ALLOCATED:
900 return VINF_SUCCESS;
901
902 /*
903 * Zero pages can be dummy pages for MMIO or reserved memory,
904 * so we need to check the flags before joining cause with
905 * shared page replacement.
906 */
907 case PGM_PAGE_STATE_ZERO:
908 if (PGM_PAGE_IS_MMIO(pPage))
909 return VERR_PGM_PHYS_PAGE_RESERVED;
910 /* fall thru */
911 case PGM_PAGE_STATE_SHARED:
912 return pgmPhysAllocPage(pVM, pPage, GCPhys);
913
914 /* Not allowed to write to ballooned pages. */
915 case PGM_PAGE_STATE_BALLOONED:
916 return VERR_PGM_PHYS_PAGE_BALLOONED;
917 }
918}
919
920
921/**
922 * Internal usage: Map the page specified by its GMM ID.
923 *
924 * This is similar to pgmPhysPageMap.
925 *
926 * @returns VBox status code.
927 *
928 * @param pVM Pointer to the VM.
929 * @param idPage The Page ID.
930 * @param HCPhys The physical address (for RC).
931 * @param ppv Where to store the mapping address.
932 *
933 * @remarks Called from within the PGM critical section. The mapping is only
934 * valid while you are inside this section.
935 */
936int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
937{
938 /*
939 * Validation.
940 */
941 PGM_LOCK_ASSERT_OWNER(pVM);
942 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
943 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
944 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
945
946#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
947 /*
948 * Map it by HCPhys.
949 */
950 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
951
952#else
953 /*
954 * Find/make Chunk TLB entry for the mapping chunk.
955 */
956 PPGMCHUNKR3MAP pMap;
957 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
958 if (pTlbe->idChunk == idChunk)
959 {
960 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
961 pMap = pTlbe->pChunk;
962 }
963 else
964 {
965 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
966
967 /*
968 * Find the chunk, map it if necessary.
969 */
970 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
971 if (pMap)
972 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
973 else
974 {
975# ifdef IN_RING0
976 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
977 AssertRCReturn(rc, rc);
978 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
979 Assert(pMap);
980# else
981 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
982 if (RT_FAILURE(rc))
983 return rc;
984# endif
985 }
986
987 /*
988 * Enter it into the Chunk TLB.
989 */
990 pTlbe->idChunk = idChunk;
991 pTlbe->pChunk = pMap;
992 }
993
994 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
995 return VINF_SUCCESS;
996#endif
997}
998
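/*
 * Illustration (sketch only, function name made up): a GMM page id packs the chunk
 * id in the upper bits and the page index within that chunk in the lower bits,
 * which is all the routine above relies on when picking a chunk TLB slot and
 * computing the final mapping address.
 */
#if 0 /* illustrative sketch only */
static void pgmPhysExampleDecodePageId(uint32_t idPage)
{
    uint32_t const idChunk = idPage >> GMM_CHUNKID_SHIFT;       /* which mapping chunk */
    uint32_t const iPage   = idPage & GMM_PAGEID_IDX_MASK;      /* page within that chunk */
    Log(("idPage=%#x -> chunk %#x, page index %u\n", idPage, idChunk, iPage));
}
#endif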
999
1000/**
1001 * Maps a page into the current virtual address space so it can be accessed.
1002 *
1003 * @returns VBox status code.
1004 * @retval VINF_SUCCESS on success.
1005 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1006 *
1007 * @param pVM Pointer to the VM.
1008 * @param pPage The physical page tracking structure.
1009 * @param GCPhys The address of the page.
1010 * @param ppMap Where to store the address of the mapping tracking structure.
1011 * @param ppv Where to store the mapping address of the page. The page
1012 * offset is masked off!
1013 *
1014 * @remarks Called from within the PGM critical section.
1015 */
1016static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1017{
1018 PGM_LOCK_ASSERT_OWNER(pVM);
1019 NOREF(GCPhys);
1020
1021#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1022 /*
1023 * Just some sketchy GC/R0-darwin code.
1024 */
1025 *ppMap = NULL;
1026 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1027 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1028 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1029 return VINF_SUCCESS;
1030
1031#else /* IN_RING3 || IN_RING0 */
1032
1033
1034 /*
1035 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1036 */
1037 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1038 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1039 {
1040 /* Decode the page id to a page in a MMIO2 ram range. */
1041 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1042 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1043 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1044 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1045 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1046 pPage->s.idPage, pPage->s.uStateY),
1047 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1048 PPGMMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1049 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1050 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1051 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1052 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
1053 *ppMap = NULL;
1054 return VINF_SUCCESS;
1055 }
1056
1057 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1058 if (idChunk == NIL_GMM_CHUNKID)
1059 {
1060 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1061 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1062 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1063 {
1064 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1065 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1066 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1067 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1068 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1069 }
1070 else
1071 {
1072 static uint8_t s_abPlayItSafe[0x1000*2]; /* I don't dare return the zero page at the moment. */
1073 *ppv = (uint8_t *)((uintptr_t)&s_abPlayItSafe[0x1000] & ~(uintptr_t)0xfff);
1074 }
1075 *ppMap = NULL;
1076 return VINF_SUCCESS;
1077 }
1078
1079 /*
1080 * Find/make Chunk TLB entry for the mapping chunk.
1081 */
1082 PPGMCHUNKR3MAP pMap;
1083 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1084 if (pTlbe->idChunk == idChunk)
1085 {
1086 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1087 pMap = pTlbe->pChunk;
1088 AssertPtr(pMap->pv);
1089 }
1090 else
1091 {
1092 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1093
1094 /*
1095 * Find the chunk, map it if necessary.
1096 */
1097 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1098 if (pMap)
1099 {
1100 AssertPtr(pMap->pv);
1101 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1102 }
1103 else
1104 {
1105#ifdef IN_RING0
1106 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1107 AssertRCReturn(rc, rc);
1108 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1109 Assert(pMap);
1110#else
1111 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1112 if (RT_FAILURE(rc))
1113 return rc;
1114#endif
1115 AssertPtr(pMap->pv);
1116 }
1117
1118 /*
1119 * Enter it into the Chunk TLB.
1120 */
1121 pTlbe->idChunk = idChunk;
1122 pTlbe->pChunk = pMap;
1123 }
1124
1125 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1126 *ppMap = pMap;
1127 return VINF_SUCCESS;
1128#endif /* IN_RING3 */
1129}
1130
1131
1132/**
1133 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1134 *
1135 * This is typically used in paths where we cannot use the TLB methods (like ROM
1136 * pages) or where there is no point in using them since we won't get many hits.
1137 *
1138 * @returns VBox strict status code.
1139 * @retval VINF_SUCCESS on success.
1140 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1141 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1142 *
1143 * @param pVM Pointer to the VM.
1144 * @param pPage The physical page tracking structure.
1145 * @param GCPhys The address of the page.
1146 * @param ppv Where to store the mapping address of the page. The page
1147 * offset is masked off!
1148 *
1149 * @remarks Called from within the PGM critical section. The mapping is only
1150 * valid while you are inside this section.
1151 */
1152int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1153{
1154 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1155 if (RT_SUCCESS(rc))
1156 {
1157 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1158 PPGMPAGEMAP pMapIgnore;
1159 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1160 if (RT_FAILURE(rc2)) /* preserve rc */
1161 rc = rc2;
1162 }
1163 return rc;
1164}
1165
1166
1167/**
1168 * Maps a page into the current virtual address space so it can be accessed for
1169 * both writing and reading.
1170 *
1171 * This is typically used in paths where we cannot use the TLB methods (like ROM
1172 * pages) or where there is no point in using them since we won't get many hits.
1173 *
1174 * @returns VBox status code.
1175 * @retval VINF_SUCCESS on success.
1176 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1177 *
1178 * @param pVM Pointer to the VM.
1179 * @param pPage The physical page tracking structure. Must be in the
1180 * allocated state.
1181 * @param GCPhys The address of the page.
1182 * @param ppv Where to store the mapping address of the page. The page
1183 * offset is masked off!
1184 *
1185 * @remarks Called from within the PGM critical section. The mapping is only
1186 * valid while you are inside this section.
1187 */
1188int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1189{
1190 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1191 PPGMPAGEMAP pMapIgnore;
1192 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1193}
1194
1195
1196/**
1197 * Maps a page into the current virtual address space so it can be accessed for
1198 * reading.
1199 *
1200 * This is typically used in paths where we cannot use the TLB methods (like ROM
1201 * pages) or where there is no point in using them since we won't get many hits.
1202 *
1203 * @returns VBox status code.
1204 * @retval VINF_SUCCESS on success.
1205 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1206 *
1207 * @param pVM Pointer to the VM.
1208 * @param pPage The physical page tracking structure.
1209 * @param GCPhys The address of the page.
1210 * @param ppv Where to store the mapping address of the page. The page
1211 * offset is masked off!
1212 *
1213 * @remarks Called from within the PGM critical section. The mapping is only
1214 * valid while you are inside this section.
1215 */
1216int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1217{
1218 PPGMPAGEMAP pMapIgnore;
1219 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1220}
1221
1222#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1223
1224/**
1225 * Load a guest page into the ring-3 physical TLB.
1226 *
1227 * @returns VBox status code.
1228 * @retval VINF_SUCCESS on success
1229 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1230 * @param pVM Pointer to the VM.
1231 * @param GCPhys The guest physical address in question.
1232 */
1233int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
1234{
1235 PGM_LOCK_ASSERT_OWNER(pVM);
1236
1237 /*
1238 * Find the ram range and page and hand it over to the with-page function.
1239 * 99.8% of requests are expected to be in the first range.
1240 */
1241 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1242 if (!pPage)
1243 {
1244 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1245 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1246 }
1247
1248 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1249}
1250
1251
1252/**
1253 * Load a guest page into the ring-3 physical TLB.
1254 *
1255 * @returns VBox status code.
1256 * @retval VINF_SUCCESS on success
1257 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1258 *
1259 * @param pVM Pointer to the VM.
1260 * @param pPage Pointer to the PGMPAGE structure corresponding to
1261 * GCPhys.
1262 * @param GCPhys The guest physical address in question.
1263 */
1264int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1265{
1266 PGM_LOCK_ASSERT_OWNER(pVM);
1267 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1268
1269 /*
1270 * Map the page.
1271 * Make a special case for the zero page as it is kind of special.
1272 */
1273 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1274 if ( !PGM_PAGE_IS_ZERO(pPage)
1275 && !PGM_PAGE_IS_BALLOONED(pPage))
1276 {
1277 void *pv;
1278 PPGMPAGEMAP pMap;
1279 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1280 if (RT_FAILURE(rc))
1281 return rc;
1282 pTlbe->pMap = pMap;
1283 pTlbe->pv = pv;
1284 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1285 }
1286 else
1287 {
1288 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1289 pTlbe->pMap = NULL;
1290 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1291 }
1292#ifdef PGM_WITH_PHYS_TLB
1293 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1294 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1295 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1296 else
1297 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1298#else
1299 pTlbe->GCPhys = NIL_RTGCPHYS;
1300#endif
1301 pTlbe->pPage = pPage;
1302 return VINF_SUCCESS;
1303}
1304
1305#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1306
1307/**
1308 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1309 * own the PGM lock and therefore not need to lock the mapped page.
1310 *
1311 * @returns VBox status code.
1312 * @retval VINF_SUCCESS on success.
1313 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1314 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1315 *
1316 * @param pVM Pointer to the VM.
1317 * @param GCPhys The guest physical address of the page that should be mapped.
1318 * @param pPage Pointer to the PGMPAGE structure for the page.
1319 * @param ppv Where to store the address corresponding to GCPhys.
1320 *
1321 * @internal
1322 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1323 */
1324int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1325{
1326 int rc;
1327 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1328 PGM_LOCK_ASSERT_OWNER(pVM);
1329 pVM->pgm.s.cDeprecatedPageLocks++;
1330
1331 /*
1332 * Make sure the page is writable.
1333 */
1334 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1335 {
1336 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1337 if (RT_FAILURE(rc))
1338 return rc;
1339 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1340 }
1341 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1342
1343 /*
1344 * Get the mapping address.
1345 */
1346#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1347 void *pv;
1348 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1349 PGM_PAGE_GET_HCPHYS(pPage),
1350 &pv
1351 RTLOG_COMMA_SRC_POS);
1352 if (RT_FAILURE(rc))
1353 return rc;
1354 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1355#else
1356 PPGMPAGEMAPTLBE pTlbe;
1357 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1358 if (RT_FAILURE(rc))
1359 return rc;
1360 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1361#endif
1362 return VINF_SUCCESS;
1363}
1364
1365#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1366
1367/**
1368 * Locks a page mapping for writing.
1369 *
1370 * @param pVM Pointer to the VM.
1371 * @param pPage The page.
1372 * @param pTlbe The mapping TLB entry for the page.
1373 * @param pLock The lock structure (output).
1374 */
1375DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1376{
1377 PPGMPAGEMAP pMap = pTlbe->pMap;
1378 if (pMap)
1379 pMap->cRefs++;
1380
1381 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1382 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1383 {
1384 if (cLocks == 0)
1385 pVM->pgm.s.cWriteLockedPages++;
1386 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1387 }
1388 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1389 {
1390 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1391 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1392 if (pMap)
1393 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1394 }
1395
1396 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1397 pLock->pvMap = pMap;
1398}
1399
1400/**
1401 * Locks a page mapping for reading.
1402 *
1403 * @param pVM Pointer to the VM.
1404 * @param pPage The page.
1405 * @param pTlbe The mapping TLB entry for the page.
1406 * @param pLock The lock structure (output).
1407 */
1408DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1409{
1410 PPGMPAGEMAP pMap = pTlbe->pMap;
1411 if (pMap)
1412 pMap->cRefs++;
1413
1414 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1415 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1416 {
1417 if (cLocks == 0)
1418 pVM->pgm.s.cReadLockedPages++;
1419 PGM_PAGE_INC_READ_LOCKS(pPage);
1420 }
1421 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1422 {
1423 PGM_PAGE_INC_READ_LOCKS(pPage);
1424 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1425 if (pMap)
1426 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1427 }
1428
1429 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1430 pLock->pvMap = pMap;
1431}
1432
1433#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1434
1435
1436/**
1437 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1438 * own the PGM lock and have access to the page structure.
1439 *
1440 * @returns VBox status code.
1441 * @retval VINF_SUCCESS on success.
1442 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1443 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1444 *
1445 * @param pVM Pointer to the VM.
1446 * @param GCPhys The guest physical address of the page that should be mapped.
1447 * @param pPage Pointer to the PGMPAGE structure for the page.
1448 * @param ppv Where to store the address corresponding to GCPhys.
1449 * @param pLock Where to store the lock information that
1450 * pgmPhysReleaseInternalPageMappingLock needs.
1451 *
1452 * @internal
1453 */
1454int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1455{
1456 int rc;
1457 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1458 PGM_LOCK_ASSERT_OWNER(pVM);
1459
1460 /*
1461 * Make sure the page is writable.
1462 */
1463 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1464 {
1465 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1466 if (RT_FAILURE(rc))
1467 return rc;
1468 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1469 }
1470 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1471
1472 /*
1473 * Do the job.
1474 */
1475#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1476 void *pv;
1477 PVMCPU pVCpu = VMMGetCpu(pVM);
1478 rc = pgmRZDynMapHCPageInlined(pVCpu,
1479 PGM_PAGE_GET_HCPHYS(pPage),
1480 &pv
1481 RTLOG_COMMA_SRC_POS);
1482 if (RT_FAILURE(rc))
1483 return rc;
1484 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1485 pLock->pvPage = pv;
1486 pLock->pVCpu = pVCpu;
1487
1488#else
1489 PPGMPAGEMAPTLBE pTlbe;
1490 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1491 if (RT_FAILURE(rc))
1492 return rc;
1493 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1494 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1495#endif
1496 return VINF_SUCCESS;
1497}
1498
1499
1500/**
1501 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1502 * own the PGM lock and have access to the page structure.
1503 *
1504 * @returns VBox status code.
1505 * @retval VINF_SUCCESS on success.
1506 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1507 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1508 *
1509 * @param pVM Pointer to the VM.
1510 * @param GCPhys The guest physical address of the page that should be mapped.
1511 * @param pPage Pointer to the PGMPAGE structure for the page.
1512 * @param ppv Where to store the address corresponding to GCPhys.
1513 * @param pLock Where to store the lock information that
1514 * pgmPhysReleaseInternalPageMappingLock needs.
1515 *
1516 * @internal
1517 */
1518int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1519{
1520 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1521 PGM_LOCK_ASSERT_OWNER(pVM);
1522 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1523
1524 /*
1525 * Do the job.
1526 */
1527#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1528 void *pv;
1529 PVMCPU pVCpu = VMMGetCpu(pVM);
1530 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1531 PGM_PAGE_GET_HCPHYS(pPage),
1532 &pv
1533 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1534 if (RT_FAILURE(rc))
1535 return rc;
1536 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1537 pLock->pvPage = pv;
1538 pLock->pVCpu = pVCpu;
1539
1540#else
1541 PPGMPAGEMAPTLBE pTlbe;
1542 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1543 if (RT_FAILURE(rc))
1544 return rc;
1545 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1546 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1547#endif
1548 return VINF_SUCCESS;
1549}
1550
1551
1552/**
1553 * Requests the mapping of a guest page into the current context.
1554 *
1555 * This API should only be used for a very short time, as it will consume scarce
1556 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1557 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1558 *
1559 * This API will assume your intention is to write to the page, and will
1560 * therefore replace shared and zero pages. If you do not intend to modify
1561 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1562 *
1563 * @returns VBox status code.
1564 * @retval VINF_SUCCESS on success.
1565 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1566 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1567 *
1568 * @param pVM Pointer to the VM.
1569 * @param GCPhys The guest physical address of the page that should be
1570 * mapped.
1571 * @param ppv Where to store the address corresponding to GCPhys.
1572 * @param pLock Where to store the lock information that
1573 * PGMPhysReleasePageMappingLock needs.
1574 *
1575 * @remarks The caller is responsible for dealing with access handlers.
1576 * @todo Add an informational return code for pages with access handlers?
1577 *
1578 * @remark Avoid calling this API from within critical sections (other than
1579 * the PGM one) because of the deadlock risk. External threads may
1580 * need to delegate jobs to the EMTs.
1581 * @remarks Only one page is mapped! Make no assumption about what's after or
1582 * before the returned page!
1583 * @thread Any thread.
1584 */
1585VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1586{
1587 int rc = pgmLock(pVM);
1588 AssertRCReturn(rc, rc);
1589
1590#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1591 /*
1592 * Find the page and make sure it's writable.
1593 */
1594 PPGMPAGE pPage;
1595 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1596 if (RT_SUCCESS(rc))
1597 {
1598 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1599 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1600 if (RT_SUCCESS(rc))
1601 {
1602 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1603
1604 PVMCPU pVCpu = VMMGetCpu(pVM);
1605 void *pv;
1606 rc = pgmRZDynMapHCPageInlined(pVCpu,
1607 PGM_PAGE_GET_HCPHYS(pPage),
1608 &pv
1609 RTLOG_COMMA_SRC_POS);
1610 if (RT_SUCCESS(rc))
1611 {
1612 AssertRCSuccess(rc);
1613
1614 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1615 *ppv = pv;
1616 pLock->pvPage = pv;
1617 pLock->pVCpu = pVCpu;
1618 }
1619 }
1620 }
1621
1622#else /* IN_RING3 || IN_RING0 */
1623 /*
1624 * Query the Physical TLB entry for the page (may fail).
1625 */
1626 PPGMPAGEMAPTLBE pTlbe;
1627 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1628 if (RT_SUCCESS(rc))
1629 {
1630 /*
1631 * If the page is shared, the zero page, or being write monitored
1632 * it must be converted to a page that's writable if possible.
1633 */
1634 PPGMPAGE pPage = pTlbe->pPage;
1635 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1636 {
1637 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1638 if (RT_SUCCESS(rc))
1639 {
1640 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1641 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1642 }
1643 }
1644 if (RT_SUCCESS(rc))
1645 {
1646 /*
1647 * Now, just perform the locking and calculate the return address.
1648 */
1649 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1650 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1651 }
1652 }
1653
1654#endif /* IN_RING3 || IN_RING0 */
1655 pgmUnlock(pVM);
1656 return rc;
1657}
1658
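/*
 * Usage sketch (not part of the original sources): mapping a guest page for a short
 * write and releasing the lock again as the comments above demand. The guest
 * address and the payload are made-up example values; real callers must also deal
 * with access handlers themselves.
 */
#if 0 /* illustrative sketch only */
static int pgmPhysExampleWriteViaMapping(PVM pVM)
{
    PGMPAGEMAPLOCK Lock;
    void          *pv;
    int rc = PGMPhysGCPhys2CCPtr(pVM, UINT64_C(0x2000), &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        memset(pv, 0xcc, 16);                       /* stay within this one page! */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release ASAP */
    }
    return rc;
}
#endif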
1659
1660/**
1661 * Requests the mapping of a guest page into the current context.
1662 *
1663 * This API should only be used for a very short term, as it will consume scarce
1664 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1665 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1666 *
1667 * @returns VBox status code.
1668 * @retval VINF_SUCCESS on success.
1669 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1670 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1671 *
1672 * @param pVM Pointer to the VM.
1673 * @param GCPhys The guest physical address of the page that should be
1674 * mapped.
1675 * @param ppv Where to store the address corresponding to GCPhys.
1676 * @param pLock Where to store the lock information that
1677 * PGMPhysReleasePageMappingLock needs.
1678 *
1679 * @remarks The caller is responsible for dealing with access handlers.
1680 * @todo Add an informational return code for pages with access handlers?
1681 *
1682 * @remarks Avoid calling this API from within critical sections (other than
1683 * the PGM one) because of the deadlock risk.
1684 * @remarks Only one page is mapped! Make no assumption about what's after or
1685 * before the returned page!
1686 * @thread Any thread.
1687 */
1688VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1689{
1690 int rc = pgmLock(pVM);
1691 AssertRCReturn(rc, rc);
1692
1693#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1694 /*
1695 * Find the page and make sure it's readable.
1696 */
1697 PPGMPAGE pPage;
1698 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1699 if (RT_SUCCESS(rc))
1700 {
1701 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1702 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1703 else
1704 {
1705 PVMCPU pVCpu = VMMGetCpu(pVM);
1706 void *pv;
1707 rc = pgmRZDynMapHCPageInlined(pVCpu,
1708 PGM_PAGE_GET_HCPHYS(pPage),
1709 &pv
1710 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1711 if (RT_SUCCESS(rc))
1712 {
1713 AssertRCSuccess(rc);
1714
1715 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1716 *ppv = pv;
1717 pLock->pvPage = pv;
1718 pLock->pVCpu = pVCpu;
1719 }
1720 }
1721 }
1722
1723#else /* IN_RING3 || IN_RING0 */
1724 /*
1725 * Query the Physical TLB entry for the page (may fail).
1726 */
1727 PPGMPAGEMAPTLBE pTlbe;
1728 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1729 if (RT_SUCCESS(rc))
1730 {
1731        /* MMIO pages don't have any readable backing. */
1732 PPGMPAGE pPage = pTlbe->pPage;
1733 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1734 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1735 else
1736 {
1737 /*
1738 * Now, just perform the locking and calculate the return address.
1739 */
1740 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1741 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1742 }
1743 }
1744
1745#endif /* IN_RING3 || IN_RING0 */
1746 pgmUnlock(pVM);
1747 return rc;
1748}
1749
1750
1751/**
1752 * Requests the mapping of a guest page given by virtual address into the current context.
1753 *
1754 * This API should only be used for a very short term, as it will consume
1755 * scarce resources (R0 and GC) in the mapping cache. When you're done
1756 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1757 *
1758 * This API will assume your intention is to write to the page, and will
1759 * therefore replace shared and zero pages. If you do not intend to modify
1760 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1761 *
1762 * @returns VBox status code.
1763 * @retval VINF_SUCCESS on success.
1764 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1765 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1766 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1767 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1768 *
1769 * @param pVCpu Pointer to the VMCPU.
1770 * @param GCPtr The guest virtual address of the page that should be mapped.
1771 * @param ppv Where to store the address corresponding to GCPtr.
1772 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1773 *
1774 * @remark Avoid calling this API from within critical sections (other than
1775 * the PGM one) because of the deadlock risk.
1776 * @thread EMT
1777 */
1778VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1779{
1780 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1781 RTGCPHYS GCPhys;
1782 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1783 if (RT_SUCCESS(rc))
1784 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1785 return rc;
1786}
1787
1788
1789/**
1790 * Requests the mapping of a guest page given by virtual address into the current context.
1791 *
1792 * This API should only be used for a very short term, as it will consume
1793 * scarce resources (R0 and GC) in the mapping cache. When you're done
1794 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1795 *
1796 * @returns VBox status code.
1797 * @retval VINF_SUCCESS on success.
1798 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1799 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1800 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1801 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1802 *
1803 * @param pVCpu Pointer to the VMCPU.
1804 * @param GCPtr The guest virtual address of the page that should be mapped.
1805 * @param ppv Where to store the address corresponding to GCPtr.
1806 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1807 *
1808 * @remark Avoid calling this API from within critical sections (other than
1809 * the PGM one) because of the deadlock risk.
1810 * @thread EMT
1811 */
1812VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1813{
1814 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1815 RTGCPHYS GCPhys;
1816 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1817 if (RT_SUCCESS(rc))
1818 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1819 return rc;
1820}
1821
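/*
 * Usage sketch (illustration only, not part of the build): reading a guest
 * dword by virtual address on the EMT.  The helper name is hypothetical; the
 * caller must make sure the read stays within the single mapped page.
 */
#if 0 /* usage sketch, not built */
static int pgmExamplePeekGuestU32(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t *pu32)
{
    PGMPAGEMAPLOCK Lock;
    void const    *pv;
    int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock); /* EMT only. */
    if (RT_SUCCESS(rc))
    {
        *pu32 = *(uint32_t const *)pv;
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
    }
    /* Failure may be VERR_PAGE_NOT_PRESENT, VERR_PAGE_TABLE_NOT_PRESENT or
       VERR_PGM_PHYS_PAGE_RESERVED; the caller decides how to react. */
    return rc;
}
#endif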
1822
1823/**
1824 * Release the mapping of a guest page.
1825 *
1826 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1827 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1828 *
1829 * @param pVM Pointer to the VM.
1830 * @param pLock The lock structure initialized by the mapping function.
1831 */
1832VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1833{
1834#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1835 Assert(pLock->pvPage != NULL);
1836 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1837 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1838 pLock->pVCpu = NULL;
1839 pLock->pvPage = NULL;
1840
1841#else
1842 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1843 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1844 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1845
1846 pLock->uPageAndType = 0;
1847 pLock->pvMap = NULL;
1848
1849 pgmLock(pVM);
1850 if (fWriteLock)
1851 {
1852 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1853 Assert(cLocks > 0);
1854 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1855 {
1856 if (cLocks == 1)
1857 {
1858 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1859 pVM->pgm.s.cWriteLockedPages--;
1860 }
1861 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1862 }
1863
1864 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1865 {
1866 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1867 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1868 Assert(pVM->pgm.s.cMonitoredPages > 0);
1869 pVM->pgm.s.cMonitoredPages--;
1870 pVM->pgm.s.cWrittenToPages++;
1871 }
1872 }
1873 else
1874 {
1875 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1876 Assert(cLocks > 0);
1877 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1878 {
1879 if (cLocks == 1)
1880 {
1881 Assert(pVM->pgm.s.cReadLockedPages > 0);
1882 pVM->pgm.s.cReadLockedPages--;
1883 }
1884 PGM_PAGE_DEC_READ_LOCKS(pPage);
1885 }
1886 }
1887
1888 if (pMap)
1889 {
1890 Assert(pMap->cRefs >= 1);
1891 pMap->cRefs--;
1892 }
1893 pgmUnlock(pVM);
1894#endif /* IN_RING3 */
1895}
1896
1897
1898/**
1899 * Release the internal mapping of a guest page.
1900 *
1901 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
1902 * pgmPhysGCPhys2CCPtrInternalReadOnly.
1903 *
1904 * @param pVM Pointer to the VM.
1905 * @param pLock The lock structure initialized by the mapping function.
1906 *
1907 * @remarks Caller must hold the PGM lock.
1908 */
1909void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1910{
1911 PGM_LOCK_ASSERT_OWNER(pVM);
1912 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
1913}
1914
1915
1916/**
1917 * Converts a GC physical address to a HC ring-3 pointer.
1918 *
1919 * @returns VINF_SUCCESS on success.
1920 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1921 * page but has no physical backing.
1922 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1923 * GC physical address.
1924 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1925 * a dynamic ram chunk boundary
1926 *
1927 * @param pVM Pointer to the VM.
1928 * @param GCPhys The GC physical address to convert.
1929 * @param pR3Ptr Where to store the R3 pointer on success.
1930 *
1931 * @deprecated Avoid when possible!
1932 */
1933int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1934{
1935/** @todo this is kind of hacky and needs some more work. */
1936#ifndef DEBUG_sandervl
1937 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1938#endif
1939
1940    Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
1941#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1942 NOREF(pVM); NOREF(pR3Ptr);
1943 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1944#else
1945 pgmLock(pVM);
1946
1947 PPGMRAMRANGE pRam;
1948 PPGMPAGE pPage;
1949 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1950 if (RT_SUCCESS(rc))
1951 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1952
1953 pgmUnlock(pVM);
1954 Assert(rc <= VINF_SUCCESS);
1955 return rc;
1956#endif
1957}
1958
1959#if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
1960
1961/**
1962 * Maps and locks a guest CR3 or PD (PAE) page.
1963 *
1964 * @returns VINF_SUCCESS on success.
1965 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1966 * page but has no physical backing.
1967 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1968 * GC physical address.
1969 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1970 * a dynamic ram chunk boundary
1971 *
1972 * @param pVM Pointer to the VM.
1973 * @param GCPhys The GC physical address to convert.
1974 * @param pR3Ptr Where to store the R3 pointer on success. This may or
1975 * may not be valid in ring-0 depending on the
1976 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
1977 *
1978 * @remarks The caller must own the PGM lock.
1979 */
1980int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1981{
1982
1983 PPGMRAMRANGE pRam;
1984 PPGMPAGE pPage;
1985 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1986 if (RT_SUCCESS(rc))
1987 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1988 Assert(rc <= VINF_SUCCESS);
1989 return rc;
1990}
1991
1992
1997
1998#endif
1999
2000/**
2001 * Converts a guest pointer to a GC physical address.
2002 *
2003 * This uses the current CR3/CR0/CR4 of the guest.
2004 *
2005 * @returns VBox status code.
2006 * @param pVCpu Pointer to the VMCPU.
2007 * @param GCPtr The guest pointer to convert.
2008 * @param pGCPhys Where to store the GC physical address.
2009 */
2010VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2011{
2012 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
2013 if (pGCPhys && RT_SUCCESS(rc))
2014 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2015 return rc;
2016}
2017
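/*
 * Usage sketch (illustration only, not part of the build): the translation
 * keeps the page offset, so the result can be fed straight into
 * PGMPhysRead / PGMPhysWrite.  The helper name is hypothetical.
 */
#if 0 /* usage sketch, not built */
static void pgmExampleLogGCPtrTranslation(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    RTGCPHYS GCPhys;
    int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
    if (RT_SUCCESS(rc))
        Log(("example: %RGv -> %RGp (offset %#x kept)\n",
             GCPtr, GCPhys, (unsigned)(GCPhys & PAGE_OFFSET_MASK)));
    else
        Log(("example: %RGv not mapped, rc=%Rrc\n", GCPtr, rc));
}
#endif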
2018
2019/**
2020 * Converts a guest pointer to a HC physical address.
2021 *
2022 * This uses the current CR3/CR0/CR4 of the guest.
2023 *
2024 * @returns VBox status code.
2025 * @param pVCpu Pointer to the VMCPU.
2026 * @param GCPtr The guest pointer to convert.
2027 * @param pHCPhys Where to store the HC physical address.
2028 */
2029VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2030{
2031 PVM pVM = pVCpu->CTX_SUFF(pVM);
2032 RTGCPHYS GCPhys;
2033 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2034 if (RT_SUCCESS(rc))
2035 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2036 return rc;
2037}
2038
2039
2040
2041#undef LOG_GROUP
2042#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2043
2044
2045#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2046/**
2047 * Cache PGMPhys memory access
2048 *
2049 * @param pVM Pointer to the VM.
2050 * @param pCache Cache structure pointer
2051 * @param GCPhys GC physical address
2052 * @param pbHC HC pointer corresponding to physical page
2053 *
2054 * @thread EMT.
2055 */
2056static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2057{
2058 uint32_t iCacheIndex;
2059
2060 Assert(VM_IS_EMT(pVM));
2061
2062 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2063 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2064
2065 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2066
2067 ASMBitSet(&pCache->aEntries, iCacheIndex);
2068
2069 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2070 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2071}
2072#endif /* IN_RING3 */
2073
2074
2075/**
2076 * Deals with reading from a page with one or more ALL access handlers.
2077 *
2078 * @returns VBox status code. Can be ignored in ring-3.
2079 * @retval VINF_SUCCESS.
2080 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2081 *
2082 * @param pVM Pointer to the VM.
2083 * @param pPage The page descriptor.
2084 * @param GCPhys The physical address to start reading at.
2085 * @param pvBuf Where to put the bits we read.
2086 * @param cb How much to read - less or equal to a page.
2087 * @param enmOrigin The origin of this call.
2088 */
2089static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb, PGMACCESSORIGIN enmOrigin)
2090{
2091 /*
2092     * The most frequent accesses here are MMIO and shadowed ROM.
2093     * The current code ASSUMES all these access handlers cover full pages!
2094 */
2095
2096 /*
2097 * Whatever we do we need the source page, map it first.
2098 */
2099 PGMPAGEMAPLOCK PgMpLck;
2100 const void *pvSrc = NULL;
2101 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2102 if (RT_FAILURE(rc))
2103 {
2104 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2105 GCPhys, pPage, rc));
2106 memset(pvBuf, 0xff, cb);
2107 return VINF_SUCCESS;
2108 }
2109 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2110
2111 /*
2112 * Deal with any physical handlers.
2113 */
2114 PVMCPU pVCpu = VMMGetCpu(pVM);
2115#ifdef IN_RING3
2116 PPGMPHYSHANDLER pPhys = NULL;
2117#endif
2118 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2119 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2120 {
2121#ifdef IN_RING3
2122 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2123 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2124 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2125 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2126 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2127
2128 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
2129 void *pvUser = pPhys->CTX_SUFF(pvUser);
2130
2131 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2132 STAM_PROFILE_START(&pPhys->Stat, h);
2133 PGM_LOCK_ASSERT_OWNER(pVM);
2134 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2135 pgmUnlock(pVM);
2136 rc = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, pvUser);
2137 pgmLock(pVM);
2138# ifdef VBOX_WITH_STATISTICS
2139 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2140 if (pPhys)
2141 STAM_PROFILE_STOP(&pPhys->Stat, h);
2142# else
2143 pPhys = NULL; /* might not be valid anymore. */
2144# endif
2145 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
2146#else
2147 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2148 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2149 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2150 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2151#endif
2152 }
2153
2154 /*
2155 * Deal with any virtual handlers.
2156 */
2157 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
2158 {
2159 unsigned iPage;
2160 PPGMVIRTHANDLER pVirt;
2161
2162 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
2163 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
2164 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
2165 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2166 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
2167
2168 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2169#ifdef IN_RING3
2170 if (pVirtType->pfnHandlerR3)
2171 {
2172 if (!pPhys)
2173 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2174 else
2175 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
2176 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2177 + (iPage << PAGE_SHIFT)
2178 + (GCPhys & PAGE_OFFSET_MASK);
2179
2180 STAM_PROFILE_START(&pVirt->Stat, h);
2181 rc2 = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin,
2182 pVirt->CTX_SUFF(pvUser));
2183 STAM_PROFILE_STOP(&pVirt->Stat, h);
2184 if (rc2 == VINF_SUCCESS)
2185 rc = VINF_SUCCESS;
2186 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2187 }
2188 else
2189 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2190#else
2191 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2192 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2193 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2194 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2195#endif
2196 }
2197
2198 /*
2199 * Take the default action.
2200 */
2201 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2202 memcpy(pvBuf, pvSrc, cb);
2203 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2204 return rc;
2205}
2206
2207
2208/**
2209 * Read physical memory.
2210 *
2211 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2212 * want to ignore those.
2213 *
2214 * @returns VBox status code. Can be ignored in ring-3.
2215 * @retval VINF_SUCCESS.
2216 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2217 *
2218 * @param pVM Pointer to the VM.
2219 * @param GCPhys Physical address start reading from.
2220 * @param pvBuf Where to put the read bits.
2221 * @param cbRead How many bytes to read.
2222 * @param enmOrigin The origin of this call.
2223 */
2224VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2225{
2226 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2227 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2228
2229 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2230 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2231
2232 pgmLock(pVM);
2233
2234 /*
2235 * Copy loop on ram ranges.
2236 */
2237 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2238 for (;;)
2239 {
2240 /* Inside range or not? */
2241 if (pRam && GCPhys >= pRam->GCPhys)
2242 {
2243 /*
2244 * Must work our way thru this page by page.
2245 */
2246 RTGCPHYS off = GCPhys - pRam->GCPhys;
2247 while (off < pRam->cb)
2248 {
2249 unsigned iPage = off >> PAGE_SHIFT;
2250 PPGMPAGE pPage = &pRam->aPages[iPage];
2251 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2252 if (cb > cbRead)
2253 cb = cbRead;
2254
2255 /*
2256 * Any ALL access handlers?
2257 */
2258 if (RT_UNLIKELY( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2259 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage)))
2260 {
2261 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2262 if (RT_FAILURE(rc))
2263 {
2264 pgmUnlock(pVM);
2265 return rc;
2266 }
2267 }
2268 else
2269 {
2270 /*
2271 * Get the pointer to the page.
2272 */
2273 PGMPAGEMAPLOCK PgMpLck;
2274 const void *pvSrc;
2275 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2276 if (RT_SUCCESS(rc))
2277 {
2278 memcpy(pvBuf, pvSrc, cb);
2279 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2280 }
2281 else
2282 {
2283 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2284 pRam->GCPhys + off, pPage, rc));
2285 memset(pvBuf, 0xff, cb);
2286 }
2287 }
2288
2289 /* next page */
2290 if (cb >= cbRead)
2291 {
2292 pgmUnlock(pVM);
2293 return VINF_SUCCESS;
2294 }
2295 cbRead -= cb;
2296 off += cb;
2297 pvBuf = (char *)pvBuf + cb;
2298 } /* walk pages in ram range. */
2299
2300 GCPhys = pRam->GCPhysLast + 1;
2301 }
2302 else
2303 {
2304 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2305
2306 /*
2307 * Unassigned address space.
2308 */
2309 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2310 if (cb >= cbRead)
2311 {
2312 memset(pvBuf, 0xff, cbRead);
2313 break;
2314 }
2315 memset(pvBuf, 0xff, cb);
2316
2317 cbRead -= cb;
2318 pvBuf = (char *)pvBuf + cb;
2319 GCPhys += cb;
2320 }
2321
2322 /* Advance range if necessary. */
2323 while (pRam && GCPhys > pRam->GCPhysLast)
2324 pRam = pRam->CTX_SUFF(pNext);
2325 } /* Ram range walk */
2326
2327 pgmUnlock(pVM);
2328 return VINF_SUCCESS;
2329}
2330
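/*
 * Usage sketch (illustration only, not part of the build): a handler
 * respecting read of a guest physical buffer.  In R0 and RC the call can
 * return VERR_PGM_PHYS_WR_HIT_HANDLER, in which case the access has to be
 * redone in ring-3.  The helper name and the origin value passed in by the
 * caller are hypothetical.
 */
#if 0 /* usage sketch, not built */
static int pgmExampleReadGuestPhys(PVM pVM, RTGCPHYS GCPhys, void *pvDst, size_t cbDst,
                                   PGMACCESSORIGIN enmOrigin)
{
    int rc = PGMPhysRead(pVM, GCPhys, pvDst, cbDst, enmOrigin);
# ifndef IN_RING3
    if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
        return rc;                          /* Let the ring-3 caller redo the access. */
# endif
    AssertLogRelRC(rc);
    return rc;
}
#endif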
2331
2332/**
2333 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2334 *
2335 * @returns VBox status code. Can be ignored in ring-3.
2336 * @retval VINF_SUCCESS.
2337 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2338 *
2339 * @param pVM Pointer to the VM.
2340 * @param pPage The page descriptor.
2341 * @param GCPhys The physical address to start writing at.
2342 * @param pvBuf What to write.
2343 * @param cbWrite How much to write - less or equal to a page.
2344 * @param enmOrigin The origin of this call.
2345 */
2346static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2347 PGMACCESSORIGIN enmOrigin)
2348{
2349 PGMPAGEMAPLOCK PgMpLck;
2350 void *pvDst = NULL;
2351 int rc;
2352
2353 /*
2354 * Give priority to physical handlers (like #PF does).
2355 *
2356 * Hope for a lonely physical handler first that covers the whole
2357 * write area. This should be a pretty frequent case with MMIO and
2358 * the heavy usage of full page handlers in the page pool.
2359 */
2360 PVMCPU pVCpu = VMMGetCpu(pVM);
2361 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2362 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage) /* screw virtual handlers on MMIO pages */)
2363 {
2364 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2365 if (pCur)
2366 {
2367 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2368
2369 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2370 if (cbRange > cbWrite)
2371 cbRange = cbWrite;
2372
2373#ifndef IN_RING3
2374 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2375 NOREF(cbRange);
2376 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2377 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2378
2379#else /* IN_RING3 */
2380 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler));
2381 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2382 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2383 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2384 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2385 else
2386 rc = VINF_SUCCESS;
2387 if (RT_SUCCESS(rc))
2388 {
2389 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
2390 void *pvUser = pCur->CTX_SUFF(pvUser);
2391
2392 STAM_PROFILE_START(&pCur->Stat, h);
2393 PGM_LOCK_ASSERT_OWNER(pVM);
2394 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2395 pgmUnlock(pVM);
2396 rc = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2397 pgmLock(pVM);
2398# ifdef VBOX_WITH_STATISTICS
2399 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2400 if (pCur)
2401 STAM_PROFILE_STOP(&pCur->Stat, h);
2402# else
2403 pCur = NULL; /* might not be valid anymore. */
2404# endif
2405 if (rc == VINF_PGM_HANDLER_DO_DEFAULT && pvDst)
2406 {
2407 if (pvDst)
2408 memcpy(pvDst, pvBuf, cbRange);
2409 }
2410 else
2411 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT,
2412 ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur ? pCur->pszDesc : ""));
2413 }
2414 else
2415 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2416 GCPhys, pPage, rc), rc);
2417 if (RT_LIKELY(cbRange == cbWrite))
2418 {
2419 if (pvDst)
2420 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2421 return VINF_SUCCESS;
2422 }
2423
2424 /* more fun to be had below */
2425 cbWrite -= cbRange;
2426 GCPhys += cbRange;
2427 pvBuf = (uint8_t *)pvBuf + cbRange;
2428 pvDst = (uint8_t *)pvDst + cbRange;
2429#endif /* IN_RING3 */
2430 }
2431 /* else: the handler is somewhere else in the page, deal with it below. */
2432 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2433 }
2434 /*
2435 * A virtual handler without any interfering physical handlers.
2436 * Hopefully it'll cover the whole write.
2437 */
2438 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2439 {
2440 unsigned iPage;
2441 PPGMVIRTHANDLER pCur;
2442 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2443 if (RT_SUCCESS(rc))
2444 {
2445 PPGMVIRTHANDLERTYPEINT pCurType = PGMVIRTANDLER_GET_TYPE(pVM, pCur);
2446
2447 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2448 if (cbRange > cbWrite)
2449 cbRange = cbWrite;
2450
2451#ifndef IN_RING3
2452 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2453 NOREF(cbRange);
2454 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2455 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2456
2457#else /* IN_RING3 */
2458
2459 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2460 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2461 if (RT_SUCCESS(rc))
2462 {
2463 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2464 if (pCurType->pfnHandlerR3)
2465 {
2466 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2467 + (iPage << PAGE_SHIFT)
2468 + (GCPhys & PAGE_OFFSET_MASK);
2469
2470 STAM_PROFILE_START(&pCur->Stat, h);
2471 rc = pCurType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2472 enmOrigin, pCur->CTX_SUFF(pvUser));
2473 STAM_PROFILE_STOP(&pCur->Stat, h);
2474 }
2475 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2476 memcpy(pvDst, pvBuf, cbRange);
2477 else
2478 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2479 }
2480 else
2481 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2482 GCPhys, pPage, rc), rc);
2483 if (RT_LIKELY(cbRange == cbWrite))
2484 {
2485 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2486 return VINF_SUCCESS;
2487 }
2488
2489 /* more fun to be had below */
2490 cbWrite -= cbRange;
2491 GCPhys += cbRange;
2492 pvBuf = (uint8_t *)pvBuf + cbRange;
2493 pvDst = (uint8_t *)pvDst + cbRange;
2494#endif
2495 }
2496 /* else: the handler is somewhere else in the page, deal with it below. */
2497 }
2498
2499 /*
2500 * Deal with all the odd ends.
2501 */
2502
2503 /* We need a writable destination page. */
2504 if (!pvDst)
2505 {
2506 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2507 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2508 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2509 GCPhys, pPage, rc), rc);
2510 }
2511
2512 /* The loop state (big + ugly). */
2513 unsigned iVirtPage = 0;
2514 PPGMVIRTHANDLER pVirt = NULL;
2515 uint32_t offVirt = PAGE_SIZE;
2516 uint32_t offVirtLast = PAGE_SIZE;
2517 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2518
2519 PPGMPHYSHANDLER pPhys = NULL;
2520 uint32_t offPhys = PAGE_SIZE;
2521 uint32_t offPhysLast = PAGE_SIZE;
2522 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2523
2524 /* The loop. */
2525 for (;;)
2526 {
2527 /*
2528 * Find the closest handler at or above GCPhys.
2529 */
2530 if (fMoreVirt && !pVirt)
2531 {
2532 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2533 if (RT_SUCCESS(rc))
2534 {
2535 offVirt = 0;
2536 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2537 }
2538 else
2539 {
2540 PPGMPHYS2VIRTHANDLER pVirtPhys;
2541 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2542 GCPhys, true /* fAbove */);
2543 if ( pVirtPhys
2544 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2545 {
2546 /* ASSUME that pVirtPhys only covers one page. */
2547 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2548 Assert(pVirtPhys->Core.Key > GCPhys);
2549
2550 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2551 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2552 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2553 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2554 }
2555 else
2556 {
2557 pVirt = NULL;
2558 fMoreVirt = false;
2559 offVirt = offVirtLast = PAGE_SIZE;
2560 }
2561 }
2562 }
2563
2564 if (fMorePhys && !pPhys)
2565 {
2566 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2567 if (pPhys)
2568 {
2569 offPhys = 0;
2570 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2571 }
2572 else
2573 {
2574 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2575 GCPhys, true /* fAbove */);
2576 if ( pPhys
2577 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2578 {
2579 offPhys = pPhys->Core.Key - GCPhys;
2580 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2581 }
2582 else
2583 {
2584 pPhys = NULL;
2585 fMorePhys = false;
2586 offPhys = offPhysLast = PAGE_SIZE;
2587 }
2588 }
2589 }
2590
2591 /*
2592 * Handle access to space without handlers (that's easy).
2593 */
2594 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2595 uint32_t cbRange = (uint32_t)cbWrite;
2596 if (offPhys && offVirt)
2597 {
2598 if (cbRange > offPhys)
2599 cbRange = offPhys;
2600 if (cbRange > offVirt)
2601 cbRange = offVirt;
2602 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2603 }
2604 /*
2605 * Physical handler.
2606 */
2607 else if (!offPhys && offVirt)
2608 {
2609 if (cbRange > offPhysLast + 1)
2610 cbRange = offPhysLast + 1;
2611 if (cbRange > offVirt)
2612 cbRange = offVirt;
2613#ifdef IN_RING3
2614 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2615 void *pvUser = pPhys->CTX_SUFF(pvUser);
2616
2617 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2618 STAM_PROFILE_START(&pPhys->Stat, h);
2619 PGM_LOCK_ASSERT_OWNER(pVM);
2620 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2621 pgmUnlock(pVM);
2622 rc = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2623 pgmLock(pVM);
2624# ifdef VBOX_WITH_STATISTICS
2625 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2626 if (pPhys)
2627 STAM_PROFILE_STOP(&pPhys->Stat, h);
2628# else
2629 pPhys = NULL; /* might not be valid anymore. */
2630# endif
2631 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2632#else
2633 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2634 NOREF(cbRange);
2635 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2636 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2637 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2638#endif
2639 }
2640 /*
2641 * Virtual handler.
2642 */
2643 else if (offPhys && !offVirt)
2644 {
2645 if (cbRange > offVirtLast + 1)
2646 cbRange = offVirtLast + 1;
2647 if (cbRange > offPhys)
2648 cbRange = offPhys;
2649
2650 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2651#ifdef IN_RING3
2652 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2653 if (pVirtType->pfnHandlerR3)
2654 {
2655 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2656 + (iVirtPage << PAGE_SHIFT)
2657 + (GCPhys & PAGE_OFFSET_MASK);
2658 STAM_PROFILE_START(&pVirt->Stat, h);
2659 rc = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2660 enmOrigin, pVirt->CTX_SUFF(pvUser));
2661 STAM_PROFILE_STOP(&pVirt->Stat, h);
2662 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2663 }
2664 pVirt = NULL;
2665#else
2666 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2667 NOREF(cbRange);
2668 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2669 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2670 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2671#endif
2672 }
2673 /*
2674 * Both... give the physical one priority.
2675 */
2676 else
2677 {
2678 Assert(!offPhys && !offVirt);
2679 if (cbRange > offVirtLast + 1)
2680 cbRange = offVirtLast + 1;
2681 if (cbRange > offPhysLast + 1)
2682 cbRange = offPhysLast + 1;
2683
2684 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2685#ifdef IN_RING3
2686 if (pVirtType->pfnHandlerR3)
2687 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2688 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2689
2690 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2691 void *pvUser = pPhys->CTX_SUFF(pvUser);
2692
2693 STAM_PROFILE_START(&pPhys->Stat, h);
2694 PGM_LOCK_ASSERT_OWNER(pVM);
2695 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2696 pgmUnlock(pVM);
2697 rc = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2698 pgmLock(pVM);
2699# ifdef VBOX_WITH_STATISTICS
2700 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2701 if (pPhys)
2702 STAM_PROFILE_STOP(&pPhys->Stat, h);
2703# else
2704 pPhys = NULL; /* might not be valid anymore. */
2705# endif
2706 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2707 if (pVirtType->pfnHandlerR3)
2708 {
2709
2710 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2711 + (iVirtPage << PAGE_SHIFT)
2712 + (GCPhys & PAGE_OFFSET_MASK);
2713 STAM_PROFILE_START(&pVirt->Stat, h2);
2714 int rc2 = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2715 enmOrigin, pVirt->CTX_SUFF(pvUser));
2716 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2717 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2718 rc = VINF_SUCCESS;
2719 else
2720 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2721 }
2722 pPhys = NULL;
2723 pVirt = NULL;
2724#else
2725 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2726 NOREF(cbRange);
2727 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2728 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2729 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2730#endif
2731 }
2732 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2733 memcpy(pvDst, pvBuf, cbRange);
2734
2735 /*
2736 * Advance if we've got more stuff to do.
2737 */
2738 if (cbRange >= cbWrite)
2739 {
2740 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2741 return VINF_SUCCESS;
2742 }
2743
2744 cbWrite -= cbRange;
2745 GCPhys += cbRange;
2746 pvBuf = (uint8_t *)pvBuf + cbRange;
2747 pvDst = (uint8_t *)pvDst + cbRange;
2748
2749 offPhys -= cbRange;
2750 offPhysLast -= cbRange;
2751 offVirt -= cbRange;
2752 offVirtLast -= cbRange;
2753 }
2754}
2755
2756
2757/**
2758 * Write to physical memory.
2759 *
2760 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2761 * want to ignore those.
2762 *
2763 * @returns VBox status code. Can be ignored in ring-3.
2764 * @retval VINF_SUCCESS.
2765 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2766 *
2767 * @param pVM Pointer to the VM.
2768 * @param GCPhys Physical address to write to.
2769 * @param pvBuf What to write.
2770 * @param cbWrite How many bytes to write.
2771 * @param enmOrigin Who is calling.
2772 */
2773VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2774{
2775 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2776 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2777 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2778
2779 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2780 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2781
2782 pgmLock(pVM);
2783
2784 /*
2785 * Copy loop on ram ranges.
2786 */
2787 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2788 for (;;)
2789 {
2790 /* Inside range or not? */
2791 if (pRam && GCPhys >= pRam->GCPhys)
2792 {
2793 /*
2794 * Must work our way thru this page by page.
2795 */
2796 RTGCPTR off = GCPhys - pRam->GCPhys;
2797 while (off < pRam->cb)
2798 {
2799 RTGCPTR iPage = off >> PAGE_SHIFT;
2800 PPGMPAGE pPage = &pRam->aPages[iPage];
2801 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2802 if (cb > cbWrite)
2803 cb = cbWrite;
2804
2805 /*
2806 * Any active WRITE or ALL access handlers?
2807 */
2808 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
2809 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2810 {
2811 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2812 if (RT_FAILURE(rc))
2813 {
2814 pgmUnlock(pVM);
2815 return rc;
2816 }
2817 }
2818 else
2819 {
2820 /*
2821 * Get the pointer to the page.
2822 */
2823 PGMPAGEMAPLOCK PgMpLck;
2824 void *pvDst;
2825 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2826 if (RT_SUCCESS(rc))
2827 {
2828 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2829 memcpy(pvDst, pvBuf, cb);
2830 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2831 }
2832 /* Ignore writes to ballooned pages. */
2833 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2834 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2835 pRam->GCPhys + off, pPage, rc));
2836 }
2837
2838 /* next page */
2839 if (cb >= cbWrite)
2840 {
2841 pgmUnlock(pVM);
2842 return VINF_SUCCESS;
2843 }
2844
2845 cbWrite -= cb;
2846 off += cb;
2847 pvBuf = (const char *)pvBuf + cb;
2848 } /* walk pages in ram range */
2849
2850 GCPhys = pRam->GCPhysLast + 1;
2851 }
2852 else
2853 {
2854 /*
2855 * Unassigned address space, skip it.
2856 */
2857 if (!pRam)
2858 break;
2859 size_t cb = pRam->GCPhys - GCPhys;
2860 if (cb >= cbWrite)
2861 break;
2862 cbWrite -= cb;
2863 pvBuf = (const char *)pvBuf + cb;
2864 GCPhys += cb;
2865 }
2866
2867 /* Advance range if necessary. */
2868 while (pRam && GCPhys > pRam->GCPhysLast)
2869 pRam = pRam->CTX_SUFF(pNext);
2870 } /* Ram range walk */
2871
2872 pgmUnlock(pVM);
2873 return VINF_SUCCESS;
2874}
2875
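/*
 * Usage sketch (illustration only, not part of the build): unlike the CCPtr
 * mapping APIs above, PGMPhysWrite deals with page and RAM range crossings
 * internally, so a small guest structure can be written in one call.  The
 * structure, helper name and origin are hypothetical.
 */
#if 0 /* usage sketch, not built */
typedef struct PGMEXAMPLEREC
{
    uint64_t u64First;
    uint64_t u64Second;
} PGMEXAMPLEREC;

static int pgmExampleWriteGuestRec(PVM pVM, RTGCPHYS GCPhys, PGMEXAMPLEREC const *pRec,
                                   PGMACCESSORIGIN enmOrigin)
{
    /* May return VERR_PGM_PHYS_WR_HIT_HANDLER in R0/RC; retry in ring-3 then. */
    return PGMPhysWrite(pVM, GCPhys, pRec, sizeof(*pRec), enmOrigin);
}
#endif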
2876
2877/**
2878 * Read from guest physical memory by GC physical address, bypassing
2879 * MMIO and access handlers.
2880 *
2881 * @returns VBox status.
2882 * @param pVM Pointer to the VM.
2883 * @param pvDst The destination address.
2884 * @param GCPhysSrc The source address (GC physical address).
2885 * @param cb The number of bytes to read.
2886 */
2887VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2888{
2889 /*
2890 * Treat the first page as a special case.
2891 */
2892 if (!cb)
2893 return VINF_SUCCESS;
2894
2895 /* map the 1st page */
2896 void const *pvSrc;
2897 PGMPAGEMAPLOCK Lock;
2898 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2899 if (RT_FAILURE(rc))
2900 return rc;
2901
2902 /* optimize for the case where access is completely within the first page. */
2903 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2904 if (RT_LIKELY(cb <= cbPage))
2905 {
2906 memcpy(pvDst, pvSrc, cb);
2907 PGMPhysReleasePageMappingLock(pVM, &Lock);
2908 return VINF_SUCCESS;
2909 }
2910
2911 /* copy to the end of the page. */
2912 memcpy(pvDst, pvSrc, cbPage);
2913 PGMPhysReleasePageMappingLock(pVM, &Lock);
2914 GCPhysSrc += cbPage;
2915 pvDst = (uint8_t *)pvDst + cbPage;
2916 cb -= cbPage;
2917
2918 /*
2919 * Page by page.
2920 */
2921 for (;;)
2922 {
2923 /* map the page */
2924 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2925 if (RT_FAILURE(rc))
2926 return rc;
2927
2928 /* last page? */
2929 if (cb <= PAGE_SIZE)
2930 {
2931 memcpy(pvDst, pvSrc, cb);
2932 PGMPhysReleasePageMappingLock(pVM, &Lock);
2933 return VINF_SUCCESS;
2934 }
2935
2936 /* copy the entire page and advance */
2937 memcpy(pvDst, pvSrc, PAGE_SIZE);
2938 PGMPhysReleasePageMappingLock(pVM, &Lock);
2939 GCPhysSrc += PAGE_SIZE;
2940 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2941 cb -= PAGE_SIZE;
2942 }
2943 /* won't ever get here. */
2944}
2945
2946
2947/**
2948 * Write to guest physical memory by GC physical address.
2949 *
2950 * This will bypass MMIO and access handlers; use PGMPhysWrite() if you
2951 * want those respected.
2952 *
2953 * @returns VBox status.
2954 * @param pVM Pointer to the VM.
2955 * @param GCPhysDst The GC physical address of the destination.
2956 * @param pvSrc The source buffer.
2957 * @param cb The number of bytes to write.
2958 */
2959VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2960{
2961 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2962
2963 /*
2964 * Treat the first page as a special case.
2965 */
2966 if (!cb)
2967 return VINF_SUCCESS;
2968
2969 /* map the 1st page */
2970 void *pvDst;
2971 PGMPAGEMAPLOCK Lock;
2972 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2973 if (RT_FAILURE(rc))
2974 return rc;
2975
2976 /* optimize for the case where access is completely within the first page. */
2977 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2978 if (RT_LIKELY(cb <= cbPage))
2979 {
2980 memcpy(pvDst, pvSrc, cb);
2981 PGMPhysReleasePageMappingLock(pVM, &Lock);
2982 return VINF_SUCCESS;
2983 }
2984
2985 /* copy to the end of the page. */
2986 memcpy(pvDst, pvSrc, cbPage);
2987 PGMPhysReleasePageMappingLock(pVM, &Lock);
2988 GCPhysDst += cbPage;
2989 pvSrc = (const uint8_t *)pvSrc + cbPage;
2990 cb -= cbPage;
2991
2992 /*
2993 * Page by page.
2994 */
2995 for (;;)
2996 {
2997 /* map the page */
2998 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2999 if (RT_FAILURE(rc))
3000 return rc;
3001
3002 /* last page? */
3003 if (cb <= PAGE_SIZE)
3004 {
3005 memcpy(pvDst, pvSrc, cb);
3006 PGMPhysReleasePageMappingLock(pVM, &Lock);
3007 return VINF_SUCCESS;
3008 }
3009
3010 /* copy the entire page and advance */
3011 memcpy(pvDst, pvSrc, PAGE_SIZE);
3012 PGMPhysReleasePageMappingLock(pVM, &Lock);
3013 GCPhysDst += PAGE_SIZE;
3014 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3015 cb -= PAGE_SIZE;
3016 }
3017 /* won't ever get here. */
3018}
3019
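/*
 * Usage sketch (illustration only, not part of the build): copying guest RAM
 * from one physical address to another while deliberately bypassing MMIO and
 * access handlers, using a small bounce buffer.  Helper name and buffer size
 * are hypothetical.
 */
#if 0 /* usage sketch, not built */
static int pgmExampleCopyGuestPhys(PVM pVM, RTGCPHYS GCPhysDst, RTGCPHYS GCPhysSrc, size_t cb)
{
    uint8_t abBounce[256];
    while (cb > 0)
    {
        size_t cbChunk = RT_MIN(cb, sizeof(abBounce));
        int rc = PGMPhysSimpleReadGCPhys(pVM, abBounce, GCPhysSrc, cbChunk);
        if (RT_SUCCESS(rc))
            rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysDst, abBounce, cbChunk);
        if (RT_FAILURE(rc))
            return rc;
        GCPhysSrc += cbChunk;
        GCPhysDst += cbChunk;
        cb        -= cbChunk;
    }
    return VINF_SUCCESS;
}
#endif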
3020
3021/**
3022 * Read from guest physical memory referenced by GC pointer.
3023 *
3024 * This function uses the current CR3/CR0/CR4 of the guest and will
3025 * bypass access handlers and not set any accessed bits.
3026 *
3027 * @returns VBox status.
3028 * @param pVCpu Handle to the current virtual CPU.
3029 * @param pvDst The destination address.
3030 * @param GCPtrSrc The source address (GC pointer).
3031 * @param cb The number of bytes to read.
3032 */
3033VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3034{
3035 PVM pVM = pVCpu->CTX_SUFF(pVM);
3036/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3037
3038 /*
3039 * Treat the first page as a special case.
3040 */
3041 if (!cb)
3042 return VINF_SUCCESS;
3043
3044 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3045 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3046
3047 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3048 * when many VCPUs are fighting for the lock.
3049 */
3050 pgmLock(pVM);
3051
3052 /* map the 1st page */
3053 void const *pvSrc;
3054 PGMPAGEMAPLOCK Lock;
3055 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3056 if (RT_FAILURE(rc))
3057 {
3058 pgmUnlock(pVM);
3059 return rc;
3060 }
3061
3062 /* optimize for the case where access is completely within the first page. */
3063 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3064 if (RT_LIKELY(cb <= cbPage))
3065 {
3066 memcpy(pvDst, pvSrc, cb);
3067 PGMPhysReleasePageMappingLock(pVM, &Lock);
3068 pgmUnlock(pVM);
3069 return VINF_SUCCESS;
3070 }
3071
3072 /* copy to the end of the page. */
3073 memcpy(pvDst, pvSrc, cbPage);
3074 PGMPhysReleasePageMappingLock(pVM, &Lock);
3075 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3076 pvDst = (uint8_t *)pvDst + cbPage;
3077 cb -= cbPage;
3078
3079 /*
3080 * Page by page.
3081 */
3082 for (;;)
3083 {
3084 /* map the page */
3085 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3086 if (RT_FAILURE(rc))
3087 {
3088 pgmUnlock(pVM);
3089 return rc;
3090 }
3091
3092 /* last page? */
3093 if (cb <= PAGE_SIZE)
3094 {
3095 memcpy(pvDst, pvSrc, cb);
3096 PGMPhysReleasePageMappingLock(pVM, &Lock);
3097 pgmUnlock(pVM);
3098 return VINF_SUCCESS;
3099 }
3100
3101 /* copy the entire page and advance */
3102 memcpy(pvDst, pvSrc, PAGE_SIZE);
3103 PGMPhysReleasePageMappingLock(pVM, &Lock);
3104 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3105 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3106 cb -= PAGE_SIZE;
3107 }
3108 /* won't ever get here. */
3109}
3110
3111
3112/**
3113 * Write to guest physical memory referenced by GC pointer.
3114 *
3115 * This function uses the current CR3/CR0/CR4 of the guest and will
3116 * bypass access handlers and not set dirty or accessed bits.
3117 *
3118 * @returns VBox status.
3119 * @param pVCpu Handle to the current virtual CPU.
3120 * @param GCPtrDst The destination address (GC pointer).
3121 * @param pvSrc The source address.
3122 * @param cb The number of bytes to write.
3123 */
3124VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3125{
3126 PVM pVM = pVCpu->CTX_SUFF(pVM);
3127 VMCPU_ASSERT_EMT(pVCpu);
3128
3129 /*
3130 * Treat the first page as a special case.
3131 */
3132 if (!cb)
3133 return VINF_SUCCESS;
3134
3135 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3136 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3137
3138 /* map the 1st page */
3139 void *pvDst;
3140 PGMPAGEMAPLOCK Lock;
3141 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3142 if (RT_FAILURE(rc))
3143 return rc;
3144
3145 /* optimize for the case where access is completely within the first page. */
3146 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3147 if (RT_LIKELY(cb <= cbPage))
3148 {
3149 memcpy(pvDst, pvSrc, cb);
3150 PGMPhysReleasePageMappingLock(pVM, &Lock);
3151 return VINF_SUCCESS;
3152 }
3153
3154 /* copy to the end of the page. */
3155 memcpy(pvDst, pvSrc, cbPage);
3156 PGMPhysReleasePageMappingLock(pVM, &Lock);
3157 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3158 pvSrc = (const uint8_t *)pvSrc + cbPage;
3159 cb -= cbPage;
3160
3161 /*
3162 * Page by page.
3163 */
3164 for (;;)
3165 {
3166 /* map the page */
3167 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3168 if (RT_FAILURE(rc))
3169 return rc;
3170
3171 /* last page? */
3172 if (cb <= PAGE_SIZE)
3173 {
3174 memcpy(pvDst, pvSrc, cb);
3175 PGMPhysReleasePageMappingLock(pVM, &Lock);
3176 return VINF_SUCCESS;
3177 }
3178
3179 /* copy the entire page and advance */
3180 memcpy(pvDst, pvSrc, PAGE_SIZE);
3181 PGMPhysReleasePageMappingLock(pVM, &Lock);
3182 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3183 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3184 cb -= PAGE_SIZE;
3185 }
3186 /* won't ever get here. */
3187}
3188
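/*
 * Usage sketch (illustration only, not part of the build): a debugger style
 * poke by guest virtual address that neither triggers access handlers nor
 * touches the accessed/dirty bits.  Must be called on the EMT of pVCpu; the
 * helper name is hypothetical.
 */
#if 0 /* usage sketch, not built */
static int pgmExamplePokeGuestVirt(PVMCPU pVCpu, RTGCPTR GCPtr, void const *pvSrc, size_t cb)
{
    /* Fails with VERR_PAGE_NOT_PRESENT / VERR_PAGE_TABLE_NOT_PRESENT when the
       guest has no mapping for the address. */
    return PGMPhysSimpleWriteGCPtr(pVCpu, GCPtr, pvSrc, cb);
}
#endif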
3189
3190/**
3191 * Write to guest physical memory referenced by GC pointer and update the PTE.
3192 *
3193 * This function uses the current CR3/CR0/CR4 of the guest and will
3194 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3195 *
3196 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3197 *
3198 * @returns VBox status.
3199 * @param pVCpu Handle to the current virtual CPU.
3200 * @param GCPtrDst The destination address (GC pointer).
3201 * @param pvSrc The source address.
3202 * @param cb The number of bytes to write.
3203 */
3204VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3205{
3206 PVM pVM = pVCpu->CTX_SUFF(pVM);
3207 VMCPU_ASSERT_EMT(pVCpu);
3208
3209 /*
3210 * Treat the first page as a special case.
3211 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3212 */
3213 if (!cb)
3214 return VINF_SUCCESS;
3215
3216 /* map the 1st page */
3217 void *pvDst;
3218 PGMPAGEMAPLOCK Lock;
3219 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3220 if (RT_FAILURE(rc))
3221 return rc;
3222
3223 /* optimize for the case where access is completely within the first page. */
3224 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3225 if (RT_LIKELY(cb <= cbPage))
3226 {
3227 memcpy(pvDst, pvSrc, cb);
3228 PGMPhysReleasePageMappingLock(pVM, &Lock);
3229 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3230 return VINF_SUCCESS;
3231 }
3232
3233 /* copy to the end of the page. */
3234 memcpy(pvDst, pvSrc, cbPage);
3235 PGMPhysReleasePageMappingLock(pVM, &Lock);
3236 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3237 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3238 pvSrc = (const uint8_t *)pvSrc + cbPage;
3239 cb -= cbPage;
3240
3241 /*
3242 * Page by page.
3243 */
3244 for (;;)
3245 {
3246 /* map the page */
3247 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3248 if (RT_FAILURE(rc))
3249 return rc;
3250
3251 /* last page? */
3252 if (cb <= PAGE_SIZE)
3253 {
3254 memcpy(pvDst, pvSrc, cb);
3255 PGMPhysReleasePageMappingLock(pVM, &Lock);
3256 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3257 return VINF_SUCCESS;
3258 }
3259
3260 /* copy the entire page and advance */
3261 memcpy(pvDst, pvSrc, PAGE_SIZE);
3262 PGMPhysReleasePageMappingLock(pVM, &Lock);
3263 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3264 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3265 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3266 cb -= PAGE_SIZE;
3267 }
3268 /* won't ever get here. */
3269}
3270
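/*
 * Usage sketch (illustration only, not part of the build): when a guest store
 * is being emulated, the accessed and dirty bits should end up set just like
 * the CPU would have set them, which is what the dirty variant does; plain
 * PGMPhysSimpleWriteGCPtr is for host side pokes.  The helper name is
 * hypothetical.
 */
#if 0 /* usage sketch, not built */
static int pgmExampleEmulatedStoreU64(PVMCPU pVCpu, RTGCPTR GCPtrDst, uint64_t u64Value)
{
    return PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrDst, &u64Value, sizeof(u64Value));
}
#endif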
3271
3272/**
3273 * Read from guest physical memory referenced by GC pointer.
3274 *
3275 * This function uses the current CR3/CR0/CR4 of the guest and will
3276 * respect access handlers and set accessed bits.
3277 *
3278 * @returns VBox status.
3279 * @param pVCpu Handle to the current virtual CPU.
3280 * @param pvDst The destination address.
3281 * @param GCPtrSrc The source address (GC pointer).
3282 * @param cb The number of bytes to read.
3283 * @param enmOrigin Who is calling.
3284 * @thread EMT(pVCpu)
3285 */
3286VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3287{
3288 RTGCPHYS GCPhys;
3289 uint64_t fFlags;
3290 int rc;
3291 PVM pVM = pVCpu->CTX_SUFF(pVM);
3292 VMCPU_ASSERT_EMT(pVCpu);
3293
3294 /*
3295 * Anything to do?
3296 */
3297 if (!cb)
3298 return VINF_SUCCESS;
3299
3300 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3301
3302 /*
3303 * Optimize reads within a single page.
3304 */
3305 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3306 {
3307 /* Convert virtual to physical address + flags */
3308 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3309 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3310 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3311
3312 /* mark the guest page as accessed. */
3313 if (!(fFlags & X86_PTE_A))
3314 {
3315 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3316 AssertRC(rc);
3317 }
3318
3319 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3320 }
3321
3322 /*
3323 * Page by page.
3324 */
3325 for (;;)
3326 {
3327 /* Convert virtual to physical address + flags */
3328 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3329 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3330 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3331
3332 /* mark the guest page as accessed. */
3333 if (!(fFlags & X86_PTE_A))
3334 {
3335 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3336 AssertRC(rc);
3337 }
3338
3339 /* copy */
3340 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3341 if (cbRead < cb)
3342 {
3343 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3344 if (RT_FAILURE(rc))
3345 return rc;
3346 }
3347 else /* Last page (cbRead is PAGE_SIZE, we only need cb!) */
3348 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3349
3350 /* next */
3351 Assert(cb > cbRead);
3352 cb -= cbRead;
3353 pvDst = (uint8_t *)pvDst + cbRead;
3354 GCPtrSrc += cbRead;
3355 }
3356}
3357
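/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * caller of PGMPhysReadGCPtr running on the owning EMT.  The helper name and
 * the use of PGMACCESSORIGIN_DEBUGGER as the access origin are assumptions
 * made for the example only.
 */
#if 0
static int pgmSampleReadGuestVirt(PVMCPU pVCpu, RTGCPTR GCPtrSrc, void *pvDst, size_t cb)
{
    /* Walks the current guest page tables, sets the accessed bit and then goes
       through PGMPhysRead, so physical access handlers are respected. */
    int rc = PGMPhysReadGCPtr(pVCpu, pvDst, GCPtrSrc, cb, PGMACCESSORIGIN_DEBUGGER);
    if (RT_FAILURE(rc))
        Log(("pgmSampleReadGuestVirt: %RGv/%zu -> %Rrc\n", GCPtrSrc, cb, rc));
    return rc;
}
#endif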
3358
3359/**
3360 * Write to guest physical memory referenced by GC pointer.
3361 *
3362 * This function uses the current CR3/CR0/CR4 of the guest and will
3363 * respect access handlers and set dirty and accessed bits.
3364 *
3365 * @returns VBox status code.
3366 * @retval VINF_SUCCESS.
3367 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3368 *
3369 * @param pVCpu Handle to the current virtual CPU.
3370 * @param GCPtrDst The destination address (GC pointer).
3371 * @param pvSrc The source address.
3372 * @param cb The number of bytes to write.
3373 * @param enmOrigin Who is calling.
3374 */
3375VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3376{
3377 RTGCPHYS GCPhys;
3378 uint64_t fFlags;
3379 int rc;
3380 PVM pVM = pVCpu->CTX_SUFF(pVM);
3381 VMCPU_ASSERT_EMT(pVCpu);
3382
3383 /*
3384 * Anything to do?
3385 */
3386 if (!cb)
3387 return VINF_SUCCESS;
3388
3389 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3390
3391 /*
3392 * Optimize writes within a single page.
3393 */
3394 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3395 {
3396 /* Convert virtual to physical address + flags */
3397 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3398 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3399 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3400
3401 /* Mention when we ignore X86_PTE_RW... */
3402 if (!(fFlags & X86_PTE_RW))
3403 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3404
3405 /* Mark the guest page as accessed and dirty if necessary. */
3406 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3407 {
3408 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3409 AssertRC(rc);
3410 }
3411
3412 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3413 }
3414
3415 /*
3416 * Page by page.
3417 */
3418 for (;;)
3419 {
3420 /* Convert virtual to physical address + flags */
3421 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3422 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3423 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3424
3425 /* Mention when we ignore X86_PTE_RW... */
3426 if (!(fFlags & X86_PTE_RW))
3427 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3428
3429 /* Mark the guest page as accessed and dirty if necessary. */
3430 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3431 {
3432 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3433 AssertRC(rc);
3434 }
3435
3436 /* copy */
3437 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3438 if (cbWrite < cb)
3439 {
3440 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3441 if (RT_FAILURE(rc))
3442 return rc;
3443 }
3444 else /* Last page (cbWrite is PAGE_SIZE, we only need cb!) */
3445 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3446
3447 /* next */
3448 Assert(cb > cbWrite);
3449 cb -= cbWrite;
3450 pvSrc = (uint8_t *)pvSrc + cbWrite;
3451 GCPtrDst += cbWrite;
3452 }
3453}
3454
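/*
 * Editor's illustrative sketch (not part of the original file): writing guest
 * virtual memory and distinguishing the R0/RC handler status documented above.
 * The helper name and the PGMACCESSORIGIN_DEBUGGER origin are assumptions for
 * the example only.
 */
#if 0
static int pgmSampleWriteGuestVirt(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
{
    int rc = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_DEBUGGER);
    /* In R0/RC the write may come back as VERR_PGM_PHYS_WR_HIT_HANDLER; that
       is a request to redo the access in ring-3, not a hard error. */
    if (RT_FAILURE(rc) && rc != VERR_PGM_PHYS_WR_HIT_HANDLER)
        Log(("pgmSampleWriteGuestVirt: %RGv/%zu -> %Rrc\n", GCPtrDst, cb, rc));
    return rc;
}
#endif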
3455
3456/**
3457 * Performs a read of guest virtual memory for instruction emulation.
3458 *
3459 * This will check permissions, raise exceptions and update the access bits.
3460 *
3461 * The current implementation will bypass all access handlers. It may later be
3462 * changed to at least respect MMIO.
3463 *
3464 *
3465 * @returns VBox status code suitable to scheduling.
3466 * @retval VINF_SUCCESS if the read was performed successfully.
3467 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3468 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3469 *
3470 * @param pVCpu Handle to the current virtual CPU.
3471 * @param pCtxCore The context core.
3472 * @param pvDst Where to put the bytes we've read.
3473 * @param GCPtrSrc The source address.
3474 * @param cb The number of bytes to read. Not more than a page.
3475 *
3476 * @remark This function will dynamically map physical pages in GC. This may unmap
3477 * mappings done by the caller. Be careful!
3478 */
3479VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3480{
3481 PVM pVM = pVCpu->CTX_SUFF(pVM);
3482 Assert(cb <= PAGE_SIZE);
3483 VMCPU_ASSERT_EMT(pVCpu);
3484
3485/** @todo r=bird: This isn't perfect!
3486 * -# It's not checking for reserved bits being 1.
3487 * -# It's not correctly dealing with the access bit.
3488 * -# It's not respecting MMIO memory or any other access handlers.
3489 */
3490 /*
3491 * 1. Translate virtual to physical. This may fault.
3492 * 2. Map the physical address.
3493 * 3. Do the read operation.
3494 * 4. Set access bits if required.
3495 */
3496 int rc;
3497 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3498 if (cb <= cb1)
3499 {
3500 /*
3501 * Not crossing pages.
3502 */
3503 RTGCPHYS GCPhys;
3504 uint64_t fFlags;
3505 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3506 if (RT_SUCCESS(rc))
3507 {
3508 /** @todo we should check reserved bits ... */
3509 PGMPAGEMAPLOCK PgMpLck;
3510 void const *pvSrc;
3511 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3512 switch (rc)
3513 {
3514 case VINF_SUCCESS:
3515 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3516 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3517 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3518 break;
3519 case VERR_PGM_PHYS_PAGE_RESERVED:
3520 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3521 memset(pvDst, 0xff, cb);
3522 break;
3523 default:
3524 Assert(RT_FAILURE_NP(rc));
3525 return rc;
3526 }
3527
3528 /** @todo access bit emulation isn't 100% correct. */
3529 if (!(fFlags & X86_PTE_A))
3530 {
3531 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3532 AssertRC(rc);
3533 }
3534 return VINF_SUCCESS;
3535 }
3536 }
3537 else
3538 {
3539 /*
3540 * Crosses pages.
3541 */
3542 size_t cb2 = cb - cb1;
3543 uint64_t fFlags1;
3544 RTGCPHYS GCPhys1;
3545 uint64_t fFlags2;
3546 RTGCPHYS GCPhys2;
3547 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3548 if (RT_SUCCESS(rc))
3549 {
3550 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3551 if (RT_SUCCESS(rc))
3552 {
3553 /** @todo we should check reserved bits ... */
3554 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3555 PGMPAGEMAPLOCK PgMpLck;
3556 void const *pvSrc1;
3557 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3558 switch (rc)
3559 {
3560 case VINF_SUCCESS:
3561 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3562 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3563 break;
3564 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3565 memset(pvDst, 0xff, cb1);
3566 break;
3567 default:
3568 Assert(RT_FAILURE_NP(rc));
3569 return rc;
3570 }
3571
3572 void const *pvSrc2;
3573 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3574 switch (rc)
3575 {
3576 case VINF_SUCCESS:
3577 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3578 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3579 break;
3580 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3581 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3582 break;
3583 default:
3584 Assert(RT_FAILURE_NP(rc));
3585 return rc;
3586 }
3587
3588 if (!(fFlags1 & X86_PTE_A))
3589 {
3590 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3591 AssertRC(rc);
3592 }
3593 if (!(fFlags2 & X86_PTE_A))
3594 {
3595 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3596 AssertRC(rc);
3597 }
3598 return VINF_SUCCESS;
3599 }
3600 }
3601 }
3602
3603 /*
3604 * Raise a #PF.
3605 */
3606 uint32_t uErr;
3607
3608 /* Get the current privilege level. */
3609 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3610 switch (rc)
3611 {
3612 case VINF_SUCCESS:
3613 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3614 break;
3615
3616 case VERR_PAGE_NOT_PRESENT:
3617 case VERR_PAGE_TABLE_NOT_PRESENT:
3618 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3619 break;
3620
3621 default:
3622 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3623 return rc;
3624 }
3625 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3626 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3627}
3628
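/*
 * Editor's illustrative sketch (not part of the original file): how an
 * instruction emulator might fetch an operand via PGMPhysInterpretedRead and
 * simply pass the documented informational trap statuses back to its caller.
 * The helper name is an assumption.
 */
#if 0
static int pgmSampleEmulatedFetchU32(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc, uint32_t *pu32Value)
{
    /* On a translation or permission failure a #PF is raised for the guest and
       VINF_EM_RAW_GUEST_TRAP or VINF_TRPM_XCPT_DISPATCHED is returned instead
       of an error status, so the caller just forwards the result. */
    return PGMPhysInterpretedRead(pVCpu, pCtxCore, pu32Value, GCPtrSrc, sizeof(*pu32Value));
}
#endif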
3629
3630/**
3631 * Performs a read of guest virtual memory for instruction emulation.
3632 *
3633 * This will check permissions, raise exceptions and update the access bits.
3634 *
3635 * The current implementation will bypass all access handlers. It may later be
3636 * changed to at least respect MMIO.
3637 *
3638 *
3639 * @returns VBox status code suitable to scheduling.
3640 * @retval VINF_SUCCESS if the read was performed successfully.
3641 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3642 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3643 *
3644 * @param pVCpu Handle to the current virtual CPU.
3645 * @param pCtxCore The context core.
3646 * @param pvDst Where to put the bytes we've read.
3647 * @param GCPtrSrc The source address.
3648 * @param cb The number of bytes to read. Not more than a page.
3649 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3650 * an appropriate error status will be returned (no
3651 * informational status at all).
3652 *
3653 *
3654 * @remarks Takes the PGM lock.
3655 * @remarks A page fault on the 2nd page of the access will be raised without
3656 * writing the bits on the first page since we're ASSUMING that the
3657 * caller is emulating an instruction access.
3658 * @remarks This function will dynamically map physical pages in GC. This may
3659 * unmap mappings done by the caller. Be careful!
3660 */
3661VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3662 bool fRaiseTrap)
3663{
3664 PVM pVM = pVCpu->CTX_SUFF(pVM);
3665 Assert(cb <= PAGE_SIZE);
3666 VMCPU_ASSERT_EMT(pVCpu);
3667
3668 /*
3669 * 1. Translate virtual to physical. This may fault.
3670 * 2. Map the physical address.
3671 * 3. Do the read operation.
3672 * 4. Set access bits if required.
3673 */
3674 int rc;
3675 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3676 if (cb <= cb1)
3677 {
3678 /*
3679 * Not crossing pages.
3680 */
3681 RTGCPHYS GCPhys;
3682 uint64_t fFlags;
3683 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3684 if (RT_SUCCESS(rc))
3685 {
3686 if (1) /** @todo we should check reserved bits ... */
3687 {
3688 const void *pvSrc;
3689 PGMPAGEMAPLOCK Lock;
3690 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3691 switch (rc)
3692 {
3693 case VINF_SUCCESS:
3694 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3695 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3696 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3697 PGMPhysReleasePageMappingLock(pVM, &Lock);
3698 break;
3699 case VERR_PGM_PHYS_PAGE_RESERVED:
3700 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3701 memset(pvDst, 0xff, cb);
3702 break;
3703 default:
3704 AssertMsgFailed(("%Rrc\n", rc));
3705 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3706 return rc;
3707 }
3708
3709 if (!(fFlags & X86_PTE_A))
3710 {
3711 /** @todo access bit emulation isn't 100% correct. */
3712 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3713 AssertRC(rc);
3714 }
3715 return VINF_SUCCESS;
3716 }
3717 }
3718 }
3719 else
3720 {
3721 /*
3722 * Crosses pages.
3723 */
3724 size_t cb2 = cb - cb1;
3725 uint64_t fFlags1;
3726 RTGCPHYS GCPhys1;
3727 uint64_t fFlags2;
3728 RTGCPHYS GCPhys2;
3729 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3730 if (RT_SUCCESS(rc))
3731 {
3732 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3733 if (RT_SUCCESS(rc))
3734 {
3735 if (1) /** @todo we should check reserved bits ... */
3736 {
3737 const void *pvSrc;
3738 PGMPAGEMAPLOCK Lock;
3739 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3740 switch (rc)
3741 {
3742 case VINF_SUCCESS:
3743 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3744 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3745 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3746 PGMPhysReleasePageMappingLock(pVM, &Lock);
3747 break;
3748 case VERR_PGM_PHYS_PAGE_RESERVED:
3749 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3750 memset(pvDst, 0xff, cb1);
3751 break;
3752 default:
3753 AssertMsgFailed(("%Rrc\n", rc));
3754 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3755 return rc;
3756 }
3757
3758 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3759 switch (rc)
3760 {
3761 case VINF_SUCCESS:
3762 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3763 PGMPhysReleasePageMappingLock(pVM, &Lock);
3764 break;
3765 case VERR_PGM_PHYS_PAGE_RESERVED:
3766 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3767 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3768 break;
3769 default:
3770 AssertMsgFailed(("%Rrc\n", rc));
3771 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3772 return rc;
3773 }
3774
3775 if (!(fFlags1 & X86_PTE_A))
3776 {
3777 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3778 AssertRC(rc);
3779 }
3780 if (!(fFlags2 & X86_PTE_A))
3781 {
3782 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3783 AssertRC(rc);
3784 }
3785 return VINF_SUCCESS;
3786 }
3787 /* sort out which page */
3788 }
3789 else
3790 GCPtrSrc += cb1; /* fault on 2nd page */
3791 }
3792 }
3793
3794 /*
3795 * Raise a #PF if we're allowed to do that.
3796 */
3797 /* Calc the error bits. */
3798 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3799 uint32_t uErr;
3800 switch (rc)
3801 {
3802 case VINF_SUCCESS:
3803 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3804 rc = VERR_ACCESS_DENIED;
3805 break;
3806
3807 case VERR_PAGE_NOT_PRESENT:
3808 case VERR_PAGE_TABLE_NOT_PRESENT:
3809 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3810 break;
3811
3812 default:
3813 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3814 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3815 return rc;
3816 }
3817 if (fRaiseTrap)
3818 {
3819 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3820 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3821 }
3822 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3823 return rc;
3824}
3825
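/*
 * Editor's illustrative sketch (not part of the original file): the
 * fRaiseTrap=false variant lets a caller probe guest memory without injecting
 * a #PF; a plain error status such as VERR_PAGE_NOT_PRESENT or
 * VERR_ACCESS_DENIED comes back instead.  The helper name is an assumption.
 */
#if 0
static bool pgmSampleTryPeekGuestByte(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc, uint8_t *pbValue)
{
    int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, pbValue, GCPtrSrc, sizeof(*pbValue),
                                              false /*fRaiseTrap*/);
    return RT_SUCCESS(rc);
}
#endif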
3826
3827/**
3828 * Performs a write to guest virtual memory for instruction emulation.
3829 *
3830 * This will check permissions, raise exceptions and update the dirty and access
3831 * bits.
3832 *
3833 * @returns VBox status code suitable to scheduling.
3834 * @retval VINF_SUCCESS if the write was performed successfully.
3835 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3836 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3837 *
3838 * @param pVCpu Handle to the current virtual CPU.
3839 * @param pCtxCore The context core.
3840 * @param GCPtrDst The destination address.
3841 * @param pvSrc What to write.
3842 * @param cb The number of bytes to write. Not more than a page.
3843 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3844 * an appropriate error status will be returned (no
3845 * informational status at all).
3846 *
3847 * @remarks Takes the PGM lock.
3848 * @remarks A page fault on the 2nd page of the access will be raised without
3849 * writing the bits on the first page since we're ASSUMING that the
3850 * caller is emulating an instruction access.
3851 * @remarks This function will dynamically map physical pages in GC. This may
3852 * unmap mappings done by the caller. Be careful!
3853 */
3854VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
3855 size_t cb, bool fRaiseTrap)
3856{
3857 Assert(cb <= PAGE_SIZE);
3858 PVM pVM = pVCpu->CTX_SUFF(pVM);
3859 VMCPU_ASSERT_EMT(pVCpu);
3860
3861 /*
3862 * 1. Translate virtual to physical. This may fault.
3863 * 2. Map the physical address.
3864 * 3. Do the write operation.
3865 * 4. Set access bits if required.
3866 */
3867 /** @todo Since this method is frequently used by EMInterpret or IOM
3868 * upon a write fault to a write access monitored page, we can
3869 * reuse the guest page table walking from the \#PF code. */
3870 int rc;
3871 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3872 if (cb <= cb1)
3873 {
3874 /*
3875 * Not crossing pages.
3876 */
3877 RTGCPHYS GCPhys;
3878 uint64_t fFlags;
3879 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3880 if (RT_SUCCESS(rc))
3881 {
3882 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3883 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3884 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3885 {
3886 void *pvDst;
3887 PGMPAGEMAPLOCK Lock;
3888 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3889 switch (rc)
3890 {
3891 case VINF_SUCCESS:
3892 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3893 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3894 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3895 PGMPhysReleasePageMappingLock(pVM, &Lock);
3896 break;
3897 case VERR_PGM_PHYS_PAGE_RESERVED:
3898 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3899 /* bit bucket */
3900 break;
3901 default:
3902 AssertMsgFailed(("%Rrc\n", rc));
3903 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3904 return rc;
3905 }
3906
3907 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3908 {
3909 /** @todo dirty & access bit emulation isn't 100% correct. */
3910 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3911 AssertRC(rc);
3912 }
3913 return VINF_SUCCESS;
3914 }
3915 rc = VERR_ACCESS_DENIED;
3916 }
3917 }
3918 else
3919 {
3920 /*
3921 * Crosses pages.
3922 */
3923 size_t cb2 = cb - cb1;
3924 uint64_t fFlags1;
3925 RTGCPHYS GCPhys1;
3926 uint64_t fFlags2;
3927 RTGCPHYS GCPhys2;
3928 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3929 if (RT_SUCCESS(rc))
3930 {
3931 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3932 if (RT_SUCCESS(rc))
3933 {
3934 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3935 && (fFlags2 & X86_PTE_RW))
3936 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3937 && CPUMGetGuestCPL(pVCpu) <= 2) )
3938 {
3939 void *pvDst;
3940 PGMPAGEMAPLOCK Lock;
3941 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3942 switch (rc)
3943 {
3944 case VINF_SUCCESS:
3945 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3946 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3947 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3948 PGMPhysReleasePageMappingLock(pVM, &Lock);
3949 break;
3950 case VERR_PGM_PHYS_PAGE_RESERVED:
3951 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3952 /* bit bucket */
3953 break;
3954 default:
3955 AssertMsgFailed(("%Rrc\n", rc));
3956 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3957 return rc;
3958 }
3959
3960 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3961 switch (rc)
3962 {
3963 case VINF_SUCCESS:
3964 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3965 PGMPhysReleasePageMappingLock(pVM, &Lock);
3966 break;
3967 case VERR_PGM_PHYS_PAGE_RESERVED:
3968 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3969 /* bit bucket */
3970 break;
3971 default:
3972 AssertMsgFailed(("%Rrc\n", rc));
3973 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3974 return rc;
3975 }
3976
3977 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3978 {
3979 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3980 AssertRC(rc);
3981 }
3982 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3983 {
3984 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3985 AssertRC(rc);
3986 }
3987 return VINF_SUCCESS;
3988 }
3989 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3990 GCPtrDst += cb1; /* fault on the 2nd page. */
3991 rc = VERR_ACCESS_DENIED;
3992 }
3993 else
3994 GCPtrDst += cb1; /* fault on the 2nd page. */
3995 }
3996 }
3997
3998 /*
3999 * Raise a #PF if we're allowed to do that.
4000 */
4001 /* Calc the error bits. */
4002 uint32_t uErr;
4003 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
4004 switch (rc)
4005 {
4006 case VINF_SUCCESS:
4007 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
4008 rc = VERR_ACCESS_DENIED;
4009 break;
4010
4011 case VERR_ACCESS_DENIED:
4012 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
4013 break;
4014
4015 case VERR_PAGE_NOT_PRESENT:
4016 case VERR_PAGE_TABLE_NOT_PRESENT:
4017 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
4018 break;
4019
4020 default:
4021 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
4022 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4023 return rc;
4024 }
4025 if (fRaiseTrap)
4026 {
4027 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
4028 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
4029 }
4030 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
4031 return rc;
4032}
4033
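/*
 * Editor's illustrative sketch (not part of the original file): a store
 * counterpart using PGMPhysInterpretedWriteNoHandlers with fRaiseTrap set, so
 * a failed access raises #PF for the guest and one of the informational trap
 * statuses is returned.  The helper name is an assumption.
 */
#if 0
static int pgmSampleEmulatedStoreU32(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, uint32_t u32Value)
{
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, &u32Value, sizeof(u32Value),
                                             true /*fRaiseTrap*/);
}
#endif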
4034
4035/**
4036 * Return the page type of the specified physical address.
4037 *
4038 * @returns The page type.
4039 * @param pVM Pointer to the VM.
4040 * @param GCPhys Guest physical address
4041 */
4042VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
4043{
4044 pgmLock(pVM);
4045 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
4046 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
4047 pgmUnlock(pVM);
4048
4049 return enmPgType;
4050}
4051
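/*
 * Editor's illustrative sketch (not part of the original file): checking
 * whether a guest physical address is backed by anything at all.  The
 * comparison against PGMPAGETYPE_INVALID follows directly from the
 * implementation above; the helper name is an assumption.
 */
#if 0
static bool pgmSampleIsGCPhysBacked(PVM pVM, RTGCPHYS GCPhys)
{
    return PGMPhysGetPageType(pVM, GCPhys) != PGMPAGETYPE_INVALID;
}
#endif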
4052
4053
4054
4055/**
4056 * Converts a GC physical address to a HC ring-3 pointer, with some
4057 * additional checks.
4058 *
4059 * @returns VBox status code (no informational statuses).
4060 * @retval VINF_SUCCESS on success.
4061 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4062 * access handler of some kind.
4063 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4064 * accesses or is odd in any way.
4065 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4066 *
4067 * @param pVM Pointer to the cross context VM structure.
4068 * @param pVCpu Pointer to the cross context virtual CPU structure of
4069 * the calling EMT.
4070 * @param GCPhys The GC physical address to convert. This API masks the
4071 * A20 line when necessary.
4072 * @param fWritable Whether write access is required.
4073 * @param ppv Where to store the pointer corresponding to GCPhys on
4074 * success.
4075 * @param pLock Where to store the page mapping lock.
4076 *
4077 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4078 * @thread EMT(pVCpu).
4079 */
4080VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4081 void **ppv, PPGMPAGEMAPLOCK pLock)
4082{
4083 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4084
4085 pgmLock(pVM);
4086
4087 PPGMRAMRANGE pRam;
4088 PPGMPAGE pPage;
4089 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4090 if (RT_SUCCESS(rc))
4091 {
4092 if (PGM_PAGE_IS_BALLOONED(pPage))
4093 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4094 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4095 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4096 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4097 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4098 rc = VINF_SUCCESS;
4099 else
4100 {
4101 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4102 {
4103 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4104 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4105 }
4106 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4107 {
4108 Assert(!fByPassHandlers);
4109 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4110 }
4111 }
4112 if (RT_SUCCESS(rc))
4113 {
4114 int rc2;
4115
4116 /* Make sure what we return is writable. */
4117 if (fWritable)
4118 switch (PGM_PAGE_GET_STATE(pPage))
4119 {
4120 case PGM_PAGE_STATE_ALLOCATED:
4121 break;
4122 case PGM_PAGE_STATE_BALLOONED:
4123 AssertFailed();
4124 case PGM_PAGE_STATE_ZERO:
4125 case PGM_PAGE_STATE_SHARED:
4126 case PGM_PAGE_STATE_WRITE_MONITORED:
4127 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4128 AssertLogRelRCReturn(rc2, rc2);
4129 break;
4130 }
4131
4132#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
4133 void *pv;
4134 rc = pgmRZDynMapHCPageInlined(pVCpu,
4135 PGM_PAGE_GET_HCPHYS(pPage),
4136 &pv
4137 RTLOG_COMMA_SRC_POS);
4138 if (RT_FAILURE(rc))
4139 return rc;
4140 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4141 pLock->pvPage = pv;
4142 pLock->pVCpu = pVCpu;
4143
4144#else
4145 /* Get a ring-3 mapping of the address. */
4146 PPGMPAGER3MAPTLBE pTlbe;
4147 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4148 AssertLogRelRCReturn(rc2, rc2);
4149
4150 /* Lock it and calculate the address. */
4151 if (fWritable)
4152 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4153 else
4154 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4155 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4156#endif
4157
4158 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4159 }
4160 else
4161 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4162
4163 /* else: handler catching all access, no pointer returned. */
4164 }
4165 else
4166 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4167
4168 pgmUnlock(pVM);
4169 return rc;
4170}
4171
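/*
 * Editor's illustrative sketch (not part of the original file): mapping a
 * guest physical page for writing and falling back to the handler-aware path
 * when one of the TLB catch statuses comes back.  The helper name and the
 * PGMACCESSORIGIN_IEM origin used for the fallback are assumptions for the
 * example; PGMPhysReleasePageMappingLock is used as elsewhere in this file.
 */
#if 0
static int pgmSampleWriteU8ViaIemPtr(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint8_t bValue)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;
        PGMPhysReleasePageMappingLock(pVM, &Lock);
        return VINF_SUCCESS;
    }
    /* VERR_PGM_PHYS_TLB_CATCH_WRITE, _CATCH_ALL or _UNASSIGNED: take the slow,
       handler-aware path instead of the direct pointer. */
    return PGMPhysWrite(pVM, GCPhys, &bValue, sizeof(bValue), PGMACCESSORIGIN_IEM);
}
#endif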
4172
4173/**
4174 * Checks if the given GCPhys page requires special handling for the given access
4175 * because it's MMIO or otherwise monitored.
4176 *
4177 * @returns VBox status code (no informational statuses).
4178 * @retval VINF_SUCCESS on success.
4179 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4180 * access handler of some kind.
4181 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4182 * accesses or is odd in any way.
4183 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4184 *
4185 * @param pVM Pointer to the VM.
4186 * @param GCPhys The GC physical address to convert. Since this is only
4187 * used for filling the REM TLB, the A20 mask must be
4188 * applied before calling this API.
4189 * @param fWritable Whether write access is required.
4190 *
4191 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
4192 * a stop gap thing that should be removed once there is a better TLB
4193 * for virtual address accesses.
4194 */
4195VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
4196{
4197 pgmLock(pVM);
4198 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4199
4200 PPGMRAMRANGE pRam;
4201 PPGMPAGE pPage;
4202 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4203 if (RT_SUCCESS(rc))
4204 {
4205 if (PGM_PAGE_IS_BALLOONED(pPage))
4206 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4207 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4208 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4209 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4210 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4211 rc = VINF_SUCCESS;
4212 else
4213 {
4214 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4215 {
4216 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4217 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4218 }
4219 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4220 {
4221 Assert(!fByPassHandlers);
4222 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4223 }
4224 }
4225 }
4226
4227 pgmUnlock(pVM);
4228 return rc;
4229}
4230
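/*
 * Editor's illustrative sketch (not part of the original file): probing a
 * page before caching a direct mapping, as a TLB-filling caller might do.
 * The helper name is an assumption; the status codes are those documented
 * above.
 */
#if 0
static bool pgmSampleCanCacheDirectWrite(PVM pVM, RTGCPHYS GCPhys)
{
    /* Only VINF_SUCCESS means the page can be accessed without going through
       an access handler; everything else needs the slow path. */
    return PGMPhysIemQueryAccess(pVM, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/) == VINF_SUCCESS;
}
#endif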