VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@55900

Last change on this file since 55900 was 55900, checked in by vboxsync, 10 years ago

PGM: Added a pVCpu parameter to all virtual handler callouts and also a PGMACCESSORIGIN parameter to the ring-3 one.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 150.5 KB
1/* $Id: PGMAllPhys.cpp 55900 2015-05-18 10:17:35Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/trpm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/em.h>
27#ifdef VBOX_WITH_REM
28# include <VBox/vmm/rem.h>
29#endif
30#include "PGMInternal.h"
31#include <VBox/vmm/vm.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50
51
52#ifndef IN_RING3
53
54/**
55 * \#PF Handler callback for physical memory accesses without a RC/R0 handler.
56 * This simply pushes everything to the HC handler.
57 *
58 * @returns VBox status code (appropriate for trap handling and GC return).
59 * @param pVM Pointer to the VM.
60 * @param uErrorCode CPU Error code.
61 * @param pRegFrame Trap register frame.
62 * @param pvFault The fault address (cr2).
63 * @param GCPhysFault The GC physical address corresponding to pvFault.
64 * @param pvUser User argument.
65 */
66VMMDECL(int) pgmPhysPfHandlerRedirectToHC(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
67 RTGCPHYS GCPhysFault, void *pvUser)
68{
69 NOREF(pVM); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
70 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
71}
72
73
74/**
75 * \#PF Handler callback for Guest ROM range write access.
76 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
77 *
78 * @returns VBox status code (appropriate for trap handling and GC return).
79 * @param pVM Pointer to the VM.
80 * @param uErrorCode CPU Error code.
81 * @param pRegFrame Trap register frame.
82 * @param pvFault The fault address (cr2).
83 * @param GCPhysFault The GC physical address corresponding to pvFault.
84 * @param pvUser User argument. Pointer to the ROM range structure.
85 */
86DECLEXPORT(int) pgmPhysRomWritePfHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
87 RTGCPHYS GCPhysFault, void *pvUser)
88{
89 int rc;
90 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
91 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
92 PVMCPU pVCpu = VMMGetCpu(pVM);
93 NOREF(uErrorCode); NOREF(pvFault);
94
95 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
96
97 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
98 switch (pRom->aPages[iPage].enmProt)
99 {
100 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
101 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
102 {
103 /*
104 * If it's a simple instruction which doesn't change the cpu state
105 * we will simply skip it. Otherwise we'll have to defer it to REM.
106 */
107 uint32_t cbOp;
108 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
109 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
110 if ( RT_SUCCESS(rc)
111 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
112 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
113 {
114 switch (pDis->bOpCode)
115 {
116 /** @todo Find other instructions we can safely skip, possibly
117 * adding this kind of detection to DIS or EM. */
118 case OP_MOV:
119 pRegFrame->rip += cbOp;
120 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
121 return VINF_SUCCESS;
122 }
123 }
124 break;
125 }
126
127 case PGMROMPROT_READ_RAM_WRITE_RAM:
128 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
129 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
130 AssertRC(rc);
131 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
132
133 case PGMROMPROT_READ_ROM_WRITE_RAM:
134 /* Handle it in ring-3 because it's *way* easier there. */
135 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
136 break;
137
138 default:
139 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
140 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
141 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
142 }
143
144 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
145 return VINF_EM_RAW_EMULATE_INSTR;
146}
147
148#endif /* !IN_RING3 */
149
150/**
151 * Invalidates the RAM range TLBs.
152 *
153 * @param pVM Pointer to the VM.
154 */
155void pgmPhysInvalidRamRangeTlbs(PVM pVM)
156{
157 pgmLock(pVM);
158 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
159 {
160 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR;
161 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR;
162 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
163 }
164 pgmUnlock(pVM);
165}
166
167
168/**
169 * Tests if a value of type RTGCPHYS is negative if the type had been signed
170 * instead of unsigned.
171 *
172 * @returns @c true if negative, @c false if positive or zero.
173 * @param a_GCPhys The value to test.
174 * @todo Move me to iprt/types.h.
175 */
176#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
177
178
179/**
180 * Slow worker for pgmPhysGetRange.
181 *
182 * @copydoc pgmPhysGetRange
183 */
184PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
185{
186 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
187
188 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
189 while (pRam)
190 {
191 RTGCPHYS off = GCPhys - pRam->GCPhys;
192 if (off < pRam->cb)
193 {
194 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
195 return pRam;
196 }
197 if (RTGCPHYS_IS_NEGATIVE(off))
198 pRam = pRam->CTX_SUFF(pLeft);
199 else
200 pRam = pRam->CTX_SUFF(pRight);
201 }
202 return NULL;
203}
204
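/*
 * Worked example for the lookup above (values invented for illustration): with a
 * range starting at GCPhys 0xa0000 and cb 0x20000, looking up 0xb8000 gives
 * off = 0x18000 < cb, i.e. a hit; looking up 0x9f000 gives off = (RTGCPHYS)-0x1000,
 * whose top bit is set, so RTGCPHYS_IS_NEGATIVE() sends the walk down the left
 * subtree. This is why the tree can be searched with a single unsigned subtraction.
 */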
205
206/**
207 * Slow worker for pgmPhysGetRangeAtOrAbove.
208 *
209 * @copydoc pgmPhysGetRangeAtOrAbove
210 */
211PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
212{
213 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
214
215 PPGMRAMRANGE pLastLeft = NULL;
216 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
217 while (pRam)
218 {
219 RTGCPHYS off = GCPhys - pRam->GCPhys;
220 if (off < pRam->cb)
221 {
222 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
223 return pRam;
224 }
225 if (RTGCPHYS_IS_NEGATIVE(off))
226 {
227 pLastLeft = pRam;
228 pRam = pRam->CTX_SUFF(pLeft);
229 }
230 else
231 pRam = pRam->CTX_SUFF(pRight);
232 }
233 return pLastLeft;
234}
235
236
237/**
238 * Slow worker for pgmPhysGetPage.
239 *
240 * @copydoc pgmPhysGetPage
241 */
242PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
243{
244 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
245
246 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
247 while (pRam)
248 {
249 RTGCPHYS off = GCPhys - pRam->GCPhys;
250 if (off < pRam->cb)
251 {
252 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
253 return &pRam->aPages[off >> PAGE_SHIFT];
254 }
255
256 if (RTGCPHYS_IS_NEGATIVE(off))
257 pRam = pRam->CTX_SUFF(pLeft);
258 else
259 pRam = pRam->CTX_SUFF(pRight);
260 }
261 return NULL;
262}
263
264
265/**
266 * Slow worker for pgmPhysGetPageEx.
267 *
268 * @copydoc pgmPhysGetPageEx
269 */
270int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
271{
272 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
273
274 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
275 while (pRam)
276 {
277 RTGCPHYS off = GCPhys - pRam->GCPhys;
278 if (off < pRam->cb)
279 {
280 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
281 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
282 return VINF_SUCCESS;
283 }
284
285 if (RTGCPHYS_IS_NEGATIVE(off))
286 pRam = pRam->CTX_SUFF(pLeft);
287 else
288 pRam = pRam->CTX_SUFF(pRight);
289 }
290
291 *ppPage = NULL;
292 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
293}
294
295
296/**
297 * Slow worker for pgmPhysGetPageAndRangeEx.
298 *
299 * @copydoc pgmPhysGetPageAndRangeEx
300 */
301int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
302{
303 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
304
305 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
306 while (pRam)
307 {
308 RTGCPHYS off = GCPhys - pRam->GCPhys;
309 if (off < pRam->cb)
310 {
311 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
312 *ppRam = pRam;
313 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
314 return VINF_SUCCESS;
315 }
316
317 if (RTGCPHYS_IS_NEGATIVE(off))
318 pRam = pRam->CTX_SUFF(pLeft);
319 else
320 pRam = pRam->CTX_SUFF(pRight);
321 }
322
323 *ppRam = NULL;
324 *ppPage = NULL;
325 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
326}
327
328
329/**
330 * Checks if Address Gate 20 is enabled or not.
331 *
332 * @returns true if enabled.
333 * @returns false if disabled.
334 * @param pVCpu Pointer to the VMCPU.
335 */
336VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
337{
338 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
339 return pVCpu->pgm.s.fA20Enabled;
340}
341
342
343/**
344 * Validates a GC physical address.
345 *
346 * @returns true if valid.
347 * @returns false if invalid.
348 * @param pVM Pointer to the VM.
349 * @param GCPhys The physical address to validate.
350 */
351VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
352{
353 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
354 return pPage != NULL;
355}
356
357
358/**
359 * Checks if a GC physical address is a normal page,
360 * i.e. not ROM, MMIO or reserved.
361 *
362 * @returns true if normal.
363 * @returns false if invalid, ROM, MMIO or reserved page.
364 * @param pVM Pointer to the VM.
365 * @param GCPhys The physical address to check.
366 */
367VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
368{
369 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
370 return pPage
371 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
372}
373
374
375/**
376 * Converts a GC physical address to a HC physical address.
377 *
378 * @returns VINF_SUCCESS on success.
379 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
380 * page but has no physical backing.
381 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
382 * GC physical address.
383 *
384 * @param pVM Pointer to the VM.
385 * @param GCPhys The GC physical address to convert.
386 * @param pHCPhys Where to store the HC physical address on success.
387 */
388VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
389{
390 pgmLock(pVM);
391 PPGMPAGE pPage;
392 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
393 if (RT_SUCCESS(rc))
394 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
395 pgmUnlock(pVM);
396 return rc;
397}
398
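/*
 * Illustrative caller sketch, not part of the original file: how the conversion
 * above might be used. The guest physical address is an invented example value.
 */
#if 0 /* example only */
    RTHCPHYS HCPhysEx;
    int rcEx = PGMPhysGCPhys2HCPhys(pVM, (RTGCPHYS)0x00100000 /* hypothetical */, &HCPhysEx);
    if (RT_SUCCESS(rcEx))
        Log(("example: GCPhys 0x00100000 -> HCPhys %RHp\n", HCPhysEx));
    else
        Log(("example: conversion failed, rc=%Rrc\n", rcEx));
#endif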
399
400/**
401 * Invalidates all page mapping TLBs.
402 *
403 * @param pVM Pointer to the VM.
404 */
405void pgmPhysInvalidatePageMapTLB(PVM pVM)
406{
407 pgmLock(pVM);
408 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
409
410 /* Clear the shared R0/R3 TLB completely. */
411 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
412 {
413 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
414 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
415 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
416 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
417 }
418
419 /** @todo clear the RC TLB whenever we add it. */
420
421 pgmUnlock(pVM);
422}
423
424
425/**
426 * Invalidates a page mapping TLB entry
427 *
428 * @param pVM Pointer to the VM.
429 * @param GCPhys GCPhys entry to flush
430 */
431void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
432{
433 PGM_LOCK_ASSERT_OWNER(pVM);
434
435 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
436
437#ifdef IN_RC
438 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
439 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
440 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
441 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
442 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
443#else
444 /* Clear the shared R0/R3 TLB entry. */
445 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
446 pTlbe->GCPhys = NIL_RTGCPHYS;
447 pTlbe->pPage = 0;
448 pTlbe->pMap = 0;
449 pTlbe->pv = 0;
450#endif
451
452 /** @todo clear the RC TLB whenever we add it. */
453}
454
455/**
456 * Makes sure that there is at least one handy page ready for use.
457 *
458 * This will also take the appropriate actions when reaching water-marks.
459 *
460 * @returns VBox status code.
461 * @retval VINF_SUCCESS on success.
462 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
463 *
464 * @param pVM Pointer to the VM.
465 *
466 * @remarks Must be called from within the PGM critical section. It may
467 * nip back to ring-3/0 in some cases.
468 */
469static int pgmPhysEnsureHandyPage(PVM pVM)
470{
471 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
472
473 /*
474 * Do we need to do anything special?
475 */
476#ifdef IN_RING3
477 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
478#else
479 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
480#endif
481 {
482 /*
483 * Allocate pages only if we're out of them, or in ring-3, almost out.
484 */
485#ifdef IN_RING3
486 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
487#else
488 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
489#endif
490 {
491 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
492 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
493#ifdef IN_RING3
494 int rc = PGMR3PhysAllocateHandyPages(pVM);
495#else
496 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
497#endif
498 if (RT_UNLIKELY(rc != VINF_SUCCESS))
499 {
500 if (RT_FAILURE(rc))
501 return rc;
502 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
503 if (!pVM->pgm.s.cHandyPages)
504 {
505 LogRel(("PGM: no more handy pages!\n"));
506 return VERR_EM_NO_MEMORY;
507 }
508 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
509 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
510#ifdef IN_RING3
511# ifdef VBOX_WITH_REM
512 REMR3NotifyFF(pVM);
513# endif
514#else
515 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
516#endif
517 }
518 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
519 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
520 ("%u\n", pVM->pgm.s.cHandyPages),
521 VERR_PGM_HANDY_PAGE_IPE);
522 }
523 else
524 {
525 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
526 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
527#ifndef IN_RING3
528 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
529 {
530 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
531 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
532 }
533#endif
534 }
535 }
536
537 return VINF_SUCCESS;
538}
539
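/*
 * Illustrative note (an assumption about the intended configuration, not taken
 * from this file): the watermark checks above only make sense if the allocation
 * threshold sits at or below the ring-3-return and force-flag thresholds, e.g.:
 */
#if 0 /* example only */
    AssertCompile(PGM_HANDY_PAGES_RZ_ALLOC <= PGM_HANDY_PAGES_RZ_TO_R3);
    AssertCompile(PGM_HANDY_PAGES_RZ_TO_R3 <= PGM_HANDY_PAGES_SET_FF);
    AssertCompile(PGM_HANDY_PAGES_R3_ALLOC <= PGM_HANDY_PAGES_SET_FF);
#endif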
540
541/**
542 * Replace a zero or shared page with new page that we can write to.
543 *
544 * @returns The following VBox status codes.
545 * @retval VINF_SUCCESS on success, pPage is modified.
546 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
547 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
548 *
549 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
550 *
551 * @param pVM Pointer to the VM.
552 * @param pPage The physical page tracking structure. This will
553 * be modified on success.
554 * @param GCPhys The address of the page.
555 *
556 * @remarks Must be called from within the PGM critical section. It may
557 * nip back to ring-3/0 in some cases.
558 *
559 * @remarks This function shouldn't really fail, however if it does
560 * it probably means we've screwed up the size of handy pages and/or
561 * the low-water mark. Or, that some device I/O is causing a lot of
562 * pages to be allocated while the host is in a low-memory
563 * condition. This latter should be handled elsewhere and in a more
564 * controlled manner, it's on the @bugref{3170} todo list...
565 */
566int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
567{
568 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
569
570 /*
571 * Prereqs.
572 */
573 PGM_LOCK_ASSERT_OWNER(pVM);
574 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
575 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
576
577# ifdef PGM_WITH_LARGE_PAGES
578 /*
579 * Try allocate a large page if applicable.
580 */
581 if ( PGMIsUsingLargePages(pVM)
582 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
583 {
584 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
585 PPGMPAGE pBasePage;
586
587 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
588 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
589 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
590 {
591 rc = pgmPhysAllocLargePage(pVM, GCPhys);
592 if (rc == VINF_SUCCESS)
593 return rc;
594 }
595 /* Mark the base as type page table, so we don't check over and over again. */
596 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
597
598 /* fall back to 4KB pages. */
599 }
600# endif
601
602 /*
603 * Flush any shadow page table mappings of the page.
604 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
605 */
606 bool fFlushTLBs = false;
607 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
608 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
609
610 /*
611 * Ensure that we've got a page handy, take it and use it.
612 */
613 int rc2 = pgmPhysEnsureHandyPage(pVM);
614 if (RT_FAILURE(rc2))
615 {
616 if (fFlushTLBs)
617 PGM_INVL_ALL_VCPU_TLBS(pVM);
618 Assert(rc2 == VERR_EM_NO_MEMORY);
619 return rc2;
620 }
621 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
622 PGM_LOCK_ASSERT_OWNER(pVM);
623 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
624 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
625
626 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
627 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
628 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
629 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
630 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
631 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
632
633 /*
634 * There are one or two actions to be taken the next time we allocate handy pages:
635 * - Tell the GMM (global memory manager) what the page is being used for.
636 * (Speeds up replacement operations - sharing and defragmenting.)
637 * - If the current backing is shared, it must be freed.
638 */
639 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
640 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
641
642 void const *pvSharedPage = NULL;
643 if (PGM_PAGE_IS_SHARED(pPage))
644 {
645 /* Mark this shared page for freeing/dereferencing. */
646 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
647 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
648
649 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
650 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
651 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
652 pVM->pgm.s.cSharedPages--;
653
654 /* Grab the address of the page so we can make a copy later on. (safe) */
655 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
656 AssertRC(rc);
657 }
658 else
659 {
660 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
661 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
662 pVM->pgm.s.cZeroPages--;
663 }
664
665 /*
666 * Do the PGMPAGE modifications.
667 */
668 pVM->pgm.s.cPrivatePages++;
669 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
670 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
671 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
672 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
673 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
674
675 /* Copy the shared page contents to the replacement page. */
676 if (pvSharedPage)
677 {
678 /* Get the virtual address of the new page. */
679 PGMPAGEMAPLOCK PgMpLck;
680 void *pvNewPage;
681 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
682 if (RT_SUCCESS(rc))
683 {
684 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
685 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
686 }
687 }
688
689 if ( fFlushTLBs
690 && rc != VINF_PGM_GCPHYS_ALIASED)
691 PGM_INVL_ALL_VCPU_TLBS(pVM);
692 return rc;
693}
694
695#ifdef PGM_WITH_LARGE_PAGES
696
697/**
698 * Replace a 2 MB range of zero pages with new pages that we can write to.
699 *
700 * @returns The following VBox status codes.
701 * @retval VINF_SUCCESS on success, pPage is modified.
702 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
703 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
704 *
705 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
706 *
707 * @param pVM Pointer to the VM.
708 * @param GCPhys The address of the page.
709 *
710 * @remarks Must be called from within the PGM critical section. It may
711 * nip back to ring-3/0 in some cases.
712 */
713int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
714{
715 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
716 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
717
718 /*
719 * Prereqs.
720 */
721 PGM_LOCK_ASSERT_OWNER(pVM);
722 Assert(PGMIsUsingLargePages(pVM));
723
724 PPGMPAGE pFirstPage;
725 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
726 if ( RT_SUCCESS(rc)
727 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
728 {
729 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
730
731 /* Don't call this function for already allocated pages. */
732 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
733
734 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
735 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
736 {
737 /* Lazy approach: check all pages in the 2 MB range.
738 * The whole range must be ram and unallocated. */
739 GCPhys = GCPhysBase;
740 unsigned iPage;
741 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
742 {
743 PPGMPAGE pSubPage;
744 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
745 if ( RT_FAILURE(rc)
746 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
747 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
748 {
749 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
750 break;
751 }
752 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
753 GCPhys += PAGE_SIZE;
754 }
755 if (iPage != _2M/PAGE_SIZE)
756 {
757 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
758 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
759 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
760 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
761 }
762
763 /*
764 * Do the allocation.
765 */
766# ifdef IN_RING3
767 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
768# else
769 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
770# endif
771 if (RT_SUCCESS(rc))
772 {
773 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
774 pVM->pgm.s.cLargePages++;
775 return VINF_SUCCESS;
776 }
777
778 /* If we fail once, it most likely means the host's memory is too
779 fragmented; don't bother trying again. */
780 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
781 PGMSetLargePageUsage(pVM, false);
782 return rc;
783 }
784 }
785 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
786}
787
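/*
 * Illustrative note: _2M/PAGE_SIZE is 2 MB / 4 KB = 512, so the verification loop
 * above walks 512 PGMPAGE entries and requires every one of them to be an
 * unallocated (zero) RAM page before the large-page allocation is attempted.
 */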
788
789/**
790 * Recheck the entire 2 MB range to see if we can use it again as a large page.
791 *
792 * @returns The following VBox status codes.
793 * @retval VINF_SUCCESS on success, the large page can be used again
794 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
795 *
796 * @param pVM Pointer to the VM.
797 * @param GCPhys The address of the page.
798 * @param pLargePage Page structure of the base page
799 */
800int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
801{
802 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
803
804 GCPhys &= X86_PDE2M_PAE_PG_MASK;
805
806 /* Check the base page. */
807 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
808 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
809 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
810 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
811 {
812 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
813 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
814 }
815
816 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
817 /* Check all remaining pages in the 2 MB range. */
818 unsigned i;
819 GCPhys += PAGE_SIZE;
820 for (i = 1; i < _2M/PAGE_SIZE; i++)
821 {
822 PPGMPAGE pPage;
823 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
824 AssertRCBreak(rc);
825
826 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
827 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
828 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
829 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
830 {
831 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
832 break;
833 }
834
835 GCPhys += PAGE_SIZE;
836 }
837 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
838
839 if (i == _2M/PAGE_SIZE)
840 {
841 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
842 pVM->pgm.s.cLargePagesDisabled--;
843 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
844 return VINF_SUCCESS;
845 }
846
847 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
848}
849
850#endif /* PGM_WITH_LARGE_PAGES */
851
852/**
853 * Deal with a write monitored page.
854 *
855 * @returns VBox strict status code.
856 *
857 * @param pVM Pointer to the VM.
858 * @param pPage The physical page tracking structure.
859 *
860 * @remarks Called from within the PGM critical section.
861 */
862void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
863{
864 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
865 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
866 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
867 Assert(pVM->pgm.s.cMonitoredPages > 0);
868 pVM->pgm.s.cMonitoredPages--;
869 pVM->pgm.s.cWrittenToPages++;
870}
871
872
873/**
874 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
875 *
876 * @returns VBox strict status code.
877 * @retval VINF_SUCCESS on success.
878 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
879 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
880 *
881 * @param pVM Pointer to the VM.
882 * @param pPage The physical page tracking structure.
883 * @param GCPhys The address of the page.
884 *
885 * @remarks Called from within the PGM critical section.
886 */
887int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
888{
889 PGM_LOCK_ASSERT_OWNER(pVM);
890 switch (PGM_PAGE_GET_STATE(pPage))
891 {
892 case PGM_PAGE_STATE_WRITE_MONITORED:
893 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
894 /* fall thru */
895 default: /* to shut up GCC */
896 case PGM_PAGE_STATE_ALLOCATED:
897 return VINF_SUCCESS;
898
899 /*
900 * Zero pages can be dummy pages for MMIO or reserved memory,
901 * so we need to check the flags before joining cause with
902 * shared page replacement.
903 */
904 case PGM_PAGE_STATE_ZERO:
905 if (PGM_PAGE_IS_MMIO(pPage))
906 return VERR_PGM_PHYS_PAGE_RESERVED;
907 /* fall thru */
908 case PGM_PAGE_STATE_SHARED:
909 return pgmPhysAllocPage(pVM, pPage, GCPhys);
910
911 /* Not allowed to write to ballooned pages. */
912 case PGM_PAGE_STATE_BALLOONED:
913 return VERR_PGM_PHYS_PAGE_BALLOONED;
914 }
915}
916
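/*
 * Illustrative caller sketch (an assumption, not from the original file): a typical
 * sequence before writing to a guest page, while owning the PGM lock, is to look the
 * page up and then force it into the ALLOCATED state. GCPhysEx is an invented address.
 */
#if 0 /* example only */
    PPGMPAGE pPageEx;
    RTGCPHYS const GCPhysEx = 0x2000; /* hypothetical */
    int rcEx = pgmPhysGetPageEx(pVM, GCPhysEx, &pPageEx);
    if (RT_SUCCESS(rcEx))
        rcEx = pgmPhysPageMakeWritable(pVM, pPageEx, GCPhysEx); /* replaces ZERO/SHARED backing */
#endif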
917
918/**
919 * Internal usage: Map the page specified by its GMM ID.
920 *
921 * This is similar to pgmPhysPageMap.
922 *
923 * @returns VBox status code.
924 *
925 * @param pVM Pointer to the VM.
926 * @param idPage The Page ID.
927 * @param HCPhys The physical address (for RC).
928 * @param ppv Where to store the mapping address.
929 *
930 * @remarks Called from within the PGM critical section. The mapping is only
931 * valid while you are inside this section.
932 */
933int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
934{
935 /*
936 * Validation.
937 */
938 PGM_LOCK_ASSERT_OWNER(pVM);
939 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
940 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
941 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
942
943#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
944 /*
945 * Map it by HCPhys.
946 */
947 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
948
949#else
950 /*
951 * Find/make Chunk TLB entry for the mapping chunk.
952 */
953 PPGMCHUNKR3MAP pMap;
954 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
955 if (pTlbe->idChunk == idChunk)
956 {
957 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
958 pMap = pTlbe->pChunk;
959 }
960 else
961 {
962 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
963
964 /*
965 * Find the chunk, map it if necessary.
966 */
967 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
968 if (pMap)
969 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
970 else
971 {
972# ifdef IN_RING0
973 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
974 AssertRCReturn(rc, rc);
975 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
976 Assert(pMap);
977# else
978 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
979 if (RT_FAILURE(rc))
980 return rc;
981# endif
982 }
983
984 /*
985 * Enter it into the Chunk TLB.
986 */
987 pTlbe->idChunk = idChunk;
988 pTlbe->pChunk = pMap;
989 }
990
991 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
992 return VINF_SUCCESS;
993#endif
994}
995
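/*
 * Illustrative sketch of how a GMM page id decomposes; the shift and mask macros are
 * the ones used by the function above, while the id value itself is invented.
 */
#if 0 /* example only */
    uint32_t const idPageEx  = 0x00012345;                      /* hypothetical GMM page id */
    uint32_t const idChunkEx = idPageEx >> GMM_CHUNKID_SHIFT;   /* selects the mapping chunk */
    uint32_t const offPageEx = idPageEx & GMM_PAGEID_IDX_MASK;  /* page index within that chunk */
#endif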
996
997/**
998 * Maps a page into the current virtual address space so it can be accessed.
999 *
1000 * @returns VBox status code.
1001 * @retval VINF_SUCCESS on success.
1002 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1003 *
1004 * @param pVM Pointer to the VM.
1005 * @param pPage The physical page tracking structure.
1006 * @param GCPhys The address of the page.
1007 * @param ppMap Where to store the address of the mapping tracking structure.
1008 * @param ppv Where to store the mapping address of the page. The page
1009 * offset is masked off!
1010 *
1011 * @remarks Called from within the PGM critical section.
1012 */
1013static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1014{
1015 PGM_LOCK_ASSERT_OWNER(pVM);
1016 NOREF(GCPhys);
1017
1018#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1019 /*
1020 * Just some sketchy GC/R0-darwin code.
1021 */
1022 *ppMap = NULL;
1023 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1024 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1025 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1026 return VINF_SUCCESS;
1027
1028#else /* IN_RING3 || IN_RING0 */
1029
1030
1031 /*
1032 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1033 */
1034 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1035 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1036 {
1037 /* Decode the page id to a page in a MMIO2 ram range. */
1038 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1039 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1040 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1041 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1042 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1043 pPage->s.idPage, pPage->s.uStateY),
1044 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1045 PPGMMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1046 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1047 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1048 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1049 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
1050 *ppMap = NULL;
1051 return VINF_SUCCESS;
1052 }
1053
1054 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1055 if (idChunk == NIL_GMM_CHUNKID)
1056 {
1057 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1058 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1059 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1060 {
1061 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1062 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1063 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1064 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1065 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1066 }
1067 else
1068 {
1069 static uint8_t s_abPlayItSafe[0x1000*2]; /* I don't dare return the zero page at the moment. */
1070 *ppv = (uint8_t *)((uintptr_t)&s_abPlayItSafe[0x1000] & ~(uintptr_t)0xfff);
1071 }
1072 *ppMap = NULL;
1073 return VINF_SUCCESS;
1074 }
1075
1076 /*
1077 * Find/make Chunk TLB entry for the mapping chunk.
1078 */
1079 PPGMCHUNKR3MAP pMap;
1080 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1081 if (pTlbe->idChunk == idChunk)
1082 {
1083 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1084 pMap = pTlbe->pChunk;
1085 AssertPtr(pMap->pv);
1086 }
1087 else
1088 {
1089 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1090
1091 /*
1092 * Find the chunk, map it if necessary.
1093 */
1094 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1095 if (pMap)
1096 {
1097 AssertPtr(pMap->pv);
1098 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1099 }
1100 else
1101 {
1102#ifdef IN_RING0
1103 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1104 AssertRCReturn(rc, rc);
1105 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1106 Assert(pMap);
1107#else
1108 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1109 if (RT_FAILURE(rc))
1110 return rc;
1111#endif
1112 AssertPtr(pMap->pv);
1113 }
1114
1115 /*
1116 * Enter it into the Chunk TLB.
1117 */
1118 pTlbe->idChunk = idChunk;
1119 pTlbe->pChunk = pMap;
1120 }
1121
1122 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1123 *ppMap = pMap;
1124 return VINF_SUCCESS;
1125#endif /* IN_RING3 */
1126}
1127
1128
1129/**
1130 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1131 *
1132 * This is typically used in paths where we cannot use the TLB methods (like ROM
1133 * pages) or where there is no point in using them since we won't get many hits.
1134 *
1135 * @returns VBox strict status code.
1136 * @retval VINF_SUCCESS on success.
1137 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1138 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1139 *
1140 * @param pVM Pointer to the VM.
1141 * @param pPage The physical page tracking structure.
1142 * @param GCPhys The address of the page.
1143 * @param ppv Where to store the mapping address of the page. The page
1144 * offset is masked off!
1145 *
1146 * @remarks Called from within the PGM critical section. The mapping is only
1147 * valid while you are inside the section.
1148 */
1149int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1150{
1151 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1152 if (RT_SUCCESS(rc))
1153 {
1154 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1155 PPGMPAGEMAP pMapIgnore;
1156 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1157 if (RT_FAILURE(rc2)) /* preserve rc */
1158 rc = rc2;
1159 }
1160 return rc;
1161}
1162
1163
1164/**
1165 * Maps a page into the current virtual address space so it can be accessed for
1166 * both writing and reading.
1167 *
1168 * This is typically used in paths where we cannot use the TLB methods (like ROM
1169 * pages) or where there is no point in using them since we won't get many hits.
1170 *
1171 * @returns VBox status code.
1172 * @retval VINF_SUCCESS on success.
1173 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1174 *
1175 * @param pVM Pointer to the VM.
1176 * @param pPage The physical page tracking structure. Must be in the
1177 * allocated state.
1178 * @param GCPhys The address of the page.
1179 * @param ppv Where to store the mapping address of the page. The page
1180 * offset is masked off!
1181 *
1182 * @remarks Called from within the PGM critical section. The mapping is only
1183 * valid while you are inside the section.
1184 */
1185int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1186{
1187 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1188 PPGMPAGEMAP pMapIgnore;
1189 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1190}
1191
1192
1193/**
1194 * Maps a page into the current virtual address space so it can be accessed for
1195 * reading.
1196 *
1197 * This is typically used in paths where we cannot use the TLB methods (like ROM
1198 * pages) or where there is no point in using them since we won't get many hits.
1199 *
1200 * @returns VBox status code.
1201 * @retval VINF_SUCCESS on success.
1202 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1203 *
1204 * @param pVM Pointer to the VM.
1205 * @param pPage The physical page tracking structure.
1206 * @param GCPhys The address of the page.
1207 * @param ppv Where to store the mapping address of the page. The page
1208 * offset is masked off!
1209 *
1210 * @remarks Called from within the PGM critical section. The mapping is only
1211 * valid while you are inside this section.
1212 */
1213int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1214{
1215 PPGMPAGEMAP pMapIgnore;
1216 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1217}
1218
1219#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1220
1221/**
1222 * Load a guest page into the ring-3 physical TLB.
1223 *
1224 * @returns VBox status code.
1225 * @retval VINF_SUCCESS on success
1226 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1227 * @param pVM Pointer to the VM.
1228 * @param GCPhys The guest physical address in question.
1229 */
1230int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
1231{
1232 PGM_LOCK_ASSERT_OWNER(pVM);
1233
1234 /*
1235 * Find the ram range and page and hand it over to the with-page function.
1236 * 99.8% of requests are expected to be in the first range.
1237 */
1238 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1239 if (!pPage)
1240 {
1241 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1242 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1243 }
1244
1245 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1246}
1247
1248
1249/**
1250 * Load a guest page into the ring-3 physical TLB.
1251 *
1252 * @returns VBox status code.
1253 * @retval VINF_SUCCESS on success
1254 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1255 *
1256 * @param pVM Pointer to the VM.
1257 * @param pPage Pointer to the PGMPAGE structure corresponding to
1258 * GCPhys.
1259 * @param GCPhys The guest physical address in question.
1260 */
1261int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1262{
1263 PGM_LOCK_ASSERT_OWNER(pVM);
1264 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1265
1266 /*
1267 * Map the page.
1268 * Make a special case for the zero page as it is kind of special.
1269 */
1270 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1271 if ( !PGM_PAGE_IS_ZERO(pPage)
1272 && !PGM_PAGE_IS_BALLOONED(pPage))
1273 {
1274 void *pv;
1275 PPGMPAGEMAP pMap;
1276 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1277 if (RT_FAILURE(rc))
1278 return rc;
1279 pTlbe->pMap = pMap;
1280 pTlbe->pv = pv;
1281 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1282 }
1283 else
1284 {
1285 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1286 pTlbe->pMap = NULL;
1287 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1288 }
1289#ifdef PGM_WITH_PHYS_TLB
1290 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1291 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1292 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1293 else
1294 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1295#else
1296 pTlbe->GCPhys = NIL_RTGCPHYS;
1297#endif
1298 pTlbe->pPage = pPage;
1299 return VINF_SUCCESS;
1300}
1301
1302#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1303
1304/**
1305 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1306 * own the PGM lock and therefore not need to lock the mapped page.
1307 *
1308 * @returns VBox status code.
1309 * @retval VINF_SUCCESS on success.
1310 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1311 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1312 *
1313 * @param pVM Pointer to the VM.
1314 * @param GCPhys The guest physical address of the page that should be mapped.
1315 * @param pPage Pointer to the PGMPAGE structure for the page.
1316 * @param ppv Where to store the address corresponding to GCPhys.
1317 *
1318 * @internal
1319 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1320 */
1321int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1322{
1323 int rc;
1324 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1325 PGM_LOCK_ASSERT_OWNER(pVM);
1326 pVM->pgm.s.cDeprecatedPageLocks++;
1327
1328 /*
1329 * Make sure the page is writable.
1330 */
1331 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1332 {
1333 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1334 if (RT_FAILURE(rc))
1335 return rc;
1336 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1337 }
1338 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1339
1340 /*
1341 * Get the mapping address.
1342 */
1343#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1344 void *pv;
1345 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1346 PGM_PAGE_GET_HCPHYS(pPage),
1347 &pv
1348 RTLOG_COMMA_SRC_POS);
1349 if (RT_FAILURE(rc))
1350 return rc;
1351 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1352#else
1353 PPGMPAGEMAPTLBE pTlbe;
1354 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1355 if (RT_FAILURE(rc))
1356 return rc;
1357 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1358#endif
1359 return VINF_SUCCESS;
1360}
1361
1362#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1363
1364/**
1365 * Locks a page mapping for writing.
1366 *
1367 * @param pVM Pointer to the VM.
1368 * @param pPage The page.
1369 * @param pTlbe The mapping TLB entry for the page.
1370 * @param pLock The lock structure (output).
1371 */
1372DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1373{
1374 PPGMPAGEMAP pMap = pTlbe->pMap;
1375 if (pMap)
1376 pMap->cRefs++;
1377
1378 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1379 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1380 {
1381 if (cLocks == 0)
1382 pVM->pgm.s.cWriteLockedPages++;
1383 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1384 }
1385 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1386 {
1387 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1388 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1389 if (pMap)
1390 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1391 }
1392
1393 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1394 pLock->pvMap = pMap;
1395}
1396
1397/**
1398 * Locks a page mapping for reading.
1399 *
1400 * @param pVM Pointer to the VM.
1401 * @param pPage The page.
1402 * @param pTlbe The mapping TLB entry for the page.
1403 * @param pLock The lock structure (output).
1404 */
1405DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1406{
1407 PPGMPAGEMAP pMap = pTlbe->pMap;
1408 if (pMap)
1409 pMap->cRefs++;
1410
1411 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1412 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1413 {
1414 if (cLocks == 0)
1415 pVM->pgm.s.cReadLockedPages++;
1416 PGM_PAGE_INC_READ_LOCKS(pPage);
1417 }
1418 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1419 {
1420 PGM_PAGE_INC_READ_LOCKS(pPage);
1421 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1422 if (pMap)
1423 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1424 }
1425
1426 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1427 pLock->pvMap = pMap;
1428}
1429
1430#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1431
1432
1433/**
1434 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1435 * own the PGM lock and have access to the page structure.
1436 *
1437 * @returns VBox status code.
1438 * @retval VINF_SUCCESS on success.
1439 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1440 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1441 *
1442 * @param pVM Pointer to the VM.
1443 * @param GCPhys The guest physical address of the page that should be mapped.
1444 * @param pPage Pointer to the PGMPAGE structure for the page.
1445 * @param ppv Where to store the address corresponding to GCPhys.
1446 * @param pLock Where to store the lock information that
1447 * pgmPhysReleaseInternalPageMappingLock needs.
1448 *
1449 * @internal
1450 */
1451int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1452{
1453 int rc;
1454 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1455 PGM_LOCK_ASSERT_OWNER(pVM);
1456
1457 /*
1458 * Make sure the page is writable.
1459 */
1460 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1461 {
1462 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1463 if (RT_FAILURE(rc))
1464 return rc;
1465 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1466 }
1467 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1468
1469 /*
1470 * Do the job.
1471 */
1472#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1473 void *pv;
1474 PVMCPU pVCpu = VMMGetCpu(pVM);
1475 rc = pgmRZDynMapHCPageInlined(pVCpu,
1476 PGM_PAGE_GET_HCPHYS(pPage),
1477 &pv
1478 RTLOG_COMMA_SRC_POS);
1479 if (RT_FAILURE(rc))
1480 return rc;
1481 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1482 pLock->pvPage = pv;
1483 pLock->pVCpu = pVCpu;
1484
1485#else
1486 PPGMPAGEMAPTLBE pTlbe;
1487 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1488 if (RT_FAILURE(rc))
1489 return rc;
1490 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1491 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1492#endif
1493 return VINF_SUCCESS;
1494}
1495
1496
1497/**
1498 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1499 * own the PGM lock and have access to the page structure.
1500 *
1501 * @returns VBox status code.
1502 * @retval VINF_SUCCESS on success.
1503 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1504 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1505 *
1506 * @param pVM Pointer to the VM.
1507 * @param GCPhys The guest physical address of the page that should be mapped.
1508 * @param pPage Pointer to the PGMPAGE structure for the page.
1509 * @param ppv Where to store the address corresponding to GCPhys.
1510 * @param pLock Where to store the lock information that
1511 * pgmPhysReleaseInternalPageMappingLock needs.
1512 *
1513 * @internal
1514 */
1515int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1516{
1517 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1518 PGM_LOCK_ASSERT_OWNER(pVM);
1519 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1520
1521 /*
1522 * Do the job.
1523 */
1524#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1525 void *pv;
1526 PVMCPU pVCpu = VMMGetCpu(pVM);
1527 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1528 PGM_PAGE_GET_HCPHYS(pPage),
1529 &pv
1530 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1531 if (RT_FAILURE(rc))
1532 return rc;
1533 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1534 pLock->pvPage = pv;
1535 pLock->pVCpu = pVCpu;
1536
1537#else
1538 PPGMPAGEMAPTLBE pTlbe;
1539 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1540 if (RT_FAILURE(rc))
1541 return rc;
1542 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1543 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1544#endif
1545 return VINF_SUCCESS;
1546}
1547
1548
1549/**
1550 * Requests the mapping of a guest page into the current context.
1551 *
1552 * This API should only be used for very short periods, as it will consume scarce
1553 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1554 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1555 *
1556 * This API will assume your intention is to write to the page, and will
1557 * therefore replace shared and zero pages. If you do not intend to modify
1558 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1559 *
1560 * @returns VBox status code.
1561 * @retval VINF_SUCCESS on success.
1562 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1563 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1564 *
1565 * @param pVM Pointer to the VM.
1566 * @param GCPhys The guest physical address of the page that should be
1567 * mapped.
1568 * @param ppv Where to store the address corresponding to GCPhys.
1569 * @param pLock Where to store the lock information that
1570 * PGMPhysReleasePageMappingLock needs.
1571 *
1572 * @remarks The caller is responsible for dealing with access handlers.
1573 * @todo Add an informational return code for pages with access handlers?
1574 *
1575 * @remark Avoid calling this API from within critical sections (other than
1576 * the PGM one) because of the deadlock risk. External threads may
1577 * need to delegate jobs to the EMTs.
1578 * @remarks Only one page is mapped! Make no assumption about what's after or
1579 * before the returned page!
1580 * @thread Any thread.
1581 */
1582VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1583{
1584 int rc = pgmLock(pVM);
1585 AssertRCReturn(rc, rc);
1586
1587#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1588 /*
1589 * Find the page and make sure it's writable.
1590 */
1591 PPGMPAGE pPage;
1592 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1593 if (RT_SUCCESS(rc))
1594 {
1595 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1596 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1597 if (RT_SUCCESS(rc))
1598 {
1599 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1600
1601 PVMCPU pVCpu = VMMGetCpu(pVM);
1602 void *pv;
1603 rc = pgmRZDynMapHCPageInlined(pVCpu,
1604 PGM_PAGE_GET_HCPHYS(pPage),
1605 &pv
1606 RTLOG_COMMA_SRC_POS);
1607 if (RT_SUCCESS(rc))
1608 {
1609 AssertRCSuccess(rc);
1610
1611 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1612 *ppv = pv;
1613 pLock->pvPage = pv;
1614 pLock->pVCpu = pVCpu;
1615 }
1616 }
1617 }
1618
1619#else /* IN_RING3 || IN_RING0 */
1620 /*
1621 * Query the Physical TLB entry for the page (may fail).
1622 */
1623 PPGMPAGEMAPTLBE pTlbe;
1624 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1625 if (RT_SUCCESS(rc))
1626 {
1627 /*
1628 * If the page is shared, the zero page, or being write monitored
1629 * it must be converted to a page that's writable if possible.
1630 */
1631 PPGMPAGE pPage = pTlbe->pPage;
1632 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1633 {
1634 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1635 if (RT_SUCCESS(rc))
1636 {
1637 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1638 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1639 }
1640 }
1641 if (RT_SUCCESS(rc))
1642 {
1643 /*
1644 * Now, just perform the locking and calculate the return address.
1645 */
1646 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1647 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1648 }
1649 }
1650
1651#endif /* IN_RING3 || IN_RING0 */
1652 pgmUnlock(pVM);
1653 return rc;
1654}
1655
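/*
 * Illustrative caller sketch, not part of the original file: the map/modify/release
 * pattern described in the comments above. The address and the byte written are
 * invented; PGMPhysReleasePageMappingLock is the counterpart named in the documentation.
 */
#if 0 /* example only */
    void          *pvPageEx;
    PGMPAGEMAPLOCK LockEx;
    int rcEx = PGMPhysGCPhys2CCPtr(pVM, (RTGCPHYS)0x1000 /* hypothetical */, &pvPageEx, &LockEx);
    if (RT_SUCCESS(rcEx))
    {
        *(uint8_t *)pvPageEx = 0xff;                   /* write through the mapping */
        PGMPhysReleasePageMappingLock(pVM, &LockEx);   /* release ASAP, as advised above */
    }
#endif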
1656
1657/**
1658 * Requests the mapping of a guest page into the current context.
1659 *
1660 * This API should only be used for very short periods, as it will consume scarce
1661 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1662 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1663 *
1664 * @returns VBox status code.
1665 * @retval VINF_SUCCESS on success.
1666 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1667 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1668 *
1669 * @param pVM Pointer to the VM.
1670 * @param GCPhys The guest physical address of the page that should be
1671 * mapped.
1672 * @param ppv Where to store the address corresponding to GCPhys.
1673 * @param pLock Where to store the lock information that
1674 * PGMPhysReleasePageMappingLock needs.
1675 *
1676 * @remarks The caller is responsible for dealing with access handlers.
1677 * @todo Add an informational return code for pages with access handlers?
1678 *
1679 * @remarks Avoid calling this API from within critical sections (other than
1680 * the PGM one) because of the deadlock risk.
1681 * @remarks Only one page is mapped! Make no assumption about what's after or
1682 * before the returned page!
1683 * @thread Any thread.
1684 */
1685VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1686{
1687 int rc = pgmLock(pVM);
1688 AssertRCReturn(rc, rc);
1689
1690#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1691 /*
1692 * Find the page and make sure it's readable.
1693 */
1694 PPGMPAGE pPage;
1695 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1696 if (RT_SUCCESS(rc))
1697 {
1698 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1699 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1700 else
1701 {
1702 PVMCPU pVCpu = VMMGetCpu(pVM);
1703 void *pv;
1704 rc = pgmRZDynMapHCPageInlined(pVCpu,
1705 PGM_PAGE_GET_HCPHYS(pPage),
1706 &pv
1707 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1708 if (RT_SUCCESS(rc))
1709 {
1710 AssertRCSuccess(rc);
1711
1712 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1713 *ppv = pv;
1714 pLock->pvPage = pv;
1715 pLock->pVCpu = pVCpu;
1716 }
1717 }
1718 }
1719
1720#else /* IN_RING3 || IN_RING0 */
1721 /*
1722 * Query the Physical TLB entry for the page (may fail).
1723 */
1724 PPGMPAGEMAPTLBE pTlbe;
1725 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1726 if (RT_SUCCESS(rc))
1727 {
1728 /* MMIO pages don't have any readable backing. */
1729 PPGMPAGE pPage = pTlbe->pPage;
1730 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1731 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1732 else
1733 {
1734 /*
1735 * Now, just perform the locking and calculate the return address.
1736 */
1737 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1738 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1739 }
1740 }
1741
1742#endif /* IN_RING3 || IN_RING0 */
1743 pgmUnlock(pVM);
1744 return rc;
1745}
1746
1747
1748/**
1749 * Requests the mapping of a guest page given by virtual address into the current context.
1750 *
1751 * This API should only be used for a very short term, as it will consume
1752 * scarce resources (R0 and GC) in the mapping cache. When you're done
1753 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1754 *
1755 * This API will assume your intention is to write to the page, and will
1756 * therefore replace shared and zero pages. If you do not intend to modify
1757 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1758 *
1759 * @returns VBox status code.
1760 * @retval VINF_SUCCESS on success.
1761 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1762 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1763 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1764 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1765 *
1766 * @param pVCpu Pointer to the VMCPU.
1767 * @param GCPtr The guest virtual address of the page that should be mapped.
1768 * @param ppv Where to store the address corresponding to GCPtr.
1769 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1770 *
1771 * @remark Avoid calling this API from within critical sections (other than
1772 * the PGM one) because of the deadlock risk.
1773 * @thread EMT
1774 */
1775VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1776{
1777 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1778 RTGCPHYS GCPhys;
1779 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1780 if (RT_SUCCESS(rc))
1781 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1782 return rc;
1783}
1784
1785
1786/**
1787 * Requests the mapping of a guest page given by virtual address into the current context.
1788 *
1789 * This API should only be used for a very short term, as it will consume
1790 * scarce resources (R0 and GC) in the mapping cache. When you're done
1791 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1792 *
1793 * @returns VBox status code.
1794 * @retval VINF_SUCCESS on success.
1795 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1796 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1797 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1798 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1799 *
1800 * @param pVCpu Pointer to the VMCPU.
1801 * @param GCPtr The guest virtual address of the page that should be mapped.
1802 * @param ppv Where to store the address corresponding to GCPtr.
1803 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1804 *
1805 * @remark Avoid calling this API from within critical sections (other than
1806 * the PGM one) because of the deadlock risk.
1807 * @thread EMT
1808 */
1809VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1810{
1811 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1812 RTGCPHYS GCPhys;
1813 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1814 if (RT_SUCCESS(rc))
1815 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1816 return rc;
1817}
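
/*
 * Illustrative usage sketch (editor's addition, not part of the original source):
 * reading a small guest structure through its guest virtual address on the EMT.
 * The helper name is hypothetical and the read is assumed not to cross a page
 * boundary (only one page gets mapped); the APIs used are the ones above.
 *
 *      static int examplePeekGuestU32(PVMCPU pVCpu, RTGCPTR GCPtr, uint32_t *pu32)
 *      {
 *          void const     *pv;
 *          PGMPAGEMAPLOCK  Lock;
 *          int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtr, &pv, &Lock);
 *          if (RT_SUCCESS(rc))
 *          {
 *              memcpy(pu32, pv, sizeof(*pu32));    // assumes the 4 bytes fit within the mapped page
 *              PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
 *          }
 *          return rc;
 *      }
 */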
1818
1819
1820/**
1821 * Release the mapping of a guest page.
1822 *
1823 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1824 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1825 *
1826 * @param pVM Pointer to the VM.
1827 * @param pLock The lock structure initialized by the mapping function.
1828 */
1829VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1830{
1831#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1832 Assert(pLock->pvPage != NULL);
1833 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1834 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1835 pLock->pVCpu = NULL;
1836 pLock->pvPage = NULL;
1837
1838#else
1839 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1840 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1841 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1842
1843 pLock->uPageAndType = 0;
1844 pLock->pvMap = NULL;
1845
1846 pgmLock(pVM);
1847 if (fWriteLock)
1848 {
1849 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1850 Assert(cLocks > 0);
1851 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1852 {
1853 if (cLocks == 1)
1854 {
1855 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1856 pVM->pgm.s.cWriteLockedPages--;
1857 }
1858 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1859 }
1860
1861 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1862 {
1863 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1864 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1865 Assert(pVM->pgm.s.cMonitoredPages > 0);
1866 pVM->pgm.s.cMonitoredPages--;
1867 pVM->pgm.s.cWrittenToPages++;
1868 }
1869 }
1870 else
1871 {
1872 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1873 Assert(cLocks > 0);
1874 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1875 {
1876 if (cLocks == 1)
1877 {
1878 Assert(pVM->pgm.s.cReadLockedPages > 0);
1879 pVM->pgm.s.cReadLockedPages--;
1880 }
1881 PGM_PAGE_DEC_READ_LOCKS(pPage);
1882 }
1883 }
1884
1885 if (pMap)
1886 {
1887 Assert(pMap->cRefs >= 1);
1888 pMap->cRefs--;
1889 }
1890 pgmUnlock(pVM);
1891#endif /* IN_RING3 || IN_RING0 */
1892}
1893
1894
1895/**
1896 * Release the internal mapping of a guest page.
1897 *
1898 * This is the counterpart of pgmPhysGCPhys2CCPtrInternalEx and
1899 * pgmPhysGCPhys2CCPtrInternalReadOnly.
1900 *
1901 * @param pVM Pointer to the VM.
1902 * @param pLock The lock structure initialized by the mapping function.
1903 *
1904 * @remarks Caller must hold the PGM lock.
1905 */
1906void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1907{
1908 PGM_LOCK_ASSERT_OWNER(pVM);
1909 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
1910}
1911
1912
1913/**
1914 * Converts a GC physical address to a HC ring-3 pointer.
1915 *
1916 * @returns VINF_SUCCESS on success.
1917 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1918 * page but has no physical backing.
1919 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1920 * GC physical address.
1921 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1922 * a dynamic ram chunk boundary
1923 *
1924 * @param pVM Pointer to the VM.
1925 * @param GCPhys The GC physical address to convert.
1926 * @param pR3Ptr Where to store the R3 pointer on success.
1927 *
1928 * @deprecated Avoid when possible!
1929 */
1930int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1931{
1932/** @todo this is kind of hacky and needs some more work. */
1933#ifndef DEBUG_sandervl
1934 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1935#endif
1936
1937 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
1938#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1939 NOREF(pVM); NOREF(pR3Ptr);
1940 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1941#else
1942 pgmLock(pVM);
1943
1944 PPGMRAMRANGE pRam;
1945 PPGMPAGE pPage;
1946 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1947 if (RT_SUCCESS(rc))
1948 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1949
1950 pgmUnlock(pVM);
1951 Assert(rc <= VINF_SUCCESS);
1952 return rc;
1953#endif
1954}
1955
1956#if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
1957
1958/**
1959 * Maps and locks a guest CR3 or PD (PAE) page.
1960 *
1961 * @returns VINF_SUCCESS on success.
1962 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1963 * page but has no physical backing.
1964 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1965 * GC physical address.
1966 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1967 * a dynamic ram chunk boundary
1968 *
1969 * @param pVM Pointer to the VM.
1970 * @param GCPhys The GC physical address to convert.
1971 * @param pR3Ptr Where to store the R3 pointer on success. This may or
1972 * may not be valid in ring-0 depending on the
1973 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
1974 *
1975 * @remarks The caller must own the PGM lock.
1976 */
1977int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
1978{
1979
1980 PPGMRAMRANGE pRam;
1981 PPGMPAGE pPage;
1982 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
1983 if (RT_SUCCESS(rc))
1984 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
1985 Assert(rc <= VINF_SUCCESS);
1986 return rc;
1987}
1988
1994
1995#endif
1996
1997/**
1998 * Converts a guest pointer to a GC physical address.
1999 *
2000 * This uses the current CR3/CR0/CR4 of the guest.
2001 *
2002 * @returns VBox status code.
2003 * @param pVCpu Pointer to the VMCPU.
2004 * @param GCPtr The guest pointer to convert.
2005 * @param pGCPhys Where to store the GC physical address.
2006 */
2007VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2008{
2009 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
2010 if (pGCPhys && RT_SUCCESS(rc))
2011 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2012 return rc;
2013}
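
/*
 * Illustrative usage sketch (editor's addition, not part of the original source):
 * translating a guest virtual address and feeding the result to the physical
 * access APIs. Variable names (pvDst, cbToRead, enmOrigin) are hypothetical.
 * Note that the low 12 bits of GCPtr are carried over into GCPhys above.
 *
 *      RTGCPHYS GCPhys;
 *      int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
 *      if (RT_SUCCESS(rc))
 *          // valid as long as the range stays within this page; PGMPhysReadGCPtr
 *          // below handles the general, page-crossing case itself.
 *          rc = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pvDst, cbToRead, enmOrigin);
 */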
2014
2015
2016/**
2017 * Converts a guest pointer to a HC physical address.
2018 *
2019 * This uses the current CR3/CR0/CR4 of the guest.
2020 *
2021 * @returns VBox status code.
2022 * @param pVCpu Pointer to the VMCPU.
2023 * @param GCPtr The guest pointer to convert.
2024 * @param pHCPhys Where to store the HC physical address.
2025 */
2026VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2027{
2028 PVM pVM = pVCpu->CTX_SUFF(pVM);
2029 RTGCPHYS GCPhys;
2030 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2031 if (RT_SUCCESS(rc))
2032 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2033 return rc;
2034}
2035
2036
2037
2038#undef LOG_GROUP
2039#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2040
2041
2042#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2043/**
2044 * Cache PGMPhys memory access
2045 *
2046 * @param pVM Pointer to the VM.
2047 * @param pCache Cache structure pointer
2048 * @param GCPhys GC physical address
2049 * @param pbR3 Ring-3 pointer corresponding to the physical page
2050 *
2051 * @thread EMT.
2052 */
2053static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2054{
2055 uint32_t iCacheIndex;
2056
2057 Assert(VM_IS_EMT(pVM));
2058
2059 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2060 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2061
2062 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2063
2064 ASMBitSet(&pCache->aEntries, iCacheIndex);
2065
2066 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2067 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2068}
2069#endif /* IN_RING3 */
2070
2071
2072/**
2073 * Deals with reading from a page with one or more ALL access handlers.
2074 *
2075 * @returns VBox status code. Can be ignored in ring-3.
2076 * @retval VINF_SUCCESS.
2077 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2078 *
2079 * @param pVM Pointer to the VM.
2080 * @param pPage The page descriptor.
2081 * @param GCPhys The physical address to start reading at.
2082 * @param pvBuf Where to put the bits we read.
2083 * @param cb How much to read - less or equal to a page.
2084 * @param enmOrigin The origin of this call.
2085 */
2086static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb, PGMACCESSORIGIN enmOrigin)
2087{
2088 /*
2089 * The most frequent access here is MMIO and shadowed ROM.
2090 * The current code ASSUMES that all these access handlers cover full pages!
2091 */
2092
2093 /*
2094 * Whatever we do, we need the source page, so map it first.
2095 */
2096 PGMPAGEMAPLOCK PgMpLck;
2097 const void *pvSrc = NULL;
2098 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2099 if (RT_FAILURE(rc))
2100 {
2101 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2102 GCPhys, pPage, rc));
2103 memset(pvBuf, 0xff, cb);
2104 return VINF_SUCCESS;
2105 }
2106 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2107
2108 /*
2109 * Deal with any physical handlers.
2110 */
2111 PVMCPU pVCpu = VMMGetCpu(pVM);
2112#ifdef IN_RING3
2113 PPGMPHYSHANDLER pPhys = NULL;
2114#endif
2115 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2116 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2117 {
2118#ifdef IN_RING3
2119 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2120 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2121 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2122 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2123 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2124
2125 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
2126 void *pvUser = pPhys->CTX_SUFF(pvUser);
2127
2128 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2129 STAM_PROFILE_START(&pPhys->Stat, h);
2130 PGM_LOCK_ASSERT_OWNER(pVM);
2131 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2132 pgmUnlock(pVM);
2133 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
2134 pgmLock(pVM);
2135# ifdef VBOX_WITH_STATISTICS
2136 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2137 if (pPhys)
2138 STAM_PROFILE_STOP(&pPhys->Stat, h);
2139# else
2140 pPhys = NULL; /* might not be valid anymore. */
2141# endif
2142 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
2143#else
2144 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2145 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2146 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2147 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2148#endif
2149 }
2150
2151 /*
2152 * Deal with any virtual handlers.
2153 */
2154 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
2155 {
2156 unsigned iPage;
2157 PPGMVIRTHANDLER pVirt;
2158
2159 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
2160 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
2161 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
2162 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2163 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
2164
2165 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2166#ifdef IN_RING3
2167 if (pVirtType->pfnHandlerR3)
2168 {
2169 if (!pPhys)
2170 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2171 else
2172 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
2173 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2174 + (iPage << PAGE_SHIFT)
2175 + (GCPhys & PAGE_OFFSET_MASK);
2176
2177 STAM_PROFILE_START(&pVirt->Stat, h);
2178 rc2 = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin,
2179 pVirt->CTX_SUFF(pvUser));
2180 STAM_PROFILE_STOP(&pVirt->Stat, h);
2181 if (rc2 == VINF_SUCCESS)
2182 rc = VINF_SUCCESS;
2183 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2184 }
2185 else
2186 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2187#else
2188 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2189 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2190 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2191 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2192#endif
2193 }
2194
2195 /*
2196 * Take the default action.
2197 */
2198 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2199 memcpy(pvBuf, pvSrc, cb);
2200 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2201 return rc;
2202}
2203
2204
2205/**
2206 * Read physical memory.
2207 *
2208 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2209 * want to ignore those.
2210 *
2211 * @returns VBox status code. Can be ignored in ring-3.
2212 * @retval VINF_SUCCESS.
2213 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2214 *
2215 * @param pVM Pointer to the VM.
2216 * @param GCPhys Physical address start reading from.
2217 * @param pvBuf Where to put the read bits.
2218 * @param cbRead How many bytes to read.
2219 * @param enmOrigin The origin of this call.
2220 */
2221VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2222{
2223 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2224 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2225
2226 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2227 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2228
2229 pgmLock(pVM);
2230
2231 /*
2232 * Copy loop on ram ranges.
2233 */
2234 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2235 for (;;)
2236 {
2237 /* Inside range or not? */
2238 if (pRam && GCPhys >= pRam->GCPhys)
2239 {
2240 /*
2241 * Must work our way through this range page by page.
2242 */
2243 RTGCPHYS off = GCPhys - pRam->GCPhys;
2244 while (off < pRam->cb)
2245 {
2246 unsigned iPage = off >> PAGE_SHIFT;
2247 PPGMPAGE pPage = &pRam->aPages[iPage];
2248 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2249 if (cb > cbRead)
2250 cb = cbRead;
2251
2252 /*
2253 * Any ALL access handlers?
2254 */
2255 if (RT_UNLIKELY( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2256 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage)))
2257 {
2258 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2259 if (RT_FAILURE(rc))
2260 {
2261 pgmUnlock(pVM);
2262 return rc;
2263 }
2264 }
2265 else
2266 {
2267 /*
2268 * Get the pointer to the page.
2269 */
2270 PGMPAGEMAPLOCK PgMpLck;
2271 const void *pvSrc;
2272 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2273 if (RT_SUCCESS(rc))
2274 {
2275 memcpy(pvBuf, pvSrc, cb);
2276 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2277 }
2278 else
2279 {
2280 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2281 pRam->GCPhys + off, pPage, rc));
2282 memset(pvBuf, 0xff, cb);
2283 }
2284 }
2285
2286 /* next page */
2287 if (cb >= cbRead)
2288 {
2289 pgmUnlock(pVM);
2290 return VINF_SUCCESS;
2291 }
2292 cbRead -= cb;
2293 off += cb;
2294 pvBuf = (char *)pvBuf + cb;
2295 } /* walk pages in ram range. */
2296
2297 GCPhys = pRam->GCPhysLast + 1;
2298 }
2299 else
2300 {
2301 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2302
2303 /*
2304 * Unassigned address space.
2305 */
2306 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2307 if (cb >= cbRead)
2308 {
2309 memset(pvBuf, 0xff, cbRead);
2310 break;
2311 }
2312 memset(pvBuf, 0xff, cb);
2313
2314 cbRead -= cb;
2315 pvBuf = (char *)pvBuf + cb;
2316 GCPhys += cb;
2317 }
2318
2319 /* Advance range if necessary. */
2320 while (pRam && GCPhys > pRam->GCPhysLast)
2321 pRam = pRam->CTX_SUFF(pNext);
2322 } /* Ram range walk */
2323
2324 pgmUnlock(pVM);
2325 return VINF_SUCCESS;
2326}
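
/*
 * Illustrative usage sketch (editor's addition, not part of the original source):
 * a handler-respecting read of guest physical memory, e.g. from device emulation
 * code in ring-3. PGMACCESSORIGIN_DEVICE is assumed to be one of the
 * PGMACCESSORIGIN values; pick whichever origin matches the caller.
 *
 *      uint8_t abBuf[64];
 *      int rc = PGMPhysRead(pVM, GCPhys, abBuf, sizeof(abBuf), PGMACCESSORIGIN_DEVICE);
 *      // In R0/RC a VERR_PGM_PHYS_WR_HIT_HANDLER return means the access must be
 *      // retried in ring-3; in ring-3 the call always fills the buffer (0xff for
 *      // unassigned space) and returns VINF_SUCCESS.
 */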
2327
2328
2329/**
2330 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2331 *
2332 * @returns VBox status code. Can be ignored in ring-3.
2333 * @retval VINF_SUCCESS.
2334 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2335 *
2336 * @param pVM Pointer to the VM.
2337 * @param pPage The page descriptor.
2338 * @param GCPhys The physical address to start writing at.
2339 * @param pvBuf What to write.
2340 * @param cbWrite How much to write - less or equal to a page.
2341 * @param enmOrigin The origin of this call.
2342 */
2343static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2344 PGMACCESSORIGIN enmOrigin)
2345{
2346 PGMPAGEMAPLOCK PgMpLck;
2347 void *pvDst = NULL;
2348 int rc;
2349
2350 /*
2351 * Give priority to physical handlers (like #PF does).
2352 *
2353 * Hope for a lonely physical handler first that covers the whole
2354 * write area. This should be a pretty frequent case with MMIO and
2355 * the heavy usage of full page handlers in the page pool.
2356 */
2357 PVMCPU pVCpu = VMMGetCpu(pVM);
2358 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2359 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage) /* screw virtual handlers on MMIO pages */)
2360 {
2361 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2362 if (pCur)
2363 {
2364 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2365
2366 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2367 if (cbRange > cbWrite)
2368 cbRange = cbWrite;
2369
2370#ifndef IN_RING3
2371 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2372 NOREF(cbRange);
2373 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2374 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2375
2376#else /* IN_RING3 */
2377 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler));
2378 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2379 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2380 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2381 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2382 else
2383 rc = VINF_SUCCESS;
2384 if (RT_SUCCESS(rc))
2385 {
2386 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
2387 void *pvUser = pCur->CTX_SUFF(pvUser);
2388
2389 STAM_PROFILE_START(&pCur->Stat, h);
2390 PGM_LOCK_ASSERT_OWNER(pVM);
2391 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2392 pgmUnlock(pVM);
2393 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2394 pgmLock(pVM);
2395# ifdef VBOX_WITH_STATISTICS
2396 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2397 if (pCur)
2398 STAM_PROFILE_STOP(&pCur->Stat, h);
2399# else
2400 pCur = NULL; /* might not be valid anymore. */
2401# endif
2402 if (rc == VINF_PGM_HANDLER_DO_DEFAULT && pvDst)
2403 {
2404 if (pvDst)
2405 memcpy(pvDst, pvBuf, cbRange);
2406 }
2407 else
2408 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT,
2409 ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur ? pCur->pszDesc : ""));
2410 }
2411 else
2412 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2413 GCPhys, pPage, rc), rc);
2414 if (RT_LIKELY(cbRange == cbWrite))
2415 {
2416 if (pvDst)
2417 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2418 return VINF_SUCCESS;
2419 }
2420
2421 /* more fun to be had below */
2422 cbWrite -= cbRange;
2423 GCPhys += cbRange;
2424 pvBuf = (uint8_t *)pvBuf + cbRange;
2425 pvDst = (uint8_t *)pvDst + cbRange;
2426#endif /* IN_RING3 */
2427 }
2428 /* else: the handler is somewhere else in the page, deal with it below. */
2429 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2430 }
2431 /*
2432 * A virtual handler without any interfering physical handlers.
2433 * Hopefully it'll cover the whole write.
2434 */
2435 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2436 {
2437 unsigned iPage;
2438 PPGMVIRTHANDLER pCur;
2439 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2440 if (RT_SUCCESS(rc))
2441 {
2442 PPGMVIRTHANDLERTYPEINT pCurType = PGMVIRTANDLER_GET_TYPE(pVM, pCur);
2443
2444 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2445 if (cbRange > cbWrite)
2446 cbRange = cbWrite;
2447
2448#ifndef IN_RING3
2449 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2450 NOREF(cbRange);
2451 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2452 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2453
2454#else /* IN_RING3 */
2455
2456 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2457 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2458 if (RT_SUCCESS(rc))
2459 {
2460 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2461 if (pCurType->pfnHandlerR3)
2462 {
2463 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2464 + (iPage << PAGE_SHIFT)
2465 + (GCPhys & PAGE_OFFSET_MASK);
2466
2467 STAM_PROFILE_START(&pCur->Stat, h);
2468 rc = pCurType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2469 enmOrigin, pCur->CTX_SUFF(pvUser));
2470 STAM_PROFILE_STOP(&pCur->Stat, h);
2471 }
2472 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2473 memcpy(pvDst, pvBuf, cbRange);
2474 else
2475 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2476 }
2477 else
2478 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2479 GCPhys, pPage, rc), rc);
2480 if (RT_LIKELY(cbRange == cbWrite))
2481 {
2482 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2483 return VINF_SUCCESS;
2484 }
2485
2486 /* more fun to be had below */
2487 cbWrite -= cbRange;
2488 GCPhys += cbRange;
2489 pvBuf = (uint8_t *)pvBuf + cbRange;
2490 pvDst = (uint8_t *)pvDst + cbRange;
2491#endif
2492 }
2493 /* else: the handler is somewhere else in the page, deal with it below. */
2494 }
2495
2496 /*
2497 * Deal with all the odd ends.
2498 */
2499
2500 /* We need a writable destination page. */
2501 if (!pvDst)
2502 {
2503 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2504 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2505 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2506 GCPhys, pPage, rc), rc);
2507 }
2508
2509 /* The loop state (big + ugly). */
2510 unsigned iVirtPage = 0;
2511 PPGMVIRTHANDLER pVirt = NULL;
2512 uint32_t offVirt = PAGE_SIZE;
2513 uint32_t offVirtLast = PAGE_SIZE;
2514 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2515
2516 PPGMPHYSHANDLER pPhys = NULL;
2517 uint32_t offPhys = PAGE_SIZE;
2518 uint32_t offPhysLast = PAGE_SIZE;
2519 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
2520
2521 /* The loop. */
2522 for (;;)
2523 {
2524 /*
2525 * Find the closest handler at or above GCPhys.
2526 */
2527 if (fMoreVirt && !pVirt)
2528 {
2529 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2530 if (RT_SUCCESS(rc))
2531 {
2532 offVirt = 0;
2533 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2534 }
2535 else
2536 {
2537 PPGMPHYS2VIRTHANDLER pVirtPhys;
2538 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2539 GCPhys, true /* fAbove */);
2540 if ( pVirtPhys
2541 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2542 {
2543 /* ASSUME that pVirtPhys only covers one page. */
2544 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2545 Assert(pVirtPhys->Core.Key > GCPhys);
2546
2547 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2548 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2549 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2550 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2551 }
2552 else
2553 {
2554 pVirt = NULL;
2555 fMoreVirt = false;
2556 offVirt = offVirtLast = PAGE_SIZE;
2557 }
2558 }
2559 }
2560
2561 if (fMorePhys && !pPhys)
2562 {
2563 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2564 if (pPhys)
2565 {
2566 offPhys = 0;
2567 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2568 }
2569 else
2570 {
2571 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2572 GCPhys, true /* fAbove */);
2573 if ( pPhys
2574 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2575 {
2576 offPhys = pPhys->Core.Key - GCPhys;
2577 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2578 }
2579 else
2580 {
2581 pPhys = NULL;
2582 fMorePhys = false;
2583 offPhys = offPhysLast = PAGE_SIZE;
2584 }
2585 }
2586 }
2587
2588 /*
2589 * Handle access to space without handlers (that's easy).
2590 */
2591 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2592 uint32_t cbRange = (uint32_t)cbWrite;
2593 if (offPhys && offVirt)
2594 {
2595 if (cbRange > offPhys)
2596 cbRange = offPhys;
2597 if (cbRange > offVirt)
2598 cbRange = offVirt;
2599 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2600 }
2601 /*
2602 * Physical handler.
2603 */
2604 else if (!offPhys && offVirt)
2605 {
2606 if (cbRange > offPhysLast + 1)
2607 cbRange = offPhysLast + 1;
2608 if (cbRange > offVirt)
2609 cbRange = offVirt;
2610#ifdef IN_RING3
2611 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2612 void *pvUser = pPhys->CTX_SUFF(pvUser);
2613
2614 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2615 STAM_PROFILE_START(&pPhys->Stat, h);
2616 PGM_LOCK_ASSERT_OWNER(pVM);
2617 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2618 pgmUnlock(pVM);
2619 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2620 pgmLock(pVM);
2621# ifdef VBOX_WITH_STATISTICS
2622 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2623 if (pPhys)
2624 STAM_PROFILE_STOP(&pPhys->Stat, h);
2625# else
2626 pPhys = NULL; /* might not be valid anymore. */
2627# endif
2628 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2629#else
2630 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2631 NOREF(cbRange);
2632 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2633 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2634 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2635#endif
2636 }
2637 /*
2638 * Virtual handler.
2639 */
2640 else if (offPhys && !offVirt)
2641 {
2642 if (cbRange > offVirtLast + 1)
2643 cbRange = offVirtLast + 1;
2644 if (cbRange > offPhys)
2645 cbRange = offPhys;
2646
2647 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2648#ifdef IN_RING3
2649 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2650 if (pVirtType->pfnHandlerR3)
2651 {
2652 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2653 + (iVirtPage << PAGE_SHIFT)
2654 + (GCPhys & PAGE_OFFSET_MASK);
2655 STAM_PROFILE_START(&pVirt->Stat, h);
2656 rc = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2657 enmOrigin, pVirt->CTX_SUFF(pvUser));
2658 STAM_PROFILE_STOP(&pVirt->Stat, h);
2659 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2660 }
2661 pVirt = NULL;
2662#else
2663 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2664 NOREF(cbRange);
2665 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2666 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2667 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2668#endif
2669 }
2670 /*
2671 * Both... give the physical one priority.
2672 */
2673 else
2674 {
2675 Assert(!offPhys && !offVirt);
2676 if (cbRange > offVirtLast + 1)
2677 cbRange = offVirtLast + 1;
2678 if (cbRange > offPhysLast + 1)
2679 cbRange = offPhysLast + 1;
2680
2681 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2682#ifdef IN_RING3
2683 if (pVirtType->pfnHandlerR3)
2684 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2685 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2686
2687 PFNPGMR3PHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2688 void *pvUser = pPhys->CTX_SUFF(pvUser);
2689
2690 STAM_PROFILE_START(&pPhys->Stat, h);
2691 PGM_LOCK_ASSERT_OWNER(pVM);
2692 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2693 pgmUnlock(pVM);
2694 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2695 pgmLock(pVM);
2696# ifdef VBOX_WITH_STATISTICS
2697 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2698 if (pPhys)
2699 STAM_PROFILE_STOP(&pPhys->Stat, h);
2700# else
2701 pPhys = NULL; /* might not be valid anymore. */
2702# endif
2703 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2704 if (pVirtType->pfnHandlerR3)
2705 {
2706
2707 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2708 + (iVirtPage << PAGE_SHIFT)
2709 + (GCPhys & PAGE_OFFSET_MASK);
2710 STAM_PROFILE_START(&pVirt->Stat, h2);
2711 int rc2 = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2712 enmOrigin, pVirt->CTX_SUFF(pvUser));
2713 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2714 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2715 rc = VINF_SUCCESS;
2716 else
2717 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2718 }
2719 pPhys = NULL;
2720 pVirt = NULL;
2721#else
2722 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2723 NOREF(cbRange);
2724 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2725 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2726 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2727#endif
2728 }
2729 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2730 memcpy(pvDst, pvBuf, cbRange);
2731
2732 /*
2733 * Advance if we've got more stuff to do.
2734 */
2735 if (cbRange >= cbWrite)
2736 {
2737 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2738 return VINF_SUCCESS;
2739 }
2740
2741 cbWrite -= cbRange;
2742 GCPhys += cbRange;
2743 pvBuf = (uint8_t *)pvBuf + cbRange;
2744 pvDst = (uint8_t *)pvDst + cbRange;
2745
2746 offPhys -= cbRange;
2747 offPhysLast -= cbRange;
2748 offVirt -= cbRange;
2749 offVirtLast -= cbRange;
2750 }
2751}
2752
2753
2754/**
2755 * Write to physical memory.
2756 *
2757 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2758 * want to ignore those.
2759 *
2760 * @returns VBox status code. Can be ignored in ring-3.
2761 * @retval VINF_SUCCESS.
2762 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2763 *
2764 * @param pVM Pointer to the VM.
2765 * @param GCPhys Physical address to write to.
2766 * @param pvBuf What to write.
2767 * @param cbWrite How many bytes to write.
2768 * @param enmOrigin Who is calling.
2769 */
2770VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2771{
2772 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2773 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2774 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2775
2776 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2777 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2778
2779 pgmLock(pVM);
2780
2781 /*
2782 * Copy loop on ram ranges.
2783 */
2784 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2785 for (;;)
2786 {
2787 /* Inside range or not? */
2788 if (pRam && GCPhys >= pRam->GCPhys)
2789 {
2790 /*
2791 * Must work our way through this range page by page.
2792 */
2793 RTGCPTR off = GCPhys - pRam->GCPhys;
2794 while (off < pRam->cb)
2795 {
2796 RTGCPTR iPage = off >> PAGE_SHIFT;
2797 PPGMPAGE pPage = &pRam->aPages[iPage];
2798 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2799 if (cb > cbWrite)
2800 cb = cbWrite;
2801
2802 /*
2803 * Any active WRITE or ALL access handlers?
2804 */
2805 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
2806 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2807 {
2808 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2809 if (RT_FAILURE(rc))
2810 {
2811 pgmUnlock(pVM);
2812 return rc;
2813 }
2814 }
2815 else
2816 {
2817 /*
2818 * Get the pointer to the page.
2819 */
2820 PGMPAGEMAPLOCK PgMpLck;
2821 void *pvDst;
2822 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2823 if (RT_SUCCESS(rc))
2824 {
2825 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2826 memcpy(pvDst, pvBuf, cb);
2827 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2828 }
2829 /* Ignore writes to ballooned pages. */
2830 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2831 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2832 pRam->GCPhys + off, pPage, rc));
2833 }
2834
2835 /* next page */
2836 if (cb >= cbWrite)
2837 {
2838 pgmUnlock(pVM);
2839 return VINF_SUCCESS;
2840 }
2841
2842 cbWrite -= cb;
2843 off += cb;
2844 pvBuf = (const char *)pvBuf + cb;
2845 } /* walk pages in ram range */
2846
2847 GCPhys = pRam->GCPhysLast + 1;
2848 }
2849 else
2850 {
2851 /*
2852 * Unassigned address space, skip it.
2853 */
2854 if (!pRam)
2855 break;
2856 size_t cb = pRam->GCPhys - GCPhys;
2857 if (cb >= cbWrite)
2858 break;
2859 cbWrite -= cb;
2860 pvBuf = (const char *)pvBuf + cb;
2861 GCPhys += cb;
2862 }
2863
2864 /* Advance range if necessary. */
2865 while (pRam && GCPhys > pRam->GCPhysLast)
2866 pRam = pRam->CTX_SUFF(pNext);
2867 } /* Ram range walk */
2868
2869 pgmUnlock(pVM);
2870 return VINF_SUCCESS;
2871}
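
/*
 * Illustrative usage sketch (editor's addition, not part of the original source):
 * the write counterpart of the read example above. Variable names are
 * hypothetical; PGMACCESSORIGIN_DEVICE is assumed to be a valid origin value.
 *
 *      int rc = PGMPhysWrite(pVM, GCPhysDst, abData, sizeof(abData), PGMACCESSORIGIN_DEVICE);
 *      // Writes hitting a WRITE/ALL handler in R0/RC come back as
 *      // VERR_PGM_PHYS_WR_HIT_HANDLER and must be redone in ring-3; writes to
 *      // unassigned or ballooned pages are silently dropped.
 */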
2872
2873
2874/**
2875 * Read from guest physical memory by GC physical address, bypassing
2876 * MMIO and access handlers.
2877 *
2878 * @returns VBox status.
2879 * @param pVM Pointer to the VM.
2880 * @param pvDst The destination address.
2881 * @param GCPhysSrc The source address (GC physical address).
2882 * @param cb The number of bytes to read.
2883 */
2884VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2885{
2886 /*
2887 * Treat the first page as a special case.
2888 */
2889 if (!cb)
2890 return VINF_SUCCESS;
2891
2892 /* map the 1st page */
2893 void const *pvSrc;
2894 PGMPAGEMAPLOCK Lock;
2895 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2896 if (RT_FAILURE(rc))
2897 return rc;
2898
2899 /* optimize for the case where access is completely within the first page. */
2900 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2901 if (RT_LIKELY(cb <= cbPage))
2902 {
2903 memcpy(pvDst, pvSrc, cb);
2904 PGMPhysReleasePageMappingLock(pVM, &Lock);
2905 return VINF_SUCCESS;
2906 }
2907
2908 /* copy to the end of the page. */
2909 memcpy(pvDst, pvSrc, cbPage);
2910 PGMPhysReleasePageMappingLock(pVM, &Lock);
2911 GCPhysSrc += cbPage;
2912 pvDst = (uint8_t *)pvDst + cbPage;
2913 cb -= cbPage;
2914
2915 /*
2916 * Page by page.
2917 */
2918 for (;;)
2919 {
2920 /* map the page */
2921 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2922 if (RT_FAILURE(rc))
2923 return rc;
2924
2925 /* last page? */
2926 if (cb <= PAGE_SIZE)
2927 {
2928 memcpy(pvDst, pvSrc, cb);
2929 PGMPhysReleasePageMappingLock(pVM, &Lock);
2930 return VINF_SUCCESS;
2931 }
2932
2933 /* copy the entire page and advance */
2934 memcpy(pvDst, pvSrc, PAGE_SIZE);
2935 PGMPhysReleasePageMappingLock(pVM, &Lock);
2936 GCPhysSrc += PAGE_SIZE;
2937 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2938 cb -= PAGE_SIZE;
2939 }
2940 /* won't ever get here. */
2941}
2942
2943
2944/**
2945 * Write to guest physical memory referenced by GC physical address.
2947 *
2948 * This will bypass MMIO and access handlers.
2949 *
2950 * @returns VBox status.
2951 * @param pVM Pointer to the VM.
2952 * @param GCPhysDst The GC physical address of the destination.
2953 * @param pvSrc The source buffer.
2954 * @param cb The number of bytes to write.
2955 */
2956VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2957{
2958 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2959
2960 /*
2961 * Treat the first page as a special case.
2962 */
2963 if (!cb)
2964 return VINF_SUCCESS;
2965
2966 /* map the 1st page */
2967 void *pvDst;
2968 PGMPAGEMAPLOCK Lock;
2969 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2970 if (RT_FAILURE(rc))
2971 return rc;
2972
2973 /* optimize for the case where access is completely within the first page. */
2974 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2975 if (RT_LIKELY(cb <= cbPage))
2976 {
2977 memcpy(pvDst, pvSrc, cb);
2978 PGMPhysReleasePageMappingLock(pVM, &Lock);
2979 return VINF_SUCCESS;
2980 }
2981
2982 /* copy to the end of the page. */
2983 memcpy(pvDst, pvSrc, cbPage);
2984 PGMPhysReleasePageMappingLock(pVM, &Lock);
2985 GCPhysDst += cbPage;
2986 pvSrc = (const uint8_t *)pvSrc + cbPage;
2987 cb -= cbPage;
2988
2989 /*
2990 * Page by page.
2991 */
2992 for (;;)
2993 {
2994 /* map the page */
2995 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2996 if (RT_FAILURE(rc))
2997 return rc;
2998
2999 /* last page? */
3000 if (cb <= PAGE_SIZE)
3001 {
3002 memcpy(pvDst, pvSrc, cb);
3003 PGMPhysReleasePageMappingLock(pVM, &Lock);
3004 return VINF_SUCCESS;
3005 }
3006
3007 /* copy the entire page and advance */
3008 memcpy(pvDst, pvSrc, PAGE_SIZE);
3009 PGMPhysReleasePageMappingLock(pVM, &Lock);
3010 GCPhysDst += PAGE_SIZE;
3011 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3012 cb -= PAGE_SIZE;
3013 }
3014 /* won't ever get here. */
3015}
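
/*
 * Illustrative usage sketch (editor's addition, not part of the original source):
 * contrasting the "simple" API above with PGMPhysWrite. Variable names are
 * hypothetical.
 *
 *      // Handler- and MMIO-aware write (devices, instruction emulation):
 *      rc = PGMPhysWrite(pVM, GCPhysDst, pvData, cbData, enmOrigin);
 *      // Raw write that bypasses handlers, e.g. placing firmware images or
 *      // restored saved-state data into plain RAM, crossing pages as needed:
 *      rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysDst, pvData, cbData);
 */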
3016
3017
3018/**
3019 * Read from guest physical memory referenced by GC pointer.
3020 *
3021 * This function uses the current CR3/CR0/CR4 of the guest and will
3022 * bypass access handlers and not set any accessed bits.
3023 *
3024 * @returns VBox status.
3025 * @param pVCpu Handle to the current virtual CPU.
3026 * @param pvDst The destination address.
3027 * @param GCPtrSrc The source address (GC pointer).
3028 * @param cb The number of bytes to read.
3029 */
3030VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3031{
3032 PVM pVM = pVCpu->CTX_SUFF(pVM);
3033/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3034
3035 /*
3036 * Treat the first page as a special case.
3037 */
3038 if (!cb)
3039 return VINF_SUCCESS;
3040
3041 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3042 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3043
3044 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3045 * when many VCPUs are fighting for the lock.
3046 */
3047 pgmLock(pVM);
3048
3049 /* map the 1st page */
3050 void const *pvSrc;
3051 PGMPAGEMAPLOCK Lock;
3052 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3053 if (RT_FAILURE(rc))
3054 {
3055 pgmUnlock(pVM);
3056 return rc;
3057 }
3058
3059 /* optimize for the case where access is completely within the first page. */
3060 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3061 if (RT_LIKELY(cb <= cbPage))
3062 {
3063 memcpy(pvDst, pvSrc, cb);
3064 PGMPhysReleasePageMappingLock(pVM, &Lock);
3065 pgmUnlock(pVM);
3066 return VINF_SUCCESS;
3067 }
3068
3069 /* copy to the end of the page. */
3070 memcpy(pvDst, pvSrc, cbPage);
3071 PGMPhysReleasePageMappingLock(pVM, &Lock);
3072 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3073 pvDst = (uint8_t *)pvDst + cbPage;
3074 cb -= cbPage;
3075
3076 /*
3077 * Page by page.
3078 */
3079 for (;;)
3080 {
3081 /* map the page */
3082 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3083 if (RT_FAILURE(rc))
3084 {
3085 pgmUnlock(pVM);
3086 return rc;
3087 }
3088
3089 /* last page? */
3090 if (cb <= PAGE_SIZE)
3091 {
3092 memcpy(pvDst, pvSrc, cb);
3093 PGMPhysReleasePageMappingLock(pVM, &Lock);
3094 pgmUnlock(pVM);
3095 return VINF_SUCCESS;
3096 }
3097
3098 /* copy the entire page and advance */
3099 memcpy(pvDst, pvSrc, PAGE_SIZE);
3100 PGMPhysReleasePageMappingLock(pVM, &Lock);
3101 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3102 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3103 cb -= PAGE_SIZE;
3104 }
3105 /* won't ever get here. */
3106}
3107
3108
3109/**
3110 * Write to guest physical memory referenced by GC pointer.
3111 *
3112 * This function uses the current CR3/CR0/CR4 of the guest and will
3113 * bypass access handlers and not set dirty or accessed bits.
3114 *
3115 * @returns VBox status.
3116 * @param pVCpu Handle to the current virtual CPU.
3117 * @param GCPtrDst The destination address (GC pointer).
3118 * @param pvSrc The source address.
3119 * @param cb The number of bytes to write.
3120 */
3121VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3122{
3123 PVM pVM = pVCpu->CTX_SUFF(pVM);
3124 VMCPU_ASSERT_EMT(pVCpu);
3125
3126 /*
3127 * Treat the first page as a special case.
3128 */
3129 if (!cb)
3130 return VINF_SUCCESS;
3131
3132 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3133 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3134
3135 /* map the 1st page */
3136 void *pvDst;
3137 PGMPAGEMAPLOCK Lock;
3138 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3139 if (RT_FAILURE(rc))
3140 return rc;
3141
3142 /* optimize for the case where access is completely within the first page. */
3143 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3144 if (RT_LIKELY(cb <= cbPage))
3145 {
3146 memcpy(pvDst, pvSrc, cb);
3147 PGMPhysReleasePageMappingLock(pVM, &Lock);
3148 return VINF_SUCCESS;
3149 }
3150
3151 /* copy to the end of the page. */
3152 memcpy(pvDst, pvSrc, cbPage);
3153 PGMPhysReleasePageMappingLock(pVM, &Lock);
3154 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3155 pvSrc = (const uint8_t *)pvSrc + cbPage;
3156 cb -= cbPage;
3157
3158 /*
3159 * Page by page.
3160 */
3161 for (;;)
3162 {
3163 /* map the page */
3164 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3165 if (RT_FAILURE(rc))
3166 return rc;
3167
3168 /* last page? */
3169 if (cb <= PAGE_SIZE)
3170 {
3171 memcpy(pvDst, pvSrc, cb);
3172 PGMPhysReleasePageMappingLock(pVM, &Lock);
3173 return VINF_SUCCESS;
3174 }
3175
3176 /* copy the entire page and advance */
3177 memcpy(pvDst, pvSrc, PAGE_SIZE);
3178 PGMPhysReleasePageMappingLock(pVM, &Lock);
3179 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3180 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3181 cb -= PAGE_SIZE;
3182 }
3183 /* won't ever get here. */
3184}
3185
3186
3187/**
3188 * Write to guest physical memory referenced by GC pointer and update the PTE.
3189 *
3190 * This function uses the current CR3/CR0/CR4 of the guest and will
3191 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3192 *
3193 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3194 *
3195 * @returns VBox status.
3196 * @param pVCpu Handle to the current virtual CPU.
3197 * @param GCPtrDst The destination address (GC pointer).
3198 * @param pvSrc The source address.
3199 * @param cb The number of bytes to write.
3200 */
3201VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3202{
3203 PVM pVM = pVCpu->CTX_SUFF(pVM);
3204 VMCPU_ASSERT_EMT(pVCpu);
3205
3206 /*
3207 * Treat the first page as a special case.
3208 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3209 */
3210 if (!cb)
3211 return VINF_SUCCESS;
3212
3213 /* map the 1st page */
3214 void *pvDst;
3215 PGMPAGEMAPLOCK Lock;
3216 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3217 if (RT_FAILURE(rc))
3218 return rc;
3219
3220 /* optimize for the case where access is completely within the first page. */
3221 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3222 if (RT_LIKELY(cb <= cbPage))
3223 {
3224 memcpy(pvDst, pvSrc, cb);
3225 PGMPhysReleasePageMappingLock(pVM, &Lock);
3226 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3227 return VINF_SUCCESS;
3228 }
3229
3230 /* copy to the end of the page. */
3231 memcpy(pvDst, pvSrc, cbPage);
3232 PGMPhysReleasePageMappingLock(pVM, &Lock);
3233 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3234 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3235 pvSrc = (const uint8_t *)pvSrc + cbPage;
3236 cb -= cbPage;
3237
3238 /*
3239 * Page by page.
3240 */
3241 for (;;)
3242 {
3243 /* map the page */
3244 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3245 if (RT_FAILURE(rc))
3246 return rc;
3247
3248 /* last page? */
3249 if (cb <= PAGE_SIZE)
3250 {
3251 memcpy(pvDst, pvSrc, cb);
3252 PGMPhysReleasePageMappingLock(pVM, &Lock);
3253 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3254 return VINF_SUCCESS;
3255 }
3256
3257 /* copy the entire page and advance */
3258 memcpy(pvDst, pvSrc, PAGE_SIZE);
3259 PGMPhysReleasePageMappingLock(pVM, &Lock);
3260 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3261 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3262 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3263 cb -= PAGE_SIZE;
3264 }
3265 /* won't ever get here. */
3266}
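
/*
 * Illustrative usage sketch (editor's addition, not part of the original source):
 * when emulating a store the guest CPU would have performed itself, the dirty
 * variant above keeps the guest PTE accessed/dirty bits consistent. The
 * stack-push scenario and variable names are hypothetical.
 *
 *      // Push an error code the way the CPU would, marking the PTE A+D:
 *      rc = PGMPhysSimpleDirtyWriteGCPtr(pVCpu, GCPtrStack, &uErrCd, sizeof(uErrCd));
 *      // A plain PGMPhysSimpleWriteGCPtr would leave the dirty bit untouched.
 */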
3267
3268
3269/**
3270 * Read from guest physical memory referenced by GC pointer.
3271 *
3272 * This function uses the current CR3/CR0/CR4 of the guest and will
3273 * respect access handlers and set accessed bits.
3274 *
3275 * @returns VBox status.
3276 * @param pVCpu Handle to the current virtual CPU.
3277 * @param pvDst The destination address.
3278 * @param GCPtrSrc The source address (GC pointer).
3279 * @param cb The number of bytes to read.
3280 * @param enmOrigin Who is calling.
3281 * @thread EMT(pVCpu)
3282 */
3283VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3284{
3285 RTGCPHYS GCPhys;
3286 uint64_t fFlags;
3287 int rc;
3288 PVM pVM = pVCpu->CTX_SUFF(pVM);
3289 VMCPU_ASSERT_EMT(pVCpu);
3290
3291 /*
3292 * Anything to do?
3293 */
3294 if (!cb)
3295 return VINF_SUCCESS;
3296
3297 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3298
3299 /*
3300 * Optimize reads within a single page.
3301 */
3302 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3303 {
3304 /* Convert virtual to physical address + flags */
3305 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3306 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3307 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3308
3309 /* mark the guest page as accessed. */
3310 if (!(fFlags & X86_PTE_A))
3311 {
3312 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3313 AssertRC(rc);
3314 }
3315
3316 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3317 }
3318
3319 /*
3320 * Page by page.
3321 */
3322 for (;;)
3323 {
3324 /* Convert virtual to physical address + flags */
3325 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3326 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3327 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3328
3329 /* mark the guest page as accessed. */
3330 if (!(fFlags & X86_PTE_A))
3331 {
3332 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3333 AssertRC(rc);
3334 }
3335
3336 /* copy */
3337 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3338 if (cbRead < cb)
3339 {
3340 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3341 if (RT_FAILURE(rc))
3342 return rc;
3343 }
3344 else /* Last page (cbRead is PAGE_SIZE, we only need cb!) */
3345 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3346
3347 /* next */
3348 Assert(cb > cbRead);
3349 cb -= cbRead;
3350 pvDst = (uint8_t *)pvDst + cbRead;
3351 GCPtrSrc += cbRead;
3352 }
3353}
3354
3355
3356/**
3357 * Write to guest physical memory referenced by GC pointer.
3358 *
3359 * This function uses the current CR3/CR0/CR4 of the guest and will
3360 * respect access handlers and set dirty and accessed bits.
3361 *
3362 * @returns VBox status.
3363 * @retval VINF_SUCCESS.
3364 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3365 *
3366 * @param pVCpu Handle to the current virtual CPU.
3367 * @param GCPtrDst The destination address (GC pointer).
3368 * @param pvSrc The source address.
3369 * @param cb The number of bytes to write.
3370 * @param enmOrigin Who is calling.
3371 */
3372VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3373{
3374 RTGCPHYS GCPhys;
3375 uint64_t fFlags;
3376 int rc;
3377 PVM pVM = pVCpu->CTX_SUFF(pVM);
3378 VMCPU_ASSERT_EMT(pVCpu);
3379
3380 /*
3381 * Anything to do?
3382 */
3383 if (!cb)
3384 return VINF_SUCCESS;
3385
3386 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3387
3388 /*
3389 * Optimize writes within a single page.
3390 */
3391 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3392 {
3393 /* Convert virtual to physical address + flags */
3394 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3395 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3396 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3397
3398 /* Mention when we ignore X86_PTE_RW... */
3399 if (!(fFlags & X86_PTE_RW))
3400            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3401
3402 /* Mark the guest page as accessed and dirty if necessary. */
3403 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3404 {
3405 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3406 AssertRC(rc);
3407 }
3408
3409 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3410 }
3411
3412 /*
3413 * Page by page.
3414 */
3415 for (;;)
3416 {
3417 /* Convert virtual to physical address + flags */
3418 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3419 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3420 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3421
3422 /* Mention when we ignore X86_PTE_RW... */
3423 if (!(fFlags & X86_PTE_RW))
3424            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3425
3426 /* Mark the guest page as accessed and dirty if necessary. */
3427 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3428 {
3429 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3430 AssertRC(rc);
3431 }
3432
3433 /* copy */
3434 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3435 if (cbWrite < cb)
3436 {
3437 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3438 if (RT_FAILURE(rc))
3439 return rc;
3440 }
3441 else /* Last page (cbWrite is PAGE_SIZE, we only need cb!) */
3442            return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3443
3444 /* next */
3445 Assert(cb > cbWrite);
3446 cb -= cbWrite;
3447 pvSrc = (uint8_t *)pvSrc + cbWrite;
3448 GCPtrDst += cbWrite;
3449 }
3450}
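
/*
 * Example (illustrative sketch, kept under #if 0 as it is not built):
 * patching a few guest bytes through the current paging mode with
 * PGMPhysWriteGCPtr.  Access handlers are respected and the A/D bits get
 * set, which is what makes this suitable for emulation-style stores.  The
 * helper name is made up.
 */
#if 0
static int examplePokeGuestBytes(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvPatch, size_t cbPatch, PGMACCESSORIGIN enmOrigin)
{
    int rc = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvPatch, cbPatch, enmOrigin);
    /* In R0/RC a monitored page may defer the write to ring-3. */
    if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
        LogFlow(("examplePokeGuestBytes: %RGv/%zu deferred to ring-3\n", GCPtrDst, cbPatch));
    return rc;
}
#endif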
3451
3452
3453/**
3454 * Performs a read of guest virtual memory for instruction emulation.
3455 *
3456 * This will check permissions, raise exceptions and update the access bits.
3457 *
3458 * The current implementation will bypass all access handlers. It may later be
3459 * changed to at least respect MMIO.
3460 *
3461 *
3462 * @returns VBox status code suitable to scheduling.
3463 * @retval VINF_SUCCESS if the read was performed successfully.
3464 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3465 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3466 *
3467 * @param pVCpu Handle to the current virtual CPU.
3468 * @param pCtxCore The context core.
3469 * @param pvDst Where to put the bytes we've read.
3470 * @param GCPtrSrc The source address.
3471 * @param cb The number of bytes to read. Not more than a page.
3472 *
3473 * @remark This function will dynamically map physical pages in GC. This may unmap
3474 * mappings done by the caller. Be careful!
3475 */
3476VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3477{
3478 PVM pVM = pVCpu->CTX_SUFF(pVM);
3479 Assert(cb <= PAGE_SIZE);
3480 VMCPU_ASSERT_EMT(pVCpu);
3481
3482/** @todo r=bird: This isn't perfect!
3483 * -# It's not checking for reserved bits being 1.
3484 * -# It's not correctly dealing with the access bit.
3485 * -# It's not respecting MMIO memory or any other access handlers.
3486 */
3487 /*
3488 * 1. Translate virtual to physical. This may fault.
3489 * 2. Map the physical address.
3490 * 3. Do the read operation.
3491 * 4. Set access bits if required.
3492 */
3493 int rc;
3494 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3495 if (cb <= cb1)
3496 {
3497 /*
3498 * Not crossing pages.
3499 */
3500 RTGCPHYS GCPhys;
3501 uint64_t fFlags;
3502 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3503 if (RT_SUCCESS(rc))
3504 {
3505 /** @todo we should check reserved bits ... */
3506 PGMPAGEMAPLOCK PgMpLck;
3507 void const *pvSrc;
3508 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3509 switch (rc)
3510 {
3511 case VINF_SUCCESS:
3512 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3513 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3514 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3515 break;
3516 case VERR_PGM_PHYS_PAGE_RESERVED:
3517 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3518 memset(pvDst, 0xff, cb);
3519 break;
3520 default:
3521 Assert(RT_FAILURE_NP(rc));
3522 return rc;
3523 }
3524
3525 /** @todo access bit emulation isn't 100% correct. */
3526 if (!(fFlags & X86_PTE_A))
3527 {
3528 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3529 AssertRC(rc);
3530 }
3531 return VINF_SUCCESS;
3532 }
3533 }
3534 else
3535 {
3536 /*
3537 * Crosses pages.
3538 */
3539 size_t cb2 = cb - cb1;
3540 uint64_t fFlags1;
3541 RTGCPHYS GCPhys1;
3542 uint64_t fFlags2;
3543 RTGCPHYS GCPhys2;
3544 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3545 if (RT_SUCCESS(rc))
3546 {
3547 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3548 if (RT_SUCCESS(rc))
3549 {
3550 /** @todo we should check reserved bits ... */
3551 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3552 PGMPAGEMAPLOCK PgMpLck;
3553 void const *pvSrc1;
3554 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3555 switch (rc)
3556 {
3557 case VINF_SUCCESS:
3558 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3559 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3560 break;
3561 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3562 memset(pvDst, 0xff, cb1);
3563 break;
3564 default:
3565 Assert(RT_FAILURE_NP(rc));
3566 return rc;
3567 }
3568
3569 void const *pvSrc2;
3570 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3571 switch (rc)
3572 {
3573 case VINF_SUCCESS:
3574 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3575 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3576 break;
3577 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3578 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3579 break;
3580 default:
3581 Assert(RT_FAILURE_NP(rc));
3582 return rc;
3583 }
3584
3585 if (!(fFlags1 & X86_PTE_A))
3586 {
3587 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3588 AssertRC(rc);
3589 }
3590 if (!(fFlags2 & X86_PTE_A))
3591 {
3592 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3593 AssertRC(rc);
3594 }
3595 return VINF_SUCCESS;
3596 }
3597 }
3598 }
3599
3600 /*
3601 * Raise a #PF.
3602 */
3603 uint32_t uErr;
3604
3605 /* Get the current privilege level. */
3606 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3607 switch (rc)
3608 {
3609 case VINF_SUCCESS:
3610 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3611 break;
3612
3613 case VERR_PAGE_NOT_PRESENT:
3614 case VERR_PAGE_TABLE_NOT_PRESENT:
3615 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3616 break;
3617
3618 default:
3619 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3620 return rc;
3621 }
3622 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3623 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3624}
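
/*
 * Example (illustrative sketch, kept under #if 0 as it is not built): how an
 * instruction interpreter might fetch an operand with PGMPhysInterpretedRead.
 * On translation failure the function raises the #PF itself, so the caller
 * only propagates the (possibly informational) status.  The helper name is
 * made up.
 */
#if 0
static int exampleFetchOperandU32(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrOperand, uint32_t *puValue)
{
    int rc = PGMPhysInterpretedRead(pVCpu, pCtxCore, puValue, GCPtrOperand, sizeof(*puValue));
    AssertMsg(   rc == VINF_SUCCESS
              || rc == VINF_EM_RAW_GUEST_TRAP
              || rc == VINF_TRPM_XCPT_DISPATCHED
              || RT_FAILURE(rc), ("%Rrc\n", rc));
    return rc;
}
#endif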
3625
3626
3627/**
3628 * Performs a read of guest virtual memory for instruction emulation.
3629 *
3630 * This will check permissions, raise exceptions and update the access bits.
3631 *
3632 * The current implementation will bypass all access handlers. It may later be
3633 * changed to at least respect MMIO.
3634 *
3635 *
3636 * @returns VBox status code suitable to scheduling.
3637 * @retval VINF_SUCCESS if the read was performed successfully.
3638 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3639 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3640 *
3641 * @param pVCpu Handle to the current virtual CPU.
3642 * @param pCtxCore The context core.
3643 * @param pvDst Where to put the bytes we've read.
3644 * @param GCPtrSrc The source address.
3645 * @param cb The number of bytes to read. Not more than a page.
3646 * @param fRaiseTrap If set, the trap will be raised as per the spec; if
3647 * clear, an appropriate error status will be returned
3648 * (no informational status at all).
3649 *
3650 *
3651 * @remarks Takes the PGM lock.
3652 * @remarks A page fault on the 2nd page of the access will be raised without
3653 * writing the bits on the first page since we're ASSUMING that the
3654 * caller is emulating an instruction access.
3655 * @remarks This function will dynamically map physical pages in GC. This may
3656 * unmap mappings done by the caller. Be careful!
3657 */
3658VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3659 bool fRaiseTrap)
3660{
3661 PVM pVM = pVCpu->CTX_SUFF(pVM);
3662 Assert(cb <= PAGE_SIZE);
3663 VMCPU_ASSERT_EMT(pVCpu);
3664
3665 /*
3666 * 1. Translate virtual to physical. This may fault.
3667 * 2. Map the physical address.
3668 * 3. Do the read operation.
3669 * 4. Set access bits if required.
3670 */
3671 int rc;
3672 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3673 if (cb <= cb1)
3674 {
3675 /*
3676 * Not crossing pages.
3677 */
3678 RTGCPHYS GCPhys;
3679 uint64_t fFlags;
3680 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3681 if (RT_SUCCESS(rc))
3682 {
3683 if (1) /** @todo we should check reserved bits ... */
3684 {
3685 const void *pvSrc;
3686 PGMPAGEMAPLOCK Lock;
3687 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3688 switch (rc)
3689 {
3690 case VINF_SUCCESS:
3691 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3692 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3693 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3694 PGMPhysReleasePageMappingLock(pVM, &Lock);
3695 break;
3696 case VERR_PGM_PHYS_PAGE_RESERVED:
3697 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3698 memset(pvDst, 0xff, cb);
3699 break;
3700 default:
3701 AssertMsgFailed(("%Rrc\n", rc));
3702 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3703 return rc;
3704 }
3705
3706 if (!(fFlags & X86_PTE_A))
3707 {
3708 /** @todo access bit emulation isn't 100% correct. */
3709 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3710 AssertRC(rc);
3711 }
3712 return VINF_SUCCESS;
3713 }
3714 }
3715 }
3716 else
3717 {
3718 /*
3719 * Crosses pages.
3720 */
3721 size_t cb2 = cb - cb1;
3722 uint64_t fFlags1;
3723 RTGCPHYS GCPhys1;
3724 uint64_t fFlags2;
3725 RTGCPHYS GCPhys2;
3726 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3727 if (RT_SUCCESS(rc))
3728 {
3729 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3730 if (RT_SUCCESS(rc))
3731 {
3732 if (1) /** @todo we should check reserved bits ... */
3733 {
3734 const void *pvSrc;
3735 PGMPAGEMAPLOCK Lock;
3736 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3737 switch (rc)
3738 {
3739 case VINF_SUCCESS:
3740 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3741 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3742 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3743 PGMPhysReleasePageMappingLock(pVM, &Lock);
3744 break;
3745 case VERR_PGM_PHYS_PAGE_RESERVED:
3746 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3747 memset(pvDst, 0xff, cb1);
3748 break;
3749 default:
3750 AssertMsgFailed(("%Rrc\n", rc));
3751 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3752 return rc;
3753 }
3754
3755 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3756 switch (rc)
3757 {
3758 case VINF_SUCCESS:
3759 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3760 PGMPhysReleasePageMappingLock(pVM, &Lock);
3761 break;
3762 case VERR_PGM_PHYS_PAGE_RESERVED:
3763 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3764 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3765 break;
3766 default:
3767 AssertMsgFailed(("%Rrc\n", rc));
3768 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3769 return rc;
3770 }
3771
3772 if (!(fFlags1 & X86_PTE_A))
3773 {
3774 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3775 AssertRC(rc);
3776 }
3777 if (!(fFlags2 & X86_PTE_A))
3778 {
3779 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3780 AssertRC(rc);
3781 }
3782 return VINF_SUCCESS;
3783 }
3784 /* sort out which page */
3785 }
3786 else
3787 GCPtrSrc += cb1; /* fault on 2nd page */
3788 }
3789 }
3790
3791 /*
3792 * Raise a #PF if we're allowed to do that.
3793 */
3794 /* Calc the error bits. */
3795 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3796 uint32_t uErr;
3797 switch (rc)
3798 {
3799 case VINF_SUCCESS:
3800 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3801 rc = VERR_ACCESS_DENIED;
3802 break;
3803
3804 case VERR_PAGE_NOT_PRESENT:
3805 case VERR_PAGE_TABLE_NOT_PRESENT:
3806 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3807 break;
3808
3809 default:
3810 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3811 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3812 return rc;
3813 }
3814 if (fRaiseTrap)
3815 {
3816 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3817 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3818 }
3819 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3820 return rc;
3821}
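
/*
 * Example (illustrative sketch, kept under #if 0 as it is not built): the
 * same fetch but with fRaiseTrap clear, so a translation failure comes back
 * as an error status (e.g. VERR_PAGE_NOT_PRESENT) instead of a raised #PF,
 * leaving the decision to the caller.  The helper name is made up.
 */
#if 0
static int exampleTryFetchU32(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCUINTPTR GCPtrSrc, uint32_t *puValue)
{
    int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, pCtxCore, puValue, GCPtrSrc, sizeof(*puValue), false /*fRaiseTrap*/);
    if (RT_FAILURE(rc))
        *puValue = UINT32_MAX; /* arbitrary fallback for this sketch */
    return rc;
}
#endif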
3822
3823
3824/**
3825 * Performs a write to guest virtual memory for instruction emulation.
3826 *
3827 * This will check permissions, raise exceptions and update the dirty and access
3828 * bits.
3829 *
3830 * @returns VBox status code suitable to scheduling.
3831 * @retval VINF_SUCCESS if the write was performed successfully.
3832 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3833 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3834 *
3835 * @param pVCpu Handle to the current virtual CPU.
3836 * @param pCtxCore The context core.
3837 * @param GCPtrDst The destination address.
3838 * @param pvSrc What to write.
3839 * @param cb The number of bytes to write. Not more than a page.
3840 * @param fRaiseTrap If set, the trap will be raised as per the spec; if
3841 * clear, an appropriate error status will be returned
3842 * (no informational status at all).
3843 *
3844 * @remarks Takes the PGM lock.
3845 * @remarks A page fault on the 2nd page of the access will be raised without
3846 * writing the bits on the first page since we're ASSUMING that the
3847 * caller is emulating an instruction access.
3848 * @remarks This function will dynamically map physical pages in GC. This may
3849 * unmap mappings done by the caller. Be careful!
3850 */
3851VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
3852 size_t cb, bool fRaiseTrap)
3853{
3854 Assert(cb <= PAGE_SIZE);
3855 PVM pVM = pVCpu->CTX_SUFF(pVM);
3856 VMCPU_ASSERT_EMT(pVCpu);
3857
3858 /*
3859 * 1. Translate virtual to physical. This may fault.
3860 * 2. Map the physical address.
3861 * 3. Do the write operation.
3862 * 4. Set access bits if required.
3863 */
3864 /** @todo Since this method is frequently used by EMInterpret or IOM
3865 * upon a write fault to a write access monitored page, we can
3866 * reuse the guest page table walking from the \#PF code. */
3867 int rc;
3868 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3869 if (cb <= cb1)
3870 {
3871 /*
3872 * Not crossing pages.
3873 */
3874 RTGCPHYS GCPhys;
3875 uint64_t fFlags;
3876 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3877 if (RT_SUCCESS(rc))
3878 {
3879 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3880 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3881 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3882 {
3883 void *pvDst;
3884 PGMPAGEMAPLOCK Lock;
3885 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3886 switch (rc)
3887 {
3888 case VINF_SUCCESS:
3889 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3890 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3891 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3892 PGMPhysReleasePageMappingLock(pVM, &Lock);
3893 break;
3894 case VERR_PGM_PHYS_PAGE_RESERVED:
3895 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3896 /* bit bucket */
3897 break;
3898 default:
3899 AssertMsgFailed(("%Rrc\n", rc));
3900 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3901 return rc;
3902 }
3903
3904 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3905 {
3906 /** @todo dirty & access bit emulation isn't 100% correct. */
3907 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3908 AssertRC(rc);
3909 }
3910 return VINF_SUCCESS;
3911 }
3912 rc = VERR_ACCESS_DENIED;
3913 }
3914 }
3915 else
3916 {
3917 /*
3918 * Crosses pages.
3919 */
3920 size_t cb2 = cb - cb1;
3921 uint64_t fFlags1;
3922 RTGCPHYS GCPhys1;
3923 uint64_t fFlags2;
3924 RTGCPHYS GCPhys2;
3925 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3926 if (RT_SUCCESS(rc))
3927 {
3928 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3929 if (RT_SUCCESS(rc))
3930 {
3931 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3932 && (fFlags2 & X86_PTE_RW))
3933 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3934 && CPUMGetGuestCPL(pVCpu) <= 2) )
3935 {
3936 void *pvDst;
3937 PGMPAGEMAPLOCK Lock;
3938 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3939 switch (rc)
3940 {
3941 case VINF_SUCCESS:
3942 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3943 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3944 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3945 PGMPhysReleasePageMappingLock(pVM, &Lock);
3946 break;
3947 case VERR_PGM_PHYS_PAGE_RESERVED:
3948 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3949 /* bit bucket */
3950 break;
3951 default:
3952 AssertMsgFailed(("%Rrc\n", rc));
3953 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3954 return rc;
3955 }
3956
3957 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3958 switch (rc)
3959 {
3960 case VINF_SUCCESS:
3961 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3962 PGMPhysReleasePageMappingLock(pVM, &Lock);
3963 break;
3964 case VERR_PGM_PHYS_PAGE_RESERVED:
3965 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3966 /* bit bucket */
3967 break;
3968 default:
3969 AssertMsgFailed(("%Rrc\n", rc));
3970 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3971 return rc;
3972 }
3973
3974 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3975 {
3976 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3977 AssertRC(rc);
3978 }
3979 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3980 {
3981 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3982 AssertRC(rc);
3983 }
3984 return VINF_SUCCESS;
3985 }
3986 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3987 GCPtrDst += cb1; /* fault on the 2nd page. */
3988 rc = VERR_ACCESS_DENIED;
3989 }
3990 else
3991 GCPtrDst += cb1; /* fault on the 2nd page. */
3992 }
3993 }
3994
3995 /*
3996 * Raise a #PF if we're allowed to do that.
3997 */
3998 /* Calc the error bits. */
3999 uint32_t uErr;
4000 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
4001 switch (rc)
4002 {
4003 case VINF_SUCCESS:
4004 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
4005 rc = VERR_ACCESS_DENIED;
4006 break;
4007
4008 case VERR_ACCESS_DENIED:
4009 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
4010 break;
4011
4012 case VERR_PAGE_NOT_PRESENT:
4013 case VERR_PAGE_TABLE_NOT_PRESENT:
4014 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
4015 break;
4016
4017 default:
4018 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
4019 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4020 return rc;
4021 }
4022 if (fRaiseTrap)
4023 {
4024 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
4025 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
4026 }
4027 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
4028 return rc;
4029}
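
/*
 * Example (illustrative sketch, kept under #if 0 as it is not built):
 * committing an emulated store with PGMPhysInterpretedWriteNoHandlers and
 * letting it raise the #PF itself on write-protected or not-present pages.
 * The helper name is made up.
 */
#if 0
static int exampleCommitStoreU16(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, uint16_t uValue)
{
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, &uValue, sizeof(uValue), true /*fRaiseTrap*/);
}
#endif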
4030
4031
4032/**
4033 * Return the page type of the specified physical address.
4034 *
4035 * @returns The page type.
4036 * @param pVM Pointer to the VM.
4037 * @param GCPhys Guest physical address.
4038 */
4039VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
4040{
4041 pgmLock(pVM);
4042 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
4043 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
4044 pgmUnlock(pVM);
4045
4046 return enmPgType;
4047}
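
/*
 * Example (illustrative sketch, kept under #if 0 as it is not built): using
 * PGMPhysGetPageType to check that an address is backed by anything at all.
 * Only PGMPAGETYPE_INVALID is relied upon, as that is what the function
 * returns for unassigned addresses.  The helper name is made up.
 */
#if 0
static bool exampleIsGCPhysBacked(PVM pVM, RTGCPHYS GCPhys)
{
    return PGMPhysGetPageType(pVM, GCPhys) != PGMPAGETYPE_INVALID;
}
#endif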
4048
4049
4050
4051
4052/**
4053 * Converts a GC physical address to a HC ring-3 pointer, with some
4054 * additional checks.
4055 *
4056 * @returns VBox status code (no informational statuses).
4057 * @retval VINF_SUCCESS on success.
4058 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4059 * access handler of some kind.
4060 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4061 * accesses or is odd in any way.
4062 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4063 *
4064 * @param pVM Pointer to the cross context VM structure.
4065 * @param pVCpu Pointer to the cross context virtual CPU structure of
4066 * the calling EMT.
4067 * @param GCPhys The GC physical address to convert. This API masks the
4068 * A20 line when necessary.
4069 * @param fWritable Whether write access is required.
 * @param fByPassHandlers Whether to bypass access handlers.
4070 * @param ppv Where to store the pointer corresponding to GCPhys on
4071 * success.
4072 * @param pLock Where to store the page mapping lock; release it with PGMPhysReleasePageMappingLock when done.
4073 *
4074 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4075 * @thread EMT(pVCpu).
4076 */
4077VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4078 void **ppv, PPGMPAGEMAPLOCK pLock)
4079{
4080 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4081
4082 pgmLock(pVM);
4083
4084 PPGMRAMRANGE pRam;
4085 PPGMPAGE pPage;
4086 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4087 if (RT_SUCCESS(rc))
4088 {
4089 if (PGM_PAGE_IS_BALLOONED(pPage))
4090 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4091 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4092 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4093 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4094 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4095 rc = VINF_SUCCESS;
4096 else
4097 {
4098 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4099 {
4100 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4101 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4102 }
4103 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4104 {
4105 Assert(!fByPassHandlers);
4106 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4107 }
4108 }
4109 if (RT_SUCCESS(rc))
4110 {
4111 int rc2;
4112
4113 /* Make sure what we return is writable. */
4114 if (fWritable)
4115 switch (PGM_PAGE_GET_STATE(pPage))
4116 {
4117 case PGM_PAGE_STATE_ALLOCATED:
4118 break;
4119 case PGM_PAGE_STATE_BALLOONED:
4120                        AssertFailed(); /* unreachable, ballooned pages were rejected above; fall thru */
4121 case PGM_PAGE_STATE_ZERO:
4122 case PGM_PAGE_STATE_SHARED:
4123 case PGM_PAGE_STATE_WRITE_MONITORED:
4124 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4125 AssertLogRelRCReturn(rc2, rc2);
4126 break;
4127 }
4128
4129#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
4130 void *pv;
4131 rc = pgmRZDynMapHCPageInlined(pVCpu,
4132 PGM_PAGE_GET_HCPHYS(pPage),
4133 &pv
4134 RTLOG_COMMA_SRC_POS);
4135 if (RT_FAILURE(rc))
4136 return rc;
4137 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4138 pLock->pvPage = pv;
4139 pLock->pVCpu = pVCpu;
4140
4141#else
4142 /* Get a ring-3 mapping of the address. */
4143 PPGMPAGER3MAPTLBE pTlbe;
4144 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4145 AssertLogRelRCReturn(rc2, rc2);
4146
4147 /* Lock it and calculate the address. */
4148 if (fWritable)
4149 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4150 else
4151 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4152 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4153#endif
4154
4155 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4156 }
4157 else
4158 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4159
4160 /* else: handler catching all access, no pointer returned. */
4161 }
4162 else
4163 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4164
4165 pgmUnlock(pVM);
4166 return rc;
4167}
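
/*
 * Example (illustrative sketch, kept under #if 0 as it is not built): the
 * typical pattern around PGMPhysIemGCPhys2Ptr - map, access directly on
 * success, release the lock, and fall back to the handler-aware PGMPhysRead
 * when no direct pointer is available.  The helper name and the use of a
 * caller-supplied enmOrigin are assumptions for the example.
 */
#if 0
static int exampleDirectReadU32(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *puValue, PGMACCESSORIGIN enmOrigin)
{
    void          *pv;
    PGMPAGEMAPLOCK Lock;
    int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, false /*fWritable*/, false /*fByPassHandlers*/, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *puValue = *(uint32_t const *)pv;   /* direct access to the mapped page */
        PGMPhysReleasePageMappingLock(pVM, &Lock);
        return VINF_SUCCESS;
    }
    /* Handled, special or unassigned page: take the slow path which honors handlers. */
    return PGMPhysRead(pVM, GCPhys, puValue, sizeof(*puValue), enmOrigin);
}
#endif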
4168
4169
4170/**
4171 * Checks if the given GCPhys page requires special handling for the given access
4172 * because it's MMIO or otherwise monitored.
4173 *
4174 * @returns VBox status code (no informational statuses).
4175 * @retval VINF_SUCCESS on success.
4176 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE if the page has a write
4177 * access handler of some kind.
4178 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4179 * accesses or is odd in any way.
4180 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4181 *
4182 * @param pVM Pointer to the VM.
4183 * @param GCPhys The GC physical address to convert. Since this is only
4184 * used for filling the REM TLB, the A20 mask must be
4185 * applied before calling this API.
4186 * @param fWritable Whether write access is required.
 * @param fByPassHandlers Whether to bypass access handlers.
4187 *
4188 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
4189 * a stop gap thing that should be removed once there is a better TLB
4190 * for virtual address accesses.
4191 */
4192VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
4193{
4194 pgmLock(pVM);
4195 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4196
4197 PPGMRAMRANGE pRam;
4198 PPGMPAGE pPage;
4199 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4200 if (RT_SUCCESS(rc))
4201 {
4202 if (PGM_PAGE_IS_BALLOONED(pPage))
4203 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4204 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4205 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4206 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4207 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4208 rc = VINF_SUCCESS;
4209 else
4210 {
4211 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4212 {
4213 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4214 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4215 }
4216 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4217 {
4218 Assert(!fByPassHandlers);
4219 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4220 }
4221 }
4222 }
4223
4224 pgmUnlock(pVM);
4225 return rc;
4226}
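
/*
 * Example (illustrative sketch, kept under #if 0 as it is not built): using
 * PGMPhysIemQueryAccess to decide whether a page is safe to enter into a
 * simple software TLB for direct writes or must stay on the handler-aware
 * slow path.  The caller is assumed to have applied the A20 mask already,
 * as required by the remarks above.  The helper name is made up.
 */
#if 0
static bool exampleCanCacheForWrite(PVM pVM, RTGCPHYS GCPhysPage)
{
    /* VINF_SUCCESS means no MMIO/handler concerns for writing this page. */
    return PGMPhysIemQueryAccess(pVM, GCPhysPage, true /*fWritable*/, false /*fByPassHandlers*/) == VINF_SUCCESS;
}
#endif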
4227