VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp@ 55909

Last change on this file since 55909 was 55909, checked in by vboxsync, 10 years ago

PGM,++: Made the ring-3 physical access handler callbacks present in all contexts, where applicable. They are not yet registered or used. Taking things slowly.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 155.0 KB
1/* $Id: PGMAllPhys.cpp 55909 2015-05-18 13:09:16Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_PGM_PHYS
22#include <VBox/vmm/pgm.h>
23#include <VBox/vmm/trpm.h>
24#include <VBox/vmm/vmm.h>
25#include <VBox/vmm/iom.h>
26#include <VBox/vmm/em.h>
27#ifdef VBOX_WITH_REM
28# include <VBox/vmm/rem.h>
29#endif
30#include "PGMInternal.h"
31#include <VBox/vmm/vm.h>
32#include "PGMInline.h"
33#include <VBox/param.h>
34#include <VBox/err.h>
35#include <iprt/assert.h>
36#include <iprt/string.h>
37#include <iprt/asm-amd64-x86.h>
38#include <VBox/log.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
47/** Enable the physical TLB. */
48#define PGM_WITH_PHYS_TLB
49
50
51
52#ifndef IN_RING3
53
54/**
55 * \#PF Handler callback for physical memory accesses without an RC/R0 handler.
56 * This simply pushes everything to the HC handler.
57 *
58 * @returns VBox status code (appropriate for trap handling and GC return).
59 * @param pVM Pointer to the VM.
60 * @param pVCpu Pointer to the cross context CPU context for the
61 * calling EMT.
62 * @param uErrorCode CPU Error code.
63 * @param pRegFrame Trap register frame.
64 * @param pvFault The fault address (cr2).
65 * @param GCPhysFault The GC physical address corresponding to pvFault.
66 * @param pvUser User argument.
67 */
68VMMDECL(int) pgmPhysPfHandlerRedirectToHC(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
69 RTGCPHYS GCPhysFault, void *pvUser)
70{
71 NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(GCPhysFault); NOREF(pvUser);
72 return (uErrorCode & X86_TRAP_PF_RW) ? VINF_IOM_R3_MMIO_WRITE : VINF_IOM_R3_MMIO_READ;
73}
74
75
76/**
77 * \#PF Handler callback for Guest ROM range write access.
78 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
79 *
80 * @returns VBox status code (appropriate for trap handling and GC return).
81 * @param pVM Pointer to the VM.
82 * @param pVCpu Pointer to the cross context CPU context for the
83 * calling EMT.
84 * @param uErrorCode CPU Error code.
85 * @param pRegFrame Trap register frame.
86 * @param pvFault The fault address (cr2).
87 * @param GCPhysFault The GC physical address corresponding to pvFault.
88 * @param pvUser User argument. Pointer to the ROM range structure.
89 */
90DECLEXPORT(int) pgmPhysRomWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
91 RTGCPHYS GCPhysFault, void *pvUser)
92{
93 int rc;
94 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
95 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
96 NOREF(uErrorCode); NOREF(pvFault);
97
98 Assert(uErrorCode & X86_TRAP_PF_RW); /* This shall not be used for read access! */
99
100 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
101 switch (pRom->aPages[iPage].enmProt)
102 {
103 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
104 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
105 {
106 /*
107 * If it's a simple instruction that doesn't change the CPU state,
108 * we will simply skip it. Otherwise we'll have to defer it to REM.
109 */
110 uint32_t cbOp;
111 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
112 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
113 if ( RT_SUCCESS(rc)
114 && pDis->uCpuMode == DISCPUMODE_32BIT /** @todo why does this matter? */
115 && !(pDis->fPrefix & (DISPREFIX_REPNE | DISPREFIX_REP | DISPREFIX_SEG)))
116 {
117 switch (pDis->bOpCode)
118 {
119 /** @todo Find other instructions we can safely skip, possibly
120 * adding this kind of detection to DIS or EM. */
121 case OP_MOV:
122 pRegFrame->rip += cbOp;
123 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteHandled);
124 return VINF_SUCCESS;
125 }
126 }
127 break;
128 }
129
130 case PGMROMPROT_READ_RAM_WRITE_RAM:
131 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
132 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
133 AssertRC(rc);
134 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
135
136 case PGMROMPROT_READ_ROM_WRITE_RAM:
137 /* Handle it in ring-3 because it's *way* easier there. */
138 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
139 break;
140
141 default:
142 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
143 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
144 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
145 }
146
147 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestROMWriteUnhandled);
148 return VINF_EM_RAW_EMULATE_INSTR;
149}
150
151#endif /* !IN_RING3 */
152
153
154/**
155 * Access handler callback for ROM write accesses.
156 *
157 * @returns VINF_SUCCESS if the handler has carried out the operation.
158 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
159 * @param pVM Pointer to the VM.
160 * @param pVCpu The cross context CPU structure for the calling EMT.
161 * @param GCPhys The physical address the guest is writing to.
162 * @param pvPhys The HC mapping of that address.
163 * @param pvBuf What the guest is reading/writing.
164 * @param cbBuf How much it's reading/writing.
165 * @param enmAccessType The access type.
166 * @param enmOrigin Who is making the access.
167 * @param pvUser User argument.
168 */
169PGM_ALL_CB2_DECL(int) pgmPhysRomWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,
170 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
171{
172 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
173 const uint32_t iPage = (GCPhys - pRom->GCPhys) >> PAGE_SHIFT;
174 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
175 PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
176 Log5(("pgmPhysRomWriteHandler: %d %c %#08RGp %#04zx\n", pRomPage->enmProt, enmAccessType == PGMACCESSTYPE_READ ? 'R' : 'W', GCPhys, cbBuf));
177 NOREF(pVCpu); NOREF(pvPhys); NOREF(enmOrigin);
178
179 if (enmAccessType == PGMACCESSTYPE_READ)
180 {
181 switch (pRomPage->enmProt)
182 {
183 /*
184 * Take the default action.
185 */
186 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
187 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
188 case PGMROMPROT_READ_ROM_WRITE_RAM:
189 case PGMROMPROT_READ_RAM_WRITE_RAM:
190 return VINF_PGM_HANDLER_DO_DEFAULT;
191
192 default:
193 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
194 pRom->aPages[iPage].enmProt, iPage, GCPhys),
195 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
196 }
197 }
198 else
199 {
200 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
201 switch (pRomPage->enmProt)
202 {
203 /*
204 * Ignore writes.
205 */
206 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
207 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
208 return VINF_SUCCESS;
209
210 /*
211 * Write to the RAM page.
212 */
213 case PGMROMPROT_READ_ROM_WRITE_RAM:
214 case PGMROMPROT_READ_RAM_WRITE_RAM: /* yes this will get here too, it's *way* simpler that way. */
215 {
216 /* This should be impossible now, pvPhys doesn't work cross-page any longer. */
217 Assert(((GCPhys - pRom->GCPhys + cbBuf - 1) >> PAGE_SHIFT) == iPage);
218
219 /*
220 * Take the lock, do lazy allocation, map the page and copy the data.
221 *
222 * Note that we have to bypass the mapping TLB since it works on
223 * guest physical addresses and entering the shadow page would
224 * kind of screw things up...
225 */
226 int rc = pgmLock(pVM);
227 AssertRC(rc);
228
229 PPGMPAGE pShadowPage = &pRomPage->Shadow;
230 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt))
231 {
232 pShadowPage = pgmPhysGetPage(pVM, GCPhys);
233 AssertLogRelReturn(pShadowPage, VERR_PGM_PHYS_PAGE_GET_IPE);
234 }
235
236 void *pvDstPage;
237 rc = pgmPhysPageMakeWritableAndMap(pVM, pShadowPage, GCPhys & X86_PTE_PG_MASK, &pvDstPage);
238 if (RT_SUCCESS(rc))
239 {
240 memcpy((uint8_t *)pvDstPage + (GCPhys & PAGE_OFFSET_MASK), pvBuf, cbBuf);
241 pRomPage->LiveSave.fWrittenTo = true;
242 }
243
244 pgmUnlock(pVM);
245 return rc;
246 }
247
248 default:
249 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhys=%RGp\n",
250 pRom->aPages[iPage].enmProt, iPage, GCPhys),
251 VERR_IPE_NOT_REACHED_DEFAULT_CASE);
252 }
253 }
254}
255
256
257/**
258 * Invalidates the RAM range TLBs.
259 *
260 * @param pVM Pointer to the VM.
261 */
262void pgmPhysInvalidRamRangeTlbs(PVM pVM)
263{
264 pgmLock(pVM);
265 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++)
266 {
267 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR;
268 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR;
269 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR;
270 }
271 pgmUnlock(pVM);
272}
273
274
275/**
276 * Tests whether a value of type RTGCPHYS would be negative if the type had been
277 * signed instead of unsigned.
278 *
279 * @returns @c true if negative, @c false if positive or zero.
280 * @param a_GCPhys The value to test.
281 * @todo Move me to iprt/types.h.
282 */
283#define RTGCPHYS_IS_NEGATIVE(a_GCPhys) ((a_GCPhys) & ((RTGCPHYS)1 << (sizeof(RTGCPHYS)*8 - 1)))
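/* Editor's note: an illustrative sketch, not part of the original file. It shows
 * what RTGCPHYS_IS_NEGATIVE is used for by the slow tree-walk workers below: since
 * RTGCPHYS is unsigned, the subtraction GCPhys - pRam->GCPhys wraps around when
 * GCPhys lies below the node, and the macro detects that by testing the most
 * significant bit. The example offsets are arbitrary. */
#if 0 /* illustration only */
RTGCPHYS offBelow = (RTGCPHYS)0x1000 - (RTGCPHYS)0x10000; /* wraps around => MSB set */
RTGCPHYS offAbove = (RTGCPHYS)0x10000 - (RTGCPHYS)0x1000; /* no wrap-around */
Assert( RTGCPHYS_IS_NEGATIVE(offBelow)); /* search descends to the left */
Assert(!RTGCPHYS_IS_NEGATIVE(offAbove)); /* in range, or descends to the right */
#endif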
284
285
286/**
287 * Slow worker for pgmPhysGetRange.
288 *
289 * @copydoc pgmPhysGetRange
290 */
291PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys)
292{
293 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
294
295 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
296 while (pRam)
297 {
298 RTGCPHYS off = GCPhys - pRam->GCPhys;
299 if (off < pRam->cb)
300 {
301 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
302 return pRam;
303 }
304 if (RTGCPHYS_IS_NEGATIVE(off))
305 pRam = pRam->CTX_SUFF(pLeft);
306 else
307 pRam = pRam->CTX_SUFF(pRight);
308 }
309 return NULL;
310}
311
312
313/**
314 * Slow worker for pgmPhysGetRangeAtOrAbove.
315 *
316 * @copydoc pgmPhysGetRangeAtOrAbove
317 */
318PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys)
319{
320 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
321
322 PPGMRAMRANGE pLastLeft = NULL;
323 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
324 while (pRam)
325 {
326 RTGCPHYS off = GCPhys - pRam->GCPhys;
327 if (off < pRam->cb)
328 {
329 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
330 return pRam;
331 }
332 if (RTGCPHYS_IS_NEGATIVE(off))
333 {
334 pLastLeft = pRam;
335 pRam = pRam->CTX_SUFF(pLeft);
336 }
337 else
338 pRam = pRam->CTX_SUFF(pRight);
339 }
340 return pLastLeft;
341}
342
343
344/**
345 * Slow worker for pgmPhysGetPage.
346 *
347 * @copydoc pgmPhysGetPage
348 */
349PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys)
350{
351 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
352
353 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
354 while (pRam)
355 {
356 RTGCPHYS off = GCPhys - pRam->GCPhys;
357 if (off < pRam->cb)
358 {
359 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
360 return &pRam->aPages[off >> PAGE_SHIFT];
361 }
362
363 if (RTGCPHYS_IS_NEGATIVE(off))
364 pRam = pRam->CTX_SUFF(pLeft);
365 else
366 pRam = pRam->CTX_SUFF(pRight);
367 }
368 return NULL;
369}
370
371
372/**
373 * Slow worker for pgmPhysGetPageEx.
374 *
375 * @copydoc pgmPhysGetPageEx
376 */
377int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage)
378{
379 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
380
381 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
382 while (pRam)
383 {
384 RTGCPHYS off = GCPhys - pRam->GCPhys;
385 if (off < pRam->cb)
386 {
387 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
388 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
389 return VINF_SUCCESS;
390 }
391
392 if (RTGCPHYS_IS_NEGATIVE(off))
393 pRam = pRam->CTX_SUFF(pLeft);
394 else
395 pRam = pRam->CTX_SUFF(pRight);
396 }
397
398 *ppPage = NULL;
399 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
400}
401
402
403/**
404 * Slow worker for pgmPhysGetPageAndRangeEx.
405 *
406 * @copydoc pgmPhysGetPageAndRangeEx
407 */
408int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam)
409{
410 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses));
411
412 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangeTree);
413 while (pRam)
414 {
415 RTGCPHYS off = GCPhys - pRam->GCPhys;
416 if (off < pRam->cb)
417 {
418 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam;
419 *ppRam = pRam;
420 *ppPage = &pRam->aPages[off >> PAGE_SHIFT];
421 return VINF_SUCCESS;
422 }
423
424 if (RTGCPHYS_IS_NEGATIVE(off))
425 pRam = pRam->CTX_SUFF(pLeft);
426 else
427 pRam = pRam->CTX_SUFF(pRight);
428 }
429
430 *ppRam = NULL;
431 *ppPage = NULL;
432 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
433}
434
435
436/**
437 * Checks if Address Gate 20 is enabled or not.
438 *
439 * @returns true if enabled.
440 * @returns false if disabled.
441 * @param pVCpu Pointer to the VMCPU.
442 */
443VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
444{
445 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
446 return pVCpu->pgm.s.fA20Enabled;
447}
448
449
450/**
451 * Validates a GC physical address.
452 *
453 * @returns true if valid.
454 * @returns false if invalid.
455 * @param pVM Pointer to the VM.
456 * @param GCPhys The physical address to validate.
457 */
458VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
459{
460 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
461 return pPage != NULL;
462}
463
464
465/**
466 * Checks if a GC physical address is a normal page,
467 * i.e. not ROM, MMIO or reserved.
468 *
469 * @returns true if normal.
470 * @returns false if invalid, ROM, MMIO or reserved page.
471 * @param pVM Pointer to the VM.
472 * @param GCPhys The physical address to check.
473 */
474VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
475{
476 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
477 return pPage
478 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
479}
480
481
482/**
483 * Converts a GC physical address to a HC physical address.
484 *
485 * @returns VINF_SUCCESS on success.
486 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
487 * page but has no physical backing.
488 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
489 * GC physical address.
490 *
491 * @param pVM Pointer to the VM.
492 * @param GCPhys The GC physical address to convert.
493 * @param pHCPhys Where to store the HC physical address on success.
494 */
495VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
496{
497 pgmLock(pVM);
498 PPGMPAGE pPage;
499 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
500 if (RT_SUCCESS(rc))
501 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
502 pgmUnlock(pVM);
503 return rc;
504}
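/* Editor's note: a hypothetical caller sketch, not part of the original file,
 * showing how the queries above combine: check that a guest physical address is
 * backed by normal RAM, then translate it to a host physical address. The GCPhys
 * value is an arbitrary example. */
#if 0 /* illustration only */
RTGCPHYS GCPhysExample = 0x100000; /* 1 MB into the guest physical address space */
if (PGMPhysIsGCPhysNormal(pVM, GCPhysExample))
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysExample, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("GCPhys %RGp is backed by HCPhys %RHp\n", GCPhysExample, HCPhys));
}
#endif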
505
506
507/**
508 * Invalidates all page mapping TLBs.
509 *
510 * @param pVM Pointer to the VM.
511 */
512void pgmPhysInvalidatePageMapTLB(PVM pVM)
513{
514 pgmLock(pVM);
515 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushes);
516
517 /* Clear the shared R0/R3 TLB completely. */
518 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
519 {
520 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
521 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
522 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
523 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
524 }
525
526 /** @todo clear the RC TLB whenever we add it. */
527
528 pgmUnlock(pVM);
529}
530
531
532/**
533 * Invalidates a page mapping TLB entry
534 *
535 * @param pVM Pointer to the VM.
536 * @param GCPhys GCPhys entry to flush
537 */
538void pgmPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
539{
540 PGM_LOCK_ASSERT_OWNER(pVM);
541
542 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatPageMapTlbFlushEntry);
543
544#ifdef IN_RC
545 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
546 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
547 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
548 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
549 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
550#else
551 /* Clear the shared R0/R3 TLB entry. */
552 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
553 pTlbe->GCPhys = NIL_RTGCPHYS;
554 pTlbe->pPage = 0;
555 pTlbe->pMap = 0;
556 pTlbe->pv = 0;
557#endif
558
559 /** @todo clear the RC TLB whenever we add it. */
560}
561
562/**
563 * Makes sure that there is at least one handy page ready for use.
564 *
565 * This will also take the appropriate actions when reaching water-marks.
566 *
567 * @returns VBox status code.
568 * @retval VINF_SUCCESS on success.
569 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
570 *
571 * @param pVM Pointer to the VM.
572 *
573 * @remarks Must be called from within the PGM critical section. It may
574 * nip back to ring-3/0 in some cases.
575 */
576static int pgmPhysEnsureHandyPage(PVM pVM)
577{
578 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
579
580 /*
581 * Do we need to do anything special?
582 */
583#ifdef IN_RING3
584 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
585#else
586 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
587#endif
588 {
589 /*
590 * Allocate pages only if we're out of them, or in ring-3, almost out.
591 */
592#ifdef IN_RING3
593 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
594#else
595 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
596#endif
597 {
598 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
599 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) ));
600#ifdef IN_RING3
601 int rc = PGMR3PhysAllocateHandyPages(pVM);
602#else
603 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
604#endif
605 if (RT_UNLIKELY(rc != VINF_SUCCESS))
606 {
607 if (RT_FAILURE(rc))
608 return rc;
609 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
610 if (!pVM->pgm.s.cHandyPages)
611 {
612 LogRel(("PGM: no more handy pages!\n"));
613 return VERR_EM_NO_MEMORY;
614 }
615 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
616 Assert(VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY));
617#ifdef IN_RING3
618# ifdef VBOX_WITH_REM
619 REMR3NotifyFF(pVM);
620# endif
621#else
622 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
623#endif
624 }
625 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
626 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
627 ("%u\n", pVM->pgm.s.cHandyPages),
628 VERR_PGM_HANDY_PAGE_IPE);
629 }
630 else
631 {
632 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
633 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
634#ifndef IN_RING3
635 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
636 {
637 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
638 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
639 }
640#endif
641 }
642 }
643
644 return VINF_SUCCESS;
645}
646
647
648/**
649 * Replace a zero or shared page with a new page that we can write to.
650 *
651 * @returns The following VBox status codes.
652 * @retval VINF_SUCCESS on success, pPage is modified.
653 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
654 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
655 *
656 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
657 *
658 * @param pVM Pointer to the VM.
659 * @param pPage The physical page tracking structure. This will
660 * be modified on success.
661 * @param GCPhys The address of the page.
662 *
663 * @remarks Must be called from within the PGM critical section. It may
664 * nip back to ring-3/0 in some cases.
665 *
666 * @remarks This function shouldn't really fail; however, if it does,
667 * it probably means we've screwed up the size of handy pages and/or
668 * the low-water mark. Or, that some device I/O is causing a lot of
669 * pages to be allocated while the host is in a low-memory
670 * condition. The latter should be handled elsewhere, in a more
671 * controlled manner; it's on the @bugref{3170} todo list...
672 */
673int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
674{
675 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
676
677 /*
678 * Prereqs.
679 */
680 PGM_LOCK_ASSERT_OWNER(pVM);
681 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
682 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
683
684# ifdef PGM_WITH_LARGE_PAGES
685 /*
686 * Try allocate a large page if applicable.
687 */
688 if ( PGMIsUsingLargePages(pVM)
689 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
690 {
691 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
692 PPGMPAGE pBasePage;
693
694 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage);
695 AssertRCReturn(rc, rc); /* paranoia; can't happen. */
696 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE)
697 {
698 rc = pgmPhysAllocLargePage(pVM, GCPhys);
699 if (rc == VINF_SUCCESS)
700 return rc;
701 }
702 /* Mark the base as type page table, so we don't check over and over again. */
703 PGM_PAGE_SET_PDE_TYPE(pVM, pBasePage, PGM_PAGE_PDE_TYPE_PT);
704
705 /* fall back to 4KB pages. */
706 }
707# endif
708
709 /*
710 * Flush any shadow page table mappings of the page.
711 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
712 */
713 bool fFlushTLBs = false;
714 int rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhys, pPage, true /*fFlushTLBs*/, &fFlushTLBs);
715 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
716
717 /*
718 * Ensure that we've got a page handy, take it and use it.
719 */
720 int rc2 = pgmPhysEnsureHandyPage(pVM);
721 if (RT_FAILURE(rc2))
722 {
723 if (fFlushTLBs)
724 PGM_INVL_ALL_VCPU_TLBS(pVM);
725 Assert(rc2 == VERR_EM_NO_MEMORY);
726 return rc2;
727 }
728 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
729 PGM_LOCK_ASSERT_OWNER(pVM);
730 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
731 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage));
732
733 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
734 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
735 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
736 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
737 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
738 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
739
740 /*
741 * There are one or two actions to be taken the next time we allocate handy pages:
742 * - Tell the GMM (global memory manager) what the page is being used for.
743 * (Speeds up replacement operations - sharing and defragmenting.)
744 * - If the current backing is shared, it must be freed.
745 */
746 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
747 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
748
749 void const *pvSharedPage = NULL;
750 if (PGM_PAGE_IS_SHARED(pPage))
751 {
752 /* Mark this shared page for freeing/dereferencing. */
753 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
754 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
755
756 Log(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
757 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
758 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageReplaceShared));
759 pVM->pgm.s.cSharedPages--;
760
761 /* Grab the address of the page so we can make a copy later on. (safe) */
762 rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvSharedPage);
763 AssertRC(rc);
764 }
765 else
766 {
767 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
768 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->StatRZPageReplaceZero);
769 pVM->pgm.s.cZeroPages--;
770 }
771
772 /*
773 * Do the PGMPAGE modifications.
774 */
775 pVM->pgm.s.cPrivatePages++;
776 PGM_PAGE_SET_HCPHYS(pVM, pPage, HCPhys);
777 PGM_PAGE_SET_PAGEID(pVM, pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
778 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
779 PGM_PAGE_SET_PDE_TYPE(pVM, pPage, PGM_PAGE_PDE_TYPE_PT);
780 pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
781
782 /* Copy the shared page contents to the replacement page. */
783 if (pvSharedPage)
784 {
785 /* Get the virtual address of the new page. */
786 PGMPAGEMAPLOCK PgMpLck;
787 void *pvNewPage;
788 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvNewPage, &PgMpLck); AssertRC(rc);
789 if (RT_SUCCESS(rc))
790 {
791 memcpy(pvNewPage, pvSharedPage, PAGE_SIZE); /** @todo write ASMMemCopyPage */
792 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
793 }
794 }
795
796 if ( fFlushTLBs
797 && rc != VINF_PGM_GCPHYS_ALIASED)
798 PGM_INVL_ALL_VCPU_TLBS(pVM);
799 return rc;
800}
801
802#ifdef PGM_WITH_LARGE_PAGES
803
804/**
805 * Replace a 2 MB range of zero pages with new pages that we can write to.
806 *
807 * @returns The following VBox status codes.
808 * @retval VINF_SUCCESS on success, pPage is modified.
809 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
810 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
811 *
812 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
813 *
814 * @param pVM Pointer to the VM.
815 * @param GCPhys The address of the page.
816 *
817 * @remarks Must be called from within the PGM critical section. It may
818 * nip back to ring-3/0 in some cases.
819 */
820int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
821{
822 RTGCPHYS GCPhysBase = GCPhys & X86_PDE2M_PAE_PG_MASK;
823 LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));
824
825 /*
826 * Prereqs.
827 */
828 PGM_LOCK_ASSERT_OWNER(pVM);
829 Assert(PGMIsUsingLargePages(pVM));
830
831 PPGMPAGE pFirstPage;
832 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage);
833 if ( RT_SUCCESS(rc)
834 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
835 {
836 unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);
837
838 /* Don't call this function for already allocated pages. */
839 Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);
840
841 if ( uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
842 && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
843 {
844 /* Lazy approach: check all pages in the 2 MB range.
845 * The whole range must be ram and unallocated. */
846 GCPhys = GCPhysBase;
847 unsigned iPage;
848 for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
849 {
850 PPGMPAGE pSubPage;
851 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage);
852 if ( RT_FAILURE(rc)
853 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */
854 || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
855 {
856 LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
857 break;
858 }
859 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
860 GCPhys += PAGE_SIZE;
861 }
862 if (iPage != _2M/PAGE_SIZE)
863 {
864 /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
865 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
866 PGM_PAGE_SET_PDE_TYPE(pVM, pFirstPage, PGM_PAGE_PDE_TYPE_PT);
867 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
868 }
869
870 /*
871 * Do the allocation.
872 */
873# ifdef IN_RING3
874 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
875# else
876 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
877# endif
878 if (RT_SUCCESS(rc))
879 {
880 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
881 pVM->pgm.s.cLargePages++;
882 return VINF_SUCCESS;
883 }
884
885 /* If we fail once, it most likely means the host's memory is too
886 fragmented; don't bother trying again. */
887 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
888 PGMSetLargePageUsage(pVM, false);
889 return rc;
890 }
891 }
892 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
893}
894
895
896/**
897 * Recheck the entire 2 MB range to see if we can use it again as a large page.
898 *
899 * @returns The following VBox status codes.
900 * @retval VINF_SUCCESS on success, the large page can be used again
901 * @retval VERR_PGM_INVALID_LARGE_PAGE_RANGE if it can't be reused
902 *
903 * @param pVM Pointer to the VM.
904 * @param GCPhys The address of the page.
905 * @param pLargePage Page structure of the base page
906 */
907int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
908{
909 STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);
910
911 GCPhys &= X86_PDE2M_PAE_PG_MASK;
912
913 /* Check the base page. */
914 Assert(PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
915 if ( PGM_PAGE_GET_STATE(pLargePage) != PGM_PAGE_STATE_ALLOCATED
916 || PGM_PAGE_GET_TYPE(pLargePage) != PGMPAGETYPE_RAM
917 || PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
918 {
919 LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
920 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
921 }
922
923 STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
924 /* Check all remaining pages in the 2 MB range. */
925 unsigned i;
926 GCPhys += PAGE_SIZE;
927 for (i = 1; i < _2M/PAGE_SIZE; i++)
928 {
929 PPGMPAGE pPage;
930 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
931 AssertRCBreak(rc);
932
933 if ( PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
934 || PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
935 || PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_RAM
936 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
937 {
938 LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
939 break;
940 }
941
942 GCPhys += PAGE_SIZE;
943 }
944 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
945
946 if (i == _2M/PAGE_SIZE)
947 {
948 PGM_PAGE_SET_PDE_TYPE(pVM, pLargePage, PGM_PAGE_PDE_TYPE_PDE);
949 pVM->pgm.s.cLargePagesDisabled--;
950 Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
951 return VINF_SUCCESS;
952 }
953
954 return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
955}
956
957#endif /* PGM_WITH_LARGE_PAGES */
958
959/**
960 * Deal with a write monitored page.
961 *
962 * @returns VBox strict status code.
963 *
964 * @param pVM Pointer to the VM.
965 * @param pPage The physical page tracking structure.
966 *
967 * @remarks Called from within the PGM critical section.
968 */
969void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
970{
971 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
972 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
973 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
974 Assert(pVM->pgm.s.cMonitoredPages > 0);
975 pVM->pgm.s.cMonitoredPages--;
976 pVM->pgm.s.cWrittenToPages++;
977}
978
979
980/**
981 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
982 *
983 * @returns VBox strict status code.
984 * @retval VINF_SUCCESS on success.
985 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
986 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
987 *
988 * @param pVM Pointer to the VM.
989 * @param pPage The physical page tracking structure.
990 * @param GCPhys The address of the page.
991 *
992 * @remarks Called from within the PGM critical section.
993 */
994int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
995{
996 PGM_LOCK_ASSERT_OWNER(pVM);
997 switch (PGM_PAGE_GET_STATE(pPage))
998 {
999 case PGM_PAGE_STATE_WRITE_MONITORED:
1000 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
1001 /* fall thru */
1002 default: /* to shut up GCC */
1003 case PGM_PAGE_STATE_ALLOCATED:
1004 return VINF_SUCCESS;
1005
1006 /*
1007 * Zero pages can be dummy pages for MMIO or reserved memory,
1008 * so we need to check the flags before joining cause with
1009 * shared page replacement.
1010 */
1011 case PGM_PAGE_STATE_ZERO:
1012 if (PGM_PAGE_IS_MMIO(pPage))
1013 return VERR_PGM_PHYS_PAGE_RESERVED;
1014 /* fall thru */
1015 case PGM_PAGE_STATE_SHARED:
1016 return pgmPhysAllocPage(pVM, pPage, GCPhys);
1017
1018 /* Not allowed to write to ballooned pages. */
1019 case PGM_PAGE_STATE_BALLOONED:
1020 return VERR_PGM_PHYS_PAGE_BALLOONED;
1021 }
1022}
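/* Editor's note: a hypothetical sketch, not part of the original file, of the
 * pattern the internal callers below follow before writing to a guest page:
 * take the PGM lock, look the page up, and make it writable (which may replace
 * a zero or shared page via pgmPhysAllocPage) when its state isn't ALLOCATED. */
#if 0 /* illustration only */
pgmLock(pVM);
PPGMPAGE pPage;
int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
if ( RT_SUCCESS(rc)
    && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
    rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
pgmUnlock(pVM);
#endif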
1023
1024
1025/**
1026 * Internal usage: Map the page specified by its GMM ID.
1027 *
1028 * This is similar to pgmPhysPageMap.
1029 *
1030 * @returns VBox status code.
1031 *
1032 * @param pVM Pointer to the VM.
1033 * @param idPage The Page ID.
1034 * @param HCPhys The physical address (for RC).
1035 * @param ppv Where to store the mapping address.
1036 *
1037 * @remarks Called from within the PGM critical section. The mapping is only
1038 * valid while you are inside this section.
1039 */
1040int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
1041{
1042 /*
1043 * Validation.
1044 */
1045 PGM_LOCK_ASSERT_OWNER(pVM);
1046 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
1047 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
1048 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
1049
1050#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1051 /*
1052 * Map it by HCPhys.
1053 */
1054 return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1055
1056#else
1057 /*
1058 * Find/make Chunk TLB entry for the mapping chunk.
1059 */
1060 PPGMCHUNKR3MAP pMap;
1061 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1062 if (pTlbe->idChunk == idChunk)
1063 {
1064 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1065 pMap = pTlbe->pChunk;
1066 }
1067 else
1068 {
1069 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1070
1071 /*
1072 * Find the chunk, map it if necessary.
1073 */
1074 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1075 if (pMap)
1076 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1077 else
1078 {
1079# ifdef IN_RING0
1080 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1081 AssertRCReturn(rc, rc);
1082 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1083 Assert(pMap);
1084# else
1085 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1086 if (RT_FAILURE(rc))
1087 return rc;
1088# endif
1089 }
1090
1091 /*
1092 * Enter it into the Chunk TLB.
1093 */
1094 pTlbe->idChunk = idChunk;
1095 pTlbe->pChunk = pMap;
1096 }
1097
1098 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
1099 return VINF_SUCCESS;
1100#endif
1101}
1102
1103
1104/**
1105 * Maps a page into the current virtual address space so it can be accessed.
1106 *
1107 * @returns VBox status code.
1108 * @retval VINF_SUCCESS on success.
1109 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1110 *
1111 * @param pVM Pointer to the VM.
1112 * @param pPage The physical page tracking structure.
1113 * @param GCPhys The address of the page.
1114 * @param ppMap Where to store the address of the mapping tracking structure.
1115 * @param ppv Where to store the mapping address of the page. The page
1116 * offset is masked off!
1117 *
1118 * @remarks Called from within the PGM critical section.
1119 */
1120static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
1121{
1122 PGM_LOCK_ASSERT_OWNER(pVM);
1123 NOREF(GCPhys);
1124
1125#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1126 /*
1127 * Just some sketchy GC/R0-darwin code.
1128 */
1129 *ppMap = NULL;
1130 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
1131 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
1132 pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
1133 return VINF_SUCCESS;
1134
1135#else /* IN_RING3 || IN_RING0 */
1136
1137
1138 /*
1139 * Special cases: MMIO2, ZERO and specially aliased MMIO pages.
1140 */
1141 if ( PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2
1142 || PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
1143 {
1144 /* Decode the page id to a page in a MMIO2 ram range. */
1145 uint8_t idMmio2 = PGM_MMIO2_PAGEID_GET_MMIO2_ID(PGM_PAGE_GET_PAGEID(pPage));
1146 uint32_t iPage = PGM_MMIO2_PAGEID_GET_IDX(PGM_PAGE_GET_PAGEID(pPage));
1147 AssertLogRelMsgReturn((uint8_t)(idMmio2 - 1U) < RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)),
1148 ("idMmio2=%u size=%u type=%u GCPHys=%#RGp Id=%u State=%u", idMmio2,
1149 RT_ELEMENTS(pVM->pgm.s.CTX_SUFF(apMmio2Ranges)), PGM_PAGE_GET_TYPE(pPage), GCPhys,
1150 pPage->s.idPage, pPage->s.uStateY),
1151 VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1152 PPGMMMIO2RANGE pMmio2Range = pVM->pgm.s.CTX_SUFF(apMmio2Ranges)[idMmio2 - 1];
1153 AssertLogRelReturn(pMmio2Range, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1154 AssertLogRelReturn(pMmio2Range->idMmio2 == idMmio2, VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1155 AssertLogRelReturn(iPage < (pMmio2Range->RamRange.cb >> PAGE_SHIFT), VERR_PGM_PHYS_PAGE_MAP_MMIO2_IPE);
1156 *ppv = (uint8_t *)pMmio2Range->RamRange.pvR3 + ((uintptr_t)iPage << PAGE_SHIFT);
1157 *ppMap = NULL;
1158 return VINF_SUCCESS;
1159 }
1160
1161 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
1162 if (idChunk == NIL_GMM_CHUNKID)
1163 {
1164 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage),
1165 VERR_PGM_PHYS_PAGE_MAP_IPE_1);
1166 if (!PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
1167 {
1168 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage),
1169 VERR_PGM_PHYS_PAGE_MAP_IPE_3);
1170 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("pPage=%R[pgmpage]\n", pPage),
1171 VERR_PGM_PHYS_PAGE_MAP_IPE_4);
1172 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1173 }
1174 else
1175 {
1176 static uint8_t s_abPlayItSafe[0x1000*2]; /* I don't dare return the zero page at the moment. */
1177 *ppv = (uint8_t *)((uintptr_t)&s_abPlayItSafe[0x1000] & ~(uintptr_t)0xfff);
1178 }
1179 *ppMap = NULL;
1180 return VINF_SUCCESS;
1181 }
1182
1183 /*
1184 * Find/make Chunk TLB entry for the mapping chunk.
1185 */
1186 PPGMCHUNKR3MAP pMap;
1187 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
1188 if (pTlbe->idChunk == idChunk)
1189 {
1190 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbHits));
1191 pMap = pTlbe->pChunk;
1192 AssertPtr(pMap->pv);
1193 }
1194 else
1195 {
1196 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
1197
1198 /*
1199 * Find the chunk, map it if necessary.
1200 */
1201 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1202 if (pMap)
1203 {
1204 AssertPtr(pMap->pv);
1205 pMap->iLastUsed = pVM->pgm.s.ChunkR3Map.iNow;
1206 }
1207 else
1208 {
1209#ifdef IN_RING0
1210 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
1211 AssertRCReturn(rc, rc);
1212 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
1213 Assert(pMap);
1214#else
1215 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
1216 if (RT_FAILURE(rc))
1217 return rc;
1218#endif
1219 AssertPtr(pMap->pv);
1220 }
1221
1222 /*
1223 * Enter it into the Chunk TLB.
1224 */
1225 pTlbe->idChunk = idChunk;
1226 pTlbe->pChunk = pMap;
1227 }
1228
1229 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
1230 *ppMap = pMap;
1231 return VINF_SUCCESS;
1232#endif /* IN_RING3 */
1233}
1234
1235
1236/**
1237 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
1238 *
1239 * This is typically used in paths where we cannot use the TLB methods (like ROM
1240 * pages) or where there is no point in using them since we won't get many hits.
1241 *
1242 * @returns VBox strict status code.
1243 * @retval VINF_SUCCESS on success.
1244 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
1245 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1246 *
1247 * @param pVM Pointer to the VM.
1248 * @param pPage The physical page tracking structure.
1249 * @param GCPhys The address of the page.
1250 * @param ppv Where to store the mapping address of the page. The page
1251 * offset is masked off!
1252 *
1253 * @remarks Called from within the PGM critical section. The mapping is only
1254 * valid while you are inside this section.
1255 */
1256int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1257{
1258 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1259 if (RT_SUCCESS(rc))
1260 {
1261 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
1262 PPGMPAGEMAP pMapIgnore;
1263 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1264 if (RT_FAILURE(rc2)) /* preserve rc */
1265 rc = rc2;
1266 }
1267 return rc;
1268}
1269
1270
1271/**
1272 * Maps a page into the current virtual address space so it can be accessed for
1273 * both writing and reading.
1274 *
1275 * This is typically used in paths where we cannot use the TLB methods (like ROM
1276 * pages) or where there is no point in using them since we won't get many hits.
1277 *
1278 * @returns VBox status code.
1279 * @retval VINF_SUCCESS on success.
1280 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1281 *
1282 * @param pVM Pointer to the VM.
1283 * @param pPage The physical page tracking structure. Must be in the
1284 * allocated state.
1285 * @param GCPhys The address of the page.
1286 * @param ppv Where to store the mapping address of the page. The page
1287 * offset is masked off!
1288 *
1289 * @remarks Called from within the PGM critical section. The mapping is only
1290 * valid while you are inside this section.
1291 */
1292int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1293{
1294 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
1295 PPGMPAGEMAP pMapIgnore;
1296 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
1297}
1298
1299
1300/**
1301 * Maps a page into the current virtual address space so it can be accessed for
1302 * reading.
1303 *
1304 * This is typically used in paths where we cannot use the TLB methods (like ROM
1305 * pages) or where there is no point in using them since we won't get many hits.
1306 *
1307 * @returns VBox status code.
1308 * @retval VINF_SUCCESS on success.
1309 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1310 *
1311 * @param pVM Pointer to the VM.
1312 * @param pPage The physical page tracking structure.
1313 * @param GCPhys The address of the page.
1314 * @param ppv Where to store the mapping address of the page. The page
1315 * offset is masked off!
1316 *
1317 * @remarks Called from within the PGM critical section. The mapping is only
1318 * valid while you are inside this section.
1319 */
1320int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
1321{
1322 PPGMPAGEMAP pMapIgnore;
1323 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
1324}
1325
1326#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1327
1328/**
1329 * Load a guest page into the ring-3 physical TLB.
1330 *
1331 * @returns VBox status code.
1332 * @retval VINF_SUCCESS on success
1333 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1334 * @param pVM Pointer to the VM.
1335 * @param GCPhys The guest physical address in question.
1336 */
1337int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys)
1338{
1339 PGM_LOCK_ASSERT_OWNER(pVM);
1340
1341 /*
1342 * Find the ram range and page and hand it over to the with-page function.
1343 * 99.8% of requests are expected to be in the first range.
1344 */
1345 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
1346 if (!pPage)
1347 {
1348 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1349 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
1350 }
1351
1352 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys);
1353}
1354
1355
1356/**
1357 * Load a guest page into the ring-3 physical TLB.
1358 *
1359 * @returns VBox status code.
1360 * @retval VINF_SUCCESS on success
1361 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1362 *
1363 * @param pVM Pointer to the VM.
1364 * @param pPage Pointer to the PGMPAGE structure corresponding to
1365 * GCPhys.
1366 * @param GCPhys The guest physical address in question.
1367 */
1368int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
1369{
1370 PGM_LOCK_ASSERT_OWNER(pVM);
1371 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));
1372
1373 /*
1374 * Map the page.
1375 * Make a special case for the zero page as it is kind of special.
1376 */
1377 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
1378 if ( !PGM_PAGE_IS_ZERO(pPage)
1379 && !PGM_PAGE_IS_BALLOONED(pPage))
1380 {
1381 void *pv;
1382 PPGMPAGEMAP pMap;
1383 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv);
1384 if (RT_FAILURE(rc))
1385 return rc;
1386 pTlbe->pMap = pMap;
1387 pTlbe->pv = pv;
1388 Assert(!((uintptr_t)pTlbe->pv & PAGE_OFFSET_MASK));
1389 }
1390 else
1391 {
1392 AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
1393 pTlbe->pMap = NULL;
1394 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
1395 }
1396#ifdef PGM_WITH_PHYS_TLB
1397 if ( PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
1398 || PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
1399 pTlbe->GCPhys = GCPhys & X86_PTE_PAE_PG_MASK;
1400 else
1401 pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
1402#else
1403 pTlbe->GCPhys = NIL_RTGCPHYS;
1404#endif
1405 pTlbe->pPage = pPage;
1406 return VINF_SUCCESS;
1407}
1408
1409#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1410
1411/**
1412 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1413 * own the PGM lock and therefore does not need to lock the mapped page.
1414 *
1415 * @returns VBox status code.
1416 * @retval VINF_SUCCESS on success.
1417 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1418 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1419 *
1420 * @param pVM Pointer to the VM.
1421 * @param GCPhys The guest physical address of the page that should be mapped.
1422 * @param pPage Pointer to the PGMPAGE structure for the page.
1423 * @param ppv Where to store the address corresponding to GCPhys.
1424 *
1425 * @internal
1426 * @deprecated Use pgmPhysGCPhys2CCPtrInternalEx.
1427 */
1428int pgmPhysGCPhys2CCPtrInternalDepr(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
1429{
1430 int rc;
1431 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1432 PGM_LOCK_ASSERT_OWNER(pVM);
1433 pVM->pgm.s.cDeprecatedPageLocks++;
1434
1435 /*
1436 * Make sure the page is writable.
1437 */
1438 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1439 {
1440 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1441 if (RT_FAILURE(rc))
1442 return rc;
1443 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1444 }
1445 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1446
1447 /*
1448 * Get the mapping address.
1449 */
1450#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1451 void *pv;
1452 rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
1453 PGM_PAGE_GET_HCPHYS(pPage),
1454 &pv
1455 RTLOG_COMMA_SRC_POS);
1456 if (RT_FAILURE(rc))
1457 return rc;
1458 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1459#else
1460 PPGMPAGEMAPTLBE pTlbe;
1461 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1462 if (RT_FAILURE(rc))
1463 return rc;
1464 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1465#endif
1466 return VINF_SUCCESS;
1467}
1468
1469#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1470
1471/**
1472 * Locks a page mapping for writing.
1473 *
1474 * @param pVM Pointer to the VM.
1475 * @param pPage The page.
1476 * @param pTlbe The mapping TLB entry for the page.
1477 * @param pLock The lock structure (output).
1478 */
1479DECLINLINE(void) pgmPhysPageMapLockForWriting(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1480{
1481 PPGMPAGEMAP pMap = pTlbe->pMap;
1482 if (pMap)
1483 pMap->cRefs++;
1484
1485 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1486 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1487 {
1488 if (cLocks == 0)
1489 pVM->pgm.s.cWriteLockedPages++;
1490 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1491 }
1492 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1493 {
1494 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1495 AssertMsgFailed(("%R[pgmpage] is entering permanent write locked state!\n", pPage));
1496 if (pMap)
1497 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1498 }
1499
1500 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1501 pLock->pvMap = pMap;
1502}
1503
1504/**
1505 * Locks a page mapping for reading.
1506 *
1507 * @param pVM Pointer to the VM.
1508 * @param pPage The page.
1509 * @param pTlbe The mapping TLB entry for the page.
1510 * @param pLock The lock structure (output).
1511 */
1512DECLINLINE(void) pgmPhysPageMapLockForReading(PVM pVM, PPGMPAGE pPage, PPGMPAGEMAPTLBE pTlbe, PPGMPAGEMAPLOCK pLock)
1513{
1514 PPGMPAGEMAP pMap = pTlbe->pMap;
1515 if (pMap)
1516 pMap->cRefs++;
1517
1518 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1519 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1520 {
1521 if (cLocks == 0)
1522 pVM->pgm.s.cReadLockedPages++;
1523 PGM_PAGE_INC_READ_LOCKS(pPage);
1524 }
1525 else if (cLocks != PGM_PAGE_MAX_LOCKS)
1526 {
1527 PGM_PAGE_INC_READ_LOCKS(pPage);
1528 AssertMsgFailed(("%R[pgmpage] is entering permanent read locked state!\n", pPage));
1529 if (pMap)
1530 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1531 }
1532
1533 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1534 pLock->pvMap = pMap;
1535}
1536
1537#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
1538
1539
1540/**
1541 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
1542 * own the PGM lock and have access to the page structure.
1543 *
1544 * @returns VBox status code.
1545 * @retval VINF_SUCCESS on success.
1546 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1547 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1548 *
1549 * @param pVM Pointer to the VM.
1550 * @param GCPhys The guest physical address of the page that should be mapped.
1551 * @param pPage Pointer to the PGMPAGE structure for the page.
1552 * @param ppv Where to store the address corresponding to GCPhys.
1553 * @param pLock Where to store the lock information that
1554 * pgmPhysReleaseInternalPageMappingLock needs.
1555 *
1556 * @internal
1557 */
1558int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1559{
1560 int rc;
1561 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1562 PGM_LOCK_ASSERT_OWNER(pVM);
1563
1564 /*
1565 * Make sure the page is writable.
1566 */
1567 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1568 {
1569 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1570 if (RT_FAILURE(rc))
1571 return rc;
1572 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1573 }
1574 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1575
1576 /*
1577 * Do the job.
1578 */
1579#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1580 void *pv;
1581 PVMCPU pVCpu = VMMGetCpu(pVM);
1582 rc = pgmRZDynMapHCPageInlined(pVCpu,
1583 PGM_PAGE_GET_HCPHYS(pPage),
1584 &pv
1585 RTLOG_COMMA_SRC_POS);
1586 if (RT_FAILURE(rc))
1587 return rc;
1588 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1589 pLock->pvPage = pv;
1590 pLock->pVCpu = pVCpu;
1591
1592#else
1593 PPGMPAGEMAPTLBE pTlbe;
1594 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1595 if (RT_FAILURE(rc))
1596 return rc;
1597 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1598 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1599#endif
1600 return VINF_SUCCESS;
1601}
1602
1603
1604/**
1605 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
1606 * own the PGM lock and have access to the page structure.
1607 *
1608 * @returns VBox status code.
1609 * @retval VINF_SUCCESS on success.
1610 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1611 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1612 *
1613 * @param pVM Pointer to the VM.
1614 * @param GCPhys The guest physical address of the page that should be mapped.
1615 * @param pPage Pointer to the PGMPAGE structure for the page.
1616 * @param ppv Where to store the address corresponding to GCPhys.
1617 * @param pLock Where to store the lock information that
1618 * pgmPhysReleaseInternalPageMappingLock needs.
1619 *
1620 * @internal
1621 */
1622int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv, PPGMPAGEMAPLOCK pLock)
1623{
1624 AssertReturn(pPage, VERR_PGM_PHYS_NULL_PAGE_PARAM);
1625 PGM_LOCK_ASSERT_OWNER(pVM);
1626 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1627
1628 /*
1629 * Do the job.
1630 */
1631#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1632 void *pv;
1633 PVMCPU pVCpu = VMMGetCpu(pVM);
1634 int rc = pgmRZDynMapHCPageInlined(pVCpu,
1635 PGM_PAGE_GET_HCPHYS(pPage),
1636 &pv
1637 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1638 if (RT_FAILURE(rc))
1639 return rc;
1640 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1641 pLock->pvPage = pv;
1642 pLock->pVCpu = pVCpu;
1643
1644#else
1645 PPGMPAGEMAPTLBE pTlbe;
1646 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1647 if (RT_FAILURE(rc))
1648 return rc;
1649 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1650 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1651#endif
1652 return VINF_SUCCESS;
1653}
1654
1655
1656/**
1657 * Requests the mapping of a guest page into the current context.
1658 *
1659 * This API should only be used for very short-term access, as it will consume scarce
1660 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1661 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1662 *
1663 * This API will assume your intention is to write to the page, and will
1664 * therefore replace shared and zero pages. If you do not intend to modify
1665 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1666 *
1667 * @returns VBox status code.
1668 * @retval VINF_SUCCESS on success.
1669 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1670 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1671 *
1672 * @param pVM Pointer to the VM.
1673 * @param GCPhys The guest physical address of the page that should be
1674 * mapped.
1675 * @param ppv Where to store the address corresponding to GCPhys.
1676 * @param pLock Where to store the lock information that
1677 * PGMPhysReleasePageMappingLock needs.
1678 *
1679 * @remarks The caller is responsible for dealing with access handlers.
1680 * @todo Add an informational return code for pages with access handlers?
1681 *
1682 * @remark Avoid calling this API from within critical sections (other than
1683 * the PGM one) because of the deadlock risk. External threads may
1684 * need to delegate jobs to the EMTs.
1685 * @remarks Only one page is mapped! Make no assumption about what's after or
1686 * before the returned page!
1687 * @thread Any thread.
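 *
 * A typical usage pattern looks roughly like the sketch below; pvData and
 * cbData are placeholder names for whatever the caller wants to write:
 * @code
 *      PGMPAGEMAPLOCK Lock;
 *      void          *pvPage;
 *      int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvPage, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(pvPage, pvData, cbData); // must stay within the one mapped page
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 * @endcode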
1688 */
1689VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1690{
1691 int rc = pgmLock(pVM);
1692 AssertRCReturn(rc, rc);
1693
1694#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1695 /*
1696 * Find the page and make sure it's writable.
1697 */
1698 PPGMPAGE pPage;
1699 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1700 if (RT_SUCCESS(rc))
1701 {
1702 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1703 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1704 if (RT_SUCCESS(rc))
1705 {
1706 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1707
1708 PVMCPU pVCpu = VMMGetCpu(pVM);
1709 void *pv;
1710 rc = pgmRZDynMapHCPageInlined(pVCpu,
1711 PGM_PAGE_GET_HCPHYS(pPage),
1712 &pv
1713 RTLOG_COMMA_SRC_POS);
1714 if (RT_SUCCESS(rc))
1715 {
1716 AssertRCSuccess(rc);
1717
1718 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1719 *ppv = pv;
1720 pLock->pvPage = pv;
1721 pLock->pVCpu = pVCpu;
1722 }
1723 }
1724 }
1725
1726#else /* IN_RING3 || IN_RING0 */
1727 /*
1728 * Query the Physical TLB entry for the page (may fail).
1729 */
1730 PPGMPAGEMAPTLBE pTlbe;
1731 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1732 if (RT_SUCCESS(rc))
1733 {
1734 /*
1735 * If the page is shared, the zero page, or being write monitored
1736 * it must be converted to a page that's writable if possible.
1737 */
1738 PPGMPAGE pPage = pTlbe->pPage;
1739 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1740 {
1741 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1742 if (RT_SUCCESS(rc))
1743 {
1744 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1745 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
1746 }
1747 }
1748 if (RT_SUCCESS(rc))
1749 {
1750 /*
1751 * Now, just perform the locking and calculate the return address.
1752 */
1753 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
1754 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1755 }
1756 }
1757
1758#endif /* IN_RING3 || IN_RING0 */
1759 pgmUnlock(pVM);
1760 return rc;
1761}
1762
1763
1764/**
1765 * Requests the mapping of a guest page into the current context.
1766 *
1767 * This API should only be used for very short periods, as it will consume scarce
1768 * resources (R0 and GC) in the mapping cache. When you're done with the page,
1769 * call PGMPhysReleasePageMappingLock() ASAP to release it.
1770 *
1771 * @returns VBox status code.
1772 * @retval VINF_SUCCESS on success.
1773 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1774 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1775 *
1776 * @param pVM Pointer to the VM.
1777 * @param GCPhys The guest physical address of the page that should be
1778 * mapped.
1779 * @param ppv Where to store the address corresponding to GCPhys.
1780 * @param pLock Where to store the lock information that
1781 * PGMPhysReleasePageMappingLock needs.
1782 *
1783 * @remarks The caller is responsible for dealing with access handlers.
1784 * @todo Add an informational return code for pages with access handlers?
1785 *
1786 * @remarks Avoid calling this API from within critical sections (other than
1787 * the PGM one) because of the deadlock risk.
1788 * @remarks Only one page is mapped! Make no assumption about what's after or
1789 * before the returned page!
1790 * @thread Any thread.
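 *
 * A read-only access would typically look something like this sketch; abBuf
 * and cbBuf are placeholder names for the caller's destination buffer:
 * @code
 *      PGMPAGEMAPLOCK Lock;
 *      void const    *pvPage;
 *      int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvPage, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          memcpy(abBuf, pvPage, cbBuf);   // read only, within the one page
 *          PGMPhysReleasePageMappingLock(pVM, &Lock);
 *      }
 * @endcode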
1791 */
1792VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1793{
1794 int rc = pgmLock(pVM);
1795 AssertRCReturn(rc, rc);
1796
1797#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1798 /*
1799 * Find the page and make sure it's readable.
1800 */
1801 PPGMPAGE pPage;
1802 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
1803 if (RT_SUCCESS(rc))
1804 {
1805 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1806 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1807 else
1808 {
1809 PVMCPU pVCpu = VMMGetCpu(pVM);
1810 void *pv;
1811 rc = pgmRZDynMapHCPageInlined(pVCpu,
1812 PGM_PAGE_GET_HCPHYS(pPage),
1813 &pv
1814 RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
1815 if (RT_SUCCESS(rc))
1816 {
1817 AssertRCSuccess(rc);
1818
1819 pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1820 *ppv = pv;
1821 pLock->pvPage = pv;
1822 pLock->pVCpu = pVCpu;
1823 }
1824 }
1825 }
1826
1827#else /* IN_RING3 || IN_RING0 */
1828 /*
1829 * Query the Physical TLB entry for the page (may fail).
1830 */
1831 PPGMPAGEMAPTLBE pTlbe;
1832 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe);
1833 if (RT_SUCCESS(rc))
1834 {
1835 /* MMIO pages don't have any readable backing. */
1836 PPGMPAGE pPage = pTlbe->pPage;
1837 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
1838 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1839 else
1840 {
1841 /*
1842 * Now, just perform the locking and calculate the return address.
1843 */
1844 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
1845 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
1846 }
1847 }
1848
1849#endif /* IN_RING3 || IN_RING0 */
1850 pgmUnlock(pVM);
1851 return rc;
1852}
1853
1854
1855/**
1856 * Requests the mapping of a guest page given by virtual address into the current context.
1857 *
1858 * This API should only be used for very short periods, as it will consume
1859 * scarce resources (R0 and GC) in the mapping cache. When you're done
1860 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1861 *
1862 * This API will assume your intention is to write to the page, and will
1863 * therefore replace shared and zero pages. If you do not intend to modify
1864 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1865 *
1866 * @returns VBox status code.
1867 * @retval VINF_SUCCESS on success.
1868 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1869 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1870 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1871 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1872 *
1873 * @param pVCpu Pointer to the VMCPU.
1874 * @param GCPtr The guest virtual address of the page that should be mapped.
1875 * @param ppv Where to store the address corresponding to GCPtr.
1876 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1877 *
1878 * @remark Avoid calling this API from within critical sections (other than
1879 * the PGM one) because of the deadlock risk.
1880 * @thread EMT
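 *
 * This is essentially a PGMPhysGCPtr2GCPhys translation followed by
 * PGMPhysGCPhys2CCPtr, so a caller might use it roughly like this sketch:
 * @code
 *      PGMPAGEMAPLOCK Lock;
 *      void          *pvPage;
 *      int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtr, &pvPage, &Lock);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... touch the (single) mapped page ...
 *          PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &Lock);
 *      }
 * @endcode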
1881 */
1882VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1883{
1884 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1885 RTGCPHYS GCPhys;
1886 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1887 if (RT_SUCCESS(rc))
1888 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1889 return rc;
1890}
1891
1892
1893/**
1894 * Requests the mapping of a guest page given by virtual address into the current context.
1895 *
1896 * This API should only be used for very short periods, as it will consume
1897 * scarce resources (R0 and GC) in the mapping cache. When you're done
1898 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1899 *
1900 * @returns VBox status code.
1901 * @retval VINF_SUCCESS on success.
1902 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1903 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1904 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1905 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1906 *
1907 * @param pVCpu Pointer to the VMCPU.
1908 * @param GCPtr The guest virtual address of the page that should be mapped.
1909 * @param ppv Where to store the address corresponding to GCPtr.
1910 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1911 *
1912 * @remark Avoid calling this API from within critical sections (other than
1913 * the PGM one) because of the deadlock risk.
1914 * @thread EMT
1915 */
1916VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1917{
1918 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1919 RTGCPHYS GCPhys;
1920 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1921 if (RT_SUCCESS(rc))
1922 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1923 return rc;
1924}
1925
1926
1927/**
1928 * Release the mapping of a guest page.
1929 *
1930 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1931 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1932 *
1933 * @param pVM Pointer to the VM.
1934 * @param pLock The lock structure initialized by the mapping function.
1935 */
1936VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1937{
1938#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1939 Assert(pLock->pvPage != NULL);
1940 Assert(pLock->pVCpu == VMMGetCpu(pVM));
1941 PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
1942 pLock->pVCpu = NULL;
1943 pLock->pvPage = NULL;
1944
1945#else
1946 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1947 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1948 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1949
1950 pLock->uPageAndType = 0;
1951 pLock->pvMap = NULL;
1952
1953 pgmLock(pVM);
1954 if (fWriteLock)
1955 {
1956 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1957 Assert(cLocks > 0);
1958 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1959 {
1960 if (cLocks == 1)
1961 {
1962 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1963 pVM->pgm.s.cWriteLockedPages--;
1964 }
1965 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1966 }
1967
1968 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1969 {
1970 PGM_PAGE_SET_WRITTEN_TO(pVM, pPage);
1971 PGM_PAGE_SET_STATE(pVM, pPage, PGM_PAGE_STATE_ALLOCATED);
1972 Assert(pVM->pgm.s.cMonitoredPages > 0);
1973 pVM->pgm.s.cMonitoredPages--;
1974 pVM->pgm.s.cWrittenToPages++;
1975 }
1976 }
1977 else
1978 {
1979 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1980 Assert(cLocks > 0);
1981 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1982 {
1983 if (cLocks == 1)
1984 {
1985 Assert(pVM->pgm.s.cReadLockedPages > 0);
1986 pVM->pgm.s.cReadLockedPages--;
1987 }
1988 PGM_PAGE_DEC_READ_LOCKS(pPage);
1989 }
1990 }
1991
1992 if (pMap)
1993 {
1994 Assert(pMap->cRefs >= 1);
1995 pMap->cRefs--;
1996 }
1997 pgmUnlock(pVM);
1998#endif /* IN_RING3 */
1999}
2000
2001
2002/**
2003 * Release the internal mapping of a guest page.
2004 *
2005 * This is the counterpart of pgmPhysGCPhys2CCPtrInternal and
2006 * pgmPhysGCPhys2CCPtrInternalReadOnly.
2007 *
2008 * @param pVM Pointer to the VM.
2009 * @param pLock The lock structure initialized by the mapping function.
2010 *
2011 * @remarks Caller must hold the PGM lock.
2012 */
2013void pgmPhysReleaseInternalPageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
2014{
2015 PGM_LOCK_ASSERT_OWNER(pVM);
2016 PGMPhysReleasePageMappingLock(pVM, pLock); /* lazy for now */
2017}
2018
2019
2020/**
2021 * Converts a GC physical address to a HC ring-3 pointer.
2022 *
2023 * @returns VINF_SUCCESS on success.
2024 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2025 * page but has no physical backing.
2026 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2027 * GC physical address.
2028 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2029 * a dynamic ram chunk boundary.
2030 *
2031 * @param pVM Pointer to the VM.
2032 * @param GCPhys The GC physical address to convert.
2033 * @param pR3Ptr Where to store the R3 pointer on success.
2034 *
2035 * @deprecated Avoid when possible!
2036 */
2037int pgmPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2038{
2039/** @todo this is kind of hacky and needs some more work. */
2040#ifndef DEBUG_sandervl
2041 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
2042#endif
2043
2044 Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): don't use this API!\n", GCPhys)); /** @todo eliminate this API! */
2045#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
2046 NOREF(pVM); NOREF(pR3Ptr);
2047 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
2048#else
2049 pgmLock(pVM);
2050
2051 PPGMRAMRANGE pRam;
2052 PPGMPAGE pPage;
2053 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2054 if (RT_SUCCESS(rc))
2055 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2056
2057 pgmUnlock(pVM);
2058 Assert(rc <= VINF_SUCCESS);
2059 return rc;
2060#endif
2061}
2062
2063#if 0 /*defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)*/
2064
2065/**
2066 * Maps and locks a guest CR3 or PD (PAE) page.
2067 *
2068 * @returns VINF_SUCCESS on success.
2069 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
2070 * page but has no physical backing.
2071 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
2072 * GC physical address.
2073 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
2074 * a dynamic ram chunk boundary
2075 *
2076 * @param pVM Pointer to the VM.
2077 * @param GCPhys The GC physical address to convert.
2078 * @param pR3Ptr Where to store the R3 pointer on success. This may or
2079 * may not be valid in ring-0 depending on the
2080 * VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
2081 *
2082 * @remarks The caller must own the PGM lock.
2083 */
2084int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
2085{
2086
2087 PPGMRAMRANGE pRam;
2088 PPGMPAGE pPage;
2089 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
2090 if (RT_SUCCESS(rc))
2091 rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
2092 Assert(rc <= VINF_SUCCESS);
2093 return rc;
2094}
2095
2102#endif
2103
2104/**
2105 * Converts a guest pointer to a GC physical address.
2106 *
2107 * This uses the current CR3/CR0/CR4 of the guest.
2108 *
2109 * @returns VBox status code.
2110 * @param pVCpu Pointer to the VMCPU.
2111 * @param GCPtr The guest pointer to convert.
2112 * @param pGCPhys Where to store the GC physical address.
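 *
 * For example, translating some guest virtual address GCPtrGuest (a
 * placeholder name) might look like this sketch:
 * @code
 *      RTGCPHYS GCPhys;
 *      int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtrGuest, &GCPhys);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // GCPhys now contains the physical address, page offset included.
 *      }
 * @endcode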
2113 */
2114VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
2115{
2116 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
2117 if (pGCPhys && RT_SUCCESS(rc))
2118 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
2119 return rc;
2120}
2121
2122
2123/**
2124 * Converts a guest pointer to a HC physical address.
2125 *
2126 * This uses the current CR3/CR0/CR4 of the guest.
2127 *
2128 * @returns VBox status code.
2129 * @param pVCpu Pointer to the VMCPU.
2130 * @param GCPtr The guest pointer to convert.
2131 * @param pHCPhys Where to store the HC physical address.
2132 */
2133VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
2134{
2135 PVM pVM = pVCpu->CTX_SUFF(pVM);
2136 RTGCPHYS GCPhys;
2137 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
2138 if (RT_SUCCESS(rc))
2139 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
2140 return rc;
2141}
2142
2143
2144
2145#undef LOG_GROUP
2146#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
2147
2148
2149#if defined(IN_RING3) && defined(SOME_UNUSED_FUNCTION)
2150/**
2151 * Cache PGMPhys memory access
2152 *
2153 * @param pVM Pointer to the VM.
2154 * @param pCache Cache structure pointer
2155 * @param GCPhys GC physical address
2156 * @param pbR3 R3 pointer corresponding to the physical page
2157 *
2158 * @thread EMT.
2159 */
2160static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
2161{
2162 uint32_t iCacheIndex;
2163
2164 Assert(VM_IS_EMT(pVM));
2165
2166 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
2167 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
2168
2169 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
2170
2171 ASMBitSet(&pCache->aEntries, iCacheIndex);
2172
2173 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
2174 pCache->Entry[iCacheIndex].pbR3 = pbR3;
2175}
2176#endif /* IN_RING3 */
2177
2178
2179/**
2180 * Deals with reading from a page with one or more ALL access handlers.
2181 *
2182 * @returns VBox status code. Can be ignored in ring-3.
2183 * @retval VINF_SUCCESS.
2184 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2185 *
2186 * @param pVM Pointer to the VM.
2187 * @param pPage The page descriptor.
2188 * @param GCPhys The physical address to start reading at.
2189 * @param pvBuf Where to put the bits we read.
2190 * @param cb How much to read - less or equal to a page.
2191 * @param enmOrigin The origin of this call.
2192 */
2193static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb, PGMACCESSORIGIN enmOrigin)
2194{
2195 /*
2196 * The most frequent accesses here are MMIO and shadowed ROM.
2197 * The current code ASSUMES all these access handlers cover full pages!
2198 */
2199
2200 /*
2201 * Whatever we do we need the source page, map it first.
2202 */
2203 PGMPAGEMAPLOCK PgMpLck;
2204 const void *pvSrc = NULL;
2205 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc, &PgMpLck);
2206 if (RT_FAILURE(rc))
2207 {
2208 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2209 GCPhys, pPage, rc));
2210 memset(pvBuf, 0xff, cb);
2211 return VINF_SUCCESS;
2212 }
2213 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2214
2215 /*
2216 * Deal with any physical handlers.
2217 */
2218 PVMCPU pVCpu = VMMGetCpu(pVM);
2219#ifdef IN_RING3
2220 PPGMPHYSHANDLER pPhys = NULL;
2221#endif
2222 if ( PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL
2223 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2224 {
2225#ifdef IN_RING3
2226 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2227 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2228 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
2229 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
2230 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2231
2232 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler); Assert(pfnHandler);
2233 void *pvUser = pPhys->CTX_SUFF(pvUser);
2234
2235 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
2236 STAM_PROFILE_START(&pPhys->Stat, h);
2237 PGM_LOCK_ASSERT_OWNER(pVM);
2238 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2239 pgmUnlock(pVM);
2240 rc = pfnHandler(pVM, pVCpu, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin, pvUser);
2241 pgmLock(pVM);
2242# ifdef VBOX_WITH_STATISTICS
2243 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2244 if (pPhys)
2245 STAM_PROFILE_STOP(&pPhys->Stat, h);
2246# else
2247 pPhys = NULL; /* might not be valid anymore. */
2248# endif
2249 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
2250#else
2251 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2252 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2253 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2254 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2255#endif
2256 }
2257
2258 /*
2259 * Deal with any virtual handlers.
2260 */
2261 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
2262 {
2263 unsigned iPage;
2264 PPGMVIRTHANDLER pVirt;
2265
2266 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
2267 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
2268 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
2269 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2270 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
2271
2272 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2273#ifdef IN_RING3
2274 if (pVirtType->pfnHandlerR3)
2275 {
2276 if (!pPhys)
2277 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2278 else
2279 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
2280 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2281 + (iPage << PAGE_SHIFT)
2282 + (GCPhys & PAGE_OFFSET_MASK);
2283
2284 STAM_PROFILE_START(&pVirt->Stat, h);
2285 rc2 = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, enmOrigin,
2286 pVirt->CTX_SUFF(pvUser));
2287 STAM_PROFILE_STOP(&pVirt->Stat, h);
2288 if (rc2 == VINF_SUCCESS)
2289 rc = VINF_SUCCESS;
2290 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
2291 }
2292 else
2293 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
2294#else
2295 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2296 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
2297 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2298 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2299#endif
2300 }
2301
2302 /*
2303 * Take the default action.
2304 */
2305 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2306 memcpy(pvBuf, pvSrc, cb);
2307 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2308 return rc;
2309}
2310
2311
2312/**
2313 * Read physical memory.
2314 *
2315 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
2316 * want to ignore those.
2317 *
2318 * @returns VBox status code. Can be ignored in ring-3.
2319 * @retval VINF_SUCCESS.
2320 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2321 *
2322 * @param pVM Pointer to the VM.
2323 * @param GCPhys Physical address start reading from.
2324 * @param pvBuf Where to put the read bits.
2325 * @param cbRead How many bytes to read.
2326 * @param enmOrigin The origin of this call.
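 *
 * A ring-3 caller can usually ignore the status, e.g. something along the
 * lines of this sketch (abBuf and enmOrigin are whatever the caller has):
 * @code
 *      uint8_t abBuf[64];
 *      int rc = PGMPhysRead(pVM, GCPhys, abBuf, sizeof(abBuf), enmOrigin);
 *      // In R0/RC rc may be VERR_PGM_PHYS_WR_HIT_HANDLER and must be
 *      // acted upon (typically by deferring the access to ring-3).
 * @endcode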
2327 */
2328VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead, PGMACCESSORIGIN enmOrigin)
2329{
2330 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
2331 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
2332
2333 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysRead));
2334 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysReadBytes), cbRead);
2335
2336 pgmLock(pVM);
2337
2338 /*
2339 * Copy loop on ram ranges.
2340 */
2341 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2342 for (;;)
2343 {
2344 /* Inside range or not? */
2345 if (pRam && GCPhys >= pRam->GCPhys)
2346 {
2347 /*
2348 * Must work our way thru this page by page.
2349 */
2350 RTGCPHYS off = GCPhys - pRam->GCPhys;
2351 while (off < pRam->cb)
2352 {
2353 unsigned iPage = off >> PAGE_SHIFT;
2354 PPGMPAGE pPage = &pRam->aPages[iPage];
2355 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2356 if (cb > cbRead)
2357 cb = cbRead;
2358
2359 /*
2360 * Any ALL access handlers?
2361 */
2362 if (RT_UNLIKELY( PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
2363 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage)))
2364 {
2365 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2366 if (RT_FAILURE(rc))
2367 {
2368 pgmUnlock(pVM);
2369 return rc;
2370 }
2371 }
2372 else
2373 {
2374 /*
2375 * Get the pointer to the page.
2376 */
2377 PGMPAGEMAPLOCK PgMpLck;
2378 const void *pvSrc;
2379 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc, &PgMpLck);
2380 if (RT_SUCCESS(rc))
2381 {
2382 memcpy(pvBuf, pvSrc, cb);
2383 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2384 }
2385 else
2386 {
2387 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
2388 pRam->GCPhys + off, pPage, rc));
2389 memset(pvBuf, 0xff, cb);
2390 }
2391 }
2392
2393 /* next page */
2394 if (cb >= cbRead)
2395 {
2396 pgmUnlock(pVM);
2397 return VINF_SUCCESS;
2398 }
2399 cbRead -= cb;
2400 off += cb;
2401 pvBuf = (char *)pvBuf + cb;
2402 } /* walk pages in ram range. */
2403
2404 GCPhys = pRam->GCPhysLast + 1;
2405 }
2406 else
2407 {
2408 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
2409
2410 /*
2411 * Unassigned address space.
2412 */
2413 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0;
2414 if (cb >= cbRead)
2415 {
2416 memset(pvBuf, 0xff, cbRead);
2417 break;
2418 }
2419 memset(pvBuf, 0xff, cb);
2420
2421 cbRead -= cb;
2422 pvBuf = (char *)pvBuf + cb;
2423 GCPhys += cb;
2424 }
2425
2426 /* Advance range if necessary. */
2427 while (pRam && GCPhys > pRam->GCPhysLast)
2428 pRam = pRam->CTX_SUFF(pNext);
2429 } /* Ram range walk */
2430
2431 pgmUnlock(pVM);
2432 return VINF_SUCCESS;
2433}
2434
2435
2436/**
2437 * Deals with writing to a page with one or more WRITE or ALL access handlers.
2438 *
2439 * @returns VBox status code. Can be ignored in ring-3.
2440 * @retval VINF_SUCCESS.
2441 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2442 *
2443 * @param pVM Pointer to the VM.
2444 * @param pPage The page descriptor.
2445 * @param GCPhys The physical address to start writing at.
2446 * @param pvBuf What to write.
2447 * @param cbWrite How much to write - less or equal to a page.
2448 * @param enmOrigin The origin of this call.
2449 */
2450static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite,
2451 PGMACCESSORIGIN enmOrigin)
2452{
2453 PGMPAGEMAPLOCK PgMpLck;
2454 void *pvDst = NULL;
2455 int rc;
2456
2457 /*
2458 * Give priority to physical handlers (like #PF does).
2459 *
2460 * Hope for a lonely physical handler first that covers the whole
2461 * write area. This should be a pretty frequent case with MMIO and
2462 * the heavy usage of full page handlers in the page pool.
2463 */
2464 PVMCPU pVCpu = VMMGetCpu(pVM);
2465 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
2466 || PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage) /* screw virtual handlers on MMIO pages */)
2467 {
2468 PPGMPHYSHANDLER pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2469 if (pCur)
2470 {
2471 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
2472
2473 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
2474 if (cbRange > cbWrite)
2475 cbRange = cbWrite;
2476
2477#ifndef IN_RING3
2478 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2479 NOREF(cbRange);
2480 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2481 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2482
2483#else /* IN_RING3 */
2484 Assert(PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler));
2485 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n",
2486 GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2487 if (!PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage))
2488 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2489 else
2490 rc = VINF_SUCCESS;
2491 if (RT_SUCCESS(rc))
2492 {
2493 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pCur)->CTX_SUFF(pfnHandler);
2494 void *pvUser = pCur->CTX_SUFF(pvUser);
2495
2496 STAM_PROFILE_START(&pCur->Stat, h);
2497 PGM_LOCK_ASSERT_OWNER(pVM);
2498 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2499 pgmUnlock(pVM);
2500 rc = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2501 pgmLock(pVM);
2502# ifdef VBOX_WITH_STATISTICS
2503 pCur = pgmHandlerPhysicalLookup(pVM, GCPhys);
2504 if (pCur)
2505 STAM_PROFILE_STOP(&pCur->Stat, h);
2506# else
2507 pCur = NULL; /* might not be valid anymore. */
2508# endif
2509 if (rc == VINF_PGM_HANDLER_DO_DEFAULT && pvDst)
2510 {
2511 if (pvDst)
2512 memcpy(pvDst, pvBuf, cbRange);
2513 }
2514 else
2515 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT,
2516 ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur ? pCur->pszDesc : ""));
2517 }
2518 else
2519 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2520 GCPhys, pPage, rc), rc);
2521 if (RT_LIKELY(cbRange == cbWrite))
2522 {
2523 if (pvDst)
2524 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2525 return VINF_SUCCESS;
2526 }
2527
2528 /* more fun to be had below */
2529 cbWrite -= cbRange;
2530 GCPhys += cbRange;
2531 pvBuf = (uint8_t *)pvBuf + cbRange;
2532 pvDst = (uint8_t *)pvDst + cbRange;
2533#endif /* IN_RING3 */
2534 }
2535 /* else: the handler is somewhere else in the page, deal with it below. */
2536 Assert(!PGM_PAGE_IS_MMIO_OR_ALIAS(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
2537 }
2538 /*
2539 * A virtual handler without any interfering physical handlers.
2540 * Hopefully it'll cover the whole write.
2541 */
2542 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
2543 {
2544 unsigned iPage;
2545 PPGMVIRTHANDLER pCur;
2546 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
2547 if (RT_SUCCESS(rc))
2548 {
2549 PPGMVIRTHANDLERTYPEINT pCurType = PGMVIRTANDLER_GET_TYPE(pVM, pCur);
2550
2551 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
2552 if (cbRange > cbWrite)
2553 cbRange = cbWrite;
2554
2555#ifndef IN_RING3
2556 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2557 NOREF(cbRange);
2558 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2559 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2560
2561#else /* IN_RING3 */
2562
2563 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
2564 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2565 if (RT_SUCCESS(rc))
2566 {
2567 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2568 if (pCurType->pfnHandlerR3)
2569 {
2570 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
2571 + (iPage << PAGE_SHIFT)
2572 + (GCPhys & PAGE_OFFSET_MASK);
2573
2574 STAM_PROFILE_START(&pCur->Stat, h);
2575 rc = pCurType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2576 enmOrigin, pCur->CTX_SUFF(pvUser));
2577 STAM_PROFILE_STOP(&pCur->Stat, h);
2578 }
2579 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2580 memcpy(pvDst, pvBuf, cbRange);
2581 else
2582 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
2583 }
2584 else
2585 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2586 GCPhys, pPage, rc), rc);
2587 if (RT_LIKELY(cbRange == cbWrite))
2588 {
2589 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2590 return VINF_SUCCESS;
2591 }
2592
2593 /* more fun to be had below */
2594 cbWrite -= cbRange;
2595 GCPhys += cbRange;
2596 pvBuf = (uint8_t *)pvBuf + cbRange;
2597 pvDst = (uint8_t *)pvDst + cbRange;
2598#endif
2599 }
2600 /* else: the handler is somewhere else in the page, deal with it below. */
2601 }
2602
2603 /*
2604 * Deal with all the odd ends.
2605 */
2606
2607 /* We need a writable destination page. */
2608 if (!pvDst)
2609 {
2610 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst, &PgMpLck);
2611 AssertLogRelMsgReturn(RT_SUCCESS(rc),
2612 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2613 GCPhys, pPage, rc), rc);
2614 }
2615
2616 /* The loop state (big + ugly). */
2617 unsigned iVirtPage = 0;
2618 PPGMVIRTHANDLER pVirt = NULL;
2619 uint32_t offVirt = PAGE_SIZE;
2620 uint32_t offVirtLast = PAGE_SIZE;
2621 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
2622
2623 PPGMPHYSHANDLER pPhys = NULL;
2624 uint32_t offPhys = PAGE_SIZE;
2625 uint32_t offPhysLast = PAGE_SIZE;
2626 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
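    /* Note: offPhys/offVirt hold the distance from the current GCPhys to the start
       of the next physical/virtual handler (0 = GCPhys is inside one), offPhysLast/
       offVirtLast the distance to the last byte it covers, and PAGE_SIZE means there
       is no (further) handler of that kind on this page. */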
2627
2628 /* The loop. */
2629 for (;;)
2630 {
2631 /*
2632 * Find the closest handler at or above GCPhys.
2633 */
2634 if (fMoreVirt && !pVirt)
2635 {
2636 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
2637 if (RT_SUCCESS(rc))
2638 {
2639 offVirt = 0;
2640 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2641 }
2642 else
2643 {
2644 PPGMPHYS2VIRTHANDLER pVirtPhys;
2645 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
2646 GCPhys, true /* fAbove */);
2647 if ( pVirtPhys
2648 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
2649 {
2650 /* ASSUME that pVirtPhys only covers one page. */
2651 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
2652 Assert(pVirtPhys->Core.Key > GCPhys);
2653
2654 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
2655 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
2656 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2657 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
2658 }
2659 else
2660 {
2661 pVirt = NULL;
2662 fMoreVirt = false;
2663 offVirt = offVirtLast = PAGE_SIZE;
2664 }
2665 }
2666 }
2667
2668 if (fMorePhys && !pPhys)
2669 {
2670 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2671 if (pPhys)
2672 {
2673 offPhys = 0;
2674 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2675 }
2676 else
2677 {
2678 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2679 GCPhys, true /* fAbove */);
2680 if ( pPhys
2681 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2682 {
2683 offPhys = pPhys->Core.Key - GCPhys;
2684 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2685 }
2686 else
2687 {
2688 pPhys = NULL;
2689 fMorePhys = false;
2690 offPhys = offPhysLast = PAGE_SIZE;
2691 }
2692 }
2693 }
2694
2695 /*
2696 * Handle access to space without handlers (that's easy).
2697 */
2698 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2699 uint32_t cbRange = (uint32_t)cbWrite;
2700 if (offPhys && offVirt)
2701 {
2702 if (cbRange > offPhys)
2703 cbRange = offPhys;
2704 if (cbRange > offVirt)
2705 cbRange = offVirt;
2706 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2707 }
2708 /*
2709 * Physical handler.
2710 */
2711 else if (!offPhys && offVirt)
2712 {
2713 if (cbRange > offPhysLast + 1)
2714 cbRange = offPhysLast + 1;
2715 if (cbRange > offVirt)
2716 cbRange = offVirt;
2717#ifdef IN_RING3
2718 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2719 void *pvUser = pPhys->CTX_SUFF(pvUser);
2720
2721 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2722 STAM_PROFILE_START(&pPhys->Stat, h);
2723 PGM_LOCK_ASSERT_OWNER(pVM);
2724 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2725 pgmUnlock(pVM);
2726 rc = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2727 pgmLock(pVM);
2728# ifdef VBOX_WITH_STATISTICS
2729 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2730 if (pPhys)
2731 STAM_PROFILE_STOP(&pPhys->Stat, h);
2732# else
2733 pPhys = NULL; /* might not be valid anymore. */
2734# endif
2735 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2736#else
2737 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2738 NOREF(cbRange);
2739 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2740 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2741 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2742#endif
2743 }
2744 /*
2745 * Virtual handler.
2746 */
2747 else if (offPhys && !offVirt)
2748 {
2749 if (cbRange > offVirtLast + 1)
2750 cbRange = offVirtLast + 1;
2751 if (cbRange > offPhys)
2752 cbRange = offPhys;
2753
2754 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2755#ifdef IN_RING3
2756 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2757 if (pVirtType->pfnHandlerR3)
2758 {
2759 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2760 + (iVirtPage << PAGE_SHIFT)
2761 + (GCPhys & PAGE_OFFSET_MASK);
2762 STAM_PROFILE_START(&pVirt->Stat, h);
2763 rc = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2764 enmOrigin, pVirt->CTX_SUFF(pvUser));
2765 STAM_PROFILE_STOP(&pVirt->Stat, h);
2766 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2767 }
2768 pVirt = NULL;
2769#else
2770 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2771 NOREF(cbRange);
2772 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2773 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2774 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2775#endif
2776 }
2777 /*
2778 * Both... give the physical one priority.
2779 */
2780 else
2781 {
2782 Assert(!offPhys && !offVirt);
2783 if (cbRange > offVirtLast + 1)
2784 cbRange = offVirtLast + 1;
2785 if (cbRange > offPhysLast + 1)
2786 cbRange = offPhysLast + 1;
2787
2788 PPGMVIRTHANDLERTYPEINT pVirtType = PGMVIRTANDLER_GET_TYPE(pVM, pVirt);
2789#ifdef IN_RING3
2790 if (pVirtType->pfnHandlerR3)
2791 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2792 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2793
2794 PFNPGMPHYSHANDLER pfnHandler = PGMPHYSHANDLER_GET_TYPE(pVM, pPhys)->CTX_SUFF(pfnHandler);
2795 void *pvUser = pPhys->CTX_SUFF(pvUser);
2796
2797 STAM_PROFILE_START(&pPhys->Stat, h);
2798 PGM_LOCK_ASSERT_OWNER(pVM);
2799 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2800 pgmUnlock(pVM);
2801 rc = pfnHandler(pVM, pVCpu, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, enmOrigin, pvUser);
2802 pgmLock(pVM);
2803# ifdef VBOX_WITH_STATISTICS
2804 pPhys = pgmHandlerPhysicalLookup(pVM, GCPhys);
2805 if (pPhys)
2806 STAM_PROFILE_STOP(&pPhys->Stat, h);
2807# else
2808 pPhys = NULL; /* might not be valid anymore. */
2809# endif
2810 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2811 if (pVirtType->pfnHandlerR3)
2812 {
2813
2814 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2815 + (iVirtPage << PAGE_SHIFT)
2816 + (GCPhys & PAGE_OFFSET_MASK);
2817 STAM_PROFILE_START(&pVirt->Stat, h2);
2818 int rc2 = pVirtType->CTX_SUFF(pfnHandler)(pVM, pVCpu, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE,
2819 enmOrigin, pVirt->CTX_SUFF(pvUser));
2820 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2821 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2822 rc = VINF_SUCCESS;
2823 else
2824 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2825 }
2826 pPhys = NULL;
2827 pVirt = NULL;
2828#else
2829 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2830 NOREF(cbRange);
2831 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2832 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2833 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2834#endif
2835 }
2836 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2837 memcpy(pvDst, pvBuf, cbRange);
2838
2839 /*
2840 * Advance if we've got more stuff to do.
2841 */
2842 if (cbRange >= cbWrite)
2843 {
2844 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2845 return VINF_SUCCESS;
2846 }
2847
2848 cbWrite -= cbRange;
2849 GCPhys += cbRange;
2850 pvBuf = (uint8_t *)pvBuf + cbRange;
2851 pvDst = (uint8_t *)pvDst + cbRange;
2852
2853 offPhys -= cbRange;
2854 offPhysLast -= cbRange;
2855 offVirt -= cbRange;
2856 offVirtLast -= cbRange;
2857 }
2858}
2859
2860
2861/**
2862 * Write to physical memory.
2863 *
2864 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2865 * want to ignore those.
2866 *
2867 * @returns VBox status code. Can be ignored in ring-3.
2868 * @retval VINF_SUCCESS.
2869 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2870 *
2871 * @param pVM Pointer to the VM.
2872 * @param GCPhys Physical address to write to.
2873 * @param pvBuf What to write.
2874 * @param cbWrite How many bytes to write.
2875 * @param enmOrigin Who is calling.
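 *
 * For example, a ring-3 caller might write a small buffer like in this
 * sketch (abData and enmOrigin are placeholders for the caller's data):
 * @code
 *      int rc = PGMPhysWrite(pVM, GCPhys, abData, sizeof(abData), enmOrigin);
 *      // rc can usually be ignored in ring-3; in R0/RC it may be
 *      // VERR_PGM_PHYS_WR_HIT_HANDLER and the write must be retried in ring-3.
 * @endcode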
2876 */
2877VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite, PGMACCESSORIGIN enmOrigin)
2878{
2879 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()! enmOrigin=%d\n", enmOrigin));
2880 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2881 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2882
2883 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWrite));
2884 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2885
2886 pgmLock(pVM);
2887
2888 /*
2889 * Copy loop on ram ranges.
2890 */
2891 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
2892 for (;;)
2893 {
2894 /* Inside range or not? */
2895 if (pRam && GCPhys >= pRam->GCPhys)
2896 {
2897 /*
2898 * Must work our way thru this page by page.
2899 */
2900 RTGCPTR off = GCPhys - pRam->GCPhys;
2901 while (off < pRam->cb)
2902 {
2903 RTGCPTR iPage = off >> PAGE_SHIFT;
2904 PPGMPAGE pPage = &pRam->aPages[iPage];
2905 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2906 if (cb > cbWrite)
2907 cb = cbWrite;
2908
2909 /*
2910 * Any active WRITE or ALL access handlers?
2911 */
2912 if ( PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)
2913 || PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
2914 {
2915 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb, enmOrigin);
2916 if (RT_FAILURE(rc))
2917 {
2918 pgmUnlock(pVM);
2919 return rc;
2920 }
2921 }
2922 else
2923 {
2924 /*
2925 * Get the pointer to the page.
2926 */
2927 PGMPAGEMAPLOCK PgMpLck;
2928 void *pvDst;
2929 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst, &PgMpLck);
2930 if (RT_SUCCESS(rc))
2931 {
2932 Assert(!PGM_PAGE_IS_BALLOONED(pPage));
2933 memcpy(pvDst, pvBuf, cb);
2934 pgmPhysReleaseInternalPageMappingLock(pVM, &PgMpLck);
2935 }
2936 /* Ignore writes to ballooned pages. */
2937 else if (!PGM_PAGE_IS_BALLOONED(pPage))
2938 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2939 pRam->GCPhys + off, pPage, rc));
2940 }
2941
2942 /* next page */
2943 if (cb >= cbWrite)
2944 {
2945 pgmUnlock(pVM);
2946 return VINF_SUCCESS;
2947 }
2948
2949 cbWrite -= cb;
2950 off += cb;
2951 pvBuf = (const char *)pvBuf + cb;
2952 } /* walk pages in ram range */
2953
2954 GCPhys = pRam->GCPhysLast + 1;
2955 }
2956 else
2957 {
2958 /*
2959 * Unassigned address space, skip it.
2960 */
2961 if (!pRam)
2962 break;
2963 size_t cb = pRam->GCPhys - GCPhys;
2964 if (cb >= cbWrite)
2965 break;
2966 cbWrite -= cb;
2967 pvBuf = (const char *)pvBuf + cb;
2968 GCPhys += cb;
2969 }
2970
2971 /* Advance range if necessary. */
2972 while (pRam && GCPhys > pRam->GCPhysLast)
2973 pRam = pRam->CTX_SUFF(pNext);
2974 } /* Ram range walk */
2975
2976 pgmUnlock(pVM);
2977 return VINF_SUCCESS;
2978}
2979
2980
2981/**
2982 * Read from guest physical memory by GC physical address, bypassing
2983 * MMIO and access handlers.
2984 *
2985 * @returns VBox status.
2986 * @param pVM Pointer to the VM.
2987 * @param pvDst The destination address.
2988 * @param GCPhysSrc The source address (GC physical address).
2989 * @param cb The number of bytes to read.
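 *
 * For instance, peeking at guest RAM without invoking any access handlers
 * could look like this sketch (abBuf is a placeholder buffer):
 * @code
 *      uint8_t abBuf[16];
 *      int rc = PGMPhysSimpleReadGCPhys(pVM, abBuf, GCPhysSrc, sizeof(abBuf));
 *      if (RT_SUCCESS(rc))
 *      {
 *          // abBuf holds the raw guest bytes; no handlers were called.
 *      }
 * @endcode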
2990 */
2991VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2992{
2993 /*
2994 * Treat the first page as a special case.
2995 */
2996 if (!cb)
2997 return VINF_SUCCESS;
2998
2999 /* map the 1st page */
3000 void const *pvSrc;
3001 PGMPAGEMAPLOCK Lock;
3002 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3003 if (RT_FAILURE(rc))
3004 return rc;
3005
3006 /* optimize for the case where access is completely within the first page. */
3007 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
3008 if (RT_LIKELY(cb <= cbPage))
3009 {
3010 memcpy(pvDst, pvSrc, cb);
3011 PGMPhysReleasePageMappingLock(pVM, &Lock);
3012 return VINF_SUCCESS;
3013 }
3014
3015 /* copy to the end of the page. */
3016 memcpy(pvDst, pvSrc, cbPage);
3017 PGMPhysReleasePageMappingLock(pVM, &Lock);
3018 GCPhysSrc += cbPage;
3019 pvDst = (uint8_t *)pvDst + cbPage;
3020 cb -= cbPage;
3021
3022 /*
3023 * Page by page.
3024 */
3025 for (;;)
3026 {
3027 /* map the page */
3028 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
3029 if (RT_FAILURE(rc))
3030 return rc;
3031
3032 /* last page? */
3033 if (cb <= PAGE_SIZE)
3034 {
3035 memcpy(pvDst, pvSrc, cb);
3036 PGMPhysReleasePageMappingLock(pVM, &Lock);
3037 return VINF_SUCCESS;
3038 }
3039
3040 /* copy the entire page and advance */
3041 memcpy(pvDst, pvSrc, PAGE_SIZE);
3042 PGMPhysReleasePageMappingLock(pVM, &Lock);
3043 GCPhysSrc += PAGE_SIZE;
3044 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3045 cb -= PAGE_SIZE;
3046 }
3047 /* won't ever get here. */
3048}
3049
3050
3051/**
3052 * Write to guest physical memory by GC physical address, bypassing
3053 * MMIO and access handlers.
3056 *
3057 * @returns VBox status.
3058 * @param pVM Pointer to the VM.
3059 * @param GCPhysDst The GC physical address of the destination.
3060 * @param pvSrc The source buffer.
3061 * @param cb The number of bytes to write.
3062 */
3063VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
3064{
3065 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
3066
3067 /*
3068 * Treat the first page as a special case.
3069 */
3070 if (!cb)
3071 return VINF_SUCCESS;
3072
3073 /* map the 1st page */
3074 void *pvDst;
3075 PGMPAGEMAPLOCK Lock;
3076 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3077 if (RT_FAILURE(rc))
3078 return rc;
3079
3080 /* optimize for the case where access is completely within the first page. */
3081 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
3082 if (RT_LIKELY(cb <= cbPage))
3083 {
3084 memcpy(pvDst, pvSrc, cb);
3085 PGMPhysReleasePageMappingLock(pVM, &Lock);
3086 return VINF_SUCCESS;
3087 }
3088
3089 /* copy to the end of the page. */
3090 memcpy(pvDst, pvSrc, cbPage);
3091 PGMPhysReleasePageMappingLock(pVM, &Lock);
3092 GCPhysDst += cbPage;
3093 pvSrc = (const uint8_t *)pvSrc + cbPage;
3094 cb -= cbPage;
3095
3096 /*
3097 * Page by page.
3098 */
3099 for (;;)
3100 {
3101 /* map the page */
3102 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
3103 if (RT_FAILURE(rc))
3104 return rc;
3105
3106 /* last page? */
3107 if (cb <= PAGE_SIZE)
3108 {
3109 memcpy(pvDst, pvSrc, cb);
3110 PGMPhysReleasePageMappingLock(pVM, &Lock);
3111 return VINF_SUCCESS;
3112 }
3113
3114 /* copy the entire page and advance */
3115 memcpy(pvDst, pvSrc, PAGE_SIZE);
3116 PGMPhysReleasePageMappingLock(pVM, &Lock);
3117 GCPhysDst += PAGE_SIZE;
3118 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3119 cb -= PAGE_SIZE;
3120 }
3121 /* won't ever get here. */
3122}
3123
3124
3125/**
3126 * Read from guest physical memory referenced by GC pointer.
3127 *
3128 * This function uses the current CR3/CR0/CR4 of the guest and will
3129 * bypass access handlers and not set any accessed bits.
3130 *
3131 * @returns VBox status.
3132 * @param pVCpu Handle to the current virtual CPU.
3133 * @param pvDst The destination address.
3134 * @param GCPtrSrc The source address (GC pointer).
3135 * @param cb The number of bytes to read.
3136 */
3137VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
3138{
3139 PVM pVM = pVCpu->CTX_SUFF(pVM);
3140/** @todo fix the macro / state handling: VMCPU_ASSERT_EMT_OR_GURU(pVCpu); */
3141
3142 /*
3143 * Treat the first page as a special case.
3144 */
3145 if (!cb)
3146 return VINF_SUCCESS;
3147
3148 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleRead));
3149 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
3150
3151 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
3152 * when many VCPUs are fighting for the lock.
3153 */
3154 pgmLock(pVM);
3155
3156 /* map the 1st page */
3157 void const *pvSrc;
3158 PGMPAGEMAPLOCK Lock;
3159 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3160 if (RT_FAILURE(rc))
3161 {
3162 pgmUnlock(pVM);
3163 return rc;
3164 }
3165
3166 /* optimize for the case where access is completely within the first page. */
3167 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3168 if (RT_LIKELY(cb <= cbPage))
3169 {
3170 memcpy(pvDst, pvSrc, cb);
3171 PGMPhysReleasePageMappingLock(pVM, &Lock);
3172 pgmUnlock(pVM);
3173 return VINF_SUCCESS;
3174 }
3175
3176 /* copy to the end of the page. */
3177 memcpy(pvDst, pvSrc, cbPage);
3178 PGMPhysReleasePageMappingLock(pVM, &Lock);
3179 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
3180 pvDst = (uint8_t *)pvDst + cbPage;
3181 cb -= cbPage;
3182
3183 /*
3184 * Page by page.
3185 */
3186 for (;;)
3187 {
3188 /* map the page */
3189 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
3190 if (RT_FAILURE(rc))
3191 {
3192 pgmUnlock(pVM);
3193 return rc;
3194 }
3195
3196 /* last page? */
3197 if (cb <= PAGE_SIZE)
3198 {
3199 memcpy(pvDst, pvSrc, cb);
3200 PGMPhysReleasePageMappingLock(pVM, &Lock);
3201 pgmUnlock(pVM);
3202 return VINF_SUCCESS;
3203 }
3204
3205 /* copy the entire page and advance */
3206 memcpy(pvDst, pvSrc, PAGE_SIZE);
3207 PGMPhysReleasePageMappingLock(pVM, &Lock);
3208 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
3209 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
3210 cb -= PAGE_SIZE;
3211 }
3212 /* won't ever get here. */
3213}
3214
3215
3216/**
3217 * Write to guest physical memory referenced by GC pointer.
3218 *
3219 * This function uses the current CR3/CR0/CR4 of the guest and will
3220 * bypass access handlers and not set dirty or accessed bits.
3221 *
3222 * @returns VBox status.
3223 * @param pVCpu Handle to the current virtual CPU.
3224 * @param GCPtrDst The destination address (GC pointer).
3225 * @param pvSrc The source address.
3226 * @param cb The number of bytes to write.
3227 */
3228VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3229{
3230 PVM pVM = pVCpu->CTX_SUFF(pVM);
3231 VMCPU_ASSERT_EMT(pVCpu);
3232
3233 /*
3234 * Treat the first page as a special case.
3235 */
3236 if (!cb)
3237 return VINF_SUCCESS;
3238
3239 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWrite));
3240 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
3241
3242 /* map the 1st page */
3243 void *pvDst;
3244 PGMPAGEMAPLOCK Lock;
3245 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3246 if (RT_FAILURE(rc))
3247 return rc;
3248
3249 /* optimize for the case where access is completely within the first page. */
3250 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3251 if (RT_LIKELY(cb <= cbPage))
3252 {
3253 memcpy(pvDst, pvSrc, cb);
3254 PGMPhysReleasePageMappingLock(pVM, &Lock);
3255 return VINF_SUCCESS;
3256 }
3257
3258 /* copy to the end of the page. */
3259 memcpy(pvDst, pvSrc, cbPage);
3260 PGMPhysReleasePageMappingLock(pVM, &Lock);
3261 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3262 pvSrc = (const uint8_t *)pvSrc + cbPage;
3263 cb -= cbPage;
3264
3265 /*
3266 * Page by page.
3267 */
3268 for (;;)
3269 {
3270 /* map the page */
3271 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3272 if (RT_FAILURE(rc))
3273 return rc;
3274
3275 /* last page? */
3276 if (cb <= PAGE_SIZE)
3277 {
3278 memcpy(pvDst, pvSrc, cb);
3279 PGMPhysReleasePageMappingLock(pVM, &Lock);
3280 return VINF_SUCCESS;
3281 }
3282
3283 /* copy the entire page and advance */
3284 memcpy(pvDst, pvSrc, PAGE_SIZE);
3285 PGMPhysReleasePageMappingLock(pVM, &Lock);
3286 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3287 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3288 cb -= PAGE_SIZE;
3289 }
3290 /* won't ever get here. */
3291}
3292
3293
3294/**
3295 * Write to guest physical memory referenced by GC pointer and update the PTE.
3296 *
3297 * This function uses the current CR3/CR0/CR4 of the guest and will
3298 * bypass access handlers but will set any dirty and accessed bits in the PTE.
3299 *
3300 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
3301 *
3302 * @returns VBox status.
3303 * @param pVCpu Handle to the current virtual CPU.
3304 * @param GCPtrDst The destination address (GC pointer).
3305 * @param pvSrc The source address.
3306 * @param cb The number of bytes to write.
3307 */
3308VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
3309{
3310 PVM pVM = pVCpu->CTX_SUFF(pVM);
3311 VMCPU_ASSERT_EMT(pVCpu);
3312
3313 /*
3314 * Treat the first page as a special case.
3315 * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
3316 */
3317 if (!cb)
3318 return VINF_SUCCESS;
3319
3320 /* map the 1st page */
3321 void *pvDst;
3322 PGMPAGEMAPLOCK Lock;
3323 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3324 if (RT_FAILURE(rc))
3325 return rc;
3326
3327 /* optimize for the case where access is completely within the first page. */
3328 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3329 if (RT_LIKELY(cb <= cbPage))
3330 {
3331 memcpy(pvDst, pvSrc, cb);
3332 PGMPhysReleasePageMappingLock(pVM, &Lock);
3333 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3334 return VINF_SUCCESS;
3335 }
3336
3337 /* copy to the end of the page. */
3338 memcpy(pvDst, pvSrc, cbPage);
3339 PGMPhysReleasePageMappingLock(pVM, &Lock);
3340 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3341 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
3342 pvSrc = (const uint8_t *)pvSrc + cbPage;
3343 cb -= cbPage;
3344
3345 /*
3346 * Page by page.
3347 */
3348 for (;;)
3349 {
3350 /* map the page */
3351 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
3352 if (RT_FAILURE(rc))
3353 return rc;
3354
3355 /* last page? */
3356 if (cb <= PAGE_SIZE)
3357 {
3358 memcpy(pvDst, pvSrc, cb);
3359 PGMPhysReleasePageMappingLock(pVM, &Lock);
3360 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3361 return VINF_SUCCESS;
3362 }
3363
3364 /* copy the entire page and advance */
3365 memcpy(pvDst, pvSrc, PAGE_SIZE);
3366 PGMPhysReleasePageMappingLock(pVM, &Lock);
3367 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
3368 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
3369 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
3370 cb -= PAGE_SIZE;
3371 }
3372 /* won't ever get here. */
3373}
3374
3375
3376/**
3377 * Read from guest physical memory referenced by GC pointer.
3378 *
3379 * This function uses the current CR3/CR0/CR4 of the guest and will
3380 * respect access handlers and set accessed bits.
3381 *
3382 * @returns VBox status.
3383 * @param pVCpu Handle to the current virtual CPU.
3384 * @param pvDst The destination address.
3385 * @param GCPtrSrc The source address (GC pointer).
3386 * @param cb The number of bytes to read.
3387 * @param enmOrigin Who is calling.
3388 * @thread EMT(pVCpu)
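 *
 * @par Example
 *      Illustrative sketch only; the buffer size and the origin value are
 *      assumptions, not taken from an actual caller:
 *      @code
 *          uint8_t abBuf[8];
 *          int rc = PGMPhysReadGCPtr(pVCpu, abBuf, GCPtrSrc, sizeof(abBuf), PGMACCESSORIGIN_IEM);
 *          if (rc == VINF_SUCCESS)
 *          {
 *              // abBuf now holds the guest bytes; access handlers were respected.
 *          }
 *      @endcode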
3389 */
3390VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3391{
3392 RTGCPHYS GCPhys;
3393 uint64_t fFlags;
3394 int rc;
3395 PVM pVM = pVCpu->CTX_SUFF(pVM);
3396 VMCPU_ASSERT_EMT(pVCpu);
3397
3398 /*
3399 * Anything to do?
3400 */
3401 if (!cb)
3402 return VINF_SUCCESS;
3403
3404 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
3405
3406 /*
3407 * Optimize reads within a single page.
3408 */
3409 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3410 {
3411 /* Convert virtual to physical address + flags */
3412 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3413 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3414 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3415
3416 /* mark the guest page as accessed. */
3417 if (!(fFlags & X86_PTE_A))
3418 {
3419 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3420 AssertRC(rc);
3421 }
3422
3423 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3424 }
3425
3426 /*
3427 * Page by page.
3428 */
3429 for (;;)
3430 {
3431 /* Convert virtual to physical address + flags */
3432 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
3433 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
3434 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
3435
3436 /* mark the guest page as accessed. */
3437 if (!(fFlags & X86_PTE_A))
3438 {
3439 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
3440 AssertRC(rc);
3441 }
3442
3443 /* copy */
3444 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
3445 if (cbRead < cb)
3446 {
3447 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead, enmOrigin);
3448 if (RT_FAILURE(rc))
3449 return rc;
3450 }
3451 else /* Last page (cbRead is PAGE_SIZE, we only need cb!) */
3452 return PGMPhysRead(pVM, GCPhys, pvDst, cb, enmOrigin);
3453
3454 /* next */
3455 Assert(cb > cbRead);
3456 cb -= cbRead;
3457 pvDst = (uint8_t *)pvDst + cbRead;
3458 GCPtrSrc += cbRead;
3459 }
3460}
3461
3462
3463/**
3464 * Write to guest physical memory referenced by GC pointer.
3465 *
3466 * This function uses the current CR3/CR0/CR4 of the guest and will
3467 * respect access handlers and set dirty and accessed bits.
3468 *
3469 * @returns VBox status.
3470 * @retval VINF_SUCCESS.
3471 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
3472 *
3473 * @param pVCpu Handle to the current virtual CPU.
3474 * @param GCPtrDst The destination address (GC pointer).
3475 * @param pvSrc The source address.
3476 * @param cb The number of bytes to write.
3477 * @param enmOrigin Who is calling.
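 *
 * @par Example
 *      Illustrative sketch only; the variable names and the origin value are
 *      assumptions, not taken from an actual caller:
 *      @code
 *          int rc = PGMPhysWriteGCPtr(pVCpu, GCPtrDst, &uValue, sizeof(uValue), PGMACCESSORIGIN_IEM);
 *          if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)
 *          {
 *              // R0/RC only: the write hit a handler, defer it to ring-3.
 *          }
 *      @endcode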
3478 */
3479VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, PGMACCESSORIGIN enmOrigin)
3480{
3481 RTGCPHYS GCPhys;
3482 uint64_t fFlags;
3483 int rc;
3484 PVM pVM = pVCpu->CTX_SUFF(pVM);
3485 VMCPU_ASSERT_EMT(pVCpu);
3486
3487 /*
3488 * Anything to do?
3489 */
3490 if (!cb)
3491 return VINF_SUCCESS;
3492
3493 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
3494
3495 /*
3496 * Optimize writes within a single page.
3497 */
3498 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
3499 {
3500 /* Convert virtual to physical address + flags */
3501 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3502 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3503 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3504
3505 /* Mention when we ignore X86_PTE_RW... */
3506 if (!(fFlags & X86_PTE_RW))
3507 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3508
3509 /* Mark the guest page as accessed and dirty if necessary. */
3510 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3511 {
3512 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3513 AssertRC(rc);
3514 }
3515
3516 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3517 }
3518
3519 /*
3520 * Page by page.
3521 */
3522 for (;;)
3523 {
3524 /* Convert virtual to physical address + flags */
3525 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
3526 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
3527 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
3528
3529 /* Mention when we ignore X86_PTE_RW... */
3530 if (!(fFlags & X86_PTE_RW))
3531 Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
3532
3533 /* Mark the guest page as accessed and dirty if necessary. */
3534 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
3535 {
3536 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3537 AssertRC(rc);
3538 }
3539
3540 /* copy */
3541 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
3542 if (cbWrite < cb)
3543 {
3544 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite, enmOrigin);
3545 if (RT_FAILURE(rc))
3546 return rc;
3547 }
3548 else /* Last page (cbWrite is PAGE_SIZE, we only need cb!) */
3549 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb, enmOrigin);
3550
3551 /* next */
3552 Assert(cb > cbWrite);
3553 cb -= cbWrite;
3554 pvSrc = (uint8_t *)pvSrc + cbWrite;
3555 GCPtrDst += cbWrite;
3556 }
3557}
3558
3559
3560/**
3561 * Performs a read of guest virtual memory for instruction emulation.
3562 *
3563 * This will check permissions, raise exceptions and update the access bits.
3564 *
3565 * The current implementation will bypass all access handlers. It may later be
3566 * changed to at least respect MMIO.
3567 *
3568 *
3569 * @returns VBox status code suitable to scheduling.
3570 * @retval VINF_SUCCESS if the read was performed successfully.
3571 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3572 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3573 *
3574 * @param pVCpu Handle to the current virtual CPU.
3575 * @param pCtxCore The context core.
3576 * @param pvDst Where to put the bytes we've read.
3577 * @param GCPtrSrc The source address.
3578 * @param cb The number of bytes to read. Not more than a page.
3579 *
3580 * @remark This function will dynamically map physical pages in GC. This may unmap
3581 * mappings done by the caller. Be careful!
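 *
 * @par Example
 *      Illustrative sketch of fetching an operand during emulation; pRegFrame
 *      and the operand size are assumptions, not taken from an actual caller:
 *      @code
 *          uint16_t u16Operand;
 *          int rc = PGMPhysInterpretedRead(pVCpu, pRegFrame, &u16Operand, GCPtrSrc, sizeof(u16Operand));
 *          if (rc == VINF_EM_RAW_GUEST_TRAP)
 *              return rc; // a #PF is pending for the guest.
 *      @endcode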
3582 */
3583VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
3584{
3585 PVM pVM = pVCpu->CTX_SUFF(pVM);
3586 Assert(cb <= PAGE_SIZE);
3587 VMCPU_ASSERT_EMT(pVCpu);
3588
3589/** @todo r=bird: This isn't perfect!
3590 * -# It's not checking for reserved bits being 1.
3591 * -# It's not correctly dealing with the access bit.
3592 * -# It's not respecting MMIO memory or any other access handlers.
3593 */
3594 /*
3595 * 1. Translate virtual to physical. This may fault.
3596 * 2. Map the physical address.
3597 * 3. Do the read operation.
3598 * 4. Set access bits if required.
3599 */
3600 int rc;
3601 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3602 if (cb <= cb1)
3603 {
3604 /*
3605 * Not crossing pages.
3606 */
3607 RTGCPHYS GCPhys;
3608 uint64_t fFlags;
3609 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3610 if (RT_SUCCESS(rc))
3611 {
3612 /** @todo we should check reserved bits ... */
3613 PGMPAGEMAPLOCK PgMpLck;
3614 void const *pvSrc;
3615 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &PgMpLck);
3616 switch (rc)
3617 {
3618 case VINF_SUCCESS:
3619 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
3620 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3621 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3622 break;
3623 case VERR_PGM_PHYS_PAGE_RESERVED:
3624 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3625 memset(pvDst, 0xff, cb);
3626 break;
3627 default:
3628 Assert(RT_FAILURE_NP(rc));
3629 return rc;
3630 }
3631
3632 /** @todo access bit emulation isn't 100% correct. */
3633 if (!(fFlags & X86_PTE_A))
3634 {
3635 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3636 AssertRC(rc);
3637 }
3638 return VINF_SUCCESS;
3639 }
3640 }
3641 else
3642 {
3643 /*
3644 * Crosses pages.
3645 */
3646 size_t cb2 = cb - cb1;
3647 uint64_t fFlags1;
3648 RTGCPHYS GCPhys1;
3649 uint64_t fFlags2;
3650 RTGCPHYS GCPhys2;
3651 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3652 if (RT_SUCCESS(rc))
3653 {
3654 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3655 if (RT_SUCCESS(rc))
3656 {
3657 /** @todo we should check reserved bits ... */
3658 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
3659 PGMPAGEMAPLOCK PgMpLck;
3660 void const *pvSrc1;
3661 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc1, &PgMpLck);
3662 switch (rc)
3663 {
3664 case VINF_SUCCESS:
3665 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3666 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3667 break;
3668 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3669 memset(pvDst, 0xff, cb1);
3670 break;
3671 default:
3672 Assert(RT_FAILURE_NP(rc));
3673 return rc;
3674 }
3675
3676 void const *pvSrc2;
3677 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc2, &PgMpLck);
3678 switch (rc)
3679 {
3680 case VINF_SUCCESS:
3681 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
3682 PGMPhysReleasePageMappingLock(pVM, &PgMpLck);
3683 break;
3684 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3685 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3686 break;
3687 default:
3688 Assert(RT_FAILURE_NP(rc));
3689 return rc;
3690 }
3691
3692 if (!(fFlags1 & X86_PTE_A))
3693 {
3694 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3695 AssertRC(rc);
3696 }
3697 if (!(fFlags2 & X86_PTE_A))
3698 {
3699 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3700 AssertRC(rc);
3701 }
3702 return VINF_SUCCESS;
3703 }
3704 }
3705 }
3706
3707 /*
3708 * Raise a #PF.
3709 */
3710 uint32_t uErr;
3711
3712 /* Get the current privilege level. */
3713 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3714 switch (rc)
3715 {
3716 case VINF_SUCCESS:
3717 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3718 break;
3719
3720 case VERR_PAGE_NOT_PRESENT:
3721 case VERR_PAGE_TABLE_NOT_PRESENT:
3722 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3723 break;
3724
3725 default:
3726 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3727 return rc;
3728 }
3729 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3730 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3731}
3732
3733
3734/**
3735 * Performs a read of guest virtual memory for instruction emulation.
3736 *
3737 * This will check permissions, raise exceptions and update the access bits.
3738 *
3739 * The current implementation will bypass all access handlers. It may later be
3740 * changed to at least respect MMIO.
3741 *
3742 *
3743 * @returns VBox status code suitable to scheduling.
3744 * @retval VINF_SUCCESS if the read was performed successfully.
3745 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3746 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3747 *
3748 * @param pVCpu Handle to the current virtual CPU.
3749 * @param pCtxCore The context core.
3750 * @param pvDst Where to put the bytes we've read.
3751 * @param GCPtrSrc The source address.
3752 * @param cb The number of bytes to read. Not more than a page.
3753 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3754 * an appropriate error status will be returned (no
3755 * informational status at all).
3756 *
3757 *
3758 * @remarks Takes the PGM lock.
3759 * @remarks A page fault on the 2nd page of the access will be raised without
3760 * writing the bits on the first page since we're ASSUMING that the
3761 * caller is emulating an instruction access.
3762 * @remarks This function will dynamically map physical pages in GC. This may
3763 * unmap mappings done by the caller. Be careful!
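 *
 * @par Example
 *      Illustrative sketch with fRaiseTrap clear, i.e. the caller deals with
 *      the failure itself (the variable names are assumptions):
 *      @code
 *          uint64_t u64Val;
 *          int rc = PGMPhysInterpretedReadNoHandlers(pVCpu, pRegFrame, &u64Val, GCPtrSrc,
 *                                                    sizeof(u64Val), false /*fRaiseTrap*/);
 *          if (RT_FAILURE(rc))
 *          {
 *              // e.g. VERR_ACCESS_DENIED or VERR_PAGE_NOT_PRESENT; no #PF was raised.
 *          }
 *      @endcode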
3764 */
3765VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb,
3766 bool fRaiseTrap)
3767{
3768 PVM pVM = pVCpu->CTX_SUFF(pVM);
3769 Assert(cb <= PAGE_SIZE);
3770 VMCPU_ASSERT_EMT(pVCpu);
3771
3772 /*
3773 * 1. Translate virtual to physical. This may fault.
3774 * 2. Map the physical address.
3775 * 3. Do the read operation.
3776 * 4. Set access bits if required.
3777 */
3778 int rc;
3779 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3780 if (cb <= cb1)
3781 {
3782 /*
3783 * Not crossing pages.
3784 */
3785 RTGCPHYS GCPhys;
3786 uint64_t fFlags;
3787 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3788 if (RT_SUCCESS(rc))
3789 {
3790 if (1) /** @todo we should check reserved bits ... */
3791 {
3792 const void *pvSrc;
3793 PGMPAGEMAPLOCK Lock;
3794 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3795 switch (rc)
3796 {
3797 case VINF_SUCCESS:
3798 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3799 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3800 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3801 PGMPhysReleasePageMappingLock(pVM, &Lock);
3802 break;
3803 case VERR_PGM_PHYS_PAGE_RESERVED:
3804 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3805 memset(pvDst, 0xff, cb);
3806 break;
3807 default:
3808 AssertMsgFailed(("%Rrc\n", rc));
3809 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3810 return rc;
3811 }
3812
3813 if (!(fFlags & X86_PTE_A))
3814 {
3815 /** @todo access bit emulation isn't 100% correct. */
3816 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3817 AssertRC(rc);
3818 }
3819 return VINF_SUCCESS;
3820 }
3821 }
3822 }
3823 else
3824 {
3825 /*
3826 * Crosses pages.
3827 */
3828 size_t cb2 = cb - cb1;
3829 uint64_t fFlags1;
3830 RTGCPHYS GCPhys1;
3831 uint64_t fFlags2;
3832 RTGCPHYS GCPhys2;
3833 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3834 if (RT_SUCCESS(rc))
3835 {
3836 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3837 if (RT_SUCCESS(rc))
3838 {
3839 if (1) /** @todo we should check reserved bits ... */
3840 {
3841 const void *pvSrc;
3842 PGMPAGEMAPLOCK Lock;
3843 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3844 switch (rc)
3845 {
3846 case VINF_SUCCESS:
3847 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3848 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3849 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3850 PGMPhysReleasePageMappingLock(pVM, &Lock);
3851 break;
3852 case VERR_PGM_PHYS_PAGE_RESERVED:
3853 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3854 memset(pvDst, 0xff, cb1);
3855 break;
3856 default:
3857 AssertMsgFailed(("%Rrc\n", rc));
3858 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3859 return rc;
3860 }
3861
3862 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3863 switch (rc)
3864 {
3865 case VINF_SUCCESS:
3866 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3867 PGMPhysReleasePageMappingLock(pVM, &Lock);
3868 break;
3869 case VERR_PGM_PHYS_PAGE_RESERVED:
3870 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3871 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3872 break;
3873 default:
3874 AssertMsgFailed(("%Rrc\n", rc));
3875 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3876 return rc;
3877 }
3878
3879 if (!(fFlags1 & X86_PTE_A))
3880 {
3881 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3882 AssertRC(rc);
3883 }
3884 if (!(fFlags2 & X86_PTE_A))
3885 {
3886 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3887 AssertRC(rc);
3888 }
3889 return VINF_SUCCESS;
3890 }
3891 /* sort out which page */
3892 }
3893 else
3894 GCPtrSrc += cb1; /* fault on 2nd page */
3895 }
3896 }
3897
3898 /*
3899 * Raise a #PF if we're allowed to do that.
3900 */
3901 /* Calc the error bits. */
3902 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
3903 uint32_t uErr;
3904 switch (rc)
3905 {
3906 case VINF_SUCCESS:
3907 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3908 rc = VERR_ACCESS_DENIED;
3909 break;
3910
3911 case VERR_PAGE_NOT_PRESENT:
3912 case VERR_PAGE_TABLE_NOT_PRESENT:
3913 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3914 break;
3915
3916 default:
3917 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3918 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3919 return rc;
3920 }
3921 if (fRaiseTrap)
3922 {
3923 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3924 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3925 }
3926 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3927 return rc;
3928}
3929
3930
3931/**
3932 * Performs a write to guest virtual memory for instruction emulation.
3933 *
3934 * This will check permissions, raise exceptions and update the dirty and access
3935 * bits.
3936 *
3937 * @returns VBox status code suitable to scheduling.
3938 * @retval VINF_SUCCESS if the write was performed successfully.
3939 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3940 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3941 *
3942 * @param pVCpu Handle to the current virtual CPU.
3943 * @param pCtxCore The context core.
3944 * @param GCPtrDst The destination address.
3945 * @param pvSrc What to write.
3946 * @param cb The number of bytes to write. Not more than a page.
3947 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3948 * an appropriate error status will be returned (no
3949 * informational status at all).
3950 *
3951 * @remarks Takes the PGM lock.
3952 * @remarks A page fault on the 2nd page of the access will be raised without
3953 * writing the bits on the first page since we're ASSUMING that the
3954 * caller is emulating an instruction access.
3955 * @remarks This function will dynamically map physical pages in GC. This may
3956 * unmap mappings done by the caller. Be careful!
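 *
 * @par Example
 *      Illustrative sketch with fRaiseTrap set (the variable names are
 *      assumptions, not taken from an actual caller):
 *      @code
 *          int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pRegFrame, GCPtrDst, &u32Val,
 *                                                     sizeof(u32Val), true /*fRaiseTrap*/);
 *          if (rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED)
 *              return rc; // a #PF was raised on behalf of the guest.
 *      @endcode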
3957 */
3958VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc,
3959 size_t cb, bool fRaiseTrap)
3960{
3961 Assert(cb <= PAGE_SIZE);
3962 PVM pVM = pVCpu->CTX_SUFF(pVM);
3963 VMCPU_ASSERT_EMT(pVCpu);
3964
3965 /*
3966 * 1. Translate virtual to physical. This may fault.
3967 * 2. Map the physical address.
3968 * 3. Do the write operation.
3969 * 4. Set access bits if required.
3970 */
3971 /** @todo Since this method is frequently used by EMInterpret or IOM
3972 * upon a write fault to a write-access monitored page, we can
3973 * reuse the guest page table walking from the \#PF code. */
3974 int rc;
3975 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3976 if (cb <= cb1)
3977 {
3978 /*
3979 * Not crossing pages.
3980 */
3981 RTGCPHYS GCPhys;
3982 uint64_t fFlags;
3983 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3984 if (RT_SUCCESS(rc))
3985 {
3986 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3987 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3988 && CPUMGetGuestCPL(pVCpu) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3989 {
3990 void *pvDst;
3991 PGMPAGEMAPLOCK Lock;
3992 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3993 switch (rc)
3994 {
3995 case VINF_SUCCESS:
3996 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3997 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3998 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3999 PGMPhysReleasePageMappingLock(pVM, &Lock);
4000 break;
4001 case VERR_PGM_PHYS_PAGE_RESERVED:
4002 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4003 /* bit bucket */
4004 break;
4005 default:
4006 AssertMsgFailed(("%Rrc\n", rc));
4007 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4008 return rc;
4009 }
4010
4011 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
4012 {
4013 /** @todo dirty & access bit emulation isn't 100% correct. */
4014 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
4015 AssertRC(rc);
4016 }
4017 return VINF_SUCCESS;
4018 }
4019 rc = VERR_ACCESS_DENIED;
4020 }
4021 }
4022 else
4023 {
4024 /*
4025 * Crosses pages.
4026 */
4027 size_t cb2 = cb - cb1;
4028 uint64_t fFlags1;
4029 RTGCPHYS GCPhys1;
4030 uint64_t fFlags2;
4031 RTGCPHYS GCPhys2;
4032 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
4033 if (RT_SUCCESS(rc))
4034 {
4035 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
4036 if (RT_SUCCESS(rc))
4037 {
4038 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
4039 && (fFlags2 & X86_PTE_RW))
4040 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
4041 && CPUMGetGuestCPL(pVCpu) <= 2) )
4042 {
4043 void *pvDst;
4044 PGMPAGEMAPLOCK Lock;
4045 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
4046 switch (rc)
4047 {
4048 case VINF_SUCCESS:
4049 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
4050 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
4051 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
4052 PGMPhysReleasePageMappingLock(pVM, &Lock);
4053 break;
4054 case VERR_PGM_PHYS_PAGE_RESERVED:
4055 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4056 /* bit bucket */
4057 break;
4058 default:
4059 AssertMsgFailed(("%Rrc\n", rc));
4060 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4061 return rc;
4062 }
4063
4064 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
4065 switch (rc)
4066 {
4067 case VINF_SUCCESS:
4068 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
4069 PGMPhysReleasePageMappingLock(pVM, &Lock);
4070 break;
4071 case VERR_PGM_PHYS_PAGE_RESERVED:
4072 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
4073 /* bit bucket */
4074 break;
4075 default:
4076 AssertMsgFailed(("%Rrc\n", rc));
4077 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4078 return rc;
4079 }
4080
4081 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
4082 {
4083 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4084 AssertRC(rc);
4085 }
4086 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
4087 {
4088 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
4089 AssertRC(rc);
4090 }
4091 return VINF_SUCCESS;
4092 }
4093 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
4094 GCPtrDst += cb1; /* fault on the 2nd page. */
4095 rc = VERR_ACCESS_DENIED;
4096 }
4097 else
4098 GCPtrDst += cb1; /* fault on the 2nd page. */
4099 }
4100 }
4101
4102 /*
4103 * Raise a #PF if we're allowed to do that.
4104 */
4105 /* Calc the error bits. */
4106 uint32_t uErr;
4107 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
4108 switch (rc)
4109 {
4110 case VINF_SUCCESS:
4111 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
4112 rc = VERR_ACCESS_DENIED;
4113 break;
4114
4115 case VERR_ACCESS_DENIED:
4116 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
4117 break;
4118
4119 case VERR_PAGE_NOT_PRESENT:
4120 case VERR_PAGE_TABLE_NOT_PRESENT:
4121 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
4122 break;
4123
4124 default:
4125 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
4126 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
4127 return rc;
4128 }
4129 if (fRaiseTrap)
4130 {
4131 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
4132 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
4133 }
4134 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
4135 return rc;
4136}
4137
4138
4139/**
4140 * Return the page type of the specified physical address.
4141 *
4142 * @returns The page type.
4143 * @param pVM Pointer to the VM.
4144 * @param GCPhys Guest physical address
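 *
 * @par Example
 *      Illustrative sketch only:
 *      @code
 *          if (PGMPhysGetPageType(pVM, GCPhys) == PGMPAGETYPE_MMIO)
 *          {
 *              // Route the access through IOM instead of touching RAM directly.
 *          }
 *      @endcode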
4145 */
4146VMMDECL(PGMPAGETYPE) PGMPhysGetPageType(PVM pVM, RTGCPHYS GCPhys)
4147{
4148 pgmLock(pVM);
4149 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
4150 PGMPAGETYPE enmPgType = pPage ? (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage) : PGMPAGETYPE_INVALID;
4151 pgmUnlock(pVM);
4152
4153 return enmPgType;
4154}
4155
4156
4157
4158
4159/**
4160 * Converts a GC physical address to a HC ring-3 pointer, with some
4161 * additional checks.
4162 *
4163 * @returns VBox status code (no informational statuses).
4164 * @retval VINF_SUCCESS on success.
4165 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4166 * access handler of some kind.
4167 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4168 * accesses or is odd in any way.
4169 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4170 *
4171 * @param pVM Pointer to the cross context VM structure.
4172 * @param pVCpu Pointer to the cross context virtual CPU structure of
4173 * the calling EMT.
4174 * @param GCPhys The GC physical address to convert. This API masks the
4175 * A20 line when necessary.
4176 * @param fWritable Whether write access is required.
 * @param fByPassHandlers Whether to bypass access handlers.
4177 * @param ppv Where to store the pointer corresponding to GCPhys on
4178 * success.
4179 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
4180 *
4181 * @remarks This is more or less a copy of PGMR3PhysTlbGCPhys2Ptr.
4182 * @thread EMT(pVCpu).
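 *
 * @par Example
 *      Illustrative sketch of mapping a page for a direct write and releasing
 *      the lock again; the names and sizes are assumptions, and the access is
 *      assumed not to cross the page boundary:
 *      @code
 *          void           *pv;
 *          PGMPAGEMAPLOCK  Lock;
 *          int rc = PGMPhysIemGCPhys2Ptr(pVM, pVCpu, GCPhys, true /*fWritable*/,
 *                                        false /*fByPassHandlers*/, &pv, &Lock);
 *          if (rc == VINF_SUCCESS)
 *          {
 *              memcpy(pv, pvSrc, cbToWrite); // direct access, no handlers involved.
 *              PGMPhysReleasePageMappingLock(pVM, &Lock);
 *          }
 *          // VERR_PGM_PHYS_TLB_CATCH_WRITE / _CATCH_ALL: fall back to PGMPhysWrite.
 *      @endcode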
4183 */
4184VMM_INT_DECL(int) PGMPhysIemGCPhys2Ptr(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers,
4185 void **ppv, PPGMPAGEMAPLOCK pLock)
4186{
4187 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
4188
4189 pgmLock(pVM);
4190
4191 PPGMRAMRANGE pRam;
4192 PPGMPAGE pPage;
4193 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4194 if (RT_SUCCESS(rc))
4195 {
4196 if (PGM_PAGE_IS_BALLOONED(pPage))
4197 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4198 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4199 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4200 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4201 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4202 rc = VINF_SUCCESS;
4203 else
4204 {
4205 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4206 {
4207 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4208 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4209 }
4210 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4211 {
4212 Assert(!fByPassHandlers);
4213 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4214 }
4215 }
4216 if (RT_SUCCESS(rc))
4217 {
4218 int rc2;
4219
4220 /* Make sure what we return is writable. */
4221 if (fWritable)
4222 switch (PGM_PAGE_GET_STATE(pPage))
4223 {
4224 case PGM_PAGE_STATE_ALLOCATED:
4225 break;
4226 case PGM_PAGE_STATE_BALLOONED:
4227 AssertFailed();
4228 case PGM_PAGE_STATE_ZERO:
4229 case PGM_PAGE_STATE_SHARED:
4230 case PGM_PAGE_STATE_WRITE_MONITORED:
4231 rc2 = pgmPhysPageMakeWritable(pVM, pPage, GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK);
4232 AssertLogRelRCReturn(rc2, rc2);
4233 break;
4234 }
4235
4236#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
4237 void *pv;
4238 rc = pgmRZDynMapHCPageInlined(pVCpu,
4239 PGM_PAGE_GET_HCPHYS(pPage),
4240 &pv
4241 RTLOG_COMMA_SRC_POS);
4242 if (RT_FAILURE(rc))
4243 return rc;
4244 *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4245 pLock->pvPage = pv;
4246 pLock->pVCpu = pVCpu;
4247
4248#else
4249 /* Get a ring-3 mapping of the address. */
4250 PPGMPAGER3MAPTLBE pTlbe;
4251 rc2 = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
4252 AssertLogRelRCReturn(rc2, rc2);
4253
4254 /* Lock it and calculate the address. */
4255 if (fWritable)
4256 pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
4257 else
4258 pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
4259 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
4260#endif
4261
4262 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
4263 }
4264 else
4265 Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage]\n", GCPhys, rc, pPage));
4266
4267 /* else: handler catching all access, no pointer returned. */
4268 }
4269 else
4270 rc = VERR_PGM_PHYS_TLB_UNASSIGNED;
4271
4272 pgmUnlock(pVM);
4273 return rc;
4274}
4275
4276
4277/**
4278 * Checks if the given GCPhys page requires special handling for the given access
4279 * because it's MMIO or otherwise monitored.
4280 *
4281 * @returns VBox status code (no informational statuses).
4282 * @retval VINF_SUCCESS on success.
4283 * @retval VERR_PGM_PHYS_TLB_CATCH_WRITE and *ppv set if the page has a write
4284 * access handler of some kind.
4285 * @retval VERR_PGM_PHYS_TLB_CATCH_ALL if the page has a handler catching all
4286 * accesses or is odd in any way.
4287 * @retval VERR_PGM_PHYS_TLB_UNASSIGNED if the page doesn't exist.
4288 *
4289 * @param pVM Pointer to the VM.
4290 * @param GCPhys The GC physical address to convert. Since this is only
4291 * used for filling the REM TLB, the A20 mask must be
4292 * applied before calling this API.
4293 * @param fWritable Whether write access is required.
 * @param fByPassHandlers Whether to bypass access handlers.
4294 *
4295 * @remarks This is a watered down version of PGMPhysIemGCPhys2Ptr and really just
4296 * a stop gap thing that should be removed once there is a better TLB
4297 * for virtual address accesses.
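 *
 * @par Example
 *      Illustrative sketch only; the flag values are assumptions:
 *      @code
 *          int rc = PGMPhysIemQueryAccess(pVM, GCPhys, true /*fWritable*/, false /*fByPassHandlers*/);
 *          if (rc != VINF_SUCCESS)
 *          {
 *              // VERR_PGM_PHYS_TLB_CATCH_WRITE/_CATCH_ALL/_UNASSIGNED: don't put the
 *              // page into the TLB, take the slow path instead.
 *          }
 *      @endcode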
4298 */
4299VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVM pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers)
4300{
4301 pgmLock(pVM);
4302 PGM_A20_ASSERT_MASKED(VMMGetCpu(pVM), GCPhys);
4303
4304 PPGMRAMRANGE pRam;
4305 PPGMPAGE pPage;
4306 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
4307 if (RT_SUCCESS(rc))
4308 {
4309 if (PGM_PAGE_IS_BALLOONED(pPage))
4310 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4311 else if (PGM_PAGE_IS_SPECIAL_ALIAS_MMIO(pPage))
4312 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4313 else if ( !PGM_PAGE_HAS_ANY_HANDLERS(pPage)
4314 || (fByPassHandlers && !PGM_PAGE_IS_MMIO(pPage)) )
4315 rc = VINF_SUCCESS;
4316 else
4317 {
4318 if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) /* catches MMIO */
4319 {
4320 Assert(!fByPassHandlers || PGM_PAGE_IS_MMIO(pPage));
4321 rc = VERR_PGM_PHYS_TLB_CATCH_ALL;
4322 }
4323 else if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && fWritable)
4324 {
4325 Assert(!fByPassHandlers);
4326 rc = VERR_PGM_PHYS_TLB_CATCH_WRITE;
4327 }
4328 }
4329 }
4330
4331 pgmUnlock(pVM);
4332 return rc;
4333}
4334