VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp @ 25584

Last change on this file since 25584 was 25584, checked in by vboxsync, 15 years ago

Only use the physical address TLB in ring 0. (the REM already has a TLB of its own)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 120.9 KB
1/* $Id: PGMAllPhys.cpp 25584 2009-12-26 10:42:47Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#define LOG_GROUP LOG_GROUP_PGM_PHYS
26#include <VBox/pgm.h>
27#include <VBox/trpm.h>
28#include <VBox/vmm.h>
29#include <VBox/iom.h>
30#include <VBox/em.h>
31#include <VBox/rem.h>
32#include "PGMInternal.h"
33#include <VBox/vm.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/string.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#ifdef IN_RING3
41# include <iprt/thread.h>
42#endif
43
44
45
46#ifndef IN_RING3
47
48/**
49 * \#PF Handler callback for Guest ROM range write access.
50 * We simply ignore the writes or fall back to the recompiler if we don't support the instruction.
51 *
52 * @returns VBox status code (appropriate for trap handling and GC return).
53 * @param pVM VM Handle.
54 * @param uErrorCode CPU Error code.
55 * @param pRegFrame Trap register frame.
56 * @param pvFault The fault address (cr2).
57 * @param GCPhysFault The GC physical address corresponding to pvFault.
58 * @param pvUser User argument. Pointer to the ROM range structure.
59 */
60VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
61{
62 int rc;
63 PPGMROMRANGE pRom = (PPGMROMRANGE)pvUser;
64 uint32_t iPage = (GCPhysFault - pRom->GCPhys) >> PAGE_SHIFT;
65 PVMCPU pVCpu = VMMGetCpu(pVM);
66
67 Assert(iPage < (pRom->cb >> PAGE_SHIFT));
68 switch (pRom->aPages[iPage].enmProt)
69 {
70 case PGMROMPROT_READ_ROM_WRITE_IGNORE:
71 case PGMROMPROT_READ_RAM_WRITE_IGNORE:
72 {
73 /*
74 * If it's a simple instruction which doesn't change the cpu state
75 * we will simply skip it. Otherwise we'll have to defer it to REM.
76 */
77 uint32_t cbOp;
78 PDISCPUSTATE pDis = &pVCpu->pgm.s.DisState;
79 rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, pDis, &cbOp);
80 if ( RT_SUCCESS(rc)
81 && pDis->mode == CPUMODE_32BIT /** @todo why does this matter? */
82 && !(pDis->prefix & (PREFIX_REPNE | PREFIX_REP | PREFIX_SEG)))
83 {
84 switch (pDis->opcode)
85 {
86 /** @todo Find other instructions we can safely skip, possibly
87 * adding this kind of detection to DIS or EM. */
88 case OP_MOV:
89 pRegFrame->rip += cbOp;
90 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteHandled);
91 return VINF_SUCCESS;
92 }
93 }
94 else if (RT_UNLIKELY(rc == VERR_INTERNAL_ERROR))
95 return rc;
96 break;
97 }
98
99 case PGMROMPROT_READ_RAM_WRITE_RAM:
100 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
101 rc = PGMHandlerPhysicalPageTempOff(pVM, pRom->GCPhys, GCPhysFault & X86_PTE_PG_MASK);
102 AssertRC(rc);
103 break; /** @todo Must edit the shadow PT and restart the instruction, not use the interpreter! */
104
105 case PGMROMPROT_READ_ROM_WRITE_RAM:
106 /* Handle it in ring-3 because it's *way* easier there. */
107 pRom->aPages[iPage].LiveSave.fWrittenTo = true;
108 break;
109
110 default:
111 AssertMsgFailedReturn(("enmProt=%d iPage=%d GCPhysFault=%RGp\n",
112 pRom->aPages[iPage].enmProt, iPage, GCPhysFault),
113 VERR_INTERNAL_ERROR);
114 }
115
116 STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZGuestROMWriteUnhandled);
117 return VINF_EM_RAW_EMULATE_INSTR;
118}
119
120#endif /* !IN_RING3 */
121
122/**
123 * Checks if Address Gate 20 is enabled or not.
124 *
125 * @returns true if enabled.
126 * @returns false if disabled.
127 * @param pVCpu VMCPU handle.
128 */
129VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu)
130{
131 LogFlow(("PGMPhysIsA20Enabled %d\n", pVCpu->pgm.s.fA20Enabled));
132 return pVCpu->pgm.s.fA20Enabled;
133}
134
135
136/**
137 * Validates a GC physical address.
138 *
139 * @returns true if valid.
140 * @returns false if invalid.
141 * @param pVM The VM handle.
142 * @param GCPhys The physical address to validate.
143 */
144VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys)
145{
146 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
147 return pPage != NULL;
148}
149
150
151/**
152 * Checks if a GC physical address is a normal page,
153 * i.e. not ROM, MMIO or reserved.
154 *
155 * @returns true if normal.
156 * @returns false if invalid, ROM, MMIO or reserved page.
157 * @param pVM The VM handle.
158 * @param GCPhys The physical address to check.
159 */
160VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys)
161{
162 PPGMPAGE pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
163 return pPage
164 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM;
165}
166
167
168/**
169 * Converts a GC physical address to a HC physical address.
170 *
171 * @returns VINF_SUCCESS on success.
172 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
173 * page but has no physical backing.
174 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
175 * GC physical address.
176 *
177 * @param pVM The VM handle.
178 * @param GCPhys The GC physical address to convert.
179 * @param pHCPhys Where to store the HC physical address on success.
180 */
181VMMDECL(int) PGMPhysGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)
182{
183 pgmLock(pVM);
184 PPGMPAGE pPage;
185 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
186 if (RT_SUCCESS(rc))
187 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK);
188 pgmUnlock(pVM);
189 return rc;
190}
191
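/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * might use PGMPhysGCPhys2HCPhys and distinguish the status codes listed
 * above. The helper name is hypothetical; it is disabled with #if 0 so it
 * does not affect the build.
 */
#if 0
static int pgmPhysExampleLogHCPhys(PVM pVM, RTGCPHYS GCPhys)
{
    RTHCPHYS HCPhys;
    int rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys, &HCPhys);
    if (RT_SUCCESS(rc))
        Log(("example: GCPhys %RGp -> HCPhys %RHp\n", GCPhys, HCPhys));
    else if (rc == VERR_PGM_PHYS_PAGE_RESERVED)
        Log(("example: GCPhys %RGp is valid but has no physical backing\n", GCPhys));
    else /* VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS */
        Log(("example: GCPhys %RGp is not a valid guest physical address\n", GCPhys));
    return rc;
}
#endif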
192
193/**
194 * Invalidates all page mapping TLBs.
195 *
196 * @param pVM The VM handle.
197 */
198VMMDECL(void) PGMPhysInvalidatePageMapTLB(PVM pVM)
199{
200 pgmLock(pVM);
201 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushes);
202 /* Clear the shared R0/R3 TLB completely. */
203 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
204 {
205 pVM->pgm.s.PhysTlbHC.aEntries[i].GCPhys = NIL_RTGCPHYS;
206 pVM->pgm.s.PhysTlbHC.aEntries[i].pPage = 0;
207 pVM->pgm.s.PhysTlbHC.aEntries[i].pMap = 0;
208 pVM->pgm.s.PhysTlbHC.aEntries[i].pv = 0;
209 }
210 /* @todo clear the RC TLB whenever we add it. */
211 pgmUnlock(pVM);
212}
213
214/**
215 * Invalidates a page mapping TLB entry
216 *
217 * @param pVM The VM handle.
218 * @param GCPhys GCPhys entry to flush
219 */
220VMMDECL(void) PGMPhysInvalidatePageMapTLBEntry(PVM pVM, RTGCPHYS GCPhys)
221{
222 Assert(PGMIsLocked(pVM));
223
224 STAM_COUNTER_INC(&pVM->pgm.s.StatPageMapTlbFlushEntry);
225 /* Clear the shared R0/R3 TLB entry. */
226#ifdef IN_RC
227 unsigned idx = PGM_PAGER3MAPTLB_IDX(GCPhys);
228 pVM->pgm.s.PhysTlbHC.aEntries[idx].GCPhys = NIL_RTGCPHYS;
229 pVM->pgm.s.PhysTlbHC.aEntries[idx].pPage = 0;
230 pVM->pgm.s.PhysTlbHC.aEntries[idx].pMap = 0;
231 pVM->pgm.s.PhysTlbHC.aEntries[idx].pv = 0;
232#else
233 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
234 pTlbe->GCPhys = NIL_RTGCPHYS;
235 pTlbe->pPage = 0;
236 pTlbe->pMap = 0;
237 pTlbe->pv = 0;
238#endif
239 /* @todo clear the RC TLB whenever we add it. */
240}
241
242/**
243 * Makes sure that there is at least one handy page ready for use.
244 *
245 * This will also take the appropriate actions when reaching water-marks.
246 *
247 * @returns VBox status code.
248 * @retval VINF_SUCCESS on success.
249 * @retval VERR_EM_NO_MEMORY if we're really out of memory.
250 *
251 * @param pVM The VM handle.
252 *
253 * @remarks Must be called from within the PGM critical section. It may
254 * nip back to ring-3/0 in some cases.
255 */
256static int pgmPhysEnsureHandyPage(PVM pVM)
257{
258 AssertMsg(pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", pVM->pgm.s.cHandyPages));
259
260 /*
261 * Do we need to do anything special?
262 */
263#ifdef IN_RING3
264 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_R3_ALLOC))
265#else
266 if (pVM->pgm.s.cHandyPages <= RT_MAX(PGM_HANDY_PAGES_SET_FF, PGM_HANDY_PAGES_RZ_TO_R3))
267#endif
268 {
269 /*
270 * Allocate pages only if we're out of them, or in ring-3, almost out.
271 */
272#ifdef IN_RING3
273 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_R3_ALLOC)
274#else
275 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_ALLOC)
276#endif
277 {
278 Log(("PGM: cHandyPages=%u out of %u -> allocate more; VM_FF_PGM_NO_MEMORY=%RTbool\n",
279 pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages), VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY) ));
280#ifdef IN_RING3
281 int rc = PGMR3PhysAllocateHandyPages(pVM);
282#else
283 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES, 0);
284#endif
285 if (RT_UNLIKELY(rc != VINF_SUCCESS))
286 {
287 if (RT_FAILURE(rc))
288 return rc;
289 AssertMsgReturn(rc == VINF_EM_NO_MEMORY, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
290 if (!pVM->pgm.s.cHandyPages)
291 {
292 LogRel(("PGM: no more handy pages!\n"));
293 return VERR_EM_NO_MEMORY;
294 }
295 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES));
296 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_NO_MEMORY));
297#ifdef IN_RING3
298 REMR3NotifyFF(pVM);
299#else
300 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3); /* paranoia */
301#endif
302 }
303 AssertMsgReturn( pVM->pgm.s.cHandyPages > 0
304 && pVM->pgm.s.cHandyPages <= RT_ELEMENTS(pVM->pgm.s.aHandyPages),
305 ("%u\n", pVM->pgm.s.cHandyPages),
306 VERR_INTERNAL_ERROR);
307 }
308 else
309 {
310 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_SET_FF)
311 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
312#ifndef IN_RING3
313 if (pVM->pgm.s.cHandyPages <= PGM_HANDY_PAGES_RZ_TO_R3)
314 {
315 Log(("PGM: VM_FF_TO_R3 - cHandyPages=%u out of %u\n", pVM->pgm.s.cHandyPages, RT_ELEMENTS(pVM->pgm.s.aHandyPages)));
316 VMCPU_FF_SET(VMMGetCpu(pVM), VMCPU_FF_TO_R3);
317 }
318#endif
319 }
320 }
321
322 return VINF_SUCCESS;
323}
324
325
326/**
327 * Replace a zero or shared page with a new page that we can write to.
328 *
329 * @returns The following VBox status codes.
330 * @retval VINF_SUCCESS on success, pPage is modified.
331 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
332 * @retval VERR_EM_NO_MEMORY if we're totally out of memory.
333 *
334 * @todo Propagate VERR_EM_NO_MEMORY up the call tree.
335 *
336 * @param pVM The VM address.
337 * @param pPage The physical page tracking structure. This will
338 * be modified on success.
339 * @param GCPhys The address of the page.
340 *
341 * @remarks Must be called from within the PGM critical section. It may
342 * nip back to ring-3/0 in some cases.
343 *
344 * @remarks This function shouldn't really fail, however if it does
345 * it probably means we've screwed up the size of handy pages and/or
346 * the low-water mark. Or, that some device I/O is causing a lot of
347 * pages to be allocated while the host is in a low-memory
348 * condition. This latter should be handled elsewhere and in a more
349 * controlled manner, it's on the @bugref{3170} todo list...
350 */
351int pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
352{
353 LogFlow(("pgmPhysAllocPage: %R[pgmpage] %RGp\n", pPage, GCPhys));
354
355 /*
356 * Prereqs.
357 */
358 Assert(PGMIsLocked(pVM));
359 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
360 Assert(!PGM_PAGE_IS_MMIO(pPage));
361
362
363 /*
364 * Flush any shadow page table mappings of the page.
365 * When VBOX_WITH_NEW_LAZY_PAGE_ALLOC isn't defined, there shouldn't be any.
366 */
367 bool fFlushTLBs = false;
368 int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
369 AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
370
371 /*
372 * Ensure that we've got a page handy, take it and use it.
373 */
374 int rc2 = pgmPhysEnsureHandyPage(pVM);
375 if (RT_FAILURE(rc2))
376 {
377 if (fFlushTLBs)
378 PGM_INVL_ALL_VCPU_TLBS(pVM);
379 Assert(rc2 == VERR_EM_NO_MEMORY);
380 return rc2;
381 }
382 /* re-assert preconditions since pgmPhysEnsureHandyPage may do a context switch. */
383 Assert(PGMIsLocked(pVM));
384 AssertMsg(PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_SHARED(pPage), ("%R[pgmpage] %RGp\n", pPage, GCPhys));
385 Assert(!PGM_PAGE_IS_MMIO(pPage));
386
387 uint32_t iHandyPage = --pVM->pgm.s.cHandyPages;
388 AssertMsg(iHandyPage < RT_ELEMENTS(pVM->pgm.s.aHandyPages), ("%d\n", iHandyPage));
389 Assert(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys != NIL_RTHCPHYS);
390 Assert(!(pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys & ~X86_PTE_PAE_PG_MASK));
391 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idPage != NIL_GMM_PAGEID);
392 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
393
394 /*
395 * There are one or two actions to be taken the next time we allocate handy pages:
396 * - Tell the GMM (global memory manager) what the page is being used for.
397 * (Speeds up replacement operations - sharing and defragmenting.)
398 * - If the current backing is shared, it must be freed.
399 */
400 const RTHCPHYS HCPhys = pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys;
401 pVM->pgm.s.aHandyPages[iHandyPage].HCPhysGCPhys = GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
402
403 if (PGM_PAGE_IS_SHARED(pPage))
404 {
405 pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage = PGM_PAGE_GET_PAGEID(pPage);
406 Assert(PGM_PAGE_GET_PAGEID(pPage) != NIL_GMM_PAGEID);
407 VM_FF_SET(pVM, VM_FF_PGM_NEED_HANDY_PAGES);
408
409 Log2(("PGM: Replaced shared page %#x at %RGp with %#x / %RHp\n", PGM_PAGE_GET_PAGEID(pPage),
410 GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
411 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PageReplaceShared));
412 pVM->pgm.s.cSharedPages--;
413 AssertMsgFailed(("TODO: copy shared page content")); /** @todo err.. what about copying the page content? */
414 }
415 else
416 {
417 Log2(("PGM: Replaced zero page %RGp with %#x / %RHp\n", GCPhys, pVM->pgm.s.aHandyPages[iHandyPage].idPage, HCPhys));
418 STAM_COUNTER_INC(&pVM->pgm.s.StatRZPageReplaceZero);
419 pVM->pgm.s.cZeroPages--;
420 Assert(pVM->pgm.s.aHandyPages[iHandyPage].idSharedPage == NIL_GMM_PAGEID);
421 }
422
423 /*
424 * Do the PGMPAGE modifications.
425 */
426 pVM->pgm.s.cPrivatePages++;
427 PGM_PAGE_SET_HCPHYS(pPage, HCPhys);
428 PGM_PAGE_SET_PAGEID(pPage, pVM->pgm.s.aHandyPages[iHandyPage].idPage);
429 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
430 PGMPhysInvalidatePageMapTLBEntry(pVM, GCPhys);
431
432 if ( fFlushTLBs
433 && rc != VINF_PGM_GCPHYS_ALIASED)
434 PGM_INVL_ALL_VCPU_TLBS(pVM);
435 return rc;
436}
437
438
439/**
440 * Deal with a write monitored page.
441 *
442 * @returns VBox strict status code.
443 *
444 * @param pVM The VM address.
445 * @param pPage The physical page tracking structure.
446 *
447 * @remarks Called from within the PGM critical section.
448 */
449void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage)
450{
451 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED);
452 PGM_PAGE_SET_WRITTEN_TO(pPage);
453 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
454 Assert(pVM->pgm.s.cMonitoredPages > 0);
455 pVM->pgm.s.cMonitoredPages--;
456 pVM->pgm.s.cWrittenToPages++;
457}
458
459
460/**
461 * Deal with pages that are not writable, i.e. not in the ALLOCATED state.
462 *
463 * @returns VBox strict status code.
464 * @retval VINF_SUCCESS on success.
465 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
466 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
467 *
468 * @param pVM The VM address.
469 * @param pPage The physical page tracking structure.
470 * @param GCPhys The address of the page.
471 *
472 * @remarks Called from within the PGM critical section.
473 */
474int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
475{
476 switch (PGM_PAGE_GET_STATE(pPage))
477 {
478 case PGM_PAGE_STATE_WRITE_MONITORED:
479 pgmPhysPageMakeWriteMonitoredWritable(pVM, pPage);
480 /* fall thru */
481 default: /* to shut up GCC */
482 case PGM_PAGE_STATE_ALLOCATED:
483 return VINF_SUCCESS;
484
485 /*
486 * Zero pages can be dummy pages for MMIO or reserved memory,
487 * so we need to check the flags before joining cause with
488 * shared page replacement.
489 */
490 case PGM_PAGE_STATE_ZERO:
491 if (PGM_PAGE_IS_MMIO(pPage))
492 return VERR_PGM_PHYS_PAGE_RESERVED;
493 /* fall thru */
494 case PGM_PAGE_STATE_SHARED:
495 return pgmPhysAllocPage(pVM, pPage, GCPhys);
496 }
497}
498
499
500/**
501 * Wrapper for pgmPhysPageMakeWritable which enters the critsect.
502 *
503 * @returns VBox strict status code.
504 * @retval VINF_SUCCESS on success.
505 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
506 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
507 *
508 * @param pVM The VM address.
509 * @param pPage The physical page tracking structure.
510 * @param GCPhys The address of the page.
511 */
512int pgmPhysPageMakeWritableUnlocked(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys)
513{
514 int rc = pgmLock(pVM);
515 if (RT_SUCCESS(rc))
516 {
517 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
518 pgmUnlock(pVM);
519 }
520 return rc;
521}
522
523
524/**
525 * Internal usage: Map the page specified by its GMM ID.
526 *
527 * This is similar to pgmPhysPageMap
528 *
529 * @returns VBox status code.
530 *
531 * @param pVM The VM handle.
532 * @param idPage The Page ID.
533 * @param HCPhys The physical address (for RC).
534 * @param ppv Where to store the mapping address.
535 *
536 * @remarks Called from within the PGM critical section. The mapping is only
537 * valid while you're inside this section.
538 */
539int pgmPhysPageMapByPageID(PVM pVM, uint32_t idPage, RTHCPHYS HCPhys, void **ppv)
540{
541 /*
542 * Validation.
543 */
544 Assert(PGMIsLocked(pVM));
545 AssertReturn(HCPhys && !(HCPhys & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
546 const uint32_t idChunk = idPage >> GMM_CHUNKID_SHIFT;
547 AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
548
549#ifdef IN_RC
550 /*
551 * Map it by HCPhys.
552 */
553 return PGMDynMapHCPage(pVM, HCPhys, ppv);
554
555#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
556 /*
557 * Map it by HCPhys.
558 */
559 return pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
560
561#else
562 /*
563 * Find/make Chunk TLB entry for the mapping chunk.
564 */
565 PPGMCHUNKR3MAP pMap;
566 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
567 if (pTlbe->idChunk == idChunk)
568 {
569 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
570 pMap = pTlbe->pChunk;
571 }
572 else
573 {
574 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
575
576 /*
577 * Find the chunk, map it if necessary.
578 */
579 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
580 if (!pMap)
581 {
582# ifdef IN_RING0
583 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
584 AssertRCReturn(rc, rc);
585 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
586 Assert(pMap);
587# else
588 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
589 if (RT_FAILURE(rc))
590 return rc;
591# endif
592 }
593
594 /*
595 * Enter it into the Chunk TLB.
596 */
597 pTlbe->idChunk = idChunk;
598 pTlbe->pChunk = pMap;
599 pMap->iAge = 0;
600 }
601
602 *ppv = (uint8_t *)pMap->pv + ((idPage & GMM_PAGEID_IDX_MASK) << PAGE_SHIFT);
603 return VINF_SUCCESS;
604#endif
605}
606
607
608/**
609 * Maps a page into the current virtual address space so it can be accessed.
610 *
611 * @returns VBox status code.
612 * @retval VINF_SUCCESS on success.
613 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
614 *
615 * @param pVM The VM address.
616 * @param pPage The physical page tracking structure.
617 * @param GCPhys The address of the page.
618 * @param ppMap Where to store the address of the mapping tracking structure.
619 * @param ppv Where to store the mapping address of the page. The page
620 * offset is masked off!
621 *
622 * @remarks Called from within the PGM critical section.
623 */
624static int pgmPhysPageMapCommon(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAP ppMap, void **ppv)
625{
626 Assert(PGMIsLocked(pVM));
627
628#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
629 /*
630 * Just some sketchy GC/R0-darwin code.
631 */
632 *ppMap = NULL;
633 RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
634 Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
635# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
636 pgmR0DynMapHCPageInlined(&pVM->pgm.s, HCPhys, ppv);
637# else
638 PGMDynMapHCPage(pVM, HCPhys, ppv);
639# endif
640 return VINF_SUCCESS;
641
642#else /* IN_RING3 || IN_RING0 */
643
644
645 /*
646 * Special case: ZERO and MMIO2 pages.
647 */
648 const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
649 if (idChunk == NIL_GMM_CHUNKID)
650 {
651 AssertMsgReturn(PGM_PAGE_GET_PAGEID(pPage) == NIL_GMM_PAGEID, ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
652 if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2)
653 {
654 /* Lookup the MMIO2 range and use pvR3 to calc the address. */
655 PPGMRAMRANGE pRam = pgmPhysGetRange(&pVM->pgm.s, GCPhys);
656 AssertMsgReturn(pRam && pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2);
657 *ppv = (void *)((uintptr_t)pRam->pvR3 + (GCPhys - pRam->GCPhys));
658 }
659 else if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO2_ALIAS_MMIO)
660 {
661 /** @todo deal with aliased MMIO2 pages somehow...
662 * One solution would be to seed MMIO2 pages to GMM and get unique Page IDs for
663 * them, that would also avoid this mess. It would actually be kind of
664 * elegant... */
665 AssertLogRelMsgFailedReturn(("%RGp\n", GCPhys), VERR_INTERNAL_ERROR_3);
666 }
667 else
668 {
669 /** @todo handle MMIO2 */
670 AssertMsgReturn(PGM_PAGE_IS_ZERO(pPage), ("pPage=%R[pgmpage]\n", pPage), VERR_INTERNAL_ERROR_2);
671 AssertMsgReturn(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg,
672 ("pPage=%R[pgmpage]\n", pPage),
673 VERR_INTERNAL_ERROR_2);
674 *ppv = pVM->pgm.s.CTXALLSUFF(pvZeroPg);
675 }
676 *ppMap = NULL;
677 return VINF_SUCCESS;
678 }
679
680 /*
681 * Find/make Chunk TLB entry for the mapping chunk.
682 */
683 PPGMCHUNKR3MAP pMap;
684 PPGMCHUNKR3MAPTLBE pTlbe = &pVM->pgm.s.ChunkR3Map.Tlb.aEntries[PGM_CHUNKR3MAPTLB_IDX(idChunk)];
685 if (pTlbe->idChunk == idChunk)
686 {
687 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbHits));
688 pMap = pTlbe->pChunk;
689 }
690 else
691 {
692 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,ChunkR3MapTlbMisses));
693
694 /*
695 * Find the chunk, map it if necessary.
696 */
697 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
698 if (!pMap)
699 {
700#ifdef IN_RING0
701 int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_MAP_CHUNK, idChunk);
702 AssertRCReturn(rc, rc);
703 pMap = (PPGMCHUNKR3MAP)RTAvlU32Get(&pVM->pgm.s.ChunkR3Map.pTree, idChunk);
704 Assert(pMap);
705#else
706 int rc = pgmR3PhysChunkMap(pVM, idChunk, &pMap);
707 if (RT_FAILURE(rc))
708 return rc;
709#endif
710 }
711
712 /*
713 * Enter it into the Chunk TLB.
714 */
715 pTlbe->idChunk = idChunk;
716 pTlbe->pChunk = pMap;
717 pMap->iAge = 0;
718 }
719
720 *ppv = (uint8_t *)pMap->pv + (PGM_PAGE_GET_PAGE_IN_CHUNK(pPage) << PAGE_SHIFT);
721 *ppMap = pMap;
722 return VINF_SUCCESS;
723#endif /* IN_RING3 */
724}
725
726
727/**
728 * Combination of pgmPhysPageMakeWritable and pgmPhysPageMapWritable.
729 *
730 * This is typically used in paths where we cannot use the TLB methods (like ROM
731 * pages) or where there is no point in using them since we won't get many hits.
732 *
733 * @returns VBox strict status code.
734 * @retval VINF_SUCCESS on success.
735 * @retval VINF_PGM_SYNC_CR3 on success and a page pool flush is pending.
736 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
737 *
738 * @param pVM The VM address.
739 * @param pPage The physical page tracking structure.
740 * @param GCPhys The address of the page.
741 * @param ppv Where to store the mapping address of the page. The page
742 * offset is masked off!
743 *
744 * @remarks Called from within the PGM critical section. The mapping is only
745 * valid while you're inside this section.
746 */
747int pgmPhysPageMakeWritableAndMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
748{
749 int rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
750 if (RT_SUCCESS(rc))
751 {
752 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* returned */, ("%Rrc\n", rc));
753 PPGMPAGEMAP pMapIgnore;
754 int rc2 = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
755 if (RT_FAILURE(rc2)) /* preserve rc */
756 rc = rc2;
757 }
758 return rc;
759}
760
761
762/**
763 * Maps a page into the current virtual address space so it can be accessed for
764 * both writing and reading.
765 *
766 * This is typically used in paths where we cannot use the TLB methods (like ROM
767 * pages) or where there is no point in using them since we won't get many hits.
768 *
769 * @returns VBox status code.
770 * @retval VINF_SUCCESS on success.
771 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
772 *
773 * @param pVM The VM address.
774 * @param pPage The physical page tracking structure. Must be in the
775 * allocated state.
776 * @param GCPhys The address of the page.
777 * @param ppv Where to store the mapping address of the page. The page
778 * offset is masked off!
779 *
780 * @remarks Called from within the PGM critical section. The mapping is only
781 * valid while you're inside this section.
782 */
783int pgmPhysPageMap(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
784{
785 Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
786 PPGMPAGEMAP pMapIgnore;
787 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, ppv);
788}
789
790
791/**
792 * Maps a page into the current virtual address space so it can be accessed for
793 * reading.
794 *
795 * This is typically used in paths where we cannot use the TLB methods (like ROM
796 * pages) or where there is no point in using them since we won't get many hits.
797 *
798 * @returns VBox status code.
799 * @retval VINF_SUCCESS on success.
800 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
801 *
802 * @param pVM The VM address.
803 * @param pPage The physical page tracking structure.
804 * @param GCPhys The address of the page.
805 * @param ppv Where to store the mapping address of the page. The page
806 * offset is masked off!
807 *
808 * @remarks Called from within the PGM critical section. The mapping is only
809 * valid while you're inside this section.
810 */
811int pgmPhysPageMapReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const **ppv)
812{
813 PPGMPAGEMAP pMapIgnore;
814 return pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMapIgnore, (void **)ppv);
815}
816
817
818#if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
819/**
820 * Load a guest page into the ring-3 physical TLB.
821 *
822 * @returns VBox status code.
823 * @retval VINF_SUCCESS on success
824 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
825 * @param pPGM The PGM instance pointer.
826 * @param GCPhys The guest physical address in question.
827 */
828int pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys)
829{
830 Assert(PGMIsLocked(PGM2VM(pPGM)));
831 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
832
833 /*
834 * Find the ram range.
835 * 99.8% of requests are expected to be in the first range.
836 */
837 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges);
838 RTGCPHYS off = GCPhys - pRam->GCPhys;
839 if (RT_UNLIKELY(off >= pRam->cb))
840 {
841 do
842 {
843 pRam = pRam->CTX_SUFF(pNext);
844 if (!pRam)
845 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
846 off = GCPhys - pRam->GCPhys;
847 } while (off >= pRam->cb);
848 }
849
850 /*
851 * Map the page.
852 * Make a special case for the zero page as it is kind of special.
853 */
854 PPGMPAGE pPage = &pRam->aPages[off >> PAGE_SHIFT];
855 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
856 if (!PGM_PAGE_IS_ZERO(pPage))
857 {
858 void *pv;
859 PPGMPAGEMAP pMap;
860 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
861 if (RT_FAILURE(rc))
862 return rc;
863 pTlbe->pMap = pMap;
864 pTlbe->pv = pv;
865 }
866 else
867 {
868 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
869 pTlbe->pMap = NULL;
870 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
871 }
872#ifdef IN_RING0
873 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
874#else
875 /* REM already has a TLB of its own; no point in having two
876 * and keeping both in sync will eliminate any benefit there might be.
877 */
878#endif
879 pTlbe->pPage = pPage;
880 return VINF_SUCCESS;
881}
882
883
884/**
885 * Load a guest page into the ring-3 physical TLB.
886 *
887 * @returns VBox status code.
888 * @retval VINF_SUCCESS on success
889 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
890 *
891 * @param pPGM The PGM instance pointer.
892 * @param pPage Pointer to the PGMPAGE structure corresponding to
893 * GCPhys.
894 * @param GCPhys The guest physical address in question.
895 */
896int pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)
897{
898 Assert(PGMIsLocked(PGM2VM(pPGM)));
899 STAM_COUNTER_INC(&pPGM->CTX_MID_Z(Stat,PageMapTlbMisses));
900
901 /*
902 * Map the page.
903 * Make a special case for the zero page as it is kind of special.
904 */
905 PPGMPAGEMAPTLBE pTlbe = &pPGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];
906 if (!PGM_PAGE_IS_ZERO(pPage))
907 {
908 void *pv;
909 PPGMPAGEMAP pMap;
910 int rc = pgmPhysPageMapCommon(PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);
911 if (RT_FAILURE(rc))
912 return rc;
913 pTlbe->pMap = pMap;
914 pTlbe->pv = pv;
915 }
916 else
917 {
918 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pPGM->HCPhysZeroPg);
919 pTlbe->pMap = NULL;
920 pTlbe->pv = pPGM->CTXALLSUFF(pvZeroPg);
921 }
922#ifdef IN_RING0
923 pTlbe->GCPhys = (GCPhys & X86_PTE_PAE_PG_MASK);
924#else
925 /* REM already has a TLB of its own; no point in having two
926 * and keeping both in sync will eliminate any benefit there might be.
927 */
928#endif
929 pTlbe->pPage = pPage;
930 return VINF_SUCCESS;
931}
932#endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
933
934
935/**
936 * Internal version of PGMPhysGCPhys2CCPtr that expects the caller to
937 * own the PGM lock and therefore does not need to lock the mapped page.
938 *
939 * @returns VBox status code.
940 * @retval VINF_SUCCESS on success.
941 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
942 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
943 *
944 * @param pVM The VM handle.
945 * @param GCPhys The guest physical address of the page that should be mapped.
946 * @param pPage Pointer to the PGMPAGE structure for the page.
947 * @param ppv Where to store the address corresponding to GCPhys.
948 *
949 * @internal
950 */
951int pgmPhysGCPhys2CCPtrInternal(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void **ppv)
952{
953 int rc;
954 AssertReturn(pPage, VERR_INTERNAL_ERROR);
955 Assert(PGMIsLocked(pVM));
956
957 /*
958 * Make sure the page is writable.
959 */
960 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
961 {
962 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
963 if (RT_FAILURE(rc))
964 return rc;
965 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
966 }
967 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
968
969 /*
970 * Get the mapping address.
971 */
972#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
973 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK));
974#else
975 PPGMPAGEMAPTLBE pTlbe;
976 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
977 if (RT_FAILURE(rc))
978 return rc;
979 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
980#endif
981 return VINF_SUCCESS;
982}
983
984
985/**
986 * Internal version of PGMPhysGCPhys2CCPtrReadOnly that expects the caller to
987 * own the PGM lock and therefore does not need to lock the mapped page.
988 *
989 * @returns VBox status code.
990 * @retval VINF_SUCCESS on success.
991 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
992 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
993 *
994 * @param pVM The VM handle.
995 * @param GCPhys The guest physical address of the page that should be mapped.
996 * @param pPage Pointer to the PGMPAGE structure for the page.
997 * @param ppv Where to store the address corresponding to GCPhys.
998 *
999 * @internal
1000 */
1001int pgmPhysGCPhys2CCPtrInternalReadOnly(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, const void **ppv)
1002{
1003 AssertReturn(pPage, VERR_INTERNAL_ERROR);
1004 Assert(PGMIsLocked(pVM));
1005 Assert(PGM_PAGE_GET_HCPHYS(pPage) != 0);
1006
1007 /*
1008 * Get the mapping address.
1009 */
1010#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1011 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1012#else
1013 PPGMPAGEMAPTLBE pTlbe;
1014 int rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1015 if (RT_FAILURE(rc))
1016 return rc;
1017 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1018#endif
1019 return VINF_SUCCESS;
1020}
1021
1022
1023/**
1024 * Requests the mapping of a guest page into the current context.
1025 *
1026 * This API should only be used for very short-term access, as it will consume
1027 * scarce resources (R0 and GC) in the mapping cache. When you're done
1028 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1029 *
1030 * This API will assume your intention is to write to the page, and will
1031 * therefore replace shared and zero pages. If you do not intend to modify
1032 * the page, use the PGMPhysGCPhys2CCPtrReadOnly() API.
1033 *
1034 * @returns VBox status code.
1035 * @retval VINF_SUCCESS on success.
1036 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1037 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1038 *
1039 * @param pVM The VM handle.
1040 * @param GCPhys The guest physical address of the page that should be mapped.
1041 * @param ppv Where to store the address corresponding to GCPhys.
1042 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1043 *
1044 * @remarks The caller is responsible for dealing with access handlers.
1045 * @todo Add an informational return code for pages with access handlers?
1046 *
1047 * @remark Avoid calling this API from within critical sections (other than the
1048 * PGM one) because of the deadlock risk. External threads may need to
1049 * delegate jobs to the EMTs.
1050 * @thread Any thread.
1051 */
1052VMMDECL(int) PGMPhysGCPhys2CCPtr(PVM pVM, RTGCPHYS GCPhys, void **ppv, PPGMPAGEMAPLOCK pLock)
1053{
1054#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1055
1056 /*
1057 * Find the page and make sure it's writable.
1058 */
1059 PPGMPAGE pPage;
1060 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1061 if (RT_SUCCESS(rc))
1062 {
1063 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1064 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1065 if (RT_SUCCESS(rc))
1066 {
1067 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1068# if 0
1069 pLock->pvMap = 0;
1070 pLock->pvPage = pPage;
1071# else
1072 pLock->u32Dummy = UINT32_MAX;
1073# endif
1074 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1075 rc = VINF_SUCCESS;
1076 }
1077 }
1078
1079#else /* IN_RING3 || IN_RING0 */
1080 int rc = pgmLock(pVM);
1081 AssertRCReturn(rc, rc);
1082
1083 /*
1084 * Query the Physical TLB entry for the page (may fail).
1085 */
1086 PPGMPAGEMAPTLBE pTlbe;
1087 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1088 if (RT_SUCCESS(rc))
1089 {
1090 /*
1091 * If the page is shared, the zero page, or being write monitored
1092 * it must be converted to a page that's writable if possible.
1093 */
1094 PPGMPAGE pPage = pTlbe->pPage;
1095 if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
1096 {
1097 rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
1098 if (RT_SUCCESS(rc))
1099 {
1100 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1101 rc = pgmPhysPageQueryTlbeWithPage(&pVM->pgm.s, pPage, GCPhys, &pTlbe);
1102 }
1103 }
1104 if (RT_SUCCESS(rc))
1105 {
1106 /*
1107 * Now, just perform the locking and calculate the return address.
1108 */
1109 PPGMPAGEMAP pMap = pTlbe->pMap;
1110 if (pMap)
1111 pMap->cRefs++;
1112
1113 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1114 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1115 {
1116 if (cLocks == 0)
1117 pVM->pgm.s.cWriteLockedPages++;
1118 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1119 }
1120 else if (cLocks != PGM_PAGE_GET_WRITE_LOCKS(pPage))
1121 {
1122 PGM_PAGE_INC_WRITE_LOCKS(pPage);
1123 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent write locked state!\n", GCPhys, pPage));
1124 if (pMap)
1125 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1126 }
1127
1128 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1129 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_WRITE;
1130 pLock->pvMap = pMap;
1131 }
1132 }
1133
1134 pgmUnlock(pVM);
1135#endif /* IN_RING3 || IN_RING0 */
1136 return rc;
1137}
1138
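/*
 * Illustrative usage sketch (not part of the original file): the intended
 * map / touch / release-ASAP pattern for PGMPhysGCPhys2CCPtr together with
 * PGMPhysReleasePageMappingLock. The helper name and parameters are
 * hypothetical; it is disabled with #if 0 so it does not affect the build.
 */
#if 0
static int pgmPhysExampleWriteByte(PVM pVM, RTGCPHYS GCPhys, uint8_t bValue)
{
    void           *pv;
    PGMPAGEMAPLOCK  Lock;
    int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pv, &Lock);
    if (RT_SUCCESS(rc))
    {
        *(uint8_t *)pv = bValue;                    /* mapping is writable; shared/zero pages were replaced */
        PGMPhysReleasePageMappingLock(pVM, &Lock);  /* release the scarce mapping resource ASAP */
    }
    return rc;
}
#endif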
1139
1140/**
1141 * Requests the mapping of a guest page into the current context.
1142 *
1143 * This API should only be used for very short-term access, as it will consume
1144 * scarce resources (R0 and GC) in the mapping cache. When you're done
1145 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1146 *
1147 * @returns VBox status code.
1148 * @retval VINF_SUCCESS on success.
1149 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1150 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1151 *
1152 * @param pVM The VM handle.
1153 * @param GCPhys The guest physical address of the page that should be mapped.
1154 * @param ppv Where to store the address corresponding to GCPhys.
1155 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1156 *
1157 * @remarks The caller is responsible for dealing with access handlers.
1158 * @todo Add an informational return code for pages with access handlers?
1159 *
1160 * @remark Avoid calling this API from within critical sections (other than
1161 * the PGM one) because of the deadlock risk.
1162 * @thread Any thread.
1163 */
1164VMMDECL(int) PGMPhysGCPhys2CCPtrReadOnly(PVM pVM, RTGCPHYS GCPhys, void const **ppv, PPGMPAGEMAPLOCK pLock)
1165{
1166#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1167
1168 /*
1169 * Find the page and make sure it's readable.
1170 */
1171 PPGMPAGE pPage;
1172 int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
1173 if (RT_SUCCESS(rc))
1174 {
1175 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1176 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1177 else
1178 {
1179 *ppv = pgmDynMapHCPageOff(&pVM->pgm.s, PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK)); /** @todo add a read only flag? */
1180# if 0
1181 pLock->pvMap = 0;
1182 pLock->pvPage = pPage;
1183# else
1184 pLock->u32Dummy = UINT32_MAX;
1185# endif
1186 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
1187 rc = VINF_SUCCESS;
1188 }
1189 }
1190
1191#else /* IN_RING3 || IN_RING0 */
1192 int rc = pgmLock(pVM);
1193 AssertRCReturn(rc, rc);
1194
1195 /*
1196 * Query the Physical TLB entry for the page (may fail).
1197 */
1198 PPGMPAGEMAPTLBE pTlbe;
1199 rc = pgmPhysPageQueryTlbe(&pVM->pgm.s, GCPhys, &pTlbe);
1200 if (RT_SUCCESS(rc))
1201 {
1202 /* MMIO pages don't have any readable backing. */
1203 PPGMPAGE pPage = pTlbe->pPage;
1204 if (RT_UNLIKELY(PGM_PAGE_IS_MMIO(pPage)))
1205 rc = VERR_PGM_PHYS_PAGE_RESERVED;
1206 else
1207 {
1208 /*
1209 * Now, just perform the locking and calculate the return address.
1210 */
1211 PPGMPAGEMAP pMap = pTlbe->pMap;
1212 if (pMap)
1213 pMap->cRefs++;
1214
1215 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1216 if (RT_LIKELY(cLocks < PGM_PAGE_MAX_LOCKS - 1))
1217 {
1218 if (cLocks == 0)
1219 pVM->pgm.s.cReadLockedPages++;
1220 PGM_PAGE_INC_READ_LOCKS(pPage);
1221 }
1222 else if (cLocks != PGM_PAGE_GET_READ_LOCKS(pPage))
1223 {
1224 PGM_PAGE_INC_READ_LOCKS(pPage);
1225 AssertMsgFailed(("%RGp / %R[pgmpage] is entering permanent readonly locked state!\n", GCPhys, pPage));
1226 if (pMap)
1227 pMap->cRefs++; /* Extra ref to prevent it from going away. */
1228 }
1229
1230 *ppv = (void *)((uintptr_t)pTlbe->pv | (GCPhys & PAGE_OFFSET_MASK));
1231 pLock->uPageAndType = (uintptr_t)pPage | PGMPAGEMAPLOCK_TYPE_READ;
1232 pLock->pvMap = pMap;
1233 }
1234 }
1235
1236 pgmUnlock(pVM);
1237#endif /* IN_RING3 || IN_RING0 */
1238 return rc;
1239}
1240
1241
1242/**
1243 * Requests the mapping of a guest page given by virtual address into the current context.
1244 *
1245 * This API should only be used for very short-term access, as it will consume
1246 * scarce resources (R0 and GC) in the mapping cache. When you're done
1247 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1248 *
1249 * This API will assume your intention is to write to the page, and will
1250 * therefore replace shared and zero pages. If you do not intend to modify
1251 * the page, use the PGMPhysGCPtr2CCPtrReadOnly() API.
1252 *
1253 * @returns VBox status code.
1254 * @retval VINF_SUCCESS on success.
1255 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1256 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1257 * @retval VERR_PGM_PHYS_PAGE_RESERVED if it's a valid page but has no physical backing.
1258 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1259 *
1260 * @param pVCpu VMCPU handle.
1261 * @param GCPtr The guest virtual address of the page that should be mapped.
1262 * @param ppv Where to store the address corresponding to GCPhys.
1263 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1264 *
1265 * @remark Avoid calling this API from within critical sections (other than
1266 * the PGM one) because of the deadlock risk.
1267 * @thread EMT
1268 */
1269VMMDECL(int) PGMPhysGCPtr2CCPtr(PVMCPU pVCpu, RTGCPTR GCPtr, void **ppv, PPGMPAGEMAPLOCK pLock)
1270{
1271 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1272 RTGCPHYS GCPhys;
1273 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1274 if (RT_SUCCESS(rc))
1275 rc = PGMPhysGCPhys2CCPtr(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1276 return rc;
1277}
1278
1279
1280/**
1281 * Requests the mapping of a guest page given by virtual address into the current context.
1282 *
1283 * This API should only be used for very short-term access, as it will consume
1284 * scarce resources (R0 and GC) in the mapping cache. When you're done
1285 * with the page, call PGMPhysReleasePageMappingLock() ASAP to release it.
1286 *
1287 * @returns VBox status code.
1288 * @retval VINF_SUCCESS on success.
1289 * @retval VERR_PAGE_TABLE_NOT_PRESENT if the page directory for the virtual address isn't present.
1290 * @retval VERR_PAGE_NOT_PRESENT if the page at the virtual address isn't present.
1291 * @retval VERR_PGM_PHYS_PAGE_RESERVED it it's a valid page but has no physical backing.
1292 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address.
1293 *
1294 * @param pVCpu VMCPU handle.
1295 * @param GCPtr The guest virtual address of the page that should be mapped.
1296 * @param ppv Where to store the address corresponding to GCPhys.
1297 * @param pLock Where to store the lock information that PGMPhysReleasePageMappingLock needs.
1298 *
1299 * @remark Avoid calling this API from within critical sections (other than
1300 * the PGM one) because of the deadlock risk.
1301 * @thread EMT
1302 */
1303VMMDECL(int) PGMPhysGCPtr2CCPtrReadOnly(PVMCPU pVCpu, RTGCPTR GCPtr, void const **ppv, PPGMPAGEMAPLOCK pLock)
1304{
1305 VM_ASSERT_EMT(pVCpu->CTX_SUFF(pVM));
1306 RTGCPHYS GCPhys;
1307 int rc = PGMPhysGCPtr2GCPhys(pVCpu, GCPtr, &GCPhys);
1308 if (RT_SUCCESS(rc))
1309 rc = PGMPhysGCPhys2CCPtrReadOnly(pVCpu->CTX_SUFF(pVM), GCPhys, ppv, pLock);
1310 return rc;
1311}
1312
1313
1314/**
1315 * Release the mapping of a guest page.
1316 *
1317 * This is the counterpart of PGMPhysGCPhys2CCPtr, PGMPhysGCPhys2CCPtrReadOnly,
1318 * PGMPhysGCPtr2CCPtr and PGMPhysGCPtr2CCPtrReadOnly.
1319 *
1320 * @param pVM The VM handle.
1321 * @param pLock The lock structure initialized by the mapping function.
1322 */
1323VMMDECL(void) PGMPhysReleasePageMappingLock(PVM pVM, PPGMPAGEMAPLOCK pLock)
1324{
1325#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1326 /* currently nothing to do here. */
1327 Assert(pLock->u32Dummy == UINT32_MAX);
1328 pLock->u32Dummy = 0;
1329
1330#else /* IN_RING3 */
1331 PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
1332 PPGMPAGE pPage = (PPGMPAGE)(pLock->uPageAndType & ~PGMPAGEMAPLOCK_TYPE_MASK);
1333 bool fWriteLock = (pLock->uPageAndType & PGMPAGEMAPLOCK_TYPE_MASK) == PGMPAGEMAPLOCK_TYPE_WRITE;
1334
1335 pLock->uPageAndType = 0;
1336 pLock->pvMap = NULL;
1337
1338 pgmLock(pVM);
1339 if (fWriteLock)
1340 {
1341 unsigned cLocks = PGM_PAGE_GET_WRITE_LOCKS(pPage);
1342 Assert(cLocks > 0);
1343 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1344 {
1345 if (cLocks == 1)
1346 {
1347 Assert(pVM->pgm.s.cWriteLockedPages > 0);
1348 pVM->pgm.s.cWriteLockedPages--;
1349 }
1350 PGM_PAGE_DEC_WRITE_LOCKS(pPage);
1351 }
1352
1353 if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
1354 {
1355 PGM_PAGE_SET_WRITTEN_TO(pPage);
1356 PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
1357 Assert(pVM->pgm.s.cMonitoredPages > 0);
1358 pVM->pgm.s.cMonitoredPages--;
1359 pVM->pgm.s.cWrittenToPages++;
1360 }
1361 }
1362 else
1363 {
1364 unsigned cLocks = PGM_PAGE_GET_READ_LOCKS(pPage);
1365 Assert(cLocks > 0);
1366 if (RT_LIKELY(cLocks > 0 && cLocks < PGM_PAGE_MAX_LOCKS))
1367 {
1368 if (cLocks == 1)
1369 {
1370 Assert(pVM->pgm.s.cReadLockedPages > 0);
1371 pVM->pgm.s.cReadLockedPages--;
1372 }
1373 PGM_PAGE_DEC_READ_LOCKS(pPage);
1374 }
1375 }
1376
1377 if (pMap)
1378 {
1379 Assert(pMap->cRefs >= 1);
1380 pMap->cRefs--;
1381 pMap->iAge = 0;
1382 }
1383 pgmUnlock(pVM);
1384#endif /* IN_RING3 */
1385}
1386
1387
1388/**
1389 * Converts a GC physical address to a HC ring-3 pointer.
1390 *
1391 * @returns VINF_SUCCESS on success.
1392 * @returns VERR_PGM_PHYS_PAGE_RESERVED if it's a valid GC physical
1393 * page but has no physical backing.
1394 * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
1395 * GC physical address.
1396 * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
1397 * a dynamic ram chunk boundary
1398 *
1399 * @param pVM The VM handle.
1400 * @param GCPhys The GC physical address to convert.
1401 * @param cbRange Physical range
1402 * @param pR3Ptr Where to store the R3 pointer on success.
1403 *
1404 * @deprecated Avoid when possible!
1405 */
1406VMMDECL(int) PGMPhysGCPhys2R3Ptr(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange, PRTR3PTR pR3Ptr)
1407{
1408/** @todo this is kind of hacky and needs some more work. */
1409#ifndef DEBUG_sandervl
1410 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1411#endif
1412
1413 Log(("PGMPhysGCPhys2R3Ptr(,%RGp,%#x,): dont use this API!\n", GCPhys, cbRange)); /** @todo eliminate this API! */
1414#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
1415 AssertFailedReturn(VERR_NOT_IMPLEMENTED);
1416#else
1417 pgmLock(pVM);
1418
1419 PPGMRAMRANGE pRam;
1420 PPGMPAGE pPage;
1421 int rc = pgmPhysGetPageAndRangeEx(&pVM->pgm.s, GCPhys, &pPage, &pRam);
1422 if (RT_SUCCESS(rc))
1423 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr);
1424
1425 pgmUnlock(pVM);
1426 Assert(rc <= VINF_SUCCESS);
1427 return rc;
1428#endif
1429}
1430
1431
1432#ifdef VBOX_STRICT
1433/**
1434 * PGMPhysGCPhys2R3Ptr convenience for use with assertions.
1435 *
1436 * @returns The R3Ptr, NIL_RTR3PTR on failure.
1437 * @param pVM The VM handle.
1438 * @param GCPhys The GC physical address.
1439 * @param cbRange Physical range.
1440 *
1441 * @deprecated Avoid when possible.
1442 */
1443VMMDECL(RTR3PTR) PGMPhysGCPhys2R3PtrAssert(PVM pVM, RTGCPHYS GCPhys, RTUINT cbRange)
1444{
1445 RTR3PTR R3Ptr;
1446 int rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, cbRange, &R3Ptr);
1447 if (RT_SUCCESS(rc))
1448 return R3Ptr;
1449 return NIL_RTR3PTR;
1450}
1451#endif /* VBOX_STRICT */
1452
1453
1454/**
1455 * Converts a guest pointer to a GC physical address.
1456 *
1457 * This uses the current CR3/CR0/CR4 of the guest.
1458 *
1459 * @returns VBox status code.
1460 * @param pVCpu The VMCPU Handle
1461 * @param GCPtr The guest pointer to convert.
1462 * @param pGCPhys Where to store the GC physical address.
1463 */
1464VMMDECL(int) PGMPhysGCPtr2GCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTGCPHYS pGCPhys)
1465{
1466 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, pGCPhys);
1467 if (pGCPhys && RT_SUCCESS(rc))
1468 *pGCPhys |= (RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK;
1469 return rc;
1470}
1471
1472
1473/**
1474 * Converts a guest pointer to a HC physical address.
1475 *
1476 * This uses the current CR3/CR0/CR4 of the guest.
1477 *
1478 * @returns VBox status code.
1479 * @param pVCpu The VMCPU Handle
1480 * @param GCPtr The guest pointer to convert.
1481 * @param pHCPhys Where to store the HC physical address.
1482 */
1483VMMDECL(int) PGMPhysGCPtr2HCPhys(PVMCPU pVCpu, RTGCPTR GCPtr, PRTHCPHYS pHCPhys)
1484{
1485 PVM pVM = pVCpu->CTX_SUFF(pVM);
1486 RTGCPHYS GCPhys;
1487 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1488 if (RT_SUCCESS(rc))
1489 rc = PGMPhysGCPhys2HCPhys(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), pHCPhys);
1490 return rc;
1491}
1492
1493
1494/**
1495 * Converts a guest pointer to a R3 pointer.
1496 *
1497 * This uses the current CR3/CR0/CR4 of the guest.
1498 *
1499 * @returns VBox status code.
1500 * @param pVCpu The VMCPU Handle
1501 * @param GCPtr The guest pointer to convert.
1502 * @param pR3Ptr Where to store the R3 virtual address.
1503 *
1504 * @deprecated Don't use this.
1505 */
1506VMMDECL(int) PGMPhysGCPtr2R3Ptr(PVMCPU pVCpu, RTGCPTR GCPtr, PRTR3PTR pR3Ptr)
1507{
1508 PVM pVM = pVCpu->CTX_SUFF(pVM);
1509 VM_ASSERT_EMT(pVM); /* no longer safe for use outside the EMT thread! */
1510 RTGCPHYS GCPhys;
1511 int rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtr, NULL, &GCPhys);
1512 if (RT_SUCCESS(rc))
1513 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys | ((RTGCUINTPTR)GCPtr & PAGE_OFFSET_MASK), 1 /* we always stay within one page */, pR3Ptr);
1514 return rc;
1515}
1516
1517
1518
1519#undef LOG_GROUP
1520#define LOG_GROUP LOG_GROUP_PGM_PHYS_ACCESS
1521
1522
1523#ifdef IN_RING3
1524/**
1525 * Cache PGMPhys memory access
1526 *
1527 * @param pVM VM Handle.
1528 * @param pCache Cache structure pointer
1529 * @param GCPhys GC physical address
1530 * @param pbR3 R3 pointer corresponding to the physical page
1531 *
1532 * @thread EMT.
1533 */
1534static void pgmPhysCacheAdd(PVM pVM, PGMPHYSCACHE *pCache, RTGCPHYS GCPhys, uint8_t *pbR3)
1535{
1536 uint32_t iCacheIndex;
1537
1538 Assert(VM_IS_EMT(pVM));
1539
1540 GCPhys = PHYS_PAGE_ADDRESS(GCPhys);
1541 pbR3 = (uint8_t *)PAGE_ADDRESS(pbR3);
1542
1543 iCacheIndex = ((GCPhys >> PAGE_SHIFT) & PGM_MAX_PHYSCACHE_ENTRIES_MASK);
1544
1545 ASMBitSet(&pCache->aEntries, iCacheIndex);
1546
1547 pCache->Entry[iCacheIndex].GCPhys = GCPhys;
1548 pCache->Entry[iCacheIndex].pbR3 = pbR3;
1549}
1550#endif /* IN_RING3 */
1551
1552
1553/**
1554 * Deals with reading from a page with one or more ALL access handlers.
1555 *
1556 * @returns VBox status code. Can be ignored in ring-3.
1557 * @retval VINF_SUCCESS.
1558 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1559 *
1560 * @param pVM The VM handle.
1561 * @param pPage The page descriptor.
1562 * @param GCPhys The physical address to start reading at.
1563 * @param pvBuf Where to put the bits we read.
1564 * @param cb How much to read - less or equal to a page.
1565 */
1566static int pgmPhysReadHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void *pvBuf, size_t cb)
1567{
1568 /*
1569 * The most frequent access here is MMIO and shadowed ROM.
1570 * The current code ASSUMES all these access handlers cover full pages!
1571 */
1572
1573 /*
1574 * Whatever we do we need the source page, map it first.
1575 */
1576 const void *pvSrc = NULL;
1577 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, GCPhys, &pvSrc);
1578 if (RT_FAILURE(rc))
1579 {
1580 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1581 GCPhys, pPage, rc));
1582 memset(pvBuf, 0xff, cb);
1583 return VINF_SUCCESS;
1584 }
1585 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1586
1587 /*
1588 * Deal with any physical handlers.
1589 */
1590 PPGMPHYSHANDLER pPhys = NULL;
1591 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_ALL)
1592 {
1593#ifdef IN_RING3
1594 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1595 AssertReleaseMsg(pPhys, ("GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1596 Assert(GCPhys >= pPhys->Core.Key && GCPhys <= pPhys->Core.KeyLast);
1597 Assert((pPhys->Core.Key & PAGE_OFFSET_MASK) == 0);
1598 Assert((pPhys->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1599 Assert(pPhys->CTX_SUFF(pfnHandler));
1600
1601 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
1602 void *pvUser = pPhys->CTX_SUFF(pvUser);
1603
1604 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cb, pPage, R3STRING(pPhys->pszDesc) ));
1605 STAM_PROFILE_START(&pPhys->Stat, h);
1606 Assert(PGMIsLockOwner(pVM));
1607 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1608 pgmUnlock(pVM);
1609 rc = pfnHandler(pVM, GCPhys, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, pvUser);
1610 pgmLock(pVM);
1611# ifdef VBOX_WITH_STATISTICS
1612 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1613 if (pPhys)
1614 STAM_PROFILE_STOP(&pPhys->Stat, h);
1615# else
1616 pPhys = NULL; /* might not be valid anymore. */
1617# endif
1618 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys));
1619#else
1620 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1621 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1622 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1623#endif
1624 }
1625
1626 /*
1627 * Deal with any virtual handlers.
1628 */
1629 if (PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) == PGM_PAGE_HNDL_VIRT_STATE_ALL)
1630 {
1631 unsigned iPage;
1632 PPGMVIRTHANDLER pVirt;
1633
1634 int rc2 = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iPage);
1635 AssertReleaseMsg(RT_SUCCESS(rc2), ("GCPhys=%RGp cb=%#x rc2=%Rrc\n", GCPhys, cb, rc2));
1636 Assert((pVirt->Core.Key & PAGE_OFFSET_MASK) == 0);
1637 Assert((pVirt->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
1638 Assert(GCPhys >= pVirt->aPhysToVirt[iPage].Core.Key && GCPhys <= pVirt->aPhysToVirt[iPage].Core.KeyLast);
1639
1640#ifdef IN_RING3
1641 if (pVirt->pfnHandlerR3)
1642 {
1643 if (!pPhys)
1644 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1645 else
1646 Log(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc), R3STRING(pPhys->pszDesc) ));
1647 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
1648 + (iPage << PAGE_SHIFT)
1649 + (GCPhys & PAGE_OFFSET_MASK);
1650
1651 STAM_PROFILE_START(&pVirt->Stat, h);
1652 rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, (void *)pvSrc, pvBuf, cb, PGMACCESSTYPE_READ, /*pVirt->CTX_SUFF(pvUser)*/ NULL);
1653 STAM_PROFILE_STOP(&pVirt->Stat, h);
1654 if (rc2 == VINF_SUCCESS)
1655 rc = VINF_SUCCESS;
1656 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc2, GCPhys, pPage, pVirt->pszDesc));
1657 }
1658 else
1659 Log5(("pgmPhysReadHandler: GCPhys=%RGp cb=%#x pPage=%R[pgmpage] virt %s [no handler]\n", GCPhys, cb, pPage, R3STRING(pVirt->pszDesc) ));
1660#else
1661 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1662 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cb=%#x\n", GCPhys, cb));
1663 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1664#endif
1665 }
1666
1667 /*
1668 * Take the default action.
1669 */
1670 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1671 memcpy(pvBuf, pvSrc, cb);
1672 return rc;
1673}
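
/*
 * Illustrative sketch (not part of the original source): the return code
 * contract pgmPhysReadHandler() relies on.  A ring-3 ALL access handler either
 * fills pvBuf itself and returns VINF_SUCCESS, or returns
 * VINF_PGM_HANDLER_DO_DEFAULT so the caller memcpy()s from the backing page.
 * The parameter list mirrors the pfnHandler invocation above; the handler name
 * and the value it returns are made up for the example.
 */
#if 0
static DECLCALLBACK(int) exampleStatusPageReadHandler(PVM pVM, RTGCPHYS GCPhys, void *pvPhys,
                                                      void *pvBuf, size_t cbBuf,
                                                      PGMACCESSTYPE enmAccessType, void *pvUser)
{
    Assert(enmAccessType == PGMACCESSTYPE_READ);
    NOREF(pVM); NOREF(pvPhys); NOREF(pvUser);
    if ((GCPhys & PAGE_OFFSET_MASK) == 0 && cbBuf >= sizeof(uint32_t))
    {
        *(uint32_t *)pvBuf = UINT32_C(0xfeedface);  /* the device supplies the data instead of RAM */
        return VINF_SUCCESS;                        /* pvBuf is already filled, skip the default copy */
    }
    return VINF_PGM_HANDLER_DO_DEFAULT;             /* have pgmPhysReadHandler copy from the page */
}
#endif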
1674
1675
1676/**
1677 * Read physical memory.
1678 *
1679 * This API respects access handlers and MMIO. Use PGMPhysSimpleReadGCPhys() if you
1680 * want to ignore those.
1681 *
1682 * @returns VBox status code. Can be ignored in ring-3.
1683 * @retval VINF_SUCCESS.
1684 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1685 *
1686 * @param pVM VM Handle.
1687 * @param GCPhys Physical address start reading from.
1688 * @param pvBuf Where to put the read bits.
1689 * @param cbRead How many bytes to read.
1690 */
1691VMMDECL(int) PGMPhysRead(PVM pVM, RTGCPHYS GCPhys, void *pvBuf, size_t cbRead)
1692{
1693 AssertMsgReturn(cbRead > 0, ("don't even think about reading zero bytes!\n"), VINF_SUCCESS);
1694 LogFlow(("PGMPhysRead: %RGp %d\n", GCPhys, cbRead));
1695
1696 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysRead));
1697 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysReadBytes), cbRead);
1698
1699 pgmLock(pVM);
1700
1701 /*
1702 * Copy loop on ram ranges.
1703 */
1704 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
1705 for (;;)
1706 {
1707 /* Find range. */
1708 while (pRam && GCPhys > pRam->GCPhysLast)
1709 pRam = pRam->CTX_SUFF(pNext);
1710 /* Inside range or not? */
1711 if (pRam && GCPhys >= pRam->GCPhys)
1712 {
1713 /*
1714 * Must work our way thru this page by page.
1715 */
1716 RTGCPHYS off = GCPhys - pRam->GCPhys;
1717 while (off < pRam->cb)
1718 {
1719 unsigned iPage = off >> PAGE_SHIFT;
1720 PPGMPAGE pPage = &pRam->aPages[iPage];
1721 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
1722 if (cb > cbRead)
1723 cb = cbRead;
1724
1725 /*
1726 * Any ALL access handlers?
1727 */
1728 if (RT_UNLIKELY(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)))
1729 {
1730 int rc = pgmPhysReadHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
1731 if (RT_FAILURE(rc))
1732 {
1733 pgmUnlock(pVM);
1734 return rc;
1735 }
1736 }
1737 else
1738 {
1739 /*
1740 * Get the pointer to the page.
1741 */
1742 const void *pvSrc;
1743 int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, pPage, pRam->GCPhys + off, &pvSrc);
1744 if (RT_SUCCESS(rc))
1745 memcpy(pvBuf, pvSrc, cb);
1746 else
1747 {
1748 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternalReadOnly failed on %RGp / %R[pgmpage] -> %Rrc\n",
1749 pRam->GCPhys + off, pPage, rc));
1750 memset(pvBuf, 0xff, cb);
1751 }
1752 }
1753
1754 /* next page */
1755 if (cb >= cbRead)
1756 {
1757 pgmUnlock(pVM);
1758 return VINF_SUCCESS;
1759 }
1760 cbRead -= cb;
1761 off += cb;
1762 pvBuf = (char *)pvBuf + cb;
1763 } /* walk pages in ram range. */
1764
1765 GCPhys = pRam->GCPhysLast + 1;
1766 }
1767 else
1768 {
1769 LogFlow(("PGMPhysRead: Unassigned %RGp size=%u\n", GCPhys, cbRead));
1770
1771 /*
1772 * Unassigned address space.
1773 */
1774 if (!pRam)
1775 break;
1776 size_t cb = pRam->GCPhys - GCPhys;
1777 if (cb >= cbRead)
1778 {
1779 memset(pvBuf, 0xff, cbRead);
1780 break;
1781 }
1782 memset(pvBuf, 0xff, cb);
1783
1784 cbRead -= cb;
1785 pvBuf = (char *)pvBuf + cb;
1786 GCPhys += cb;
1787 }
1788 } /* Ram range walk */
1789
1790 pgmUnlock(pVM);
1791 return VINF_SUCCESS;
1792}
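
/*
 * Illustrative usage sketch (not part of the original source): reading a
 * guest physical buffer with handler and MMIO semantics.  In ring-3 the
 * status can be ignored; in R0/RC a VERR_PGM_PHYS_WR_HIT_HANDLER return means
 * the access touched a handled page and must be redone in ring-3.  pVM and
 * GCPhysTable are assumed to be provided by the caller.
 */
#if 0
    uint32_t au32Table[16];
    int rc = PGMPhysRead(pVM, GCPhysTable, &au32Table[0], sizeof(au32Table));
    if (rc == VERR_PGM_PHYS_WR_HIT_HANDLER)     /* only possible in R0/RC */
        return VINF_EM_RAW_TO_R3;               /* example policy: retry the whole access in ring-3 */
    AssertRCReturn(rc, rc);
#endif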
1793
1794
1795/**
1796 * Deals with writing to a page with one or more WRITE or ALL access handlers.
1797 *
1798 * @returns VBox status code. Can be ignored in ring-3.
1799 * @retval VINF_SUCCESS.
1800 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
1801 *
1802 * @param pVM The VM handle.
1803 * @param pPage The page descriptor.
1804 * @param GCPhys The physical address to start writing at.
1805 * @param pvBuf What to write.
1806 * @param cbWrite How much to write - less or equal to a page.
1807 */
1808static int pgmPhysWriteHandler(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, void const *pvBuf, size_t cbWrite)
1809{
1810 void *pvDst = NULL;
1811 int rc;
1812
1813 /*
1814 * Give priority to physical handlers (like #PF does).
1815 *
1816 * Hope for a lonely physical handler first that covers the whole
1817 * write area. This should be a pretty frequent case with MMIO and
1818 * the heavy usage of full page handlers in the page pool.
1819 */
1820 if ( !PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage)
1821 || PGM_PAGE_IS_MMIO(pPage) /* screw virtual handlers on MMIO pages */)
1822 {
1823 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1824 if (pCur)
1825 {
1826 Assert(GCPhys >= pCur->Core.Key && GCPhys <= pCur->Core.KeyLast);
1827 Assert(pCur->CTX_SUFF(pfnHandler));
1828
1829 size_t cbRange = pCur->Core.KeyLast - GCPhys + 1;
1830 if (cbRange > cbWrite)
1831 cbRange = cbWrite;
1832
1833#ifndef IN_RING3
1834 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1835 NOREF(cbRange);
1836 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1837 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1838
1839#else /* IN_RING3 */
1840 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1841 if (!PGM_PAGE_IS_MMIO(pPage))
1842 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1843 else
1844 rc = VINF_SUCCESS;
1845 if (RT_SUCCESS(rc))
1846 {
1847 PFNPGMR3PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
1848 void *pvUser = pCur->CTX_SUFF(pvUser);
1849
1850 STAM_PROFILE_START(&pCur->Stat, h);
1851 Assert(PGMIsLockOwner(pVM));
1852 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
1853 pgmUnlock(pVM);
1854 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
1855 pgmLock(pVM);
1856# ifdef VBOX_WITH_STATISTICS
1857 pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
1858 if (pCur)
1859 STAM_PROFILE_STOP(&pCur->Stat, h);
1860# else
1861 pCur = NULL; /* might not be valid anymore. */
1862# endif
1863 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1864 memcpy(pvDst, pvBuf, cbRange);
1865 else
1866 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pCur) ? pCur->pszDesc : ""));
1867 }
1868 else
1869 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1870 GCPhys, pPage, rc), rc);
1871 if (RT_LIKELY(cbRange == cbWrite))
1872 return VINF_SUCCESS;
1873
1874 /* more fun to be had below */
1875 cbWrite -= cbRange;
1876 GCPhys += cbRange;
1877 pvBuf = (uint8_t *)pvBuf + cbRange;
1878 pvDst = (uint8_t *)pvDst + cbRange;
1879#endif /* IN_RING3 */
1880 }
1881 /* else: the handler is somewhere else in the page, deal with it below. */
1882 Assert(!PGM_PAGE_IS_MMIO(pPage)); /* MMIO handlers are all PAGE_SIZEed! */
1883 }
1884 /*
1885 * A virtual handler without any interfering physical handlers.
1886     * Hopefully it'll cover the whole write.
1887 */
1888 else if (!PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage))
1889 {
1890 unsigned iPage;
1891 PPGMVIRTHANDLER pCur;
1892 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pCur, &iPage);
1893 if (RT_SUCCESS(rc))
1894 {
1895 size_t cbRange = (PAGE_OFFSET_MASK & pCur->Core.KeyLast) - (PAGE_OFFSET_MASK & GCPhys) + 1;
1896 if (cbRange > cbWrite)
1897 cbRange = cbWrite;
1898
1899#ifndef IN_RING3
1900 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
1901 NOREF(cbRange);
1902 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
1903 return VERR_PGM_PHYS_WR_HIT_HANDLER;
1904
1905#else /* IN_RING3 */
1906
1907 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pCur->pszDesc) ));
1908 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1909 if (RT_SUCCESS(rc))
1910 {
1911 rc = VINF_PGM_HANDLER_DO_DEFAULT;
1912 if (pCur->pfnHandlerR3)
1913 {
1914 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pCur->Core.Key & PAGE_BASE_GC_MASK)
1915 + (iPage << PAGE_SHIFT)
1916 + (GCPhys & PAGE_OFFSET_MASK);
1917
1918 STAM_PROFILE_START(&pCur->Stat, h);
1919 rc = pCur->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
1920 STAM_PROFILE_STOP(&pCur->Stat, h);
1921 }
1922 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
1923 memcpy(pvDst, pvBuf, cbRange);
1924 else
1925 AssertLogRelMsg(rc == VINF_SUCCESS, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pCur->pszDesc));
1926 }
1927 else
1928 AssertLogRelMsgFailedReturn(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1929 GCPhys, pPage, rc), rc);
1930 if (RT_LIKELY(cbRange == cbWrite))
1931 return VINF_SUCCESS;
1932
1933 /* more fun to be had below */
1934 cbWrite -= cbRange;
1935 GCPhys += cbRange;
1936 pvBuf = (uint8_t *)pvBuf + cbRange;
1937 pvDst = (uint8_t *)pvDst + cbRange;
1938#endif
1939 }
1940 /* else: the handler is somewhere else in the page, deal with it below. */
1941 }
1942
1943 /*
1944 * Deal with all the odd ends.
1945 */
1946
1947 /* We need a writable destination page. */
1948 if (!pvDst)
1949 {
1950 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDst);
1951 AssertLogRelMsgReturn(RT_SUCCESS(rc),
1952 ("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
1953 GCPhys, pPage, rc), rc);
1954 }
1955
1956 /* The loop state (big + ugly). */
1957 unsigned iVirtPage = 0;
1958 PPGMVIRTHANDLER pVirt = NULL;
1959 uint32_t offVirt = PAGE_SIZE;
1960 uint32_t offVirtLast = PAGE_SIZE;
1961 bool fMoreVirt = PGM_PAGE_HAS_ACTIVE_VIRTUAL_HANDLERS(pPage);
1962
1963 PPGMPHYSHANDLER pPhys = NULL;
1964 uint32_t offPhys = PAGE_SIZE;
1965 uint32_t offPhysLast = PAGE_SIZE;
1966 bool fMorePhys = PGM_PAGE_HAS_ACTIVE_PHYSICAL_HANDLERS(pPage);
1967
1968 /* The loop. */
1969 for (;;)
1970 {
1971 /*
1972 * Find the closest handler at or above GCPhys.
1973 */
1974 if (fMoreVirt && !pVirt)
1975 {
1976 rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys, &pVirt, &iVirtPage);
1977 if (RT_SUCCESS(rc))
1978 {
1979 offVirt = 0;
1980 offVirtLast = (pVirt->aPhysToVirt[iVirtPage].Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1981 }
1982 else
1983 {
1984 PPGMPHYS2VIRTHANDLER pVirtPhys;
1985 pVirtPhys = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers,
1986 GCPhys, true /* fAbove */);
1987 if ( pVirtPhys
1988 && (pVirtPhys->Core.Key >> PAGE_SHIFT) == (GCPhys >> PAGE_SHIFT))
1989 {
1990 /* ASSUME that pVirtPhys only covers one page. */
1991 Assert((pVirtPhys->Core.Key >> PAGE_SHIFT) == (pVirtPhys->Core.KeyLast >> PAGE_SHIFT));
1992 Assert(pVirtPhys->Core.Key > GCPhys);
1993
1994 pVirt = (PPGMVIRTHANDLER)((uintptr_t)pVirtPhys + pVirtPhys->offVirtHandler);
1995 iVirtPage = pVirtPhys - &pVirt->aPhysToVirt[0]; Assert(iVirtPage == 0);
1996 offVirt = (pVirtPhys->Core.Key & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1997 offVirtLast = (pVirtPhys->Core.KeyLast & PAGE_OFFSET_MASK) - (GCPhys & PAGE_OFFSET_MASK);
1998 }
1999 else
2000 {
2001 pVirt = NULL;
2002 fMoreVirt = false;
2003 offVirt = offVirtLast = PAGE_SIZE;
2004 }
2005 }
2006 }
2007
2008 if (fMorePhys && !pPhys)
2009 {
2010 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2011 if (pPhys)
2012 {
2013 offPhys = 0;
2014 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2015 }
2016 else
2017 {
2018 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysGetBestFit(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers,
2019 GCPhys, true /* fAbove */);
2020 if ( pPhys
2021 && pPhys->Core.Key <= GCPhys + (cbWrite - 1))
2022 {
2023 offPhys = pPhys->Core.Key - GCPhys;
2024 offPhysLast = pPhys->Core.KeyLast - GCPhys; /* ASSUMES < 4GB handlers... */
2025 }
2026 else
2027 {
2028 pPhys = NULL;
2029 fMorePhys = false;
2030 offPhys = offPhysLast = PAGE_SIZE;
2031 }
2032 }
2033 }
2034
2035 /*
2036 * Handle access to space without handlers (that's easy).
2037 */
2038 rc = VINF_PGM_HANDLER_DO_DEFAULT;
2039 uint32_t cbRange = (uint32_t)cbWrite;
2040 if (offPhys && offVirt)
2041 {
2042 if (cbRange > offPhys)
2043 cbRange = offPhys;
2044 if (cbRange > offVirt)
2045 cbRange = offVirt;
2046 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] miss\n", GCPhys, cbRange, pPage));
2047 }
2048 /*
2049 * Physical handler.
2050 */
2051 else if (!offPhys && offVirt)
2052 {
2053 if (cbRange > offPhysLast + 1)
2054 cbRange = offPhysLast + 1;
2055 if (cbRange > offVirt)
2056 cbRange = offVirt;
2057#ifdef IN_RING3
2058 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2059 void *pvUser = pPhys->CTX_SUFF(pvUser);
2060
2061 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys %s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc) ));
2062 STAM_PROFILE_START(&pPhys->Stat, h);
2063 Assert(PGMIsLockOwner(pVM));
2064 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2065 pgmUnlock(pVM);
2066 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2067 pgmLock(pVM);
2068# ifdef VBOX_WITH_STATISTICS
2069 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2070 if (pPhys)
2071 STAM_PROFILE_STOP(&pPhys->Stat, h);
2072# else
2073 pPhys = NULL; /* might not be valid anymore. */
2074# endif
2075 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2076#else
2077 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2078 NOREF(cbRange);
2079 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2080 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2081#endif
2082 }
2083 /*
2084 * Virtual handler.
2085 */
2086 else if (offPhys && !offVirt)
2087 {
2088 if (cbRange > offVirtLast + 1)
2089 cbRange = offVirtLast + 1;
2090 if (cbRange > offPhys)
2091 cbRange = offPhys;
2092#ifdef IN_RING3
2093        Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] virt %s\n", GCPhys, cbRange, pPage, R3STRING(pVirt->pszDesc) ));
2094 if (pVirt->pfnHandlerR3)
2095 {
2096 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2097 + (iVirtPage << PAGE_SHIFT)
2098 + (GCPhys & PAGE_OFFSET_MASK);
2099 STAM_PROFILE_START(&pVirt->Stat, h);
2100 rc = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2101 STAM_PROFILE_STOP(&pVirt->Stat, h);
2102 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2103 }
2104 pVirt = NULL;
2105#else
2106 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2107 NOREF(cbRange);
2108 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2109 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2110#endif
2111 }
2112 /*
2113 * Both... give the physical one priority.
2114 */
2115 else
2116 {
2117 Assert(!offPhys && !offVirt);
2118 if (cbRange > offVirtLast + 1)
2119 cbRange = offVirtLast + 1;
2120 if (cbRange > offPhysLast + 1)
2121 cbRange = offPhysLast + 1;
2122
2123#ifdef IN_RING3
2124 if (pVirt->pfnHandlerR3)
2125 Log(("pgmPhysWriteHandler: overlapping phys and virt handlers at %RGp %R[pgmpage]; cbRange=%#x\n", GCPhys, pPage, cbRange));
2126 Log5(("pgmPhysWriteHandler: GCPhys=%RGp cbRange=%#x pPage=%R[pgmpage] phys/virt %s/%s\n", GCPhys, cbRange, pPage, R3STRING(pPhys->pszDesc), R3STRING(pVirt->pszDesc) ));
2127
2128 PFNPGMR3PHYSHANDLER pfnHandler = pPhys->CTX_SUFF(pfnHandler);
2129 void *pvUser = pPhys->CTX_SUFF(pvUser);
2130
2131 STAM_PROFILE_START(&pPhys->Stat, h);
2132 Assert(PGMIsLockOwner(pVM));
2133 /* Release the PGM lock as MMIO handlers take the IOM lock. (deadlock prevention) */
2134 pgmUnlock(pVM);
2135 rc = pfnHandler(pVM, GCPhys, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, pvUser);
2136 pgmLock(pVM);
2137# ifdef VBOX_WITH_STATISTICS
2138 pPhys = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhys);
2139 if (pPhys)
2140 STAM_PROFILE_STOP(&pPhys->Stat, h);
2141# else
2142 pPhys = NULL; /* might not be valid anymore. */
2143# endif
2144 AssertLogRelMsg(rc == VINF_SUCCESS || rc == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, (pPhys) ? pPhys->pszDesc : ""));
2145 if (pVirt->pfnHandlerR3)
2146 {
2147
2148 RTGCUINTPTR GCPtr = ((RTGCUINTPTR)pVirt->Core.Key & PAGE_BASE_GC_MASK)
2149 + (iVirtPage << PAGE_SHIFT)
2150 + (GCPhys & PAGE_OFFSET_MASK);
2151 STAM_PROFILE_START(&pVirt->Stat, h2);
2152 int rc2 = pVirt->CTX_SUFF(pfnHandler)(pVM, GCPtr, pvDst, (void *)pvBuf, cbRange, PGMACCESSTYPE_WRITE, /*pCur->CTX_SUFF(pvUser)*/ NULL);
2153 STAM_PROFILE_STOP(&pVirt->Stat, h2);
2154 if (rc2 == VINF_SUCCESS && rc == VINF_PGM_HANDLER_DO_DEFAULT)
2155 rc = VINF_SUCCESS;
2156 else
2157 AssertLogRelMsg(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_HANDLER_DO_DEFAULT, ("rc=%Rrc GCPhys=%RGp pPage=%R[pgmpage] %s\n", rc, GCPhys, pPage, pVirt->pszDesc));
2158 }
2159 pPhys = NULL;
2160 pVirt = NULL;
2161#else
2162 /* In R0 and RC the callbacks cannot handle this context, so we'll fail. */
2163 NOREF(cbRange);
2164 //AssertReleaseMsgFailed(("Wrong API! GCPhys=%RGp cbRange=%#x\n", GCPhys, cbRange));
2165 return VERR_PGM_PHYS_WR_HIT_HANDLER;
2166#endif
2167 }
2168 if (rc == VINF_PGM_HANDLER_DO_DEFAULT)
2169 memcpy(pvDst, pvBuf, cbRange);
2170
2171 /*
2172 * Advance if we've got more stuff to do.
2173 */
2174 if (cbRange >= cbWrite)
2175 return VINF_SUCCESS;
2176
2177 cbWrite -= cbRange;
2178 GCPhys += cbRange;
2179 pvBuf = (uint8_t *)pvBuf + cbRange;
2180 pvDst = (uint8_t *)pvDst + cbRange;
2181
2182 offPhys -= cbRange;
2183 offPhysLast -= cbRange;
2184 offVirt -= cbRange;
2185 offVirtLast -= cbRange;
2186 }
2187}
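
/*
 * Worked example (not part of the original source) of the cbRange clamping in
 * the loop above.  Assume a 0x30 byte write starting at page offset 0x7f0,
 * with one physical handler covering page offsets 0x800-0x9ff and no virtual
 * handlers on the page:
 *
 *   Pass 1: offPhys = 0x800 - 0x7f0 = 0x10, offVirt = PAGE_SIZE.
 *           Both are non-zero, so this is an unhandled gap; cbRange is clamped
 *           to min(0x30, 0x10) = 0x10 and plainly memcpy'd, then the offsets
 *           are rebased (offPhys -> 0, offPhysLast = 0x20f - 0x10 = 0x1ff).
 *   Pass 2: offPhys == 0, so the physical handler branch runs; cbRange is
 *           clamped to min(0x20 remaining, offPhysLast + 1 = 0x200) = 0x20,
 *           the handler is invoked once for the rest of the write and the
 *           function returns.
 */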
2188
2189
2190/**
2191 * Write to physical memory.
2192 *
2193 * This API respects access handlers and MMIO. Use PGMPhysSimpleWriteGCPhys() if you
2194 * want to ignore those.
2195 *
2196 * @returns VBox status code. Can be ignored in ring-3.
2197 * @retval VINF_SUCCESS.
2198 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2199 *
2200 * @param pVM VM Handle.
2201 * @param GCPhys Physical address to write to.
2202 * @param pvBuf What to write.
2203 * @param cbWrite How many bytes to write.
2204 */
2205VMMDECL(int) PGMPhysWrite(PVM pVM, RTGCPHYS GCPhys, const void *pvBuf, size_t cbWrite)
2206{
2207 AssertMsg(!pVM->pgm.s.fNoMorePhysWrites, ("Calling PGMPhysWrite after pgmR3Save()!\n"));
2208 AssertMsgReturn(cbWrite > 0, ("don't even think about writing zero bytes!\n"), VINF_SUCCESS);
2209 LogFlow(("PGMPhysWrite: %RGp %d\n", GCPhys, cbWrite));
2210
2211 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWrite));
2212 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysWriteBytes), cbWrite);
2213
2214 pgmLock(pVM);
2215
2216 /*
2217 * Copy loop on ram ranges.
2218 */
2219 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
2220 for (;;)
2221 {
2222 /* Find range. */
2223 while (pRam && GCPhys > pRam->GCPhysLast)
2224 pRam = pRam->CTX_SUFF(pNext);
2225 /* Inside range or not? */
2226 if (pRam && GCPhys >= pRam->GCPhys)
2227 {
2228 /*
2229 * Must work our way thru this page by page.
2230 */
2231 RTGCPTR off = GCPhys - pRam->GCPhys;
2232 while (off < pRam->cb)
2233 {
2234 RTGCPTR iPage = off >> PAGE_SHIFT;
2235 PPGMPAGE pPage = &pRam->aPages[iPage];
2236 size_t cb = PAGE_SIZE - (off & PAGE_OFFSET_MASK);
2237 if (cb > cbWrite)
2238 cb = cbWrite;
2239
2240 /*
2241 * Any active WRITE or ALL access handlers?
2242 */
2243 if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
2244 {
2245 int rc = pgmPhysWriteHandler(pVM, pPage, pRam->GCPhys + off, pvBuf, cb);
2246 if (RT_FAILURE(rc))
2247 {
2248 pgmUnlock(pVM);
2249 return rc;
2250 }
2251 }
2252 else
2253 {
2254 /*
2255 * Get the pointer to the page.
2256 */
2257 void *pvDst;
2258 int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, pRam->GCPhys + off, &pvDst);
2259 if (RT_SUCCESS(rc))
2260 memcpy(pvDst, pvBuf, cb);
2261 else
2262 AssertLogRelMsgFailed(("pgmPhysGCPhys2CCPtrInternal failed on %RGp / %R[pgmpage] -> %Rrc\n",
2263 pRam->GCPhys + off, pPage, rc));
2264 }
2265
2266 /* next page */
2267 if (cb >= cbWrite)
2268 {
2269 pgmUnlock(pVM);
2270 return VINF_SUCCESS;
2271 }
2272
2273 cbWrite -= cb;
2274 off += cb;
2275 pvBuf = (const char *)pvBuf + cb;
2276 } /* walk pages in ram range */
2277
2278 GCPhys = pRam->GCPhysLast + 1;
2279 }
2280 else
2281 {
2282 /*
2283 * Unassigned address space, skip it.
2284 */
2285 if (!pRam)
2286 break;
2287 size_t cb = pRam->GCPhys - GCPhys;
2288 if (cb >= cbWrite)
2289 break;
2290 cbWrite -= cb;
2291 pvBuf = (const char *)pvBuf + cb;
2292 GCPhys += cb;
2293 }
2294 } /* Ram range walk */
2295
2296 pgmUnlock(pVM);
2297 return VINF_SUCCESS;
2298}
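
/*
 * Illustrative usage sketch (not part of the original source): a ring-3 device
 * model storing a completion record into guest memory.  Access handlers and
 * MMIO are respected, so the write may end up in another device's handler.
 * pVM, GCPhysRecord, cItems and the record layout are assumptions made for
 * the example.
 */
#if 0
    struct { uint32_t u32Status; uint32_t u32Count; } Rec = { 0 /* success */, cItems };
    int rc = PGMPhysWrite(pVM, GCPhysRecord, &Rec, sizeof(Rec));
    AssertLogRelRC(rc);                         /* in ring-3 the status can safely just be logged */
#endif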
2299
2300
2301/**
2302 * Read from guest physical memory by GC physical address, bypassing
2303 * MMIO and access handlers.
2304 *
2305 * @returns VBox status.
2306 * @param pVM VM handle.
2307 * @param pvDst The destination address.
2308 * @param GCPhysSrc The source address (GC physical address).
2309 * @param cb The number of bytes to read.
2310 */
2311VMMDECL(int) PGMPhysSimpleReadGCPhys(PVM pVM, void *pvDst, RTGCPHYS GCPhysSrc, size_t cb)
2312{
2313 /*
2314 * Treat the first page as a special case.
2315 */
2316 if (!cb)
2317 return VINF_SUCCESS;
2318
2319 /* map the 1st page */
2320 void const *pvSrc;
2321 PGMPAGEMAPLOCK Lock;
2322 int rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2323 if (RT_FAILURE(rc))
2324 return rc;
2325
2326 /* optimize for the case where access is completely within the first page. */
2327 size_t cbPage = PAGE_SIZE - (GCPhysSrc & PAGE_OFFSET_MASK);
2328 if (RT_LIKELY(cb <= cbPage))
2329 {
2330 memcpy(pvDst, pvSrc, cb);
2331 PGMPhysReleasePageMappingLock(pVM, &Lock);
2332 return VINF_SUCCESS;
2333 }
2334
2335 /* copy to the end of the page. */
2336 memcpy(pvDst, pvSrc, cbPage);
2337 PGMPhysReleasePageMappingLock(pVM, &Lock);
2338 GCPhysSrc += cbPage;
2339 pvDst = (uint8_t *)pvDst + cbPage;
2340 cb -= cbPage;
2341
2342 /*
2343 * Page by page.
2344 */
2345 for (;;)
2346 {
2347 /* map the page */
2348 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysSrc, &pvSrc, &Lock);
2349 if (RT_FAILURE(rc))
2350 return rc;
2351
2352 /* last page? */
2353 if (cb <= PAGE_SIZE)
2354 {
2355 memcpy(pvDst, pvSrc, cb);
2356 PGMPhysReleasePageMappingLock(pVM, &Lock);
2357 return VINF_SUCCESS;
2358 }
2359
2360 /* copy the entire page and advance */
2361 memcpy(pvDst, pvSrc, PAGE_SIZE);
2362 PGMPhysReleasePageMappingLock(pVM, &Lock);
2363 GCPhysSrc += PAGE_SIZE;
2364 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2365 cb -= PAGE_SIZE;
2366 }
2367 /* won't ever get here. */
2368}
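
/*
 * Worked example (not part of the original source) of the first-page split
 * above: for GCPhysSrc = 0x100ffc and cb = 16, cbPage = PAGE_SIZE - 0xffc = 4,
 * so 4 bytes are copied from the tail of the first page, then the loop maps
 * 0x101000 and copies the remaining 12 bytes in one go.
 */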
2369
2370
2371/**
2372 * Write to guest physical memory referenced by GC physical address.
2374 *
2375 * This will bypass MMIO and access handlers.
2376 *
2377 * @returns VBox status.
2378 * @param pVM VM handle.
2379 * @param GCPhysDst The GC physical address of the destination.
2380 * @param pvSrc The source buffer.
2381 * @param cb The number of bytes to write.
2382 */
2383VMMDECL(int) PGMPhysSimpleWriteGCPhys(PVM pVM, RTGCPHYS GCPhysDst, const void *pvSrc, size_t cb)
2384{
2385 LogFlow(("PGMPhysSimpleWriteGCPhys: %RGp %zu\n", GCPhysDst, cb));
2386
2387 /*
2388 * Treat the first page as a special case.
2389 */
2390 if (!cb)
2391 return VINF_SUCCESS;
2392
2393 /* map the 1st page */
2394 void *pvDst;
2395 PGMPAGEMAPLOCK Lock;
2396 int rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2397 if (RT_FAILURE(rc))
2398 return rc;
2399
2400 /* optimize for the case where access is completely within the first page. */
2401 size_t cbPage = PAGE_SIZE - (GCPhysDst & PAGE_OFFSET_MASK);
2402 if (RT_LIKELY(cb <= cbPage))
2403 {
2404 memcpy(pvDst, pvSrc, cb);
2405 PGMPhysReleasePageMappingLock(pVM, &Lock);
2406 return VINF_SUCCESS;
2407 }
2408
2409 /* copy to the end of the page. */
2410 memcpy(pvDst, pvSrc, cbPage);
2411 PGMPhysReleasePageMappingLock(pVM, &Lock);
2412 GCPhysDst += cbPage;
2413 pvSrc = (const uint8_t *)pvSrc + cbPage;
2414 cb -= cbPage;
2415
2416 /*
2417 * Page by page.
2418 */
2419 for (;;)
2420 {
2421 /* map the page */
2422 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhysDst, &pvDst, &Lock);
2423 if (RT_FAILURE(rc))
2424 return rc;
2425
2426 /* last page? */
2427 if (cb <= PAGE_SIZE)
2428 {
2429 memcpy(pvDst, pvSrc, cb);
2430 PGMPhysReleasePageMappingLock(pVM, &Lock);
2431 return VINF_SUCCESS;
2432 }
2433
2434 /* copy the entire page and advance */
2435 memcpy(pvDst, pvSrc, PAGE_SIZE);
2436 PGMPhysReleasePageMappingLock(pVM, &Lock);
2437 GCPhysDst += PAGE_SIZE;
2438 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2439 cb -= PAGE_SIZE;
2440 }
2441 /* won't ever get here. */
2442}
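
/*
 * Illustrative usage sketch (not part of the original source): poking raw
 * bytes into guest RAM while deliberately ignoring access handlers, e.g. when
 * installing firmware tables from ring-3.  pVM, GCPhysTableArea and abTableBlob
 * are assumptions made for the example.
 */
#if 0
    int rc = PGMPhysSimpleWriteGCPhys(pVM, GCPhysTableArea, abTableBlob, sizeof(abTableBlob));
    AssertRCReturn(rc, rc);
#endif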
2443
2444
2445/**
2446 * Read from guest physical memory referenced by GC pointer.
2447 *
2448 * This function uses the current CR3/CR0/CR4 of the guest and will
2449 * bypass access handlers and not set any accessed bits.
2450 *
2451 * @returns VBox status.
2452 * @param pVCpu The VMCPU handle.
2453 * @param pvDst The destination address.
2454 * @param GCPtrSrc The source address (GC pointer).
2455 * @param cb The number of bytes to read.
2456 */
2457VMMDECL(int) PGMPhysSimpleReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2458{
2459 PVM pVM = pVCpu->CTX_SUFF(pVM);
2460
2461 /*
2462 * Treat the first page as a special case.
2463 */
2464 if (!cb)
2465 return VINF_SUCCESS;
2466
2467 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleRead));
2468 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleReadBytes), cb);
2469
2470 /* Take the PGM lock here, because many called functions take the lock for a very short period. That's counter-productive
2471 * when many VCPUs are fighting for the lock.
2472 */
2473 pgmLock(pVM);
2474
2475 /* map the 1st page */
2476 void const *pvSrc;
2477 PGMPAGEMAPLOCK Lock;
2478 int rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2479 if (RT_FAILURE(rc))
2480 {
2481 pgmUnlock(pVM);
2482 return rc;
2483 }
2484
2485 /* optimize for the case where access is completely within the first page. */
2486 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2487 if (RT_LIKELY(cb <= cbPage))
2488 {
2489 memcpy(pvDst, pvSrc, cb);
2490 PGMPhysReleasePageMappingLock(pVM, &Lock);
2491 pgmUnlock(pVM);
2492 return VINF_SUCCESS;
2493 }
2494
2495 /* copy to the end of the page. */
2496 memcpy(pvDst, pvSrc, cbPage);
2497 PGMPhysReleasePageMappingLock(pVM, &Lock);
2498 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbPage);
2499 pvDst = (uint8_t *)pvDst + cbPage;
2500 cb -= cbPage;
2501
2502 /*
2503 * Page by page.
2504 */
2505 for (;;)
2506 {
2507 /* map the page */
2508 rc = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2509 if (RT_FAILURE(rc))
2510 {
2511 pgmUnlock(pVM);
2512 return rc;
2513 }
2514
2515 /* last page? */
2516 if (cb <= PAGE_SIZE)
2517 {
2518 memcpy(pvDst, pvSrc, cb);
2519 PGMPhysReleasePageMappingLock(pVM, &Lock);
2520 pgmUnlock(pVM);
2521 return VINF_SUCCESS;
2522 }
2523
2524 /* copy the entire page and advance */
2525 memcpy(pvDst, pvSrc, PAGE_SIZE);
2526 PGMPhysReleasePageMappingLock(pVM, &Lock);
2527 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + PAGE_SIZE);
2528 pvDst = (uint8_t *)pvDst + PAGE_SIZE;
2529 cb -= PAGE_SIZE;
2530 }
2531 /* won't ever get here. */
2532}
2533
2534
2535/**
2536 * Write to guest physical memory referenced by GC pointer.
2537 *
2538 * This function uses the current CR3/CR0/CR4 of the guest and will
2539 * bypass access handlers and not set dirty or accessed bits.
2540 *
2541 * @returns VBox status.
2542 * @param pVCpu The VMCPU handle.
2543 * @param GCPtrDst The destination address (GC pointer).
2544 * @param pvSrc The source address.
2545 * @param cb The number of bytes to write.
2546 */
2547VMMDECL(int) PGMPhysSimpleWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2548{
2549 PVM pVM = pVCpu->CTX_SUFF(pVM);
2550
2551 /*
2552 * Treat the first page as a special case.
2553 */
2554 if (!cb)
2555 return VINF_SUCCESS;
2556
2557 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWrite));
2558 STAM_COUNTER_ADD(&pVM->pgm.s.CTX_MID_Z(Stat,PhysSimpleWriteBytes), cb);
2559
2560 /* map the 1st page */
2561 void *pvDst;
2562 PGMPAGEMAPLOCK Lock;
2563 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2564 if (RT_FAILURE(rc))
2565 return rc;
2566
2567 /* optimize for the case where access is completely within the first page. */
2568 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2569 if (RT_LIKELY(cb <= cbPage))
2570 {
2571 memcpy(pvDst, pvSrc, cb);
2572 PGMPhysReleasePageMappingLock(pVM, &Lock);
2573 return VINF_SUCCESS;
2574 }
2575
2576 /* copy to the end of the page. */
2577 memcpy(pvDst, pvSrc, cbPage);
2578 PGMPhysReleasePageMappingLock(pVM, &Lock);
2579 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2580 pvSrc = (const uint8_t *)pvSrc + cbPage;
2581 cb -= cbPage;
2582
2583 /*
2584 * Page by page.
2585 */
2586 for (;;)
2587 {
2588 /* map the page */
2589 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2590 if (RT_FAILURE(rc))
2591 return rc;
2592
2593 /* last page? */
2594 if (cb <= PAGE_SIZE)
2595 {
2596 memcpy(pvDst, pvSrc, cb);
2597 PGMPhysReleasePageMappingLock(pVM, &Lock);
2598 return VINF_SUCCESS;
2599 }
2600
2601 /* copy the entire page and advance */
2602 memcpy(pvDst, pvSrc, PAGE_SIZE);
2603 PGMPhysReleasePageMappingLock(pVM, &Lock);
2604 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2605 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2606 cb -= PAGE_SIZE;
2607 }
2608 /* won't ever get here. */
2609}
2610
2611
2612/**
2613 * Write to guest physical memory referenced by GC pointer and update the PTE.
2614 *
2615 * This function uses the current CR3/CR0/CR4 of the guest and will
2616 * bypass access handlers but will set any dirty and accessed bits in the PTE.
2617 *
2618 * If you don't want to set the dirty bit, use PGMPhysSimpleWriteGCPtr().
2619 *
2620 * @returns VBox status.
2621 * @param pVCpu The VMCPU handle.
2622 * @param GCPtrDst The destination address (GC pointer).
2623 * @param pvSrc The source address.
2624 * @param cb The number of bytes to write.
2625 */
2626VMMDECL(int) PGMPhysSimpleDirtyWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2627{
2628 PVM pVM = pVCpu->CTX_SUFF(pVM);
2629
2630 /*
2631 * Treat the first page as a special case.
2632     * Btw. this is the same code as in PGMPhysSimpleWriteGCPtr except for the PGMGstModifyPage.
2633 */
2634 if (!cb)
2635 return VINF_SUCCESS;
2636
2637 /* map the 1st page */
2638 void *pvDst;
2639 PGMPAGEMAPLOCK Lock;
2640 int rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2641 if (RT_FAILURE(rc))
2642 return rc;
2643
2644 /* optimize for the case where access is completely within the first page. */
2645 size_t cbPage = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2646 if (RT_LIKELY(cb <= cbPage))
2647 {
2648 memcpy(pvDst, pvSrc, cb);
2649 PGMPhysReleasePageMappingLock(pVM, &Lock);
2650 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2651 return VINF_SUCCESS;
2652 }
2653
2654 /* copy to the end of the page. */
2655 memcpy(pvDst, pvSrc, cbPage);
2656 PGMPhysReleasePageMappingLock(pVM, &Lock);
2657 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2658 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbPage);
2659 pvSrc = (const uint8_t *)pvSrc + cbPage;
2660 cb -= cbPage;
2661
2662 /*
2663 * Page by page.
2664 */
2665 for (;;)
2666 {
2667 /* map the page */
2668 rc = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2669 if (RT_FAILURE(rc))
2670 return rc;
2671
2672 /* last page? */
2673 if (cb <= PAGE_SIZE)
2674 {
2675 memcpy(pvDst, pvSrc, cb);
2676 PGMPhysReleasePageMappingLock(pVM, &Lock);
2677 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2678 return VINF_SUCCESS;
2679 }
2680
2681 /* copy the entire page and advance */
2682 memcpy(pvDst, pvSrc, PAGE_SIZE);
2683 PGMPhysReleasePageMappingLock(pVM, &Lock);
2684 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D)); AssertRC(rc);
2685 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + PAGE_SIZE);
2686 pvSrc = (const uint8_t *)pvSrc + PAGE_SIZE;
2687 cb -= PAGE_SIZE;
2688 }
2689 /* won't ever get here. */
2690}
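
/*
 * Note on the PGMGstModifyPage() calls above (not part of the original source):
 * the argument pattern
 *     fFlags = X86_PTE_A | X86_PTE_D,  fMask = ~(uint64_t)(X86_PTE_A | X86_PTE_D)
 * reads as an AND-then-OR update, roughly PteNew = (PteOld & fMask) | fFlags,
 * which forces the accessed and dirty bits on while leaving every other PTE
 * bit untouched.  The exact semantics are defined by PGMGstModifyPage() itself;
 * this is merely the interpretation suggested by how it is invoked here.
 */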
2691
2692
2693/**
2694 * Read from guest physical memory referenced by GC pointer.
2695 *
2696 * This function uses the current CR3/CR0/CR4 of the guest and will
2697 * respect access handlers and set accessed bits.
2698 *
2699 * @returns VBox status.
2700 * @param pVCpu The VMCPU handle.
2701 * @param pvDst The destination address.
2702 * @param GCPtrSrc The source address (GC pointer).
2703 * @param cb The number of bytes to read.
2704 * @thread The vCPU EMT.
2705 */
2706VMMDECL(int) PGMPhysReadGCPtr(PVMCPU pVCpu, void *pvDst, RTGCPTR GCPtrSrc, size_t cb)
2707{
2708 RTGCPHYS GCPhys;
2709 uint64_t fFlags;
2710 int rc;
2711 PVM pVM = pVCpu->CTX_SUFF(pVM);
2712
2713 /*
2714 * Anything to do?
2715 */
2716 if (!cb)
2717 return VINF_SUCCESS;
2718
2719 LogFlow(("PGMPhysReadGCPtr: %RGv %zu\n", GCPtrSrc, cb));
2720
2721 /*
2722 * Optimize reads within a single page.
2723 */
2724 if (((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2725 {
2726 /* Convert virtual to physical address + flags */
2727 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2728 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2729 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2730
2731 /* mark the guest page as accessed. */
2732 if (!(fFlags & X86_PTE_A))
2733 {
2734 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2735 AssertRC(rc);
2736 }
2737
2738 return PGMPhysRead(pVM, GCPhys, pvDst, cb);
2739 }
2740
2741 /*
2742 * Page by page.
2743 */
2744 for (;;)
2745 {
2746 /* Convert virtual to physical address + flags */
2747 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrSrc, &fFlags, &GCPhys);
2748 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrSrc), rc);
2749 GCPhys |= (RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK;
2750
2751 /* mark the guest page as accessed. */
2752 if (!(fFlags & X86_PTE_A))
2753 {
2754 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)(X86_PTE_A));
2755 AssertRC(rc);
2756 }
2757
2758 /* copy */
2759 size_t cbRead = PAGE_SIZE - ((RTGCUINTPTR)GCPtrSrc & PAGE_OFFSET_MASK);
2760 rc = PGMPhysRead(pVM, GCPhys, pvDst, cbRead);
2761 if (cbRead >= cb || RT_FAILURE(rc))
2762 return rc;
2763
2764 /* next */
2765 cb -= cbRead;
2766 pvDst = (uint8_t *)pvDst + cbRead;
2767 GCPtrSrc += cbRead;
2768 }
2769}
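
/*
 * Illustrative usage sketch (not part of the original source): reading through
 * a guest virtual address on the EMT, with access handlers respected and the
 * accessed bit set by the call.  pVCpu and GCPtrDesc are assumptions made for
 * the example.
 */
#if 0
    X86DESC Desc;
    int rc = PGMPhysReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
    if (RT_FAILURE(rc))
        return rc;                              /* e.g. the guest page walk failed */
#endif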
2770
2771
2772/**
2773 * Write to guest physical memory referenced by GC pointer.
2774 *
2775 * This function uses the current CR3/CR0/CR4 of the guest and will
2776 * respect access handlers and set dirty and accessed bits.
2777 *
2778 * @returns VBox status.
2779 * @retval VINF_SUCCESS.
2780 * @retval VERR_PGM_PHYS_WR_HIT_HANDLER in R0 and GC, NEVER in R3.
2781 *
2782 * @param pVCpu The VMCPU handle.
2783 * @param GCPtrDst The destination address (GC pointer).
2784 * @param pvSrc The source address.
2785 * @param cb The number of bytes to write.
2786 */
2787VMMDECL(int) PGMPhysWriteGCPtr(PVMCPU pVCpu, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb)
2788{
2789 RTGCPHYS GCPhys;
2790 uint64_t fFlags;
2791 int rc;
2792 PVM pVM = pVCpu->CTX_SUFF(pVM);
2793
2794 /*
2795 * Anything to do?
2796 */
2797 if (!cb)
2798 return VINF_SUCCESS;
2799
2800 LogFlow(("PGMPhysWriteGCPtr: %RGv %zu\n", GCPtrDst, cb));
2801
2802 /*
2803 * Optimize writes within a single page.
2804 */
2805 if (((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK) + cb <= PAGE_SIZE)
2806 {
2807 /* Convert virtual to physical address + flags */
2808 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2809 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2810 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2811
2812 /* Mention when we ignore X86_PTE_RW... */
2813 if (!(fFlags & X86_PTE_RW))
2814            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2815
2816 /* Mark the guest page as accessed and dirty if necessary. */
2817 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2818 {
2819 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2820 AssertRC(rc);
2821 }
2822
2823 return PGMPhysWrite(pVM, GCPhys, pvSrc, cb);
2824 }
2825
2826 /*
2827 * Page by page.
2828 */
2829 for (;;)
2830 {
2831 /* Convert virtual to physical address + flags */
2832 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, (RTGCUINTPTR)GCPtrDst, &fFlags, &GCPhys);
2833 AssertMsgRCReturn(rc, ("GetPage failed with %Rrc for %RGv\n", rc, GCPtrDst), rc);
2834 GCPhys |= (RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK;
2835
2836 /* Mention when we ignore X86_PTE_RW... */
2837 if (!(fFlags & X86_PTE_RW))
2838            Log(("PGMPhysWriteGCPtr: Writing to RO page %RGv %#x\n", GCPtrDst, cb));
2839
2840 /* Mark the guest page as accessed and dirty if necessary. */
2841 if ((fFlags & (X86_PTE_A | X86_PTE_D)) != (X86_PTE_A | X86_PTE_D))
2842 {
2843 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
2844 AssertRC(rc);
2845 }
2846
2847 /* copy */
2848 size_t cbWrite = PAGE_SIZE - ((RTGCUINTPTR)GCPtrDst & PAGE_OFFSET_MASK);
2849 rc = PGMPhysWrite(pVM, GCPhys, pvSrc, cbWrite);
2850 if (cbWrite >= cb || RT_FAILURE(rc))
2851 return rc;
2852
2853 /* next */
2854 cb -= cbWrite;
2855 pvSrc = (uint8_t *)pvSrc + cbWrite;
2856 GCPtrDst += cbWrite;
2857 }
2858}
2859
2860
2861/**
2862 * Performs a read of guest virtual memory for instruction emulation.
2863 *
2864 * This will check permissions, raise exceptions and update the access bits.
2865 *
2866 * The current implementation will bypass all access handlers. It may later be
2867 * changed to at least respect MMIO.
2868 *
2869 *
2870 * @returns VBox status code suitable to scheduling.
2871 * @retval VINF_SUCCESS if the read was performed successfully.
2872 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
2873 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
2874 *
2875 * @param pVCpu The VMCPU handle.
2876 * @param pCtxCore The context core.
2877 * @param pvDst Where to put the bytes we've read.
2878 * @param GCPtrSrc The source address.
2879 * @param cb The number of bytes to read. Not more than a page.
2880 *
2881 * @remark This function will dynamically map physical pages in GC. This may unmap
2882 * mappings done by the caller. Be careful!
2883 */
2884VMMDECL(int) PGMPhysInterpretedRead(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb)
2885{
2886 PVM pVM = pVCpu->CTX_SUFF(pVM);
2887 Assert(cb <= PAGE_SIZE);
2888
2889/** @todo r=bird: This isn't perfect!
2890 * -# It's not checking for reserved bits being 1.
2891 * -# It's not correctly dealing with the access bit.
2892 * -# It's not respecting MMIO memory or any other access handlers.
2893 */
2894 /*
2895 * 1. Translate virtual to physical. This may fault.
2896 * 2. Map the physical address.
2897 * 3. Do the read operation.
2898 * 4. Set access bits if required.
2899 */
2900 int rc;
2901 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
2902 if (cb <= cb1)
2903 {
2904 /*
2905 * Not crossing pages.
2906 */
2907 RTGCPHYS GCPhys;
2908 uint64_t fFlags;
2909 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
2910 if (RT_SUCCESS(rc))
2911 {
2912 /** @todo we should check reserved bits ... */
2913 void *pvSrc;
2914 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
2915 switch (rc)
2916 {
2917 case VINF_SUCCESS:
2918 Log(("PGMPhysInterpretedRead: pvDst=%p pvSrc=%p cb=%d\n", pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb));
2919 memcpy(pvDst, (uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
2920 break;
2921 case VERR_PGM_PHYS_PAGE_RESERVED:
2922 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2923 memset(pvDst, 0, cb); /** @todo this is wrong, it should be 0xff */
2924 break;
2925 default:
2926 return rc;
2927 }
2928
2929 /** @todo access bit emulation isn't 100% correct. */
2930 if (!(fFlags & X86_PTE_A))
2931 {
2932 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2933 AssertRC(rc);
2934 }
2935 return VINF_SUCCESS;
2936 }
2937 }
2938 else
2939 {
2940 /*
2941 * Crosses pages.
2942 */
2943 size_t cb2 = cb - cb1;
2944 uint64_t fFlags1;
2945 RTGCPHYS GCPhys1;
2946 uint64_t fFlags2;
2947 RTGCPHYS GCPhys2;
2948 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
2949 if (RT_SUCCESS(rc))
2950 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
2951 if (RT_SUCCESS(rc))
2952 {
2953 /** @todo we should check reserved bits ... */
2954 AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
2955 void *pvSrc1;
2956 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
2957 switch (rc)
2958 {
2959 case VINF_SUCCESS:
2960 memcpy(pvDst, (uint8_t *)pvSrc1 + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
2961 break;
2962 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2963 memset(pvDst, 0, cb1); /** @todo this is wrong, it should be 0xff */
2964 break;
2965 default:
2966 return rc;
2967 }
2968
2969 void *pvSrc2;
2970 rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
2971 switch (rc)
2972 {
2973 case VINF_SUCCESS:
2974 memcpy((uint8_t *)pvDst + cb1, pvSrc2, cb2);
2975 break;
2976 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
2977 memset((uint8_t *)pvDst + cb1, 0, cb2); /** @todo this is wrong, it should be 0xff */
2978 break;
2979 default:
2980 return rc;
2981 }
2982
2983 if (!(fFlags1 & X86_PTE_A))
2984 {
2985 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2986 AssertRC(rc);
2987 }
2988 if (!(fFlags2 & X86_PTE_A))
2989 {
2990 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
2991 AssertRC(rc);
2992 }
2993 return VINF_SUCCESS;
2994 }
2995 }
2996
2997 /*
2998 * Raise a #PF.
2999 */
3000 uint32_t uErr;
3001
3002 /* Get the current privilege level. */
3003 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3004 switch (rc)
3005 {
3006 case VINF_SUCCESS:
3007 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3008 break;
3009
3010 case VERR_PAGE_NOT_PRESENT:
3011 case VERR_PAGE_TABLE_NOT_PRESENT:
3012 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3013 break;
3014
3015 default:
3016 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3017 return rc;
3018 }
3019 Log(("PGMPhysInterpretedRead: GCPtrSrc=%RGv cb=%#x -> #PF(%#x)\n", GCPtrSrc, cb, uErr));
3020 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3021}
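
/*
 * Illustrative usage sketch (not part of the original source): how an
 * instruction emulator consumes the scheduling-friendly statuses documented
 * above.  pVCpu, pRegFrame and GCPtrPar1 are assumptions made for the example.
 */
#if 0
    uint16_t u16Port;
    int rc = PGMPhysInterpretedRead(pVCpu, pRegFrame, &u16Port, GCPtrPar1, sizeof(u16Port));
    if (rc != VINF_SUCCESS)
        return rc;              /* VINF_EM_RAW_GUEST_TRAP / VINF_TRPM_XCPT_DISPATCHED and errors
                                   are passed straight back to the EM loop */
    /* ... emulate the instruction using u16Port ... */
#endif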
3022
3023
3024/**
3025 * Performs a read of guest virtual memory for instruction emulation.
3026 *
3027 * This will check permissions, raise exceptions and update the access bits.
3028 *
3029 * The current implementation will bypass all access handlers. It may later be
3030 * changed to at least respect MMIO.
3031 *
3032 *
3033 * @returns VBox status code suitable to scheduling.
3034 * @retval VINF_SUCCESS if the read was performed successfully.
3035 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3036 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3037 *
3038 * @param pVCpu The VMCPU handle.
3039 * @param pCtxCore The context core.
3040 * @param pvDst Where to put the bytes we've read.
3041 * @param GCPtrSrc The source address.
3042 * @param cb The number of bytes to read. Not more than a page.
3043 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3044 *                    an appropriate error status will be returned (no
3045 *                    informational status at all).
3046 *
3047 *
3048 * @remarks Takes the PGM lock.
3049 * @remarks A page fault on the 2nd page of the access will be raised without
3050 * writing the bits on the first page since we're ASSUMING that the
3051 * caller is emulating an instruction access.
3052 * @remarks This function will dynamically map physical pages in GC. This may
3053 * unmap mappings done by the caller. Be careful!
3054 */
3055VMMDECL(int) PGMPhysInterpretedReadNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, void *pvDst, RTGCUINTPTR GCPtrSrc, size_t cb, bool fRaiseTrap)
3056{
3057 PVM pVM = pVCpu->CTX_SUFF(pVM);
3058 Assert(cb <= PAGE_SIZE);
3059
3060 /*
3061 * 1. Translate virtual to physical. This may fault.
3062 * 2. Map the physical address.
3063 * 3. Do the read operation.
3064 * 4. Set access bits if required.
3065 */
3066 int rc;
3067 unsigned cb1 = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
3068 if (cb <= cb1)
3069 {
3070 /*
3071 * Not crossing pages.
3072 */
3073 RTGCPHYS GCPhys;
3074 uint64_t fFlags;
3075 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags, &GCPhys);
3076 if (RT_SUCCESS(rc))
3077 {
3078 if (1) /** @todo we should check reserved bits ... */
3079 {
3080 const void *pvSrc;
3081 PGMPAGEMAPLOCK Lock;
3082 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys, &pvSrc, &Lock);
3083 switch (rc)
3084 {
3085 case VINF_SUCCESS:
3086 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d\n",
3087 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb));
3088 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb);
3089 break;
3090 case VERR_PGM_PHYS_PAGE_RESERVED:
3091 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3092 memset(pvDst, 0xff, cb);
3093 break;
3094 default:
3095 AssertMsgFailed(("%Rrc\n", rc));
3096 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3097 return rc;
3098 }
3099 PGMPhysReleasePageMappingLock(pVM, &Lock);
3100
3101 if (!(fFlags & X86_PTE_A))
3102 {
3103 /** @todo access bit emulation isn't 100% correct. */
3104 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3105 AssertRC(rc);
3106 }
3107 return VINF_SUCCESS;
3108 }
3109 }
3110 }
3111 else
3112 {
3113 /*
3114 * Crosses pages.
3115 */
3116 size_t cb2 = cb - cb1;
3117 uint64_t fFlags1;
3118 RTGCPHYS GCPhys1;
3119 uint64_t fFlags2;
3120 RTGCPHYS GCPhys2;
3121 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc, &fFlags1, &GCPhys1);
3122 if (RT_SUCCESS(rc))
3123 {
3124 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrSrc + cb1, &fFlags2, &GCPhys2);
3125 if (RT_SUCCESS(rc))
3126 {
3127 if (1) /** @todo we should check reserved bits ... */
3128 {
3129 const void *pvSrc;
3130 PGMPAGEMAPLOCK Lock;
3131 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys1, &pvSrc, &Lock);
3132 switch (rc)
3133 {
3134 case VINF_SUCCESS:
3135 Log(("PGMPhysInterpretedReadNoHandlers: pvDst=%p pvSrc=%p (%RGv) cb=%d [2]\n",
3136 pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), GCPtrSrc, cb1));
3137 memcpy(pvDst, (const uint8_t *)pvSrc + (GCPtrSrc & PAGE_OFFSET_MASK), cb1);
3138 PGMPhysReleasePageMappingLock(pVM, &Lock);
3139 break;
3140 case VERR_PGM_PHYS_PAGE_RESERVED:
3141 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3142 memset(pvDst, 0xff, cb1);
3143 break;
3144 default:
3145 AssertMsgFailed(("%Rrc\n", rc));
3146 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3147 return rc;
3148 }
3149
3150 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhys2, &pvSrc, &Lock);
3151 switch (rc)
3152 {
3153 case VINF_SUCCESS:
3154 memcpy((uint8_t *)pvDst + cb1, pvSrc, cb2);
3155 PGMPhysReleasePageMappingLock(pVM, &Lock);
3156 break;
3157 case VERR_PGM_PHYS_PAGE_RESERVED:
3158 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3159 memset((uint8_t *)pvDst + cb1, 0xff, cb2);
3160 break;
3161 default:
3162 AssertMsgFailed(("%Rrc\n", rc));
3163 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3164 return rc;
3165 }
3166
3167 if (!(fFlags1 & X86_PTE_A))
3168 {
3169 rc = PGMGstModifyPage(pVCpu, GCPtrSrc, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3170 AssertRC(rc);
3171 }
3172 if (!(fFlags2 & X86_PTE_A))
3173 {
3174 rc = PGMGstModifyPage(pVCpu, GCPtrSrc + cb1, 1, X86_PTE_A, ~(uint64_t)X86_PTE_A);
3175 AssertRC(rc);
3176 }
3177 return VINF_SUCCESS;
3178 }
3179 /* sort out which page */
3180 }
3181 else
3182 GCPtrSrc += cb1; /* fault on 2nd page */
3183 }
3184 }
3185
3186 /*
3187 * Raise a #PF if we're allowed to do that.
3188 */
3189 /* Calc the error bits. */
3190 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3191 uint32_t uErr;
3192 switch (rc)
3193 {
3194 case VINF_SUCCESS:
3195 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3196 rc = VERR_ACCESS_DENIED;
3197 break;
3198
3199 case VERR_PAGE_NOT_PRESENT:
3200 case VERR_PAGE_TABLE_NOT_PRESENT:
3201 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3202 break;
3203
3204 default:
3205 AssertMsgFailed(("rc=%Rrc GCPtrSrc=%RGv cb=%#x\n", rc, GCPtrSrc, cb));
3206 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3207 return rc;
3208 }
3209 if (fRaiseTrap)
3210 {
3211 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrSrc, cb, uErr));
3212 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrSrc);
3213 }
3214 Log(("PGMPhysInterpretedReadNoHandlers: GCPtrSrc=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrSrc, cb, uErr));
3215 return rc;
3216}
3217
3218
3219/**
3220 * Performs a write to guest virtual memory for instruction emulation.
3221 *
3222 * This will check permissions, raise exceptions and update the dirty and access
3223 * bits.
3224 *
3225 * @returns VBox status code suitable to scheduling.
3226 * @retval VINF_SUCCESS if the write was performed successfully.
3227 * @retval VINF_EM_RAW_GUEST_TRAP if an exception was raised but not dispatched yet.
3228 * @retval VINF_TRPM_XCPT_DISPATCHED if an exception was raised and dispatched.
3229 *
3230 * @param pVCpu The VMCPU handle.
3231 * @param pCtxCore The context core.
3232 * @param GCPtrDst The destination address.
3233 * @param pvSrc What to write.
3234 * @param cb The number of bytes to write. Not more than a page.
3235 * @param fRaiseTrap If set, the trap will be raised as per spec; if clear,
3236 *                    an appropriate error status will be returned (no
3237 *                    informational status at all).
3238 *
3239 * @remarks Takes the PGM lock.
3240 * @remarks A page fault on the 2nd page of the access will be raised without
3241 * writing the bits on the first page since we're ASSUMING that the
3242 * caller is emulating an instruction access.
3243 * @remarks This function will dynamically map physical pages in GC. This may
3244 * unmap mappings done by the caller. Be careful!
3245 */
3246VMMDECL(int) PGMPhysInterpretedWriteNoHandlers(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, const void *pvSrc, size_t cb, bool fRaiseTrap)
3247{
3248 Assert(cb <= PAGE_SIZE);
3249 PVM pVM = pVCpu->CTX_SUFF(pVM);
3250
3251 /*
3252 * 1. Translate virtual to physical. This may fault.
3253 * 2. Map the physical address.
3254 * 3. Do the write operation.
3255 * 4. Set access bits if required.
3256 */
3257 int rc;
3258 unsigned cb1 = PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK);
3259 if (cb <= cb1)
3260 {
3261 /*
3262 * Not crossing pages.
3263 */
3264 RTGCPHYS GCPhys;
3265 uint64_t fFlags;
3266 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags, &GCPhys);
3267 if (RT_SUCCESS(rc))
3268 {
3269 if ( (fFlags & X86_PTE_RW) /** @todo Also check reserved bits. */
3270 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3271 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) ) /** @todo it's 2, right? Check cpl check below as well. */
3272 {
3273 void *pvDst;
3274 PGMPAGEMAPLOCK Lock;
3275 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys, &pvDst, &Lock);
3276 switch (rc)
3277 {
3278 case VINF_SUCCESS:
3279 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3280 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb));
3281 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb);
3282 PGMPhysReleasePageMappingLock(pVM, &Lock);
3283 break;
3284 case VERR_PGM_PHYS_PAGE_RESERVED:
3285 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3286 /* bit bucket */
3287 break;
3288 default:
3289 AssertMsgFailed(("%Rrc\n", rc));
3290 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3291 return rc;
3292 }
3293
3294 if (!(fFlags & (X86_PTE_A | X86_PTE_D)))
3295 {
3296 /** @todo dirty & access bit emulation isn't 100% correct. */
3297 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, X86_PTE_A | X86_PTE_D, ~(uint64_t)(X86_PTE_A | X86_PTE_D));
3298 AssertRC(rc);
3299 }
3300 return VINF_SUCCESS;
3301 }
3302 rc = VERR_ACCESS_DENIED;
3303 }
3304 }
3305 else
3306 {
3307 /*
3308 * Crosses pages.
3309 */
3310 size_t cb2 = cb - cb1;
3311 uint64_t fFlags1;
3312 RTGCPHYS GCPhys1;
3313 uint64_t fFlags2;
3314 RTGCPHYS GCPhys2;
3315 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst, &fFlags1, &GCPhys1);
3316 if (RT_SUCCESS(rc))
3317 {
3318 rc = PGM_GST_PFN(GetPage,pVCpu)(pVCpu, GCPtrDst + cb1, &fFlags2, &GCPhys2);
3319 if (RT_SUCCESS(rc))
3320 {
3321 if ( ( (fFlags1 & X86_PTE_RW) /** @todo Also check reserved bits. */
3322 && (fFlags2 & X86_PTE_RW))
3323 || ( !(CPUMGetGuestCR0(pVCpu) & X86_CR0_WP)
3324 && CPUMGetGuestCPL(pVCpu, pCtxCore) <= 2) )
3325 {
3326 void *pvDst;
3327 PGMPAGEMAPLOCK Lock;
3328 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys1, &pvDst, &Lock);
3329 switch (rc)
3330 {
3331 case VINF_SUCCESS:
3332 Log(("PGMPhysInterpretedWriteNoHandlers: pvDst=%p (%RGv) pvSrc=%p cb=%d\n",
3333 (uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), GCPtrDst, pvSrc, cb1));
3334 memcpy((uint8_t *)pvDst + (GCPtrDst & PAGE_OFFSET_MASK), pvSrc, cb1);
3335 PGMPhysReleasePageMappingLock(pVM, &Lock);
3336 break;
3337 case VERR_PGM_PHYS_PAGE_RESERVED:
3338 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3339 /* bit bucket */
3340 break;
3341 default:
3342 AssertMsgFailed(("%Rrc\n", rc));
3343 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3344 return rc;
3345 }
3346
3347 rc = PGMPhysGCPhys2CCPtr(pVM, GCPhys2, &pvDst, &Lock);
3348 switch (rc)
3349 {
3350 case VINF_SUCCESS:
3351 memcpy(pvDst, (const uint8_t *)pvSrc + cb1, cb2);
3352 PGMPhysReleasePageMappingLock(pVM, &Lock);
3353 break;
3354 case VERR_PGM_PHYS_PAGE_RESERVED:
3355 case VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS:
3356 /* bit bucket */
3357 break;
3358 default:
3359 AssertMsgFailed(("%Rrc\n", rc));
3360 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3361 return rc;
3362 }
3363
3364 if (!(fFlags1 & (X86_PTE_A | X86_PTE_RW)))
3365 {
3366 rc = PGMGstModifyPage(pVCpu, GCPtrDst, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3367 AssertRC(rc);
3368 }
3369 if (!(fFlags2 & (X86_PTE_A | X86_PTE_RW)))
3370 {
3371 rc = PGMGstModifyPage(pVCpu, GCPtrDst + cb1, 1, (X86_PTE_A | X86_PTE_RW), ~(uint64_t)(X86_PTE_A | X86_PTE_RW));
3372 AssertRC(rc);
3373 }
3374 return VINF_SUCCESS;
3375 }
3376 if ((fFlags1 & (X86_PTE_RW)) == X86_PTE_RW)
3377 GCPtrDst += cb1; /* fault on the 2nd page. */
3378 rc = VERR_ACCESS_DENIED;
3379 }
3380 else
3381 GCPtrDst += cb1; /* fault on the 2nd page. */
3382 }
3383 }
3384
3385 /*
3386 * Raise a #PF if we're allowed to do that.
3387 */
3388 /* Calc the error bits. */
3389 uint32_t uErr;
3390 uint32_t cpl = CPUMGetGuestCPL(pVCpu, pCtxCore);
3391 switch (rc)
3392 {
3393 case VINF_SUCCESS:
3394 uErr = (cpl >= 2) ? X86_TRAP_PF_RSVD | X86_TRAP_PF_US : X86_TRAP_PF_RSVD;
3395 rc = VERR_ACCESS_DENIED;
3396 break;
3397
3398 case VERR_ACCESS_DENIED:
3399 uErr = (cpl >= 2) ? X86_TRAP_PF_RW | X86_TRAP_PF_US : X86_TRAP_PF_RW;
3400 break;
3401
3402 case VERR_PAGE_NOT_PRESENT:
3403 case VERR_PAGE_TABLE_NOT_PRESENT:
3404 uErr = (cpl >= 2) ? X86_TRAP_PF_US : 0;
3405 break;
3406
3407 default:
3408 AssertMsgFailed(("rc=%Rrc GCPtrDst=%RGv cb=%#x\n", rc, GCPtrDst, cb));
3409 AssertReturn(RT_FAILURE(rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
3410 return rc;
3411 }
3412 if (fRaiseTrap)
3413 {
3414 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> Raised #PF(%#x)\n", GCPtrDst, cb, uErr));
3415 return TRPMRaiseXcptErrCR2(pVCpu, pCtxCore, X86_XCPT_PF, uErr, GCPtrDst);
3416 }
3417 Log(("PGMPhysInterpretedWriteNoHandlers: GCPtrDst=%RGv cb=%#x -> #PF(%#x) [!raised]\n", GCPtrDst, cb, uErr));
3418 return rc;
3419}
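
/*
 * Illustrative usage sketch (not part of the original source): with
 * fRaiseTrap=true a failing access is converted into a guest #PF on the spot
 * and only the trap status comes back; with fRaiseTrap=false the same failures
 * come back as plain error statuses for the caller to act on.  pVCpu,
 * pRegFrame, GCPtrParam and u64Value are assumptions made for the example.
 */
#if 0
    int rc = PGMPhysInterpretedWriteNoHandlers(pVCpu, pRegFrame, GCPtrParam, &u64Value,
                                               sizeof(u64Value), true /*fRaiseTrap*/);
    if (rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED)
        return rc;                              /* a #PF was raised for the guest; back to the EM loop */
    AssertRCReturn(rc, rc);
    /* the write succeeded; continue emulating the instruction */
#endif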
3420
3421