VirtualBox

source: vbox/trunk/src/VBox/VMM/PGMPhys.cpp @ 6820

Last change on this file since 6820 was 6820, checked in by vboxsync, 17 years ago

Implemented PGMR3PhysRegisterRam (not used). Enforced alignment of aPages in PGMRAMRANGES. Added pszDesc to PGMRAMRANGES (only set by the new code).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 33.6 KB
1/* $Id: PGMPhys.cpp 6820 2008-02-05 21:54:28Z vboxsync $ */
2/** @file
3 * PGM - Page Manager and Monitor, Physical Memory Addressing.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PGM
23#include <VBox/pgm.h>
24#include <VBox/cpum.h>
25#include <VBox/iom.h>
26#include <VBox/sup.h>
27#include <VBox/mm.h>
28#include <VBox/stam.h>
29#include <VBox/rem.h>
30#include <VBox/csam.h>
31#include "PGMInternal.h"
32#include <VBox/vm.h>
33#include <VBox/dbg.h>
34#include <VBox/param.h>
35#include <VBox/err.h>
36#include <iprt/assert.h>
37#include <iprt/alloc.h>
38#include <iprt/asm.h>
39#include <VBox/log.h>
40#include <iprt/thread.h>
41#include <iprt/string.h>
42
43
44
45/*
46 * PGMR3PhysReadByte/Word/Dword
47 * PGMR3PhysWriteByte/Word/Dword
48 */
49/** @todo rename and add U64. */
50
51#define PGMPHYSFN_READNAME PGMR3PhysReadByte
52#define PGMPHYSFN_WRITENAME PGMR3PhysWriteByte
53#define PGMPHYS_DATASIZE 1
54#define PGMPHYS_DATATYPE uint8_t
55#include "PGMPhys.h"
56
57#define PGMPHYSFN_READNAME PGMR3PhysReadWord
58#define PGMPHYSFN_WRITENAME PGMR3PhysWriteWord
59#define PGMPHYS_DATASIZE 2
60#define PGMPHYS_DATATYPE uint16_t
61#include "PGMPhys.h"
62
63#define PGMPHYSFN_READNAME PGMR3PhysReadDword
64#define PGMPHYSFN_WRITENAME PGMR3PhysWriteDword
65#define PGMPHYS_DATASIZE 4
66#define PGMPHYS_DATATYPE uint32_t
67#include "PGMPhys.h"
68
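/*
 * Illustrative note (not taken from PGMPhys.h): each inclusion of the template
 * header above is expected to expand into one read and one write accessor for
 * the configured data size, roughly along these lines for the byte case (the
 * exact signatures are an assumption; the real ones are defined by PGMPhys.h):
 *
 * @code
 *    PGMR3DECL(uint8_t) PGMR3PhysReadByte(PVM pVM, RTGCPHYS GCPhys);
 *    PGMR3DECL(void)    PGMR3PhysWriteByte(PVM pVM, RTGCPHYS GCPhys, uint8_t u8Value);
 * @endcode
 *
 * The template presumably #undef's PGMPHYSFN_READNAME, PGMPHYSFN_WRITENAME,
 * PGMPHYS_DATASIZE and PGMPHYS_DATATYPE at the end so the next instantiation
 * can redefine them for the word and dword variants.
 */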
69
70
71/**
72 * Sets up a RAM range.
73 *
74 * This will check for conflicting registrations, make a resource
75 * reservation for the memory (with GMM), and set up the per-page
76 * tracking structures (PGMPAGE).
77 *
78 * @returns VBox status code.
79 * @param pVM Pointer to the shared VM structure.
80 * @param GCPhys The physical address of the RAM.
81 * @param cb The size of the RAM.
82 * @param pszDesc The description - not copied, so don't free or change it.
83 */
84PGMR3DECL(int) PGMR3PhysRegisterRam(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, const char *pszDesc)
85{
86 /*
87 * Validate input.
88 */
89 Log(("PGMR3PhysRegisterRam: GCPhys=%RGp cb=%RGp pszDesc=%s\n", GCPhys, cb, pszDesc));
90 AssertReturn(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys, VERR_INVALID_PARAMETER);
91 AssertReturn(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb, VERR_INVALID_PARAMETER);
92 AssertReturn(cb > 0, VERR_INVALID_PARAMETER);
93 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
94 AssertMsgReturn(GCPhysLast > GCPhys, ("The range wraps! GCPhys=%RGp cb=%RGp\n", GCPhys, cb), VERR_INVALID_PARAMETER);
95 AssertPtrReturn(pszDesc, VERR_INVALID_POINTER);
96 VM_ASSERT_EMT_RETURN(pVM, VERR_VM_THREAD_NOT_EMT);
97
98 /*
99 * Find range location and check for conflicts.
100 * (We don't lock here because the locking by EMT is only required on update.)
101 */
102 PPGMRAMRANGE pPrev = NULL;
103 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesHC;
104 while (pCur && GCPhysLast >= pCur->GCPhys)
105 {
106 if ( GCPhys <= pCur->GCPhysLast
107 && GCPhysLast >= pCur->GCPhys)
108 AssertLogRelMsgFailedReturn(("%RGp-%RGp (%s) conflicts with existing %RGp-%RGp (%s)\n",
109 GCPhys, GCPhysLast, pszDesc,
110 pCur->GCPhys, pCur->GCPhysLast, pCur->pszDesc),
111 VERR_PGM_RAM_CONFLICT);
112
113 /* next */
114 pPrev = pCur;
115 pCur = pCur->pNextHC;
116 }
117
118 /*
119 * Register it with GMM (the API bitches).
120 */
121 const RTGCPHYS cPages = cb >> PAGE_SHIFT;
122 int rc = MMR3IncreaseBaseReservation(pVM, cPages);
123 if (RT_FAILURE(rc))
124 return rc;
125
126 /*
127 * Allocate RAM range.
128 */
129 const size_t cbRamRange = RT_OFFSETOF(PGMRAMRANGE, aPages[cPages]);
130 PPGMRAMRANGE pNew;
131 rc = MMR3HyperAllocOnceNoRel(pVM, cbRamRange, 0, MM_TAG_PGM_PHYS, (void **)&pNew);
132 AssertLogRelMsgRCReturn(rc, ("cbRamRange=%zd\n", cbRamRange), rc);
133
134 /*
135 * Initialize the range.
136 */
137 pNew->GCPhys = GCPhys;
138 pNew->GCPhysLast = GCPhysLast;
139 pNew->pszDesc = pszDesc;
140 pNew->cb = cb;
141 pNew->fFlags = 0;
142
143 pNew->pavHCChunkHC = NULL;
144 pNew->pavHCChunkGC = 0;
145 pNew->pvHC = NULL;
146
147 RTGCPHYS iPage = cPages;
148 while (iPage-- > 0)
149 {
150 pNew->aPages[iPage].HCPhys = pVM->pgm.s.HCPhysZeroPg;
151 pNew->aPages[iPage].u2State = PGM_PAGE_STATE_ZERO;
152 pNew->aPages[iPage].fWrittenTo = 0;
153 pNew->aPages[iPage].fSomethingElse = 0;
154 pNew->aPages[iPage].idPage = NIL_GMM_PAGEID;
155 pNew->aPages[iPage].u32B = 0;
156 }
157
158 /*
159 * Insert the new RAM range.
160 * (Take the lock just so that we're playing by the rules.)
161 */
162 pgmLock(pVM);
163 pNew->pNextHC = pCur;
164 //pNew->pNextR0 = pCur ? MMHyperCCToR0(pVM, pCur) : NIL_RTR0PTR;
165 pNew->pNextGC = pCur ? MMHyperCCToGC(pVM, pCur) : NIL_RTGCPTR;
166 if (pPrev)
167 {
168 pPrev->pNextHC = pNew;
169 //pPrev->pNextR0 = MMHyperCCToR0(pVM, pNew);
170 pPrev->pNextGC = MMHyperCCToGC(pVM, pNew);
171 }
172 else
173 {
174 pVM->pgm.s.pRamRangesHC = pNew;
175 //pVM->pgm.s.pRamRangesR0 = MMHyperCCToR0(pVM, pNew);
176 pVM->pgm.s.pRamRangesGC = MMHyperCCToGC(pVM, pNew);
177 }
178 pgmUnlock(pVM);
179
180 return VINF_SUCCESS;
181}
182
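/*
 * Illustrative usage sketch for PGMR3PhysRegisterRam (hypothetical caller and
 * values, not taken from this file; the commit note says the API is not used
 * yet).  The description string is not copied, so it must outlive the range.
 *
 * @code
 *    static const char s_szDesc[] = "Base RAM";              // static storage: PGM keeps the pointer
 *    int rc = PGMR3PhysRegisterRam(pVM, 0 /*GCPhys*/, 128 * _1M /*cb*/, s_szDesc);
 *    if (RT_FAILURE(rc))
 *        return rc;                                          // e.g. VERR_PGM_RAM_CONFLICT
 * @endcode
 */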
183
184
185/**
186 * Interface that the MMR3RamRegister(), MMR3RomRegister() and MMIO handler
187 * registration APIs call to inform PGM about memory registrations.
188 *
189 * It registers the physical memory range with PGM. MM is responsible
190 * for the top-level things - allocation and locking - while PGM takes
191 * care of all the details and implements the physical address space virtualization.
192 *
193 * @returns VBox status.
194 * @param pVM The VM handle.
195 * @param pvRam HC virtual address of the RAM range. (page aligned)
196 * @param GCPhys GC physical address of the RAM range. (page aligned)
197 * @param cb Size of the RAM range. (page aligned)
198 * @param fFlags Flags, MM_RAM_*.
199 * @param paPages Pointer to an array of physical page descriptors.
200 * @param pszDesc Description string.
201 */
202PGMR3DECL(int) PGMR3PhysRegister(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
203{
204 /*
205 * Validate input.
206 * (Not so important because callers are only MMR3PhysRegister()
207 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
208 */
209 Log(("PGMR3PhysRegister %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
210
211 Assert((fFlags & (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_DYNAMIC_ALLOC)) || paPages);
212 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !paPages);*/
213 Assert((fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO)) || (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC) || pvRam);
214 /*Assert(!(fFlags & MM_RAM_FLAGS_RESERVED) || !pvRam);*/
215 Assert(!(fFlags & ~0xfff));
216 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
217 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
218 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
219 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
220 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
221 if (GCPhysLast < GCPhys)
222 {
223 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
224 return VERR_INVALID_PARAMETER;
225 }
226
227 /*
228 * Find range location and check for conflicts.
229 */
230 PPGMRAMRANGE pPrev = NULL;
231 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesHC;
232 while (pCur)
233 {
234 if (GCPhys <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
235 {
236 AssertMsgFailed(("Conflict! This cannot happen!\n"));
237 return VERR_PGM_RAM_CONFLICT;
238 }
239 if (GCPhysLast < pCur->GCPhys)
240 break;
241
242 /* next */
243 pPrev = pCur;
244 pCur = pCur->pNextHC;
245 }
246
247 /*
248 * Allocate RAM range.
249 * Small ranges are allocated from the heap, big ones have separate mappings.
250 */
251 size_t cbRam = RT_OFFSETOF(PGMRAMRANGE, aPages[cb >> PAGE_SHIFT]);
252 PPGMRAMRANGE pNew;
253 RTGCPTR GCPtrNew;
254 int rc = VERR_NO_MEMORY;
255 if (cbRam > PAGE_SIZE / 2)
256 { /* large */
257 cbRam = RT_ALIGN_Z(cbRam, PAGE_SIZE);
258 rc = SUPPageAlloc(cbRam >> PAGE_SHIFT, (void **)&pNew);
259 if (VBOX_SUCCESS(rc))
260 {
261 rc = MMR3HyperMapHCRam(pVM, pNew, cbRam, true, pszDesc, &GCPtrNew);
262 if (VBOX_SUCCESS(rc))
263 {
264 Assert(MMHyperHC2GC(pVM, pNew) == GCPtrNew);
265 rc = MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
266 }
267 else
268 {
269 AssertMsgFailed(("MMR3HyperMapHCRam(,,%#x,,,) -> %Vrc\n", cbRam, rc));
270 SUPPageFree(pNew, cbRam >> PAGE_SHIFT);
271 }
272 }
273 else
274 AssertMsgFailed(("SUPPageAlloc(%#x,,) -> %Vrc\n", cbRam >> PAGE_SHIFT, rc));
275
276 }
277/** @todo Make VGA and VMMDev register their memory at init time before the hma size is fixated. */
278 if (RT_FAILURE(rc))
279 { /* small + fallback (vga) */
280 rc = MMHyperAlloc(pVM, cbRam, 16, MM_TAG_PGM, (void **)&pNew);
281 if (VBOX_SUCCESS(rc))
282 GCPtrNew = MMHyperHC2GC(pVM, pNew);
283 else
284 AssertMsgFailed(("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, cb));
285 }
286 if (VBOX_SUCCESS(rc))
287 {
288 /*
289 * Initialize the range.
290 */
291 pNew->pvHC = pvRam;
292 pNew->GCPhys = GCPhys;
293 pNew->GCPhysLast = GCPhysLast;
294 pNew->cb = cb;
295 pNew->fFlags = fFlags;
296 pNew->pavHCChunkHC = NULL;
297 pNew->pavHCChunkGC = 0;
298
299 unsigned iPage = cb >> PAGE_SHIFT;
300 if (paPages)
301 {
302 while (iPage-- > 0)
303 {
304 pNew->aPages[iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
305 pNew->aPages[iPage].u2State = PGM_PAGE_STATE_ALLOCATED;
306 pNew->aPages[iPage].fWrittenTo = 0;
307 pNew->aPages[iPage].fSomethingElse = 0;
308 pNew->aPages[iPage].idPage = 0;
309 pNew->aPages[iPage].u32B = 0;
310 }
311 }
312 else if (fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC)
313 {
314 /* Allocate memory for chunk to HC ptr lookup array. */
315 rc = MMHyperAlloc(pVM, (cb >> PGM_DYNAMIC_CHUNK_SHIFT) * sizeof(void *), 16, MM_TAG_PGM, (void **)&pNew->pavHCChunkHC);
316 AssertMsgReturn(rc == VINF_SUCCESS, ("MMHyperAlloc(,%#x,,,) -> %Vrc\n", cbRam, rc), rc);
317
318 pNew->pavHCChunkGC = MMHyperHC2GC(pVM, pNew->pavHCChunkHC);
319 Assert(pNew->pavHCChunkGC);
320
321 /* Physical memory will be allocated on demand. */
322 while (iPage-- > 0)
323 {
324 pNew->aPages[iPage].HCPhys = fFlags; /** @todo PAGE FLAGS */
325 pNew->aPages[iPage].u2State = PGM_PAGE_STATE_ZERO;
326 pNew->aPages[iPage].fWrittenTo = 0;
327 pNew->aPages[iPage].fSomethingElse = 0;
328 pNew->aPages[iPage].idPage = 0;
329 pNew->aPages[iPage].u32B = 0;
330 }
331 }
332 else
333 {
334 Assert(fFlags == (MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_MMIO));
335 RTHCPHYS HCPhysDummyPage = (MMR3PageDummyHCPhys(pVM) & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
336 while (iPage-- > 0)
337 {
338 pNew->aPages[iPage].HCPhys = HCPhysDummyPage; /** @todo PAGE FLAGS */
339 pNew->aPages[iPage].u2State = PGM_PAGE_STATE_ZERO;
340 pNew->aPages[iPage].fWrittenTo = 0;
341 pNew->aPages[iPage].fSomethingElse = 0;
342 pNew->aPages[iPage].idPage = 0;
343 pNew->aPages[iPage].u32B = 0;
344 }
345 }
346
347 /*
348 * Insert the new RAM range.
349 */
350 pgmLock(pVM);
351 pNew->pNextHC = pCur;
352 pNew->pNextGC = pCur ? MMHyperHC2GC(pVM, pCur) : 0;
353 if (pPrev)
354 {
355 pPrev->pNextHC = pNew;
356 pPrev->pNextGC = GCPtrNew;
357 }
358 else
359 {
360 pVM->pgm.s.pRamRangesHC = pNew;
361 pVM->pgm.s.pRamRangesGC = GCPtrNew;
362 }
363 pgmUnlock(pVM);
364 }
365 return rc;
366}
367
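/*
 * Sizing sketch for the heap-vs-separate-mapping decision above (illustrative
 * arithmetic; assumes roughly 16 bytes per aPages[] entry, the real size may
 * differ):
 *
 *    64 KB range  ->    16 pages -> RT_OFFSETOF(PGMRAMRANGE, aPages[16]) is a
 *                                   few hundred bytes, i.e. <= PAGE_SIZE / 2,
 *                                   so the range comes from MMHyperAlloc.
 *    64 MB range  -> 16384 pages -> about 256 KB of per-page tracking, well
 *                                   above PAGE_SIZE / 2, so it gets its own
 *                                   SUPPageAlloc block mapped via
 *                                   MMR3HyperMapHCRam.
 */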
368#ifndef VBOX_WITH_NEW_PHYS_CODE
369
370/**
371 * Register a chunk of the physical memory range with PGM. MM is responsible
372 * for the top-level things - allocation and locking - while PGM takes
373 * care of all the details and implements the physical address space virtualization.
374 *
375 *
376 * @returns VBox status.
377 * @param pVM The VM handle.
378 * @param pvRam HC virtual address of the RAM range. (page aligned)
379 * @param GCPhys GC physical address of the RAM range. (page aligned)
380 * @param cb Size of the RAM range. (page aligned)
381 * @param fFlags Flags, MM_RAM_*.
382 * @param paPages Pointer to an array of physical page descriptors.
383 * @param pszDesc Description string.
384 */
385PGMR3DECL(int) PGMR3PhysRegisterChunk(PVM pVM, void *pvRam, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, const SUPPAGE *paPages, const char *pszDesc)
386{
387 NOREF(pszDesc);
388
389 /*
390 * Validate input.
391 * (Not so important because callers are only MMR3PhysRegister()
392 * and PGMR3HandlerPhysicalRegisterEx(), but anyway...)
393 */
394 Log(("PGMR3PhysRegisterChunk %08X %x bytes flags %x %s\n", GCPhys, cb, fFlags, pszDesc));
395
396 Assert(paPages);
397 Assert(pvRam);
398 Assert(!(fFlags & ~0xfff));
399 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
400 Assert(RT_ALIGN_P(pvRam, PAGE_SIZE) == pvRam);
401 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2 | MM_RAM_FLAGS_DYNAMIC_ALLOC)));
402 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
403 Assert(VM_IS_EMT(pVM));
404 Assert(!(GCPhys & PGM_DYNAMIC_CHUNK_OFFSET_MASK));
405 Assert(cb == PGM_DYNAMIC_CHUNK_SIZE);
406
407 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
408 if (GCPhysLast < GCPhys)
409 {
410 AssertMsgFailed(("The range wraps! GCPhys=%VGp cb=%#x\n", GCPhys, cb));
411 return VERR_INVALID_PARAMETER;
412 }
413
414 /*
415 * Find existing range location.
416 */
417 PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
418 while (pRam)
419 {
420 RTGCPHYS off = GCPhys - pRam->GCPhys;
421 if ( off < pRam->cb
422 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
423 break;
424
425 pRam = CTXSUFF(pRam->pNext);
426 }
427 AssertReturn(pRam, VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS);
428
429 unsigned off = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
430 unsigned iPage = cb >> PAGE_SHIFT;
431 if (paPages)
432 {
433 while (iPage-- > 0)
434 pRam->aPages[off + iPage].HCPhys = (paPages[iPage].Phys & X86_PTE_PAE_PG_MASK) | fFlags; /** @todo PAGE FLAGS */
435 }
436 off >>= (PGM_DYNAMIC_CHUNK_SHIFT - PAGE_SHIFT);
437 pRam->pavHCChunkHC[off] = pvRam;
438
439 /* Notify the recompiler. */
440 REMR3NotifyPhysRamChunkRegister(pVM, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, (RTHCUINTPTR)pvRam, fFlags);
441
442 return VINF_SUCCESS;
443}
444
445
446/**
447 * Allocate missing physical pages for an existing guest RAM range.
448 *
449 * @returns VBox status.
450 * @param pVM The VM handle.
451 * @param GCPhys GC physical address of the RAM range. (page aligned)
452 */
453PGMR3DECL(int) PGM3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
454{
455 /*
456 * Walk range list.
457 */
458 pgmLock(pVM);
459
460 PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
461 while (pRam)
462 {
463 RTGCPHYS off = GCPhys - pRam->GCPhys;
464 if ( off < pRam->cb
465 && (pRam->fFlags & MM_RAM_FLAGS_DYNAMIC_ALLOC))
466 {
467 bool fRangeExists = false;
468 unsigned off = (GCPhys - pRam->GCPhys) >> PGM_DYNAMIC_CHUNK_SHIFT;
469
470 /** @note A request made from another thread may end up in EMT after somebody else has already allocated the range. */
471 if (pRam->pavHCChunkHC[off])
472 fRangeExists = true;
473
474 pgmUnlock(pVM);
475 if (fRangeExists)
476 return VINF_SUCCESS;
477 return pgmr3PhysGrowRange(pVM, GCPhys);
478 }
479
480 pRam = CTXSUFF(pRam->pNext);
481 }
482 pgmUnlock(pVM);
483 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
484}
485
486
487/**
488 * Allocate missing physical pages for an existing guest RAM range.
489 *
490 * @returns VBox status.
491 * @param pVM The VM handle.
493 * @param GCPhys GC physical address of the RAM range. (page aligned)
494 */
495int pgmr3PhysGrowRange(PVM pVM, RTGCPHYS GCPhys)
496{
497 void *pvRam;
498 int rc;
499
500 /* We must execute this function in the EMT thread, otherwise we'll run into problems. */
501 if (!VM_IS_EMT(pVM))
502 {
503 PVMREQ pReq;
504
505 AssertMsg(!PDMCritSectIsOwner(&pVM->pgm.s.CritSect), ("We own the PGM lock -> deadlock danger!!\n"));
506
507 rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT, (PFNRT)PGM3PhysGrowRange, 2, pVM, GCPhys);
508 if (VBOX_SUCCESS(rc))
509 {
510 rc = pReq->iStatus;
511 VMR3ReqFree(pReq);
512 }
513 return rc;
514 }
515
516 /* Round down to chunk boundary */
517 GCPhys = GCPhys & PGM_DYNAMIC_CHUNK_BASE_MASK;
518
519 STAM_COUNTER_INC(&pVM->pgm.s.StatDynRamGrow);
520 STAM_COUNTER_ADD(&pVM->pgm.s.StatDynRamTotal, PGM_DYNAMIC_CHUNK_SIZE/(1024*1024));
521
522 Log(("pgmr3PhysGrowRange: allocate chunk of size 0x%X at %VGp\n", PGM_DYNAMIC_CHUNK_SIZE, GCPhys));
523
524 unsigned cPages = PGM_DYNAMIC_CHUNK_SIZE >> PAGE_SHIFT;
525
526 for (;;)
527 {
528 rc = SUPPageAlloc(cPages, &pvRam);
529 if (VBOX_SUCCESS(rc))
530 {
531
532 rc = MMR3PhysRegisterEx(pVM, pvRam, GCPhys, PGM_DYNAMIC_CHUNK_SIZE, 0, MM_PHYS_TYPE_DYNALLOC_CHUNK, "Main Memory");
533 if (VBOX_SUCCESS(rc))
534 return rc;
535
536 SUPPageFree(pvRam, cPages);
537 }
538
539 VMSTATE enmVMState = VMR3GetState(pVM);
540 if (enmVMState != VMSTATE_RUNNING)
541 {
542 AssertMsgFailed(("Out of memory while trying to allocate a guest RAM chunk at %VGp!\n", GCPhys));
543 LogRel(("PGM: Out of memory while trying to allocate a guest RAM chunk at %VGp (VMstate=%s)!\n", GCPhys, VMR3GetStateName(enmVMState)));
544 return rc;
545 }
546
547 LogRel(("pgmr3PhysGrowRange: out of memory. pause until the user resumes execution.\n"));
548
549 /* Pause first, then inform Main. */
550 rc = VMR3SuspendNoSave(pVM);
551 AssertRC(rc);
552
553 VMSetRuntimeError(pVM, false, "HostMemoryLow", "Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM.");
554
555 /* Wait for resume event; will only return in that case. If the VM is stopped, the EMT thread will be destroyed. */
556 rc = VMR3WaitForResume(pVM);
557
558 /* Retry */
559 LogRel(("pgmr3PhysGrowRange: VM execution resumed -> retry.\n"));
560 }
561}
562
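/*
 * Illustrative sketch of the EMT forwarding pattern used by pgmr3PhysGrowRange
 * above (hypothetical helper name; the calls mirror the ones in that function):
 * a ring-3 thread that is not the EMT queues the work with VMR3ReqCall and
 * picks the status out of the completed request.
 *
 * @code
 *    static int doOnEmtExample(PVM pVM, RTGCPHYS GCPhys)
 *    {
 *        if (!VM_IS_EMT(pVM))
 *        {
 *            PVMREQ pReq;
 *            int rc = VMR3ReqCall(pVM, &pReq, RT_INDEFINITE_WAIT,
 *                                 (PFNRT)doOnEmtExample, 2, pVM, GCPhys);
 *            if (VBOX_SUCCESS(rc))
 *            {
 *                rc = pReq->iStatus;       // status of the call made on the EMT
 *                VMR3ReqFree(pReq);
 *            }
 *            return rc;
 *        }
 *        // ...the actual work, now guaranteed to run on the EMT...
 *        return VINF_SUCCESS;
 *    }
 * @endcode
 */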
563#endif /* !VBOX_WITH_NEW_PHYS_CODE */
564
565/**
566 * Interface for MMIO handler relocation calls.
567 *
568 * It relocates an existing physical memory range with PGM.
569 *
570 * @returns VBox status.
571 * @param pVM The VM handle.
572 * @param GCPhysOld Previous GC physical address of the RAM range. (page aligned)
573 * @param GCPhysNew New GC physical address of the RAM range. (page aligned)
574 * @param cb Size of the RAM range. (page aligned)
575 */
576PGMR3DECL(int) PGMR3PhysRelocate(PVM pVM, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, size_t cb)
577{
578 /*
579 * Validate input.
580 * (Not so important because callers are only MMR3PhysRelocate(),
581 * but anyway...)
582 */
583 Log(("PGMR3PhysRelocate Old %VGp New %VGp (%#x bytes)\n", GCPhysOld, GCPhysNew, cb));
584
585 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
586 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
587 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
588 RTGCPHYS GCPhysLast;
589 GCPhysLast = GCPhysOld + (cb - 1);
590 if (GCPhysLast < GCPhysOld)
591 {
592 AssertMsgFailed(("The old range wraps! GCPhys=%VGp cb=%#x\n", GCPhysOld, cb));
593 return VERR_INVALID_PARAMETER;
594 }
595 GCPhysLast = GCPhysNew + (cb - 1);
596 if (GCPhysLast < GCPhysNew)
597 {
598 AssertMsgFailed(("The new range wraps! GCPhys=%VGp cb=%#x\n", GCPhysNew, cb));
599 return VERR_INVALID_PARAMETER;
600 }
601
602 /*
603 * Find and remove old range location.
604 */
605 pgmLock(pVM);
606 PPGMRAMRANGE pPrev = NULL;
607 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesHC;
608 while (pCur)
609 {
610 if (pCur->GCPhys == GCPhysOld && pCur->cb == cb)
611 break;
612
613 /* next */
614 pPrev = pCur;
615 pCur = pCur->pNextHC;
616 }
617 if (pPrev)
618 {
619 pPrev->pNextHC = pCur->pNextHC;
620 pPrev->pNextGC = pCur->pNextGC;
621 }
622 else
623 {
624 pVM->pgm.s.pRamRangesHC = pCur->pNextHC;
625 pVM->pgm.s.pRamRangesGC = pCur->pNextGC;
626 }
627
628 /*
629 * Update the range.
630 */
631 pCur->GCPhys = GCPhysNew;
632 pCur->GCPhysLast= GCPhysLast;
633 PPGMRAMRANGE pNew = pCur;
634
635 /*
636 * Find range location and check for conflicts.
637 */
638 pPrev = NULL;
639 pCur = pVM->pgm.s.pRamRangesHC;
640 while (pCur)
641 {
642 if (GCPhysNew <= pCur->GCPhysLast && GCPhysLast >= pCur->GCPhys)
643 {
644 AssertMsgFailed(("Conflict! This cannot happen!\n"));
645 pgmUnlock(pVM);
646 return VERR_PGM_RAM_CONFLICT;
647 }
648 if (GCPhysLast < pCur->GCPhys)
649 break;
650
651 /* next */
652 pPrev = pCur;
653 pCur = pCur->pNextHC;
654 }
655
656 /*
657 * Reinsert the RAM range.
658 */
659 pNew->pNextHC = pCur;
660 pNew->pNextGC = pCur ? MMHyperHC2GC(pVM, pCur) : 0;
661 if (pPrev)
662 {
663 pPrev->pNextHC = pNew;
664 pPrev->pNextGC = MMHyperHC2GC(pVM, pNew);
665 }
666 else
667 {
668 pVM->pgm.s.pRamRangesHC = pNew;
669 pVM->pgm.s.pRamRangesGC = MMHyperHC2GC(pVM, pNew);
670 }
671
672 pgmUnlock(pVM);
673 return VINF_SUCCESS;
674}
675
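/*
 * Illustrative usage sketch for PGMR3PhysRelocate (hypothetical addresses):
 * moving a page-aligned 4 KB MMIO range when the guest reprograms its base.
 *
 * @code
 *    int rc = PGMR3PhysRelocate(pVM, 0xe0000000 /*old*/, 0xe0100000 /*new*/, 0x1000 /*cb*/);
 *    AssertRC(rc);     // VERR_PGM_RAM_CONFLICT if the new location overlaps another range
 * @endcode
 */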
676
677/**
678 * Interface that MMR3RomRegister() and MMR3PhysReserve() call to update the
679 * flags of existing RAM ranges.
680 *
681 * @returns VBox status.
682 * @param pVM The VM handle.
683 * @param GCPhys GC physical address of the RAM range. (page aligned)
684 * @param cb Size of the RAM range. (page aligned)
685 * @param fFlags The OR flags, MM_RAM_* \#defines.
686 * @param fMask The AND mask for the flags.
687 */
688PGMR3DECL(int) PGMR3PhysSetFlags(PVM pVM, RTGCPHYS GCPhys, size_t cb, unsigned fFlags, unsigned fMask)
689{
690 Log(("PGMR3PhysSetFlags %08X %x %x %x\n", GCPhys, cb, fFlags, fMask));
691
692 /*
693 * Validate input.
694 * (Not so important because caller is always MMR3RomRegister() and MMR3PhysReserve(), but anyway...)
695 */
696 Assert(!(fFlags & ~(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)));
697 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb && cb);
698 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
699 RTGCPHYS GCPhysLast = GCPhys + (cb - 1);
700 AssertReturn(GCPhysLast > GCPhys, VERR_INVALID_PARAMETER);
701
702 /*
703 * Lookup the range.
704 */
705 PPGMRAMRANGE pRam = CTXSUFF(pVM->pgm.s.pRamRanges);
706 while (pRam && GCPhys > pRam->GCPhysLast)
707 pRam = CTXSUFF(pRam->pNext);
708 if ( !pRam
709 || GCPhys > pRam->GCPhysLast
710 || GCPhysLast < pRam->GCPhys)
711 {
712 AssertMsgFailed(("No RAM range for %VGp-%VGp\n", GCPhys, GCPhysLast));
713 return VERR_INVALID_PARAMETER;
714 }
715
716 /*
717 * Update the requested flags.
718 */
719 RTHCPHYS fFullMask = ~(RTHCPHYS)(MM_RAM_FLAGS_RESERVED | MM_RAM_FLAGS_ROM | MM_RAM_FLAGS_MMIO | MM_RAM_FLAGS_MMIO2)
720 | fMask;
721 unsigned iPageEnd = (GCPhysLast - pRam->GCPhys + 1) >> PAGE_SHIFT;
722 unsigned iPage = (GCPhys - pRam->GCPhys) >> PAGE_SHIFT;
723 for ( ; iPage < iPageEnd; iPage++)
724 pRam->aPages[iPage].HCPhys = (pRam->aPages[iPage].HCPhys & fFullMask) | fFlags; /** @todo PAGE FLAGS */
725
726 return VINF_SUCCESS;
727}
728
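/*
 * Worked example for the flag update above (illustrative values): with
 * fFlags = MM_RAM_FLAGS_ROM and fMask = ~MM_RAM_FLAGS_RESERVED, the loop
 * computes per page
 *
 *    fFullMask = ~(RESERVED | ROM | MMIO | MMIO2) | fMask
 *    HCPhys    = (HCPhys & fFullMask) | fFlags
 *
 * so the physical address bits and any flags covered by fMask survive,
 * RESERVED is stripped because it is excluded from fMask, and ROM is OR'ed in.
 */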
729
730/**
731 * Sets the Address Gate 20 state.
732 *
733 * @param pVM VM handle.
734 * @param fEnable True if the gate should be enabled.
735 * False if the gate should be disabled.
736 */
737PGMDECL(void) PGMR3PhysSetA20(PVM pVM, bool fEnable)
738{
739 LogFlow(("PGMR3PhysSetA20 %d (was %d)\n", fEnable, pVM->pgm.s.fA20Enabled));
740 if (pVM->pgm.s.fA20Enabled != (RTUINT)fEnable)
741 {
742 pVM->pgm.s.fA20Enabled = fEnable;
743 pVM->pgm.s.GCPhysA20Mask = ~(RTGCPHYS)(!fEnable << 20);
744 REMR3A20Set(pVM, fEnable);
745 }
746}
747
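/*
 * Worked example of the A20 mask computation above:
 *
 *    fEnable = true  -> !fEnable == 0 -> GCPhysA20Mask = ~(RTGCPHYS)0,
 *                       i.e. all bits set, addresses pass through unchanged.
 *    fEnable = false -> !fEnable == 1 -> GCPhysA20Mask = ~(RTGCPHYS)0x100000,
 *                       i.e. bit 20 cleared, so guest physical addresses wrap
 *                       at 1 MB just like on real hardware with A20 disabled.
 */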
748
749/**
750 * Tree enumeration callback for dealing with age rollover.
751 * It will perform a simple compression of the current age.
752 */
753static DECLCALLBACK(int) pgmR3PhysChunkAgeingRolloverCallback(PAVLU32NODECORE pNode, void *pvUser)
754{
755 /* Age compression - ASSUMES iNow == 4. */
756 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
757 if (pChunk->iAge >= UINT32_C(0xffffff00))
758 pChunk->iAge = 3;
759 else if (pChunk->iAge >= UINT32_C(0xfffff000))
760 pChunk->iAge = 2;
761 else if (pChunk->iAge)
762 pChunk->iAge = 1;
763 else /* iAge = 0 */
764 pChunk->iAge = 4;
765
766 /* reinsert */
767 PVM pVM = (PVM)pvUser;
768 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
769 pChunk->AgeCore.Key = pChunk->iAge;
770 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
771 return 0;
772}
773
774
775/**
776 * Tree enumeration callback that updates the chunks that have
777 * been used since the last ageing pass.
778 */
779static DECLCALLBACK(int) pgmR3PhysChunkAgeingCallback(PAVLU32NODECORE pNode, void *pvUser)
780{
781 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)pNode;
782 if (!pChunk->iAge)
783 {
784 PVM pVM = (PVM)pvUser;
785 RTAvllU32Remove(&pVM->pgm.s.ChunkR3Map.pAgeTree, pChunk->AgeCore.Key);
786 pChunk->AgeCore.Key = pChunk->iAge = pVM->pgm.s.ChunkR3Map.iNow;
787 RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
788 }
789
790 return 0;
791}
792
793
794/**
795 * Performs ageing of the ring-3 chunk mappings.
796 *
797 * @param pVM The VM handle.
798 */
799PGMR3DECL(void) PGMR3PhysChunkAgeing(PVM pVM)
800{
801 pVM->pgm.s.ChunkR3Map.AgeingCountdown = RT_MIN(pVM->pgm.s.ChunkR3Map.cMax / 4, 1024);
802 pVM->pgm.s.ChunkR3Map.iNow++;
803 if (pVM->pgm.s.ChunkR3Map.iNow == 0)
804 {
805 pVM->pgm.s.ChunkR3Map.iNow = 4;
806 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingRolloverCallback, pVM);
807 }
808 else
809 RTAvlU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pTree, true /*fFromLeft*/, pgmR3PhysChunkAgeingCallback, pVM);
810}
811
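/*
 * Descriptive note on the ageing scheme above: iNow acts as a coarse clock.
 * Chunks touched since the previous pass have iAge == 0 and are stamped with
 * the current iNow by pgmR3PhysChunkAgeingCallback; untouched chunks keep
 * their older stamp, so the left-most nodes of the age tree are the least
 * recently used and become the preferred unmap candidates.  When iNow wraps
 * it restarts at 4 and the rollover callback compresses existing stamps into
 * 1..4, which is assumed to preserve enough relative ordering for eviction.
 */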
812
813/**
814 * The structure passed in the pvUser argument of pgmR3PhysChunkUnmapCandidateCallback().
815 */
816typedef struct PGMR3PHYSCHUNKUNMAPCB
817{
818 PVM pVM; /**< The VM handle. */
819 PPGMCHUNKR3MAP pChunk; /**< The chunk to unmap. */
820} PGMR3PHYSCHUNKUNMAPCB, *PPGMR3PHYSCHUNKUNMAPCB;
821
822
823/**
824 * Callback used to find the mapping that's been unused for
825 * the longest time.
826 */
827static DECLCALLBACK(int) pgmR3PhysChunkUnmapCandidateCallback(PAVLLU32NODECORE pNode, void *pvUser)
828{
829 do
830 {
831 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)((uint8_t *)pNode - RT_OFFSETOF(PGMCHUNKR3MAP, AgeCore));
832 if ( pChunk->iAge
833 && !pChunk->cRefs)
834 {
835 /*
836 * Check that it's not in any of the TLBs.
837 */
838 PVM pVM = ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pVM;
839 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
840 if (pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk == pChunk)
841 {
842 pChunk = NULL;
843 break;
844 }
845 if (pChunk)
846 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.PhysTlbHC.aEntries); i++)
847 if (pVM->pgm.s.PhysTlbHC.aEntries[i].pMap == pChunk)
848 {
849 pChunk = NULL;
850 break;
851 }
852 if (pChunk)
853 {
854 ((PPGMR3PHYSCHUNKUNMAPCB)pvUser)->pChunk = pChunk;
855 return 1; /* done */
856 }
857 }
858
859 /* next with the same age - this version of the AVL API doesn't enumerate the list, so we have to do it. */
860 pNode = pNode->pList;
861 } while (pNode);
862 return 0;
863}
864
865
866/**
867 * Finds a good candidate for unmapping when the ring-3 mapping cache is full.
868 *
869 * The candidate will not be part of any TLBs, so no need to flush
870 * anything afterwards.
871 *
872 * @returns Chunk id.
873 * @param pVM The VM handle.
874 */
875static int32_t pgmR3PhysChunkFindUnmapCandidate(PVM pVM)
876{
877 /*
878 * Do tree ageing first?
879 */
880 if (pVM->pgm.s.ChunkR3Map.AgeingCountdown-- == 0)
881 PGMR3PhysChunkAgeing(pVM);
882
883 /*
884 * Enumerate the age tree starting with the left most node.
885 */
886 PGMR3PHYSCHUNKUNMAPCB Args;
887 Args.pVM = pVM;
888 Args.pChunk = NULL;
889 if (RTAvllU32DoWithAll(&pVM->pgm.s.ChunkR3Map.pAgeTree, true /*fFromLeft*/, pgmR3PhysChunkUnmapCandidateCallback, &Args))
890 return Args.pChunk->Core.Key;
891 return INT32_MAX;
892}
893
894
895/**
896 * Maps the given chunk into the ring-3 mapping cache.
897 *
898 * This will call ring-0.
899 *
900 * @returns VBox status code.
901 * @param pVM The VM handle.
902 * @param idChunk The chunk in question.
903 * @param ppChunk Where to store the chunk tracking structure.
904 *
905 * @remarks Called from within the PGM critical section.
906 */
907int pgmR3PhysChunkMap(PVM pVM, uint32_t idChunk, PPPGMCHUNKR3MAP ppChunk)
908{
909 int rc;
910 /*
911 * Allocate a new tracking structure first.
912 */
913#if 0 /* for later when we've got a separate mapping method for ring-0. */
914 PPGMCHUNKR3MAP pChunk = (PPGMCHUNKR3MAP)MMR3HeapAlloc(pVM, MM_TAG_PGM_CHUNK_MAPPING, sizeof(*pChunk));
915 AssertReturn(pChunk, VERR_NO_MEMORY);
916#else
917 PPGMCHUNKR3MAP pChunk;
918 rc = MMHyperAlloc(pVM, sizeof(*pChunk), 0, MM_TAG_PGM_CHUNK_MAPPING, (void **)&pChunk);
919 AssertRCReturn(rc, rc);
920#endif
921 pChunk->Core.Key = idChunk;
922 pChunk->AgeCore.Key = pVM->pgm.s.ChunkR3Map.iNow;
923 pChunk->iAge = 0;
924 pChunk->cRefs = 0;
925 pChunk->cPermRefs = 0;
926 pChunk->pv = NULL;
927
928 /*
929 * Request the ring-0 part to map the chunk in question and if
930 * necessary unmap another one to make space in the mapping cache.
931 */
932 GMMMAPUNMAPCHUNKREQ Req;
933 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
934 Req.Hdr.cbReq = sizeof(Req);
935 Req.pvR3 = NULL;
936 Req.idChunkMap = idChunk;
937 Req.idChunkUnmap = INT32_MAX;
938 if (pVM->pgm.s.ChunkR3Map.c >= pVM->pgm.s.ChunkR3Map.cMax)
939 Req.idChunkUnmap = pgmR3PhysChunkFindUnmapCandidate(pVM);
940 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
941 if (VBOX_SUCCESS(rc))
942 {
943 /*
944 * Update the tree.
945 */
946 /* insert the new one. */
947 AssertPtr(Req.pvR3);
948 pChunk->pv = Req.pvR3;
949 bool fRc = RTAvlU32Insert(&pVM->pgm.s.ChunkR3Map.pTree, &pChunk->Core);
950 AssertRelease(fRc);
951 pVM->pgm.s.ChunkR3Map.c++;
952
953 fRc = RTAvllU32Insert(&pVM->pgm.s.ChunkR3Map.pAgeTree, &pChunk->AgeCore);
954 AssertRelease(fRc);
955
956 /* remove the unmapped one. */
957 if (Req.idChunkUnmap != INT32_MAX)
958 {
959 PPGMCHUNKR3MAP pUnmappedChunk = (PPGMCHUNKR3MAP)RTAvlU32Remove(&pVM->pgm.s.ChunkR3Map.pTree, Req.idChunkUnmap);
960 AssertRelease(pUnmappedChunk);
961 pUnmappedChunk->pv = NULL;
962 pUnmappedChunk->Core.Key = UINT32_MAX;
963#if 0 /* for later when we've got a separate mapping method for ring-0. */
964 MMR3HeapFree(pUnmappedChunk);
965#else
966 MMHyperFree(pVM, pUnmappedChunk);
967#endif
968 pVM->pgm.s.ChunkR3Map.c--;
969 }
970 }
971 else
972 {
973 AssertRC(rc);
974#if 0 /* for later when we've got a separate mapping method for ring-0. */
975 MMR3HeapFree(pChunk);
976#else
977 MMHyperFree(pVM, pChunk);
978#endif
979 pChunk = NULL;
980 }
981
982 *ppChunk = pChunk;
983 return rc;
984}
985
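/*
 * Illustrative call sketch (hypothetical caller; pgmR3PhysChunkMap must be
 * used from within the PGM critical section, see the remark above): map a
 * chunk on demand and pick up the ring-3 address from the tracking structure.
 *
 * @code
 *    PPGMCHUNKR3MAP pChunk;
 *    int rc = pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
 *    if (VBOX_SUCCESS(rc))
 *    {
 *        void *pvChunkR3 = pChunk->pv;   // ring-3 base address of the mapped chunk
 *        // ... translate the page offset within the chunk here ...
 *    }
 * @endcode
 */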
986
987/**
988 * For VMMCALLHOST_PGM_MAP_CHUNK, considered internal.
989 *
990 * @returns see pgmR3PhysChunkMap.
991 * @param pVM The VM handle.
992 * @param idChunk The chunk to map.
993 */
994PDMR3DECL(int) PGMR3PhysChunkMap(PVM pVM, uint32_t idChunk)
995{
996 PPGMCHUNKR3MAP pChunk;
997 return pgmR3PhysChunkMap(pVM, idChunk, &pChunk);
998}
999
1000
1001/**
1002 * Invalidates the TLB for the ring-3 mapping cache.
1003 *
1004 * @param pVM The VM handle.
1005 */
1006PGMR3DECL(void) PGMR3PhysChunkInvalidateTLB(PVM pVM)
1007{
1008 pgmLock(pVM);
1009 for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.ChunkR3Map.Tlb.aEntries); i++)
1010 {
1011 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].idChunk = NIL_GMM_CHUNKID;
1012 pVM->pgm.s.ChunkR3Map.Tlb.aEntries[i].pChunk = NULL;
1013 }
1014 pgmUnlock(pVM);
1015}
1016
1017
1018/**
1019 * Response to VM_FF_PGM_NEED_HANDY_PAGES and VMMCALLHOST_PGM_ALLOCATE_HANDY_PAGES.
1020 *
1021 * @returns The following VBox status codes.
1022 * @retval VINF_SUCCESS on success. FF cleared.
1023 * @retval VINF_EM_NO_MEMORY if we're out of memory. The FF is not cleared in this case.
1024 *
1025 * @param pVM The VM handle.
1026 */
1027PDMR3DECL(int) PGMR3PhysAllocateHandyPages(PVM pVM)
1028{
1029 pgmLock(pVM);
1030 int rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES, 0, NULL);
1031 if (rc == VERR_GMM_SEED_ME)
1032 {
1033 void *pvChunk;
1034 rc = SUPPageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
1035 if (VBOX_SUCCESS(rc))
1036 rc = SUPCallVMMR0Ex(pVM->pVMR0, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
1037 if (VBOX_FAILURE(rc))
1038 {
1039 LogRel(("PGM: GMM Seeding failed, rc=%Vrc\n", rc));
1040 rc = VINF_EM_NO_MEMORY;
1041 }
1042 }
1043 pgmUnlock(pVM);
1044 Assert(rc == VINF_SUCCESS || rc == VINF_EM_NO_MEMORY);
1045 return rc;
1046}
1047
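/*
 * Descriptive note on the seeding path above: VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES
 * asks ring-0 to refill the VM's handy-page set.  If GMM has nothing left to
 * hand out it returns VERR_GMM_SEED_ME; ring-3 then allocates and locks one
 * chunk's worth of pages itself (SUPPageAlloc) and donates it to GMM through
 * VMMR0_DO_GMM_SEED_CHUNK.  Only if that also fails is the status downgraded
 * to VINF_EM_NO_MEMORY, leaving VM_FF_PGM_NEED_HANDY_PAGES set for a later retry.
 */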