VirtualBox

source: vbox/trunk/src/VBox/VMM/GMM.cpp@ 30660

Last change on this file since 30660 was 30660, checked in by vboxsync, 15 years ago

Very annoying to return informational codes without hitting assertions, so turn it into an error.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 13.3 KB
Line 
1/* $Id: GMM.cpp 30660 2010-07-06 12:08:21Z vboxsync $ */
2/** @file
3 * GMM - Global Memory Manager, ring-3 request wrappers.
4 */
5
6/*
7 * Copyright (C) 2008 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_GMM
23#include <VBox/gmm.h>
24#include <VBox/vmm.h>
25#include <VBox/vm.h>
26#include <VBox/sup.h>
27#include <VBox/err.h>
28#include <VBox/param.h>
29
30#include <iprt/assert.h>
31#include <VBox/log.h>
32#include <iprt/mem.h>
33#include <iprt/string.h>
34
35
36/**
37 * @see GMMR0InitialReservation
38 */
39GMMR3DECL(int) GMMR3InitialReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages,
40 GMMOCPOLICY enmPolicy, GMMPRIORITY enmPriority)
41{
42 GMMINITIALRESERVATIONREQ Req;
43 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
44 Req.Hdr.cbReq = sizeof(Req);
45 Req.cBasePages = cBasePages;
46 Req.cShadowPages = cShadowPages;
47 Req.cFixedPages = cFixedPages;
48 Req.enmPolicy = enmPolicy;
49 Req.enmPriority = enmPriority;
50 return VMMR3CallR0(pVM, VMMR0_DO_GMM_INITIAL_RESERVATION, 0, &Req.Hdr);
51}
52
53
54/**
55 * @see GMMR0UpdateReservation
56 */
57GMMR3DECL(int) GMMR3UpdateReservation(PVM pVM, uint64_t cBasePages, uint32_t cShadowPages, uint32_t cFixedPages)
58{
59 GMMUPDATERESERVATIONREQ Req;
60 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
61 Req.Hdr.cbReq = sizeof(Req);
62 Req.cBasePages = cBasePages;
63 Req.cShadowPages = cShadowPages;
64 Req.cFixedPages = cFixedPages;
65 return VMMR3CallR0(pVM, VMMR0_DO_GMM_UPDATE_RESERVATION, 0, &Req.Hdr);
66}
67
68
69/**
70 * Prepares a GMMR0AllocatePages request.
71 *
72 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
73 * @param pVM Pointer to the shared VM structure.
74 * @param[out] ppReq Where to store the pointer to the request packet.
75 * @param cPages The number of pages that's to be allocated.
76 * @param enmAccount The account to charge.
77 */
78GMMR3DECL(int) GMMR3AllocatePagesPrepare(PVM pVM, PGMMALLOCATEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
79{
80 uint32_t cb = RT_OFFSETOF(GMMALLOCATEPAGESREQ, aPages[cPages]);
81 PGMMALLOCATEPAGESREQ pReq = (PGMMALLOCATEPAGESREQ)RTMemTmpAllocZ(cb);
82 if (!pReq)
83 return VERR_NO_TMP_MEMORY;
84
85 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
86 pReq->Hdr.cbReq = cb;
87 pReq->enmAccount = enmAccount;
88 pReq->cPages = cPages;
89 NOREF(pVM);
90 *ppReq = pReq;
91 return VINF_SUCCESS;
92}
93
94
/**
 * Performs a GMMR0AllocatePages request.
 * This will call VMSetError on failure.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   pReq    Pointer to the request (returned by GMMR3AllocatePagesPrepare).
 */
GMMR3DECL(int) GMMR3AllocatePagesPerform(PVM pVM, PGMMALLOCATEPAGESREQ pReq)
{
    /* Retry loop: every VERR_GMM_SEED_ME failure seeds one more chunk and
       tries the allocation again; 'i' counts the seeding rounds. */
    for (unsigned i = 0; ; i++)
    {
        int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_ALLOCATE_PAGES, 0, &pReq->Hdr);
        if (RT_SUCCESS(rc))
        {
#ifdef LOG_ENABLED
            /* Log the page IDs and host physical addresses ring-0 filled in. */
            for (uint32_t iPage = 0; iPage < pReq->cPages; iPage++)
                Log3(("GMMR3AllocatePagesPerform: idPage=%#x HCPhys=%RHp\n",
                      pReq->aPages[iPage].idPage, pReq->aPages[iPage].HCPhysGCPhys));
#endif
            return rc;
        }
        /* Any failure other than "seed me" is final and reported via VMSetError. */
        if (rc != VERR_GMM_SEED_ME)
            return VMSetError(pVM, rc, RT_SRC_POS,
                              N_("GMMR0AllocatePages failed to allocate %u pages"),
                              pReq->cPages);
        /* Sanity: we should never need more seeding rounds than requested pages. */
        Assert(i < pReq->cPages);

        /*
         * Seed another chunk: allocate chunk-sized ring-3 memory and donate
         * it to GMM via VMMR0_DO_GMM_SEED_CHUNK before retrying.
         */
        void *pvChunk;
        rc = SUPR3PageAlloc(GMM_CHUNK_SIZE >> PAGE_SHIFT, &pvChunk);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS,
                              N_("Out of memory (SUPR3PageAlloc) seeding a %u pages allocation request"),
                              pReq->cPages);

        rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvChunk, NULL);
        if (RT_FAILURE(rc))
            return VMSetError(pVM, rc, RT_SRC_POS, N_("GMM seeding failed"));
    }
}
138
139
140/**
141 * Cleans up a GMMR0AllocatePages request.
142 * @param pReq Pointer to the request (returned by GMMR3AllocatePagesPrepare).
143 */
144GMMR3DECL(void) GMMR3AllocatePagesCleanup(PGMMALLOCATEPAGESREQ pReq)
145{
146 RTMemTmpFree(pReq);
147}
148
149
150/**
151 * Prepares a GMMR0FreePages request.
152 *
153 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
154 * @param pVM Pointer to the shared VM structure.
155 * @param[out] ppReq Where to store the pointer to the request packet.
156 * @param cPages The number of pages that's to be freed.
157 * @param enmAccount The account to charge.
158 */
159GMMR3DECL(int) GMMR3FreePagesPrepare(PVM pVM, PGMMFREEPAGESREQ *ppReq, uint32_t cPages, GMMACCOUNT enmAccount)
160{
161 uint32_t cb = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cPages]);
162 PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
163 if (!pReq)
164 return VERR_NO_TMP_MEMORY;
165
166 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
167 pReq->Hdr.cbReq = cb;
168 pReq->enmAccount = enmAccount;
169 pReq->cPages = cPages;
170 NOREF(pVM);
171 *ppReq = pReq;
172 return VINF_SUCCESS;
173}
174
175
176/**
177 * Re-prepares a GMMR0FreePages request.
178 *
179 * @returns VINF_SUCCESS or VERR_NO_TMP_MEMORY.
180 * @param pVM Pointer to the shared VM structure.
181 * @param pReq A request buffer previously returned by
182 * GMMR3FreePagesPrepare().
183 * @param cPages The number of pages originally passed to
184 * GMMR3FreePagesPrepare().
185 * @param enmAccount The account to charge.
186 */
187GMMR3DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount)
188{
189 Assert(pReq->Hdr.u32Magic == SUPVMMR0REQHDR_MAGIC);
190 pReq->Hdr.cbReq = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cPages]);
191 pReq->enmAccount = enmAccount;
192 pReq->cPages = cPages;
193 NOREF(pVM);
194}
195
196
197/**
198 * Performs a GMMR0FreePages request.
199 * This will call VMSetError on failure.
200 *
201 * @returns VBox status code.
202 * @param pVM Pointer to the shared VM structure.
203 * @param pReq Pointer to the request (returned by GMMR3FreePagesPrepare).
204 * @param cActualPages The number of pages actually freed.
205 */
206GMMR3DECL(int) GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages)
207{
208 /*
209 * Adjust the request if we ended up with fewer pages than anticipated.
210 */
211 if (cActualPages != pReq->cPages)
212 {
213 AssertReturn(cActualPages < pReq->cPages, VERR_INTERNAL_ERROR);
214 if (!cActualPages)
215 return VINF_SUCCESS;
216 pReq->cPages = cActualPages;
217 pReq->Hdr.cbReq = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cActualPages]);
218 }
219
220 /*
221 * Do the job.
222 */
223 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
224 if (RT_SUCCESS(rc))
225 return rc;
226 AssertRC(rc);
227 return VMSetError(pVM, rc, RT_SRC_POS,
228 N_("GMMR0FreePages failed to free %u pages"),
229 pReq->cPages);
230}
231
232
233/**
234 * Cleans up a GMMR0FreePages request.
235 * @param pReq Pointer to the request (returned by GMMR3FreePagesPrepare).
236 */
237GMMR3DECL(void) GMMR3FreePagesCleanup(PGMMFREEPAGESREQ pReq)
238{
239 RTMemTmpFree(pReq);
240}
241
242
243/**
244 * Frees allocated pages, for bailing out on failure.
245 *
246 * This will not call VMSetError on failure but will use AssertLogRel instead.
247 *
248 * @param pVM Pointer to the shared VM structure.
249 * @param pAllocReq The allocation request to undo.
250 */
251GMMR3DECL(void) GMMR3FreeAllocatedPages(PVM pVM, GMMALLOCATEPAGESREQ const *pAllocReq)
252{
253 uint32_t cb = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[pAllocReq->cPages]);
254 PGMMFREEPAGESREQ pReq = (PGMMFREEPAGESREQ)RTMemTmpAllocZ(cb);
255 AssertLogRelReturnVoid(pReq);
256
257 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
258 pReq->Hdr.cbReq = cb;
259 pReq->enmAccount = pAllocReq->enmAccount;
260 pReq->cPages = pAllocReq->cPages;
261 uint32_t iPage = pAllocReq->cPages;
262 while (iPage-- > 0)
263 {
264 Assert(pAllocReq->aPages[iPage].idPage != NIL_GMM_PAGEID);
265 pReq->aPages[iPage].idPage = pAllocReq->aPages[iPage].idPage;
266 }
267
268 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
269 AssertLogRelRC(rc);
270
271 RTMemTmpFree(pReq);
272}
273
274
275/**
276 * @see GMMR0BalloonedPages
277 */
278GMMR3DECL(int) GMMR3BalloonedPages(PVM pVM, GMMBALLOONACTION enmAction, uint32_t cBalloonedPages)
279{
280 GMMBALLOONEDPAGESREQ Req;
281 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
282 Req.Hdr.cbReq = sizeof(Req);
283 Req.enmAction = enmAction;
284 Req.cBalloonedPages = cBalloonedPages;
285
286 return VMMR3CallR0(pVM, VMMR0_DO_GMM_BALLOONED_PAGES, 0, &Req.Hdr);
287}
288
289/**
290 * @see GMMR0QueryVMMMemoryStatsReq
291 */
292GMMR3DECL(int) GMMR3QueryHypervisorMemoryStats(PVM pVM, uint64_t *pcTotalAllocPages, uint64_t *pcTotalFreePages, uint64_t *pcTotalBalloonPages, uint64_t *puTotalBalloonSize)
293{
294 GMMMEMSTATSREQ Req;
295 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
296 Req.Hdr.cbReq = sizeof(Req);
297 Req.cAllocPages = 0;
298 Req.cFreePages = 0;
299 Req.cBalloonedPages = 0;
300 Req.cSharedPages = 0;
301
302 *pcTotalAllocPages = 0;
303 *pcTotalFreePages = 0;
304 *pcTotalBalloonPages = 0;
305 *puTotalBalloonSize = 0;
306
307 /* Must be callable from any thread, so can't use VMMR3CallR0. */
308 int rc = SUPR3CallVMMR0Ex(pVM->pVMR0, NIL_VMCPUID, VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS, 0, &Req.Hdr);
309 if (rc == VINF_SUCCESS)
310 {
311 *pcTotalAllocPages = Req.cAllocPages;
312 *pcTotalFreePages = Req.cFreePages;
313 *pcTotalBalloonPages = Req.cBalloonedPages;
314 *puTotalBalloonSize = Req.cSharedPages;
315 }
316 return rc;
317}
318
319/**
320 * @see GMMR0QueryMemoryStatsReq
321 */
322GMMR3DECL(int) GMMR3QueryMemoryStats(PVM pVM, uint64_t *pcAllocPages, uint64_t *pcMaxPages, uint64_t *pcBalloonPages)
323{
324 GMMMEMSTATSREQ Req;
325 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
326 Req.Hdr.cbReq = sizeof(Req);
327 Req.cAllocPages = 0;
328 Req.cFreePages = 0;
329 Req.cBalloonedPages = 0;
330
331 *pcAllocPages = 0;
332 *pcMaxPages = 0;
333 *pcBalloonPages = 0;
334
335 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_QUERY_MEM_STATS, 0, &Req.Hdr);
336 if (rc == VINF_SUCCESS)
337 {
338 *pcAllocPages = Req.cAllocPages;
339 *pcMaxPages = Req.cMaxPages;
340 *pcBalloonPages = Req.cBalloonedPages;
341 }
342 return rc;
343}
344
345/**
346 * @see GMMR0MapUnmapChunk
347 */
348GMMR3DECL(int) GMMR3MapUnmapChunk(PVM pVM, uint32_t idChunkMap, uint32_t idChunkUnmap, PRTR3PTR ppvR3)
349{
350 GMMMAPUNMAPCHUNKREQ Req;
351 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
352 Req.Hdr.cbReq = sizeof(Req);
353 Req.idChunkMap = idChunkMap;
354 Req.idChunkUnmap = idChunkUnmap;
355 Req.pvR3 = NULL;
356 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_MAP_UNMAP_CHUNK, 0, &Req.Hdr);
357 if (RT_SUCCESS(rc) && ppvR3)
358 *ppvR3 = Req.pvR3;
359 return rc;
360}
361
362/**
363 * @see GMMR0FreeLargePage
364 */
365GMMR3DECL(int) GMMR3FreeLargePage(PVM pVM, uint32_t idPage)
366{
367 GMMFREELARGEPAGEREQ Req;
368 Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
369 Req.Hdr.cbReq = sizeof(Req);
370 Req.idPage = idPage;
371 return VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_LARGE_PAGE, 0, &Req.Hdr);
372}
373
374/**
375 * @see GMMR0SeedChunk
376 */
377GMMR3DECL(int) GMMR3SeedChunk(PVM pVM, RTR3PTR pvR3)
378{
379 return VMMR3CallR0(pVM, VMMR0_DO_GMM_SEED_CHUNK, (uintptr_t)pvR3, NULL);
380}
381
382
383/**
384 * @see GMMR0RegisterSharedModule
385 */
386GMMR3DECL(int) GMMR3RegisterSharedModule(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq)
387{
388 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
389 pReq->Hdr.cbReq = RT_OFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]);
390 int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_REGISTER_SHARED_MODULE, 0, &pReq->Hdr);
391 if (rc == VINF_SUCCESS)
392 rc = pReq->rc;
393 return rc;
394}
395
396/**
397 * @see GMMR0RegisterSharedModule
398 */
399GMMR3DECL(int) GMMR3UnregisterSharedModule(PVM pVM, PGMMUNREGISTERSHAREDMODULEREQ pReq)
400{
401 pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
402 pReq->Hdr.cbReq = sizeof(*pReq);
403 return VMMR3CallR0(pVM, VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE, 0, &pReq->Hdr);
404}
405
406/**
407 * @see GMMR0ResetSharedModules
408 */
409GMMR3DECL(int) GMMR3ResetSharedModules(PVM pVM)
410{
411 return VMMR3CallR0(pVM, VMMR0_DO_GMM_RESET_SHARED_MODULES, 0, NULL);
412}
413
414/**
415 * @see GMMR0CheckSharedModules
416 */
417GMMR3DECL(int) GMMR3CheckSharedModules(PVM pVM)
418{
419 return VMMR3CallR0(pVM, VMMR0_DO_GMM_CHECK_SHARED_MODULES, 0, NULL);
420}
421
#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
/**
 * Asks ring-0 whether the given page has a duplicate.
 *
 * Failures of the ring-0 call are swallowed and reported as "no duplicate".
 *
 * @returns true if a duplicate was found, false otherwise or on error.
 * @param   pVM     Pointer to the shared VM structure.
 * @param   idPage  The id of the page to check.
 * @see     GMMR0FindDuplicatePage
 */
GMMR3DECL(bool) GMMR3IsDuplicatePage(PVM pVM, uint32_t idPage)
{
    GMMFINDDUPLICATEPAGEREQ Request;
    Request.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    Request.Hdr.cbReq    = sizeof(Request);
    Request.idPage       = idPage;
    Request.fDuplicate   = false;

    if (VMMR3CallR0(pVM, VMMR0_DO_GMM_FIND_DUPLICATE_PAGE, 0, &Request.Hdr) != VINF_SUCCESS)
        return false;
    return Request.fDuplicate;
}
#endif /* VBOX_STRICT && HC_ARCH_BITS == 64 */
441
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette