VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp@ 97913

Last change on this file since 97913 was 97913, checked in by vboxsync, 2 years ago

Add/VBoxGuestR0LibPhysHeap.cpp: A bit of cleanup and a testcase based on one of the IPRT heaps.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 25.2 KB
Line 
1/* $Id: VBoxGuestR0LibPhysHeap.cpp 97913 2022-12-30 02:22:15Z vboxsync $ */
2/** @file
3 * VBoxGuestLibR0 - Physical memory heap.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*********************************************************************************************************************************
33* Header Files *
34*********************************************************************************************************************************/
35#include "VBoxGuestR0LibInternal.h"
36
37#include <iprt/assert.h>
38#include <iprt/semaphore.h>
39#include <iprt/alloc.h>
40
41/** @page pg_vbglr0_phys_heap VBoxGuestLibR0 - Physical memory heap.
42 *
43 * The physical memory heap consists of a doubly linked list of large chunks
44 * (VBGLDATA::pChunkHead), memory blocks are allocated within these chunks and
45 * are members of allocated (VBGLDATA::pAllocBlocksHead) and free
46 * (VBGLDATA::pFreeBlocksHead) doubly linked lists.
47 *
48 * When allocating a block, we search in Free linked list for a suitable free
49 * block. If there is no such block, a new chunk is allocated and the new block
50 * is taken from the new chunk as the only chunk-sized free block. Allocated
51 * block is excluded from the Free list and goes to Alloc list.
52 *
 * When freeing a block, we check the pointer, unlink the block from the
 * Alloc list, and move it to the Free list.
55 *
56 * For each chunk we maintain the allocated blocks counter. If 2 (or more)
57 * entire chunks are free they are immediately deallocated, so we always have at
58 * most 1 free chunk.
59 *
60 * When freeing blocks, two subsequent free blocks are always merged together.
61 * Current implementation merges blocks only when there is a block after the
62 * just freed one.
63 */
64
65
66/*********************************************************************************************************************************
67* Defined Constants And Macros *
68*********************************************************************************************************************************/
69#define VBGL_PH_ASSERT Assert
70#define VBGL_PH_ASSERT_MSG AssertMsg
71
72// #define DUMPHEAP
73
74#ifdef DUMPHEAP
75# define VBGL_PH_dprintf(a) RTAssertMsg2Weak a
76#else
77# define VBGL_PH_dprintf(a)
78#endif
79
80/* Heap block signature */
81#define VBGL_PH_BLOCKSIGNATURE (0xADDBBBBB)
82
83
84/* Heap chunk signature */
85#define VBGL_PH_CHUNKSIGNATURE (0xADDCCCCC)
86/* Heap chunk allocation unit */
87#define VBGL_PH_CHUNKSIZE (0x10000)
88
89/* Heap block bit flags */
90#define VBGL_PH_BF_ALLOCATED (0x1)
91
92/** Threshold at which to split out a tail free block when allocating.
93 * The value is the amount of user space, i.e. excluding the header. */
94#define VBGL_PH_MIN_SPLIT_FREE_BLOCK 32
95
96
97/*********************************************************************************************************************************
98* Structures and Typedefs *
99*********************************************************************************************************************************/
struct VBGLPHYSHEAPBLOCK
{
    /** Magic value (VBGL_PH_BLOCKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of user data in the block. Does not include this block header. */
    uint32_t cbDataSize;

    /** Block flags; currently only VBGL_PH_BF_ALLOCATED is defined (0 = free). */
    uint32_t fu32Flags;

    /** Next block in the allocated or free doubly linked list (NULL at tail). */
    VBGLPHYSHEAPBLOCK *pNext;
    /** Previous block in the same list (NULL at the list head). */
    VBGLPHYSHEAPBLOCK *pPrev;

    /** The chunk this block was carved from. */
    VBGLPHYSHEAPCHUNK *pChunk;
};
115
struct VBGLPHYSHEAPCHUNK
{
    /** Magic value (VBGL_PH_CHUNKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of the chunk. Includes the chunk header. */
    uint32_t cbSize;

    /** Physical address of the chunk (contiguous).
     * Kept as 32-bit because block addresses are passed to the host via a
     * 32-bit OUT instruction (see vbglPhysHeapChunkAlloc). */
    uint32_t physAddr;

    /** Number of allocated blocks in the chunk.  When this reaches zero the
     * chunk is a candidate for deletion (see VbglR0PhysHeapFree). */
    int32_t cAllocatedBlocks;

    /** Next chunk in the chunk list (NULL at tail). */
    VBGLPHYSHEAPCHUNK *pNext;
    /** Previous chunk in the chunk list (NULL at the head). */
    VBGLPHYSHEAPCHUNK *pPrev;
};
133
134
#ifndef DUMPHEAP
# define dumpheap(pszWhere) do { } while (0)
#else
/**
 * Debug helper: dumps all chunks, allocated blocks and free blocks to the
 * debug output.  Only compiled in when DUMPHEAP is defined.
 *
 * @param   pszWhere    Caller-supplied tag identifying the call site.
 */
void dumpheap(const char *pszWhere)
{
    VBGL_PH_dprintf(("VBGL_PH dump at '%s'\n", pszWhere));

    VBGL_PH_dprintf(("Chunks:\n"));

    VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead;

    while (pChunk)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, allocated = %8d, phys = %08X\n",
                         pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbSize, pChunk->cAllocatedBlocks, pChunk->physAddr));

        pChunk = pChunk->pNext;
    }

    VBGL_PH_dprintf(("Allocated blocks:\n"));

    VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pAllocBlocksHead;

    while (pBlock)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, flags = %08X, pChunk = %p\n",
                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fu32Flags, pBlock->pChunk));

        pBlock = pBlock->pNext;
    }

    VBGL_PH_dprintf(("Free blocks:\n"));

    pBlock = g_vbgldata.pFreeBlocksHead;

    while (pBlock)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, flags = %08X, pChunk = %p\n",
                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fu32Flags, pBlock->pChunk));

        pBlock = pBlock->pNext;
    }

    VBGL_PH_dprintf(("VBGL_PH dump at '%s' done\n", pszWhere));
}
#endif
181
182
183DECLINLINE(void *) vbglPhysHeapBlock2Data(VBGLPHYSHEAPBLOCK *pBlock)
184{
185 if (pBlock)
186 return pBlock + 1;
187 return NULL;
188}
189
190
/**
 * Translates a user data pointer back to its block header, validating the
 * block signature.
 *
 * @returns The block header, or NULL if @a pv is NULL or the signature check
 *          fails (the failure also asserts).
 * @param   pv  User data pointer as returned by vbglPhysHeapBlock2Data().
 */
DECLINLINE(VBGLPHYSHEAPBLOCK *) vbglPhysHeapData2Block(void *pv)
{
    if (pv)
    {
        /* The header is located immediately before the user data. */
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
        AssertMsgReturn(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                        ("pBlock->u32Signature = %08X\n", pBlock->u32Signature),
                        NULL);
        return pBlock;
    }
    return NULL;
}
203
204
205DECLINLINE(int) vbglPhysHeapEnter(void)
206{
207 int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
208
209 VBGL_PH_ASSERT_MSG(RT_SUCCESS(rc), ("Failed to request heap mutex, rc = %Rrc\n", rc));
210
211 return rc;
212}
213
214
/**
 * Releases the heap fast mutex acquired by vbglPhysHeapEnter().
 */
DECLINLINE(void) vbglPhysHeapLeave(void)
{
    RTSemFastMutexRelease(g_vbgldata.mutexHeap);
}
219
220
221static void vbglPhysHeapInitBlock(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbDataSize)
222{
223 VBGL_PH_ASSERT(pBlock != NULL);
224 VBGL_PH_ASSERT(pChunk != NULL);
225
226 pBlock->u32Signature = VBGL_PH_BLOCKSIGNATURE;
227 pBlock->cbDataSize = cbDataSize;
228 pBlock->fu32Flags = 0;
229 pBlock->pNext = NULL;
230 pBlock->pPrev = NULL;
231 pBlock->pChunk = pChunk;
232}
233
234
235static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pInsertAfter, VBGLPHYSHEAPBLOCK *pBlock)
236{
237 VBGL_PH_ASSERT_MSG(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
238 VBGL_PH_ASSERT_MSG(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));
239
240 if (pInsertAfter)
241 {
242 pBlock->pNext = pInsertAfter->pNext;
243 pBlock->pPrev = pInsertAfter;
244
245 if (pInsertAfter->pNext)
246 pInsertAfter->pNext->pPrev = pBlock;
247
248 pInsertAfter->pNext = pBlock;
249 }
250 else
251 {
252 /* inserting to head of list */
253 pBlock->pPrev = NULL;
254
255 if (pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED)
256 {
257 pBlock->pNext = g_vbgldata.pAllocBlocksHead;
258
259 if (g_vbgldata.pAllocBlocksHead)
260 g_vbgldata.pAllocBlocksHead->pPrev = pBlock;
261
262 g_vbgldata.pAllocBlocksHead = pBlock;
263 }
264 else
265 {
266 pBlock->pNext = g_vbgldata.pFreeBlocksHead;
267
268 if (g_vbgldata.pFreeBlocksHead)
269 g_vbgldata.pFreeBlocksHead->pPrev = pBlock;
270
271 g_vbgldata.pFreeBlocksHead = pBlock;
272 }
273 }
274}
275
276
277/**
278 * Unlinks @a pBlock from the chain its on.
279 */
280static void vbglPhysHeapExcludeBlock(VBGLPHYSHEAPBLOCK *pBlock)
281{
282 if (pBlock->pNext)
283 pBlock->pNext->pPrev = pBlock->pPrev;
284 /* else: this is tail of list but we do not maintain tails of block lists. so nothing to do. */
285
286 if (pBlock->pPrev)
287 pBlock->pPrev->pNext = pBlock->pNext;
288 else if (pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED)
289 {
290 Assert(g_vbgldata.pAllocBlocksHead == pBlock);
291 g_vbgldata.pAllocBlocksHead = pBlock->pNext;
292 }
293 else
294 {
295 Assert(g_vbgldata.pFreeBlocksHead == pBlock);
296 g_vbgldata.pFreeBlocksHead = pBlock->pNext;
297 }
298
299 pBlock->pNext = NULL;
300 pBlock->pPrev = NULL;
301}
302
/**
 * Allocates a new physically contiguous chunk and initializes it with a
 * single free block covering the entire payload area.
 *
 * The chunk is pushed onto the head of the chunk list and its free block onto
 * the head of the free list.
 *
 * @returns The chunk's lone free block (already on the free list), or NULL if
 *          the contiguous allocation failed.
 * @param   cbMinBlock  Minimum user data size the new block must hold; the
 *                      chunk size is rounded up to a VBGL_PH_CHUNKSIZE
 *                      multiple on top of the header overhead.
 */
static VBGLPHYSHEAPBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
{
    RTCCPHYS           PhysAddr = NIL_RTHCPHYS;
    VBGLPHYSHEAPCHUNK *pChunk;
    uint32_t           cbChunk;
    VBGL_PH_dprintf(("Allocating new chunk for %#x byte allocation\n", cbMinBlock));
    AssertReturn(cbMinBlock < _128M, NULL); /* paranoia */

    /* Compute the size of the new chunk, rounding up to next chunk size,
       which must be power of 2. */
    Assert(RT_IS_POWER_OF_TWO(VBGL_PH_CHUNKSIZE));
    cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPBLOCK);
    cbChunk = RT_ALIGN_32(cbChunk, VBGL_PH_CHUNKSIZE);

    /* This function allocates physical contiguous memory below 4 GB.  This 4GB
       limitation stems from using a 32-bit OUT instruction to pass a block
       physical address to the host. */
    pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
    /** @todo retry with smaller size if it fails, treating VBGL_PH_CHUNKSIZE as
     *        a guideline rather than absolute minimum size. */
    if (pChunk)
    {
        VBGLPHYSHEAPCHUNK *pOldHeadChunk;
        VBGLPHYSHEAPBLOCK *pBlock;
        AssertRelease(PhysAddr < _4G && PhysAddr + cbChunk <= _4G);

        /* Init the new chunk. */
        pChunk->u32Signature     = VBGL_PH_CHUNKSIGNATURE;
        pChunk->cbSize           = cbChunk;
        pChunk->physAddr         = (uint32_t)PhysAddr;
        pChunk->cAllocatedBlocks = 0;
        pChunk->pNext            = NULL;
        pChunk->pPrev            = NULL;

        /* Initialize the free block, which now occupies entire chunk. */
        pBlock = (VBGLPHYSHEAPBLOCK *)(pChunk + 1);
        vbglPhysHeapInitBlock(pBlock, pChunk, cbChunk - sizeof(VBGLPHYSHEAPCHUNK) - sizeof(VBGLPHYSHEAPBLOCK));
        vbglPhysHeapInsertBlock(NULL, pBlock);

        /* Add the chunk to the list. */
        pOldHeadChunk = g_vbgldata.pChunkHead;
        pChunk->pNext = pOldHeadChunk;
        if (pOldHeadChunk)
            pOldHeadChunk->pPrev = pChunk;
        g_vbgldata.pChunkHead = pChunk;

        VBGL_PH_dprintf(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->cbDataSize));
        return pBlock;
    }
    LogRel(("vbglPhysHeapChunkAlloc: failed to alloc %u (%#x) contiguous bytes.\n", cbChunk, cbChunk));
    return NULL;
}
355
356
/**
 * Deletes a chunk: unlinks every block inside it from the block lists, then
 * unlinks the chunk itself from the chunk list and frees the contiguous
 * memory.
 *
 * @param   pChunk  The chunk to delete.  All blocks in it (free or allocated)
 *                  become invalid after this call.
 */
static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk)
{
    uintptr_t uEnd, uCur;
    VBGL_PH_ASSERT(pChunk != NULL);
    VBGL_PH_ASSERT_MSG(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, ("pChunk->u32Signature = %08X\n", pChunk->u32Signature));

    VBGL_PH_dprintf(("Deleting chunk %p size %x\n", pChunk, pChunk->cbSize));

    /* first scan the chunk and exclude (unlink) all blocks from the lists */

    uEnd = (uintptr_t)pChunk + pChunk->cbSize;
    uCur = (uintptr_t)(pChunk + 1);

    while (uCur < uEnd)
    {
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur;

        /* Blocks are laid out back to back: advance by header + data size. */
        uCur += pBlock->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);

        vbglPhysHeapExcludeBlock(pBlock);
    }

    VBGL_PH_ASSERT_MSG(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbSize = %08X\n", uCur, uEnd, pChunk->cbSize));

    /* Exclude chunk from the chunk list */
    if (pChunk->pNext)
        pChunk->pNext->pPrev = pChunk->pPrev;
    /* else: we do not maintain tail pointer. */

    if (pChunk->pPrev)
        pChunk->pPrev->pNext = pChunk->pNext;
    else
    {
        /* The chunk was the list head. */
        Assert(g_vbgldata.pChunkHead == pChunk);
        g_vbgldata.pChunkHead = pChunk->pNext;
    }

    RTMemContFree(pChunk, pChunk->cbSize);
}
396
397
398DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cbSize)
399{
400 VBGLPHYSHEAPBLOCK *pBlock, *pIter;
401 int rc;
402
403 /*
404 * Align the size to a pointer size to avoid getting misaligned header pointers and whatnot.
405 */
406 cbSize = RT_ALIGN_32(cbSize, sizeof(void *));
407
408 rc = vbglPhysHeapEnter();
409 if (RT_FAILURE(rc))
410 return NULL;
411
412 dumpheap("pre alloc");
413
414 /*
415 * Search the free list. We do this in linear fashion as we don't expect
416 * there to be many blocks in the heap.
417 */
418
419 pBlock = NULL;
420 if (cbSize <= PAGE_SIZE / 4 * 3)
421 {
422 /* Smaller than 3/4 page: Prefer a free block that can keep the request within a single page,
423 so HGCM processing in VMMDev can use page locks instead of several reads and writes. */
424
425 VBGLPHYSHEAPBLOCK *pFallback = NULL;
426 for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
427 if (pIter->cbDataSize >= cbSize)
428 {
429 if (pIter->cbDataSize == cbSize)
430 {
431 if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
432 {
433 pBlock = pIter;
434 break;
435 }
436 pFallback = pIter;
437 }
438 else
439 {
440 if (!pFallback || pIter->cbDataSize < pFallback->cbDataSize)
441 pFallback = pIter;
442 if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
443 if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
444 pBlock = pIter;
445 }
446 }
447
448 if (!pBlock)
449 pBlock = pFallback;
450 }
451 else
452 {
453 /* Large than 3/4 page: Find smallest free list match. */
454
455 for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
456 if (pIter->cbDataSize >= cbSize)
457 {
458 if (pIter->cbDataSize == cbSize)
459 {
460 /* Exact match - we're done! */
461 pBlock = pIter;
462 break;
463 }
464
465 /* Looking for a free block with nearest size. */
466 if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
467 pBlock = pIter;
468 }
469 }
470
471 if (!pBlock)
472 {
473 /* No free blocks, allocate a new chunk, the only free block of the
474 chunk will be returned. */
475 pBlock = vbglPhysHeapChunkAlloc(cbSize);
476 }
477
478 if (pBlock)
479 {
480 VBGL_PH_ASSERT_MSG(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
481 ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->u32Signature));
482 VBGL_PH_ASSERT_MSG((pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) == 0,
483 ("pBlock = %p, pBlock->fu32Flags = %08X\n", pBlock, pBlock->fu32Flags));
484
485 /* We have a free block, either found or allocated. */
486
487#if 1 /** @todo r=bird: This might work okay for medium sizes, but it can't be good for small
488 * allocations (unnecessary extra work) nor for large ones (internal fragmentation). */
489 if (pBlock->cbDataSize > 2 * (cbSize + sizeof(VBGLPHYSHEAPBLOCK)))
490#else
491 if (pBlock->cbDataSize >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cbSize)
492#endif
493 {
494 /* Data will occupy less than a half of the block,
495 * split off the tail end into a new free list entry.
496 */
497 pIter = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + cbSize);
498
499 /* Init the new 'pIter' block, initialized blocks are always marked as free. */
500 vbglPhysHeapInitBlock(pIter, pBlock->pChunk, pBlock->cbDataSize - cbSize - sizeof(VBGLPHYSHEAPBLOCK));
501
502 pBlock->cbDataSize = cbSize;
503
504 /* Insert the new 'pIter' block after the 'pBlock' in the free list */
505 vbglPhysHeapInsertBlock(pBlock, pIter);
506 }
507
508 /* Exclude pBlock from free list */
509 vbglPhysHeapExcludeBlock(pBlock);
510
511 /* Mark as allocated */
512 pBlock->fu32Flags |= VBGL_PH_BF_ALLOCATED;
513
514 /* Insert to allocated list */
515 vbglPhysHeapInsertBlock(NULL, pBlock);
516
517 /* Adjust the chunk allocated blocks counter */
518 pBlock->pChunk->cAllocatedBlocks++;
519 }
520
521 dumpheap("post alloc");
522
523 vbglPhysHeapLeave();
524 VBGL_PH_dprintf(("VbglR0PhysHeapAlloc %x size %x\n", vbglPhysHeapBlock2Data(pBlock), pBlock->cbDataSize));
525
526 return vbglPhysHeapBlock2Data(pBlock);
527}
528
529DECLR0VBGL(uint32_t) VbglR0PhysHeapGetPhysAddr(void *pv)
530{
531 uint32_t physAddr = 0;
532 VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapData2Block(pv);
533
534 if (pBlock)
535 {
536 VBGL_PH_ASSERT_MSG((pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) != 0,
537 ("pBlock = %p, pBlock->fu32Flags = %08X\n", pBlock, pBlock->fu32Flags));
538
539 if (pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED)
540 physAddr = pBlock->pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pBlock->pChunk);
541 }
542
543 return physAddr;
544}
545
/**
 * Frees a block allocated by VbglR0PhysHeapAlloc().
 *
 * The block is moved from the allocated list to the free list and, when the
 * following block is also free, merged with it.  When two or more chunks end
 * up entirely free, the current chunk is returned to the system.
 *
 * @param   pv  Pointer returned by VbglR0PhysHeapAlloc().  NULL is ignored;
 *              a pointer with a bad block signature asserts and is ignored.
 */
DECLR0VBGL(void) VbglR0PhysHeapFree(void *pv)
{
    VBGLPHYSHEAPBLOCK *pBlock;
    VBGLPHYSHEAPBLOCK *pNeighbour;
    VBGLPHYSHEAPCHUNK *pChunk;

    int rc = vbglPhysHeapEnter();
    if (RT_FAILURE(rc))
        return;

    dumpheap ("pre free");

    pBlock = vbglPhysHeapData2Block(pv);

    if (!pBlock)
    {
        vbglPhysHeapLeave();
        return;
    }

    VBGL_PH_ASSERT_MSG((pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) != 0,
                       ("pBlock = %p, pBlock->fu32Flags = %08X\n", pBlock, pBlock->fu32Flags));

    /* Exclude from allocated list */
    vbglPhysHeapExcludeBlock(pBlock);

    dumpheap("post exclude");

    VBGL_PH_dprintf(("VbglR0PhysHeapFree %p size %x\n", pv, pBlock->cbDataSize));

    /* Mark as free */
    pBlock->fu32Flags &= ~VBGL_PH_BF_ALLOCATED;

    /* Insert to free list */
    vbglPhysHeapInsertBlock(NULL, pBlock);

    dumpheap("post insert");

    /* Adjust the chunk allocated blocks counter */
    pChunk = pBlock->pChunk;
    pChunk->cAllocatedBlocks--;

    VBGL_PH_ASSERT(pChunk->cAllocatedBlocks >= 0);

    /* Check if we can merge 2 free blocks. To simplify heap maintenance,
     * we will look at block after the just freed one.
     * This will not prevent us from detecting free memory chunks.
     * Also in most cases blocks are deallocated in reverse allocation order
     * and in that case the merging will work.
     */
    /** @todo r=bird: This simplistic approach is of course not working.
     *        However, since the heap lists aren't sorted in any way, we cannot
     *        cheaply determine where the block before us starts. */

    /* The physically adjacent block starts right after this block's data. */
    pNeighbour = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + pBlock->cbDataSize);

    if (   (uintptr_t)pNeighbour < (uintptr_t)pChunk + pChunk->cbSize
        && (pNeighbour->fu32Flags & VBGL_PH_BF_ALLOCATED) == 0)
    {
        /* The next block is free as well. */

        /* Adjust size of current memory block */
        pBlock->cbDataSize += pNeighbour->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);

        /* Exclude the next neighbour */
        vbglPhysHeapExcludeBlock(pNeighbour);
    }

    dumpheap("post merge");

    /* now check if there are 2 or more free (unused) chunks */
    if (pChunk->cAllocatedBlocks == 0)
    {
        VBGLPHYSHEAPCHUNK *pCurChunk;

        uint32_t cUnusedChunks = 0;

        for (pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
        {
            Assert(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
            if (pCurChunk->cAllocatedBlocks == 0)
                cUnusedChunks++;
        }

        if (cUnusedChunks > 1)
        {
            /* Delete current chunk, it will also exclude all free blocks
             * remaining in the chunk from the free list, so the pBlock
             * will also be invalid after this.
             */
            vbglPhysHeapChunkDelete(pChunk);
        }
    }

    dumpheap("post free");

    vbglPhysHeapLeave();
}
644
645#ifdef IN_TESTCASE /* For the testcase only */
646# include <iprt/err.h>
647
648/**
649 * Returns the sum of all free heap blocks.
650 *
651 * This is the amount of memory you can theoretically allocate if you do
652 * allocations exactly matching the free blocks.
653 *
654 * @returns The size of the free blocks.
655 * @returns 0 if heap was safely detected as being bad.
656 */
657DECLVBGL(size_t) VbglR0PhysHeapGetFreeSize(void)
658{
659 int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
660 AssertRCReturn(rc, 0);
661
662 size_t cbTotal = 0;
663 for (VBGLPHYSHEAPBLOCK *pCurBlock = g_vbgldata.pFreeBlocksHead; pCurBlock; pCurBlock = pCurBlock->pNext)
664 {
665 Assert(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
666 cbTotal += pCurBlock->cbDataSize;
667 }
668
669 RTSemFastMutexRelease(g_vbgldata.mutexHeap);
670 return cbTotal;
671}
672
/**
 * Validates the heap structure; caller must hold the heap mutex.
 *
 * Walks every chunk and every block within each chunk, checking signatures,
 * the block-to-chunk back pointers, block size sanity/alignment, flag values,
 * and the per-chunk allocated block count.
 *
 * @returns VINF_SUCCESS on a consistent heap, or a VERR_* status (also set in
 *          @a pErrInfo) describing the first inconsistency found.
 * @param   pErrInfo    Where to store details about a detected problem.
 */
static int vbglR0PhysHeapCheckLocked(PRTERRINFO pErrInfo)
{
    /*
     * Scan the blocks in each chunk.
     */
    unsigned cTotalFreeBlocks = 0;
    unsigned cTotalUsedBlocks = 0;
    for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
    {
        AssertReturn(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
                     RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurChunk=%p: magic=%#x\n", pCurChunk, pCurChunk->u32Signature));

        uintptr_t const          uEnd      = (uintptr_t)pCurChunk + pCurChunk->cbSize;
        const VBGLPHYSHEAPBLOCK *pCurBlock = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
        unsigned                 cUsedBlocks = 0;
        while ((uintptr_t)pCurBlock < uEnd)
        {
            AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
                                       "pCurBlock=%p: magic=%#x\n", pCurBlock, pCurBlock->u32Signature));
            AssertReturn(pCurBlock->pChunk == pCurChunk,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                       "pCurBlock=%p: pChunk=%p, expected %p\n", pCurBlock, pCurBlock->pChunk, pCurChunk));
            AssertReturn(   pCurBlock->cbDataSize >= 8
                         && pCurBlock->cbDataSize < _128M
                         && RT_ALIGN_32(pCurBlock->cbDataSize, sizeof(void *)) == pCurBlock->cbDataSize,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                       "pCurBlock=%p: cbDataSize=%#x\n", pCurBlock, pCurBlock->cbDataSize));
            AssertReturn(   pCurBlock->fu32Flags <= VBGL_PH_BF_ALLOCATED,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                       "pCurBlock=%p: fu32Flags=%#x\n", pCurBlock, pCurBlock->fu32Flags));
            if (pCurBlock->fu32Flags & VBGL_PH_BF_ALLOCATED)
                cUsedBlocks += 1;
            else
                cTotalFreeBlocks += 1;

            /* advance */
            pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbDataSize);
        }
        AssertReturn((uintptr_t)pCurBlock == uEnd,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurBlock=%p uEnd=%p\n", pCurBlock, uEnd));
        AssertReturn(cUsedBlocks == (uint32_t)pCurChunk->cAllocatedBlocks,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurChunk=%p: cAllocatedBlocks=%u, expected %u\n",
                                   pCurChunk, pCurChunk->cAllocatedBlocks, cUsedBlocks));
        cTotalUsedBlocks += cUsedBlocks;
    }
    /* NOTE(review): cTotalFreeBlocks/cTotalUsedBlocks are accumulated but not
       cross-checked against the alloc/free list lengths here - presumably a
       planned extension; confirm before relying on them. */
    return VINF_SUCCESS;
}
723
724/**
725 * Performs a heap check.
726 *
727 * @returns Problem description on failure, NULL on success.
728 */
729DECLVBGL(int) VbglR0PhysHeapCheck(PRTERRINFO pErrInfo)
730{
731 int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
732 AssertRCReturn(rc, 0);
733
734 rc = vbglR0PhysHeapCheckLocked(pErrInfo);
735
736 RTSemFastMutexRelease(g_vbgldata.mutexHeap);
737 return rc;
738}
739
740
741#endif /* IN_TESTCASE */
742
743
744DECLR0VBGL(int) VbglR0PhysHeapInit(void)
745{
746 g_vbgldata.mutexHeap = NIL_RTSEMFASTMUTEX;
747
748 /* Allocate the first chunk of the heap. */
749 VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
750 if (pBlock)
751 return RTSemFastMutexCreate(&g_vbgldata.mutexHeap);
752 return VERR_NO_MEMORY;
753}
754
755DECLR0VBGL(void) VbglR0PhysHeapTerminate(void)
756{
757 while (g_vbgldata.pChunkHead)
758 vbglPhysHeapChunkDelete(g_vbgldata.pChunkHead);
759
760 RTSemFastMutexDestroy(g_vbgldata.mutexHeap);
761}
762
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette