VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/lib/VBoxGuestR0LibPhysHeap.cpp@97918

Last change on this file since 97918 was 97918, checked in by vboxsync, 2 years ago

Add/VBoxGuestR0LibPhysHeap.cpp: Use a fixed minimum size for splitting off a free node during allocation. This used to be the same as the allocation size, which typically results in more internal fragmentation than necessary.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 25.2 KB
/* $Id: VBoxGuestR0LibPhysHeap.cpp 97918 2022-12-30 15:59:06Z vboxsync $ */
/** @file
 * VBoxGuestLibR0 - Physical memory heap.
 */

/*
 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "VBoxGuestR0LibInternal.h"

#include <iprt/assert.h>
#include <iprt/semaphore.h>
#include <iprt/alloc.h>

/** @page pg_vbglr0_phys_heap VBoxGuestLibR0 - Physical memory heap.
 *
 * The physical memory heap consists of a doubly linked list of large chunks
 * (VBGLDATA::pChunkHead).  Memory blocks are allocated within these chunks
 * and are members of either the allocated (VBGLDATA::pAllocBlocksHead) or the
 * free (VBGLDATA::pFreeBlocksHead) doubly linked list.
 *
 * When allocating a block, we search the free list for a suitable free block.
 * If there is no such block, a new chunk is allocated and the new block is
 * taken from the new chunk as the only chunk-sized free block.  The allocated
 * block is then removed from the free list and inserted into the allocated
 * list.
 *
 * When freeing a block, we validate the pointer, remove the block from the
 * allocated list and move it back to the free list.
 *
 * For each chunk we maintain a counter of allocated blocks.  If freeing a
 * block leaves two (or more) chunks entirely free, one of them is deallocated
 * immediately, so we always keep at most one entirely free chunk around.
 *
 * When freeing blocks, adjacent free blocks are merged.  The current
 * implementation only merges with the free block that follows the just freed
 * one, if any.
 */
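
/* Editor's usage sketch (illustrative, not part of the original file): the
   typical calling pattern for the public API implemented below.  Only
   VbglR0PhysHeapAlloc/GetPhysAddr/Free come from this file; everything about
   the caller is assumed.  Disabled, as it is an example only. */
#if 0
static int vbglPhysHeapUsageSketch(void)
{
    /* Allocate a request buffer from physically contiguous memory below 4 GB. */
    void *pvReq = VbglR0PhysHeapAlloc(64 /* bytes of user data */);
    if (!pvReq)
        return VERR_NO_MEMORY;

    /* The host is handed the 32-bit physical address of the buffer,
       typically via an OUT instruction to the VMMDev request port. */
    uint32_t const PhysAddr = VbglR0PhysHeapGetPhysAddr(pvReq);
    NOREF(PhysAddr);

    VbglR0PhysHeapFree(pvReq);
    return VINF_SUCCESS;
}
#endif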


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define VBGL_PH_ASSERT     Assert
#define VBGL_PH_ASSERT_MSG AssertMsg

// #define DUMPHEAP

#ifdef DUMPHEAP
# define VBGL_PH_dprintf(a) RTAssertMsg2Weak a
#else
# define VBGL_PH_dprintf(a)
#endif

/* Heap block signature */
#define VBGL_PH_BLOCKSIGNATURE (0xADDBBBBB)


/* Heap chunk signature */
#define VBGL_PH_CHUNKSIGNATURE (0xADDCCCCC)
/* Heap chunk allocation unit */
#define VBGL_PH_CHUNKSIZE (0x10000)

/* Heap block bit flags */
#define VBGL_PH_BF_ALLOCATED (0x1)

/** Threshold at which to split out a tail free block when allocating.
 *
 * The value gives the amount of user space, i.e. excluding the header.
 *
 * Using 32 bytes based on VMMDev.h request sizes.  The smallest requests are
 * 24 bytes, i.e. only the header, and there are at least 4 of these.  There
 * are at least 10 requests of size 28 bytes and at least 11 of size 32 bytes.
 * So, 32 bytes would fit some 25 requests out of about 60, which is
 * reasonable.
 */
#define VBGL_PH_MIN_SPLIT_FREE_BLOCK 32

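/* Editor's worked example of the split rule (illustrative; assumes a 32-bit
   guest where sizeof(VBGLPHYSHEAPBLOCK) is 24 bytes).  For a 64 byte
   allocation from a 256 byte free block, the condition used in
   VbglR0PhysHeapAlloc() holds:
       256 >= 2 * 24 + 32 + 64 = 144
   so a free tail block with 256 - 64 - 24 = 168 bytes of user space is split
   off.  A 140 byte free block fails the test (140 < 144); it is handed out
   whole and the surplus 140 - 64 = 76 bytes remain internal fragmentation. */
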

/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
struct VBGLPHYSHEAPBLOCK
{
    /** Magic value (VBGL_PH_BLOCKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of user data in the block. Does not include this block header. */
    uint32_t cbDataSize;

    uint32_t fu32Flags;

    VBGLPHYSHEAPBLOCK *pNext;
    VBGLPHYSHEAPBLOCK *pPrev;

    VBGLPHYSHEAPCHUNK *pChunk;
};

struct VBGLPHYSHEAPCHUNK
{
    /** Magic value (VBGL_PH_CHUNKSIGNATURE). */
    uint32_t u32Signature;

    /** Size of the chunk. Includes the chunk header. */
    uint32_t cbSize;

    /** Physical address of the chunk (contiguous). */
    uint32_t physAddr;

    /** Number of allocated blocks in the chunk */
    int32_t cAllocatedBlocks;

    VBGLPHYSHEAPCHUNK *pNext;
    VBGLPHYSHEAPCHUNK *pPrev;
};
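
/* Editor's sketch of the resulting chunk layout (illustrative):
 *
 *   +---------------------+ <- RTMemContAlloc() result, pChunk->physAddr
 *   | VBGLPHYSHEAPCHUNK   |
 *   +---------------------+
 *   | VBGLPHYSHEAPBLOCK   |    block header
 *   | user data ...       | <- address returned by VbglR0PhysHeapAlloc()
 *   +---------------------+
 *   | VBGLPHYSHEAPBLOCK   |
 *   | user data ...       |
 *   +---------------------+
 *   | ...                 |
 *   +---------------------+ <- (uintptr_t)pChunk + pChunk->cbSize
 *
 * Blocks are laid out back to back, so the next header starts at
 * (uintptr_t)(pBlock + 1) + pBlock->cbDataSize; this is how
 * vbglPhysHeapChunkDelete() and the testcase checker walk a chunk. */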


#ifndef DUMPHEAP
# define dumpheap(pszWhere) do { } while (0)
#else
void dumpheap(const char *pszWhere)
{
    VBGL_PH_dprintf(("VBGL_PH dump at '%s'\n", pszWhere));

    VBGL_PH_dprintf(("Chunks:\n"));

    VBGLPHYSHEAPCHUNK *pChunk = g_vbgldata.pChunkHead;

    while (pChunk)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, allocated = %8d, phys = %08X\n",
                         pChunk, pChunk->pNext, pChunk->pPrev, pChunk->u32Signature, pChunk->cbSize, pChunk->cAllocatedBlocks, pChunk->physAddr));

        pChunk = pChunk->pNext;
    }

    VBGL_PH_dprintf(("Allocated blocks:\n"));

    VBGLPHYSHEAPBLOCK *pBlock = g_vbgldata.pAllocBlocksHead;

    while (pBlock)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, flags = %08X, pChunk = %p\n",
                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fu32Flags, pBlock->pChunk));

        pBlock = pBlock->pNext;
    }

    VBGL_PH_dprintf(("Free blocks:\n"));

    pBlock = g_vbgldata.pFreeBlocksHead;

    while (pBlock)
    {
        VBGL_PH_dprintf(("%p: pNext = %p, pPrev = %p, sign = %08X, size = %8d, flags = %08X, pChunk = %p\n",
                         pBlock, pBlock->pNext, pBlock->pPrev, pBlock->u32Signature, pBlock->cbDataSize, pBlock->fu32Flags, pBlock->pChunk));

        pBlock = pBlock->pNext;
    }

    VBGL_PH_dprintf(("VBGL_PH dump at '%s' done\n", pszWhere));
}
#endif


DECLINLINE(void *) vbglPhysHeapBlock2Data(VBGLPHYSHEAPBLOCK *pBlock)
{
    if (pBlock)
        return pBlock + 1;
    return NULL;
}


DECLINLINE(VBGLPHYSHEAPBLOCK *) vbglPhysHeapData2Block(void *pv)
{
    if (pv)
    {
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)pv - 1;
        AssertMsgReturn(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                        ("pBlock->u32Signature = %08X\n", pBlock->u32Signature),
                        NULL);
        return pBlock;
    }
    return NULL;
}


DECLINLINE(int) vbglPhysHeapEnter(void)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);

    VBGL_PH_ASSERT_MSG(RT_SUCCESS(rc), ("Failed to request heap mutex, rc = %Rrc\n", rc));

    return rc;
}


DECLINLINE(void) vbglPhysHeapLeave(void)
{
    RTSemFastMutexRelease(g_vbgldata.mutexHeap);
}


static void vbglPhysHeapInitBlock(VBGLPHYSHEAPBLOCK *pBlock, VBGLPHYSHEAPCHUNK *pChunk, uint32_t cbDataSize)
{
    VBGL_PH_ASSERT(pBlock != NULL);
    VBGL_PH_ASSERT(pChunk != NULL);

    pBlock->u32Signature = VBGL_PH_BLOCKSIGNATURE;
    pBlock->cbDataSize   = cbDataSize;
    pBlock->fu32Flags    = 0;
    pBlock->pNext        = NULL;
    pBlock->pPrev        = NULL;
    pBlock->pChunk       = pChunk;
}


static void vbglPhysHeapInsertBlock(VBGLPHYSHEAPBLOCK *pInsertAfter, VBGLPHYSHEAPBLOCK *pBlock)
{
    VBGL_PH_ASSERT_MSG(pBlock->pNext == NULL, ("pBlock->pNext = %p\n", pBlock->pNext));
    VBGL_PH_ASSERT_MSG(pBlock->pPrev == NULL, ("pBlock->pPrev = %p\n", pBlock->pPrev));

    if (pInsertAfter)
    {
        pBlock->pNext = pInsertAfter->pNext;
        pBlock->pPrev = pInsertAfter;

        if (pInsertAfter->pNext)
            pInsertAfter->pNext->pPrev = pBlock;

        pInsertAfter->pNext = pBlock;
    }
    else
    {
        /* inserting at the head of the list */
        pBlock->pPrev = NULL;

        if (pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED)
        {
            pBlock->pNext = g_vbgldata.pAllocBlocksHead;

            if (g_vbgldata.pAllocBlocksHead)
                g_vbgldata.pAllocBlocksHead->pPrev = pBlock;

            g_vbgldata.pAllocBlocksHead = pBlock;
        }
        else
        {
            pBlock->pNext = g_vbgldata.pFreeBlocksHead;

            if (g_vbgldata.pFreeBlocksHead)
                g_vbgldata.pFreeBlocksHead->pPrev = pBlock;

            g_vbgldata.pFreeBlocksHead = pBlock;
        }
    }
}


/**
 * Unlinks @a pBlock from the chain it is on.
 */
static void vbglPhysHeapExcludeBlock(VBGLPHYSHEAPBLOCK *pBlock)
{
    if (pBlock->pNext)
        pBlock->pNext->pPrev = pBlock->pPrev;
    /* else: this is the tail of the list, but we do not maintain tail
             pointers for the block lists, so there is nothing to do. */

    if (pBlock->pPrev)
        pBlock->pPrev->pNext = pBlock->pNext;
    else if (pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED)
    {
        Assert(g_vbgldata.pAllocBlocksHead == pBlock);
        g_vbgldata.pAllocBlocksHead = pBlock->pNext;
    }
    else
    {
        Assert(g_vbgldata.pFreeBlocksHead == pBlock);
        g_vbgldata.pFreeBlocksHead = pBlock->pNext;
    }

    pBlock->pNext = NULL;
    pBlock->pPrev = NULL;
}

static VBGLPHYSHEAPBLOCK *vbglPhysHeapChunkAlloc(uint32_t cbMinBlock)
{
    RTCCPHYS           PhysAddr = NIL_RTHCPHYS;
    VBGLPHYSHEAPCHUNK *pChunk;
    uint32_t           cbChunk;
    VBGL_PH_dprintf(("Allocating new chunk for %#x byte allocation\n", cbMinBlock));
    AssertReturn(cbMinBlock < _128M, NULL); /* paranoia */

    /* Compute the size of the new chunk, rounding up to the next multiple of
       the chunk size, which must be a power of 2. */
    Assert(RT_IS_POWER_OF_TWO(VBGL_PH_CHUNKSIZE));
    cbChunk = cbMinBlock + sizeof(VBGLPHYSHEAPCHUNK) + sizeof(VBGLPHYSHEAPBLOCK);
    cbChunk = RT_ALIGN_32(cbChunk, VBGL_PH_CHUNKSIZE);

    /* This function allocates physically contiguous memory below 4 GB.  The
       4 GB limitation stems from using a 32-bit OUT instruction to pass a
       block's physical address to the host. */
    pChunk = (VBGLPHYSHEAPCHUNK *)RTMemContAlloc(&PhysAddr, cbChunk);
    /** @todo retry with smaller size if it fails, treating VBGL_PH_CHUNKSIZE as
     *        a guideline rather than absolute minimum size. */
    if (pChunk)
    {
        VBGLPHYSHEAPCHUNK *pOldHeadChunk;
        VBGLPHYSHEAPBLOCK *pBlock;
        AssertRelease(PhysAddr < _4G && PhysAddr + cbChunk <= _4G);

        /* Init the new chunk. */
        pChunk->u32Signature     = VBGL_PH_CHUNKSIGNATURE;
        pChunk->cbSize           = cbChunk;
        pChunk->physAddr         = (uint32_t)PhysAddr;
        pChunk->cAllocatedBlocks = 0;
        pChunk->pNext            = NULL;
        pChunk->pPrev            = NULL;

        /* Initialize the free block, which now occupies the entire chunk. */
        pBlock = (VBGLPHYSHEAPBLOCK *)(pChunk + 1);
        vbglPhysHeapInitBlock(pBlock, pChunk, cbChunk - sizeof(VBGLPHYSHEAPCHUNK) - sizeof(VBGLPHYSHEAPBLOCK));
        vbglPhysHeapInsertBlock(NULL, pBlock);

        /* Add the chunk to the list. */
        pOldHeadChunk = g_vbgldata.pChunkHead;
        pChunk->pNext = pOldHeadChunk;
        if (pOldHeadChunk)
            pOldHeadChunk->pPrev = pChunk;
        g_vbgldata.pChunkHead = pChunk;

        VBGL_PH_dprintf(("Allocated chunk %p LB %#x, block %p LB %#x\n", pChunk, cbChunk, pBlock, pBlock->cbDataSize));
        return pBlock;
    }
    LogRel(("vbglPhysHeapChunkAlloc: failed to alloc %u (%#x) contiguous bytes.\n", cbChunk, cbChunk));
    return NULL;
}
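
/* Editor's sizing example (illustrative; assumes 24 byte chunk and block
   headers as on a 32-bit guest): for cbMinBlock = 0x20000 (128 KiB), the
   unrounded size is 0x20000 + 24 + 24 = 0x20030, and
   RT_ALIGN_32(0x20030, VBGL_PH_CHUNKSIZE) rounds this up to 0x30000, i.e.
   three 64 KiB chunk units, leaving 0x30000 - 24 - 24 = 0x2FFD0 bytes of
   user space for the initial free block. */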


static void vbglPhysHeapChunkDelete(VBGLPHYSHEAPCHUNK *pChunk)
{
    uintptr_t uEnd, uCur;
    VBGL_PH_ASSERT(pChunk != NULL);
    VBGL_PH_ASSERT_MSG(pChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE, ("pChunk->u32Signature = %08X\n", pChunk->u32Signature));

    VBGL_PH_dprintf(("Deleting chunk %p size %x\n", pChunk, pChunk->cbSize));

    /* first scan the chunk and exclude (unlink) all blocks from the lists */

    uEnd = (uintptr_t)pChunk + pChunk->cbSize;
    uCur = (uintptr_t)(pChunk + 1);

    while (uCur < uEnd)
    {
        VBGLPHYSHEAPBLOCK *pBlock = (VBGLPHYSHEAPBLOCK *)uCur;

        uCur += pBlock->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);

        vbglPhysHeapExcludeBlock(pBlock);
    }

    VBGL_PH_ASSERT_MSG(uCur == uEnd, ("uCur = %p, uEnd = %p, pChunk->cbSize = %08X\n", uCur, uEnd, pChunk->cbSize));

    /* Exclude the chunk from the chunk list */
    if (pChunk->pNext)
        pChunk->pNext->pPrev = pChunk->pPrev;
    /* else: we do not maintain a tail pointer. */

    if (pChunk->pPrev)
        pChunk->pPrev->pNext = pChunk->pNext;
    else
    {
        Assert(g_vbgldata.pChunkHead == pChunk);
        g_vbgldata.pChunkHead = pChunk->pNext;
    }

    RTMemContFree(pChunk, pChunk->cbSize);
}


DECLR0VBGL(void *) VbglR0PhysHeapAlloc(uint32_t cbSize)
{
    VBGLPHYSHEAPBLOCK *pBlock, *pIter;
    int rc;

    /*
     * Align the size to a pointer size to avoid getting misaligned header pointers and whatnot.
     */
    cbSize = RT_ALIGN_32(cbSize, sizeof(void *));

    rc = vbglPhysHeapEnter();
    if (RT_FAILURE(rc))
        return NULL;

    dumpheap("pre alloc");

    /*
     * Search the free list.  We do this in linear fashion as we don't expect
     * there to be many blocks in the heap.
     */

    pBlock = NULL;
    if (cbSize <= PAGE_SIZE / 4 * 3)
    {
        /* Smaller than 3/4 of a page: Prefer a free block that can keep the
           request within a single page, so HGCM processing in VMMDev can use
           page locks instead of several reads and writes. */

        VBGLPHYSHEAPBLOCK *pFallback = NULL;
        for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
            if (pIter->cbDataSize >= cbSize)
            {
                if (pIter->cbDataSize == cbSize)
                {
                    if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
                    {
                        pBlock = pIter;
                        break;
                    }
                    pFallback = pIter;
                }
                else
                {
                    if (!pFallback || pIter->cbDataSize < pFallback->cbDataSize)
                        pFallback = pIter;
                    if (PAGE_SIZE - ((uintptr_t)vbglPhysHeapBlock2Data(pIter) & PAGE_OFFSET_MASK) >= cbSize)
                        if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
                            pBlock = pIter;
                }
            }

        if (!pBlock)
            pBlock = pFallback;
    }
    else
    {
        /* Larger than 3/4 of a page: Find the smallest free list match. */

        for (pIter = g_vbgldata.pFreeBlocksHead; pIter != NULL; pIter = pIter->pNext)
            if (pIter->cbDataSize >= cbSize)
            {
                if (pIter->cbDataSize == cbSize)
                {
                    /* Exact match - we're done! */
                    pBlock = pIter;
                    break;
                }

                /* Looking for the free block with the nearest size. */
                if (!pBlock || pIter->cbDataSize < pBlock->cbDataSize)
                    pBlock = pIter;
            }
    }

    if (!pBlock)
    {
        /* No free blocks, allocate a new chunk; the only free block of the
           chunk will be returned. */
        pBlock = vbglPhysHeapChunkAlloc(cbSize);
    }

    if (pBlock)
    {
        VBGL_PH_ASSERT_MSG(pBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                           ("pBlock = %p, pBlock->u32Signature = %08X\n", pBlock, pBlock->u32Signature));
        VBGL_PH_ASSERT_MSG((pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) == 0,
                           ("pBlock = %p, pBlock->fu32Flags = %08X\n", pBlock, pBlock->fu32Flags));

        /* We have a free block, either found or allocated. */

        if (pBlock->cbDataSize >= sizeof(VBGLPHYSHEAPBLOCK) * 2 + VBGL_PH_MIN_SPLIT_FREE_BLOCK + cbSize)
        {
            /* There is enough space left over to form a worthwhile free
             * block, so split off the tail end into a new free list entry.
             */
            pIter = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + cbSize);

            /* Init the new 'pIter' block, initialized blocks are always marked as free. */
            vbglPhysHeapInitBlock(pIter, pBlock->pChunk, pBlock->cbDataSize - cbSize - sizeof(VBGLPHYSHEAPBLOCK));

            pBlock->cbDataSize = cbSize;

            /* Insert the new 'pIter' block after the 'pBlock' in the free list. */
            vbglPhysHeapInsertBlock(pBlock, pIter);
        }

        /* Exclude pBlock from the free list. */
        vbglPhysHeapExcludeBlock(pBlock);

        /* Mark as allocated. */
        pBlock->fu32Flags |= VBGL_PH_BF_ALLOCATED;

        /* Insert into the allocated list. */
        vbglPhysHeapInsertBlock(NULL, pBlock);

        /* Adjust the chunk's allocated blocks counter. */
        pBlock->pChunk->cAllocatedBlocks++;
    }

    dumpheap("post alloc");

    vbglPhysHeapLeave();
    VBGL_PH_dprintf(("VbglR0PhysHeapAlloc %x size %x\n", vbglPhysHeapBlock2Data(pBlock), pBlock->cbDataSize));

    return vbglPhysHeapBlock2Data(pBlock);
}
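
/* Editor's note on the single-page preference above (illustrative
   arithmetic): with PAGE_SIZE = 4096, a free block whose user data starts at
   offset 0xF40 within a page has 4096 - 0xF40 = 192 bytes left before the
   page boundary, so it is only preferred for requests of up to 192 bytes;
   anything larger would cross into the next page and defeat VMMDev's
   single-page fast path. */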

DECLR0VBGL(uint32_t) VbglR0PhysHeapGetPhysAddr(void *pv)
{
    uint32_t physAddr = 0;
    VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapData2Block(pv);

    if (pBlock)
    {
        VBGL_PH_ASSERT_MSG((pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) != 0,
                           ("pBlock = %p, pBlock->fu32Flags = %08X\n", pBlock, pBlock->fu32Flags));

        if (pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED)
            physAddr = pBlock->pChunk->physAddr + (uint32_t)((uintptr_t)pv - (uintptr_t)pBlock->pChunk);
    }

    return physAddr;
}
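
/* Editor's address example (illustrative): for a chunk at physical address
   0xABC0000 and a pointer 0x1230 bytes into the chunk, the function returns
   0xABC0000 + 0x1230 = 0xABC1230.  The result always fits in 32 bits because
   chunks are allocated below 4 GB (see vbglPhysHeapChunkAlloc). */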

DECLR0VBGL(void) VbglR0PhysHeapFree(void *pv)
{
    VBGLPHYSHEAPBLOCK *pBlock;
    VBGLPHYSHEAPBLOCK *pNeighbour;
    VBGLPHYSHEAPCHUNK *pChunk;

    int rc = vbglPhysHeapEnter();
    if (RT_FAILURE(rc))
        return;

    dumpheap("pre free");

    pBlock = vbglPhysHeapData2Block(pv);

    if (!pBlock)
    {
        vbglPhysHeapLeave();
        return;
    }

    VBGL_PH_ASSERT_MSG((pBlock->fu32Flags & VBGL_PH_BF_ALLOCATED) != 0,
                       ("pBlock = %p, pBlock->fu32Flags = %08X\n", pBlock, pBlock->fu32Flags));

    /* Exclude from the allocated list. */
    vbglPhysHeapExcludeBlock(pBlock);

    dumpheap("post exclude");

    VBGL_PH_dprintf(("VbglR0PhysHeapFree %p size %x\n", pv, pBlock->cbDataSize));

    /* Mark as free. */
    pBlock->fu32Flags &= ~VBGL_PH_BF_ALLOCATED;

    /* Insert into the free list. */
    vbglPhysHeapInsertBlock(NULL, pBlock);

    dumpheap("post insert");

    /* Adjust the chunk's allocated blocks counter. */
    pChunk = pBlock->pChunk;
    pChunk->cAllocatedBlocks--;

    VBGL_PH_ASSERT(pChunk->cAllocatedBlocks >= 0);

    /* Check if we can merge two free blocks.  To simplify heap maintenance,
     * we only look at the block after the just freed one.  This does not
     * prevent us from detecting entirely free chunks, and in most cases
     * blocks are deallocated in reverse allocation order, where this merging
     * works.
     */
    /** @todo r=bird: This simplistic approach is of course not working.
     *        However, since the heap lists aren't sorted in any way, we cannot
     *        cheaply determine where the block before us starts. */

    pNeighbour = (VBGLPHYSHEAPBLOCK *)((uintptr_t)(pBlock + 1) + pBlock->cbDataSize);

    if (   (uintptr_t)pNeighbour < (uintptr_t)pChunk + pChunk->cbSize
        && (pNeighbour->fu32Flags & VBGL_PH_BF_ALLOCATED) == 0)
    {
        /* The next block is free as well. */

        /* Adjust the size of the current memory block. */
        pBlock->cbDataSize += pNeighbour->cbDataSize + sizeof(VBGLPHYSHEAPBLOCK);

        /* Exclude the next neighbour. */
        vbglPhysHeapExcludeBlock(pNeighbour);
    }

    dumpheap("post merge");

    /* Now check if there are two or more entirely free (unused) chunks. */
    if (pChunk->cAllocatedBlocks == 0)
    {
        VBGLPHYSHEAPCHUNK *pCurChunk;

        uint32_t cUnusedChunks = 0;

        for (pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
        {
            Assert(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE);
            if (pCurChunk->cAllocatedBlocks == 0)
                cUnusedChunks++;
        }

        if (cUnusedChunks > 1)
        {
            /* Delete the current chunk.  This also excludes all free blocks
             * remaining in the chunk from the free list, so pBlock is
             * invalid after this.
             */
            vbglPhysHeapChunkDelete(pChunk);
        }
    }

    dumpheap("post free");

    vbglPhysHeapLeave();
}
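
/* Editor's merge example (illustrative; 24 byte block headers assumed):
   freeing a 64 byte block that is immediately followed by a free 168 byte
   block produces a single free block carrying 64 + 24 + 168 = 256 bytes of
   user data, exactly reversing the split shown for VbglR0PhysHeapAlloc()
   above. */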

#ifdef IN_TESTCASE /* For the testcase only */
# include <iprt/err.h>

/**
 * Returns the sum of all free heap blocks.
 *
 * This is the amount of memory you can theoretically allocate if you do
 * allocations exactly matching the free blocks.
 *
 * @returns The size of the free blocks.
 * @returns 0 if heap was safely detected as being bad.
 */
DECLVBGL(size_t) VbglR0PhysHeapGetFreeSize(void)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
    AssertRCReturn(rc, 0);

    size_t cbTotal = 0;
    for (VBGLPHYSHEAPBLOCK *pCurBlock = g_vbgldata.pFreeBlocksHead; pCurBlock; pCurBlock = pCurBlock->pNext)
    {
        Assert(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE);
        cbTotal += pCurBlock->cbDataSize;
    }

    RTSemFastMutexRelease(g_vbgldata.mutexHeap);
    return cbTotal;
}
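
/* Editor's testcase sketch (illustrative): how the helpers in this section
   might be combined in a unit test.  RTTESTI_CHECK/RTTESTI_CHECK_RC are IPRT
   testcase macros from iprt/test.h; the surrounding harness and the use of
   VbglR0PhysHeapCheck (defined further down) are assumptions.  The final
   equality only holds in this simple split-and-merge scenario.  Disabled, as
   it is an example only. */
#if 0
static void tstPhysHeapSanitySketch(void)
{
    RTERRINFOSTATIC ErrInfo;
    RTTESTI_CHECK_RC(VbglR0PhysHeapCheck(RTErrInfoInitStatic(&ErrInfo)), VINF_SUCCESS);

    size_t const cbFreeBefore = VbglR0PhysHeapGetFreeSize();
    void *pv = VbglR0PhysHeapAlloc(128);
    RTTESTI_CHECK(pv != NULL);
    RTTESTI_CHECK(VbglR0PhysHeapGetFreeSize() < cbFreeBefore);

    VbglR0PhysHeapFree(pv); /* merges back with the split-off tail block */
    RTTESTI_CHECK(VbglR0PhysHeapGetFreeSize() == cbFreeBefore);
}
#endif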

static int vbglR0PhysHeapCheckLocked(PRTERRINFO pErrInfo)
{
    /*
     * Scan the blocks in each chunk.
     */
    unsigned cTotalFreeBlocks = 0;
    unsigned cTotalUsedBlocks = 0;
    for (VBGLPHYSHEAPCHUNK *pCurChunk = g_vbgldata.pChunkHead; pCurChunk; pCurChunk = pCurChunk->pNext)
    {
        AssertReturn(pCurChunk->u32Signature == VBGL_PH_CHUNKSIGNATURE,
                     RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC, "pCurChunk=%p: magic=%#x\n", pCurChunk, pCurChunk->u32Signature));

        uintptr_t const          uEnd        = (uintptr_t)pCurChunk + pCurChunk->cbSize;
        const VBGLPHYSHEAPBLOCK *pCurBlock   = (const VBGLPHYSHEAPBLOCK *)(pCurChunk + 1);
        unsigned                 cUsedBlocks = 0;
        while ((uintptr_t)pCurBlock < uEnd)
        {
            AssertReturn(pCurBlock->u32Signature == VBGL_PH_BLOCKSIGNATURE,
                         RTErrInfoSetF(pErrInfo, VERR_INVALID_MAGIC,
                                       "pCurBlock=%p: magic=%#x\n", pCurBlock, pCurBlock->u32Signature));
            AssertReturn(pCurBlock->pChunk == pCurChunk,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_2,
                                       "pCurBlock=%p: pChunk=%p, expected %p\n", pCurBlock, pCurBlock->pChunk, pCurChunk));
            AssertReturn(   pCurBlock->cbDataSize >= 8
                         && pCurBlock->cbDataSize < _128M
                         && RT_ALIGN_32(pCurBlock->cbDataSize, sizeof(void *)) == pCurBlock->cbDataSize,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                       "pCurBlock=%p: cbDataSize=%#x\n", pCurBlock, pCurBlock->cbDataSize));
            AssertReturn(pCurBlock->fu32Flags <= VBGL_PH_BF_ALLOCATED,
                         RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_3,
                                       "pCurBlock=%p: fu32Flags=%#x\n", pCurBlock, pCurBlock->fu32Flags));
            if (pCurBlock->fu32Flags & VBGL_PH_BF_ALLOCATED)
                cUsedBlocks += 1;
            else
                cTotalFreeBlocks += 1;

            /* advance */
            pCurBlock = (const VBGLPHYSHEAPBLOCK *)((uintptr_t)(pCurBlock + 1) + pCurBlock->cbDataSize);
        }
        AssertReturn((uintptr_t)pCurBlock == uEnd,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurBlock=%p uEnd=%p\n", pCurBlock, uEnd));
        AssertReturn(cUsedBlocks == (uint32_t)pCurChunk->cAllocatedBlocks,
                     RTErrInfoSetF(pErrInfo, VERR_INTERNAL_ERROR_4,
                                   "pCurChunk=%p: cAllocatedBlocks=%u, expected %u\n",
                                   pCurChunk, pCurChunk->cAllocatedBlocks, cUsedBlocks));
        cTotalUsedBlocks += cUsedBlocks;
    }
    return VINF_SUCCESS;
}

/**
 * Performs a heap check.
 *
 * @returns VINF_SUCCESS on success, failure status with details in
 *          @a pErrInfo on failure.
 */
DECLVBGL(int) VbglR0PhysHeapCheck(PRTERRINFO pErrInfo)
{
    int rc = RTSemFastMutexRequest(g_vbgldata.mutexHeap);
    AssertRCReturn(rc, 0);

    rc = vbglR0PhysHeapCheckLocked(pErrInfo);

    RTSemFastMutexRelease(g_vbgldata.mutexHeap);
    return rc;
}


#endif /* IN_TESTCASE */


DECLR0VBGL(int) VbglR0PhysHeapInit(void)
{
    g_vbgldata.mutexHeap = NIL_RTSEMFASTMUTEX;

    /* Allocate the first chunk of the heap. */
    VBGLPHYSHEAPBLOCK *pBlock = vbglPhysHeapChunkAlloc(0);
    if (pBlock)
        return RTSemFastMutexCreate(&g_vbgldata.mutexHeap);
    return VERR_NO_MEMORY;
}

DECLR0VBGL(void) VbglR0PhysHeapTerminate(void)
{
    while (g_vbgldata.pChunkHead)
        vbglPhysHeapChunkDelete(g_vbgldata.pChunkHead);

    RTSemFastMutexDestroy(g_vbgldata.mutexHeap);
}
