VirtualBox

source: vbox/trunk/src/VBox/Runtime/common/alloc/memcache.cpp@26419

Last change on this file since 26419 was 26419, checked in by vboxsync, 15 years ago

tstRTMemCache: Testcase + bug fixes. Works pretty well, but should try tune it a bit.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.1 KB
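The file below implements IPRT's memory object allocation cache (RTMemCache), which the tstRTMemCache testcase mentioned above exercises. As a quick orientation before the listing, here is a minimal usage sketch built only from the public entry points defined in this file (RTMemCacheCreate, RTMemCacheAlloc, RTMemCacheFree, RTMemCacheDestroy); the MYOBJ type, the myUseCache function and the 128-object cap are made-up example values, not part of the source.

#include <iprt/memcache.h>
#include <iprt/err.h>

typedef struct MYOBJ { uint32_t iSeq; } MYOBJ;  /* hypothetical object type */

static int myUseCache(void)
{
    /* Default alignment (cbAlignment = 0), no ctor/dtor, at most 128 objects. */
    RTMEMCACHE hCache;
    int rc = RTMemCacheCreate(&hCache, sizeof(MYOBJ), 0 /*cbAlignment*/, 128 /*cMaxObjects*/,
                              NULL /*pfnCtor*/, NULL /*pfnDtor*/, NULL /*pvUser*/);
    if (RT_FAILURE(rc))
        return rc;

    MYOBJ *pObj = (MYOBJ *)RTMemCacheAlloc(hCache);  /* returns NULL on failure */
    if (pObj)
    {
        pObj->iSeq = 1;
        RTMemCacheFree(hCache, pObj);   /* all objects must be freed ... */
    }

    return RTMemCacheDestroy(hCache);   /* ... before the cache goes away */
}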
/* $Id: memcache.cpp 26419 2010-02-10 23:21:14Z vboxsync $ */
/** @file
 * IPRT - Memory Object Allocation Cache.
 */

/*
 * Copyright (C) 2006-2010 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include <iprt/memcache.h>
#include "internal/iprt.h"

#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/critsect.h>
#include <iprt/err.h>
#include <iprt/mem.h>
#include <iprt/param.h>

#include "internal/magics.h"


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/** Pointer to a cache instance. */
typedef struct RTMEMCACHEINT *PRTMEMCACHEINT;
/** Pointer to a cache page. */
typedef struct RTMEMCACHEPAGE *PRTMEMCACHEPAGE;


/**
 * A cache page.
 *
 * This is a page of memory that we split up into a bunch of object-sized
 * chunks and hand out to the cache users.  The bitmap is updated in an atomic
 * fashion so that we don't have to take any locks when freeing or allocating
 * memory.
 */
typedef struct RTMEMCACHEPAGE
{
    /** Pointer to the cache owning this page.
     * This is used for validation purposes only. */
    PRTMEMCACHEINT pCache;
    /** Pointer to the next page.
     * This is marked as volatile since we'll be adding new entries to the list
     * without taking any locks. */
    PRTMEMCACHEPAGE volatile pNext;
    /** The number of free objects. */
    int32_t volatile cFree;
    /** The number of objects on this page. */
    uint32_t cObjects;
    /** Bitmap tracking allocated blocks. */
    void volatile *pbmAlloc;
    /** Bitmap tracking which blocks have been through the constructor. */
    void volatile *pbmCtor;
    /** Pointer to the object array. */
    uint8_t *pbObjects;
} RTMEMCACHEPAGE;


/**
 * Memory object cache instance.
 */
typedef struct RTMEMCACHEINT
{
    /** Magic value (RTMEMCACHE_MAGIC). */
    uint32_t u32Magic;
    /** The object size. */
    uint32_t cbObject;
    /** Object alignment. */
    uint32_t cbAlignment;
    /** The per page object count. */
    uint32_t cPerPage;
    /** Number of bits in the bitmap.
     * @remarks This is greater than or equal to cPerPage, and it is aligned
     *          such that the search operation will be most efficient on
     *          x86/AMD64. */
    uint32_t cBits;
    /** The maximum number of objects. */
    uint32_t cMax;
    /** The total object count. */
    uint32_t volatile cTotal;
    /** The number of free objects. */
    int32_t volatile cFree;
    /** Head of the page list. */
    PRTMEMCACHEPAGE pPageHead;
    /** This may point to a page with free entries. */
    PRTMEMCACHEPAGE volatile pPageHint;
    /** Constructor callback. */
    PFNMEMCACHECTOR pfnCtor;
    /** Destructor callback. */
    PFNMEMCACHEDTOR pfnDtor;
    /** Callback argument. */
    void *pvUser;
    /** Critical section serializing page allocation and similar. */
    RTCRITSECT CritSect;
} RTMEMCACHEINT;


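/*
 * Editor's sketch (not part of the original file): example constructor and
 * destructor callbacks, shaped after the way pfnCtor/pfnDtor are invoked
 * further down (the ctor returns an IPRT status code, the dtor returns void,
 * and both receive the cache handle, the object pointer and pvUser).  The
 * authoritative PFNMEMCACHECTOR/PFNMEMCACHEDTOR typedefs live in
 * iprt/memcache.h; MYOBJ and the 256-byte buffer are made-up examples.
 */
#if 0 /* illustration only */
typedef struct MYOBJ { void *pvBuf; } MYOBJ;

static DECLCALLBACK(int) myObjCtor(RTMEMCACHE hMemCache, void *pvObj, void *pvUser)
{
    MYOBJ *pObj = (MYOBJ *)pvObj;
    NOREF(hMemCache); NOREF(pvUser);
    pObj->pvBuf = RTMemAlloc(256);      /* one-time init; the ctor bitmap makes this run once per object */
    return pObj->pvBuf ? VINF_SUCCESS : VERR_NO_MEMORY;
}

static DECLCALLBACK(void) myObjDtor(RTMEMCACHE hMemCache, void *pvObj, void *pvUser)
{
    NOREF(hMemCache); NOREF(pvUser);
    RTMemFree(((MYOBJ *)pvObj)->pvBuf); /* run from RTMemCacheDestroy for every constructed object */
}
#endif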
RTDECL(int) RTMemCacheCreate(PRTMEMCACHE phMemCache, size_t cbObject, size_t cbAlignment, uint32_t cMaxObjects,
                             PFNMEMCACHECTOR pfnCtor, PFNMEMCACHEDTOR pfnDtor, void *pvUser)
{
    AssertPtr(phMemCache);
    AssertPtrNull(pfnCtor);
    AssertPtrNull(pfnDtor);
    AssertReturn(cbObject > 0, VERR_INVALID_PARAMETER);
    AssertReturn(cbObject <= PAGE_SIZE / 8, VERR_INVALID_PARAMETER);

    if (cbAlignment == 0)
    {
        cbAlignment = UINT32_C(1) << ASMBitLastSetU32((uint32_t)cbObject);
        if (cbAlignment > 64)
            cbAlignment = 64;
    }
    else
    {
        AssertReturn(!((cbAlignment - 1) & cbAlignment), VERR_NOT_POWER_OF_TWO);
        AssertReturn(cbAlignment <= 64, VERR_OUT_OF_RANGE);
    }

    /*
     * Allocate and initialize the instance memory.
     */
    RTMEMCACHEINT *pThis = (RTMEMCACHEINT *)RTMemAlloc(sizeof(*pThis));
    if (!pThis)
        return VERR_NO_MEMORY;
    int rc = RTCritSectInit(&pThis->CritSect);
    if (RT_FAILURE(rc))
    {
        RTMemFree(pThis);
        return rc;
    }

    pThis->u32Magic    = RTMEMCACHE_MAGIC;
    pThis->cbObject    = RT_ALIGN_Z(cbObject, cbAlignment);
    pThis->cbAlignment = cbAlignment;
    pThis->cPerPage    = (PAGE_SIZE - RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), cbAlignment)) / pThis->cbObject;
    while (   RT_ALIGN_Z(sizeof(RTMEMCACHEPAGE), cbAlignment)
            + pThis->cPerPage * pThis->cbObject
            + RT_ALIGN(pThis->cPerPage, 32) * 2
           > PAGE_SIZE)
        pThis->cPerPage--;
    pThis->cBits       = RT_ALIGN(pThis->cPerPage, 32);
    pThis->cMax        = cMaxObjects;
    pThis->cTotal      = 0;
    pThis->cFree       = 0;
    pThis->pPageHead   = NULL;
    pThis->pPageHint   = NULL;
    pThis->pfnCtor     = pfnCtor;
    pThis->pfnDtor     = pfnDtor;
    pThis->pvUser      = pvUser;

    *phMemCache = pThis;
    return VINF_SUCCESS;
}


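/*
 * Editor's note: as the code above shows, passing cbAlignment = 0 makes
 * RTMemCacheCreate derive a power-of-two alignment from cbObject (capped at
 * 64 bytes), while an explicit cbAlignment must itself be a power of two no
 * larger than 64.  cbObject may not exceed PAGE_SIZE / 8, so at least a
 * handful of objects always fit on a page.
 */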
RTDECL(int) RTMemCacheDestroy(RTMEMCACHE hMemCache)
{
    RTMEMCACHEINT *pThis = hMemCache;
    if (!pThis)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTMEMCACHE_MAGIC, VERR_INVALID_HANDLE);
    AssertMsg((uint32_t)pThis->cFree == pThis->cTotal, ("cFree=%u cTotal=%u\n", pThis->cFree, pThis->cTotal));

    /*
     * Destroy it.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTMEMCACHE_MAGIC_DEAD, RTMEMCACHE_MAGIC), VERR_INVALID_HANDLE);
    RTCritSectDelete(&pThis->CritSect);

    while (pThis->pPageHead)
    {
        PRTMEMCACHEPAGE pPage = pThis->pPageHead;
        pThis->pPageHead = pPage->pNext;
        pPage->cFree = 0;

        if (pThis->pfnDtor)
        {
            uint32_t iObj = pPage->cObjects;
            while (iObj-- > 0)
                if (ASMBitTestAndClear(pPage->pbmCtor, iObj))
                    pThis->pfnDtor(hMemCache, pPage->pbObjects + iObj * pThis->cbObject, pThis->pvUser);
        }

        RTMemPageFree(pPage);
    }

    RTMemFree(pThis);
    return VINF_SUCCESS;
}


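/*
 * Editor's note: the AssertMsg at the top of RTMemCacheDestroy means the
 * caller is expected to have freed every object (cFree == cTotal) before
 * destroying the cache; the destructor callback, when present, is then run
 * once for each object that has been through the constructor (pbmCtor).
 */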
/**
 * Grows the cache.
 *
 * @returns IPRT status code.
 * @param   pThis               The memory cache instance.
 */
static int rtMemCacheGrow(RTMEMCACHEINT *pThis)
{
    /*
     * Enter the critical section here to avoid allocation races leading to
     * wasted memory (++) and make it easier to link in the new page.
     */
    RTCritSectEnter(&pThis->CritSect);
    int rc = VINF_SUCCESS;
    if (pThis->cFree < 0)
    {
        /*
         * Allocate and initialize the new page.
         */
        PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)RTMemPageAlloc(PAGE_SIZE);
        if (pPage)
        {
            uint32_t const cObjects = RT_MIN(pThis->cPerPage, pThis->cMax - pThis->cTotal);

            ASMMemZeroPage(pPage);
            pPage->pCache    = pThis;
            pPage->pNext     = NULL;
            pPage->cFree     = cObjects;
            pPage->cObjects  = cObjects;
            pPage->pbmAlloc  = pPage + 1;
            pPage->pbmCtor   = (uint8_t *)pPage->pbmAlloc + pThis->cBits / 8;
            pPage->pbObjects = (uint8_t *)pPage->pbmCtor  + pThis->cBits / 8;
            pPage->pbObjects = RT_ALIGN_PT(pPage->pbObjects, pThis->cbAlignment, uint8_t *);

            /* Mark the bitmap padding and any unused objects as allocated. */
            for (uint32_t iBit = cObjects; iBit < pThis->cBits; iBit++)
                ASMBitSet(pPage->pbmAlloc, iBit);

            /* Make it the hint. */
            ASMAtomicWritePtr((void * volatile *)&pThis->pPageHint, pPage);

            /* Link the page. */
            PRTMEMCACHEPAGE pPrevPage = pThis->pPageHead;
            if (!pPrevPage)
                ASMAtomicWritePtr((void * volatile *)&pThis->pPageHead, pPage);
            else
            {
                while (pPrevPage->pNext)
                    pPrevPage = pPrevPage->pNext;
                ASMAtomicWritePtr((void * volatile *)&pPrevPage->pNext, pPage);
            }

            /* Add it to the page counts. */
            ASMAtomicAddS32(&pThis->cFree, cObjects);
            ASMAtomicAddU32(&pThis->cTotal, cObjects);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    RTCritSectLeave(&pThis->CritSect);
    return rc;
}


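/*
 * Editor's worked example (not in the original source), assuming the common
 * PAGE_SIZE of 4096, a 64-bit build where sizeof(RTMEMCACHEPAGE) is 48 bytes,
 * and cbObject = cbAlignment = 64:
 *
 *   - RTMemCacheCreate starts with cPerPage = (4096 - 64) / 64 = 63 and the
 *     while loop trims it to 61, giving cBits = RT_ALIGN(61, 32) = 64.
 *   - rtMemCacheGrow then lays the page out as: the RTMEMCACHEPAGE header,
 *     immediately followed by pbmAlloc and pbmCtor (cBits / 8 = 8 bytes each),
 *     then pbObjects aligned up to 64, holding 61 * 64 = 3904 bytes of objects.
 *   - Bits 61..63 of pbmAlloc are pre-set above, so the free-bit search in
 *     RTMemCacheAllocEx never hands out the non-existent padding objects.
 */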
/**
 * Grabs an object in a page.
 * @returns New cFree value on success (0 or higher), -1 on failure.
 * @param   pPage               Pointer to the page.
 */
DECL_FORCE_INLINE(int32_t) rtMemCacheGrabObj(PRTMEMCACHEPAGE pPage)
{
    int32_t cFreeNew = ASMAtomicDecS32(&pPage->cFree);
    if (cFreeNew < 0)
    {
        ASMAtomicIncS32(&pPage->cFree);
        return -1;
    }
    return cFreeNew;
}


RTDECL(int) RTMemCacheAllocEx(RTMEMCACHE hMemCache, void **ppvObj)
{
    RTMEMCACHEINT *pThis = hMemCache;
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertReturn(pThis->u32Magic == RTMEMCACHE_MAGIC, VERR_INVALID_PARAMETER);

    /*
     * Try grab a free object at the cache level.
     */
    int32_t cNewFree = ASMAtomicDecS32(&pThis->cFree);
    if (cNewFree < 0)
    {
        uint32_t cTotal = ASMAtomicUoReadU32(&pThis->cTotal);
        if (   (uint32_t)(cTotal + -cNewFree) > pThis->cMax
            || (uint32_t)(cTotal + -cNewFree) <= cTotal)
        {
            ASMAtomicIncS32(&pThis->cFree);
            return VERR_MEM_CACHE_MAX_SIZE;
        }

        int rc = rtMemCacheGrow(pThis);
        if (RT_FAILURE(rc))
        {
            ASMAtomicIncS32(&pThis->cFree);
            return rc;
        }
    }

    /*
     * Grab a free object at the page level.
     */
    PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)ASMAtomicReadPtr((void * volatile *)&pThis->pPageHint);
    int32_t iObj = pPage ? rtMemCacheGrabObj(pPage) : -1;
    if (iObj < 0)
    {
        for (unsigned cLoops = 0; ; cLoops++)
        {
            for (pPage = pThis->pPageHead; pPage; pPage = pPage->pNext)
            {
                iObj = rtMemCacheGrabObj(pPage);
                if (iObj >= 0)
                {
                    if (iObj > 0)
                        ASMAtomicWritePtr((void * volatile *)&pThis->pPageHint, pPage);
                    break;
                }
            }
            if (iObj >= 0)
                break;
            Assert(cLoops != 2);
            Assert(cLoops < 10);
        }
    }
    Assert(iObj >= 0);
    Assert((uint32_t)iObj < pThis->cMax);

    /*
     * Find a free object in the allocation bitmap.  Use the new cFree count
     * as a hint.
     */
    if (ASMAtomicBitTestAndSet(pPage->pbmAlloc, iObj))
    {
        for (unsigned cLoops2 = 0;; cLoops2++)
        {
            iObj = ASMBitFirstClear(pPage->pbmAlloc, pThis->cBits);
            if (iObj < 0)
                ASMMemoryFence();
            else if (!ASMAtomicBitTestAndSet(pPage->pbmAlloc, iObj))
                break;
            Assert(cLoops2 != 40);
        }
        Assert(iObj >= 0);
    }
    void *pvObj = &pPage->pbObjects[iObj * pThis->cbObject];
    Assert((uintptr_t)pvObj - (uintptr_t)pPage < PAGE_SIZE);

    /*
     * Call the constructor?
     */
    if (   pThis->pfnCtor
        && !ASMAtomicBitTestAndSet(pPage->pbmCtor, iObj))
    {
        int rc = pThis->pfnCtor(hMemCache, pvObj, pThis->pvUser);
        if (RT_FAILURE(rc))
        {
            ASMAtomicBitClear(pPage->pbmCtor, iObj);
            RTMemCacheFree(pThis, pvObj);
            return rc;
        }
    }

    *ppvObj = pvObj;
    return VINF_SUCCESS;
}


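/*
 * Editor's note on the allocation path above: the ASMAtomicDecS32 on
 * pThis->cFree reserves an object cache-wide before any page is touched, and
 * rtMemCacheGrabObj does the same per page, so by the time the code scans
 * pbmAlloc with ASMAtomicBitTestAndSet a free bit is known to exist and the
 * whole fast path completes without entering the critical section.  Only the
 * slow path that has to add a page (rtMemCacheGrow) takes CritSect.
 */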
RTDECL(void *) RTMemCacheAlloc(RTMEMCACHE hMemCache)
{
    void *pvObj;
    int rc = RTMemCacheAllocEx(hMemCache, &pvObj);
    if (RT_SUCCESS(rc))
        return pvObj;
    return NULL;
}


RTDECL(void) RTMemCacheFree(RTMEMCACHE hMemCache, void *pvObj)
{
    if (!pvObj)
        return;

    RTMEMCACHEINT *pThis = hMemCache;
    AssertPtrReturnVoid(pThis);
    AssertReturnVoid(pThis->u32Magic == RTMEMCACHE_MAGIC);

    AssertPtr(pvObj);
    Assert(RT_ALIGN_P(pvObj, pThis->cbAlignment) == pvObj);

    /* Note: Do *NOT* attempt to poison the object if we have a constructor
             and/or destructor! */

    /*
     * Find the cache page.  The page structure is at the start of the page.
     */
    PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)(((uintptr_t)pvObj) & ~(uintptr_t)PAGE_OFFSET_MASK);
    AssertReturnVoid(pPage->pCache == pThis);
    AssertReturnVoid(ASMAtomicUoReadS32(&pPage->cFree) < (int32_t)pThis->cPerPage);

    /*
     * Clear the bitmap bit and update the two object counters.  Order matters!
     */
    uintptr_t offObj = (uintptr_t)pvObj - (uintptr_t)pPage->pbObjects;
    uintptr_t iObj   = offObj / pThis->cbObject;
    AssertReturnVoid(iObj * pThis->cbObject == offObj);
    Assert(iObj < pThis->cPerPage);
    AssertReturnVoid(ASMAtomicBitTestAndClear(pPage->pbmAlloc, iObj));

    ASMAtomicIncS32(&pPage->cFree);
    ASMAtomicIncS32(&pThis->cFree);
}