Changeset 26812 in vbox
- Timestamp: Feb 25, 2010 8:55:08 PM (15 years ago)
- svn:sync-xref-src-repo-rev: 58083
- Location: trunk/src/VBox/VMM
- Files: 3 edited
trunk/src/VBox/VMM/PDMAsyncCompletionFile.cpp
r26745 → r26812

@@ -647 +647 @@
         if (RT_FAILURE(rc))
         {
-            RTCritSectDelete(&pEpClassFile->CritSect);
             pEpClassFile->fCacheEnabled = false;
             LogRel(("AIOMgr: Failed to initialise the cache (rc=%Rrc), disabled caching\n"));
trunk/src/VBox/VMM/PDMAsyncCompletionFileCache.cpp
r26526 → r26812

@@ -60 +60 @@
         AssertMsg(RTCritSectIsOwner(&Cache->CritSect), \
                   ("Thread does not own critical section\n"));\
-    } while(0);
+    } while(0)
+
+# define PDMACFILECACHE_EP_IS_SEMRW_WRITE_OWNER(pEpCache) \
+    do \
+    { \
+        AssertMsg(RTSemRWIsWriteOwner(pEpCache->SemRWEntries), \
+                  ("Thread is not exclusive owner of the per endpoint RW semaphore\n")); \
+    } while(0)
+
+# define PDMACFILECACHE_EP_IS_SEMRW_READ_OWNER(pEpCache) \
+    do \
+    { \
+        AssertMsg(RTSemRWIsReadOwner(pEpCache->SemRWEntries), \
+                  ("Thread is not read owner of the per endpoint RW semaphore\n")); \
+    } while(0)
+
 #else
-# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) do { } while(0);
+# define PDMACFILECACHE_IS_CRITSECT_OWNER(Cache) do { } while(0)
+# define PDMACFILECACHE_EP_IS_SEMRW_WRITE_OWNER(pEpCache) do { } while(0)
+# define PDMACFILECACHE_EP_IS_SEMRW_READ_OWNER(pEpCache) do { } while(0)
 #endif
 
@@ -402 +419 @@
         pEntry = pEntry->pPrev;
 
-        /* We can't evict pages which are currently in progress */
-        if (   !(pCurr->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS)
+        /* We can't evict pages which are currently in progress or dirty but not in progress */
+        if (   !(pCurr->fFlags & PDMACFILECACHE_NOT_EVICTABLE)
             && (ASMAtomicReadU32(&pCurr->cRefs) == 0))
         {
@@ -411 +428 @@
             RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
 
-            if (!(pCurr->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS)
+            if (!(pCurr->fFlags & PDMACFILECACHE_NOT_EVICTABLE)
                 && (ASMAtomicReadU32(&pCurr->cRefs) == 0))
             {
@@ -594 +611 @@
 
 /**
+ * Commit a single dirty entry to the endpoint
+ *
+ * @returns nothing
+ * @param   pEntry    The entry to commit.
+ */
+static void pdmacFileCacheEntryCommit(PPDMACFILEENDPOINTCACHE pEndpointCache, PPDMACFILECACHEENTRY pEntry)
+{
+    NOREF(pEndpointCache);
+    AssertMsg(   (pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY)
+              && !(pEntry->fFlags & (PDMACFILECACHE_ENTRY_IO_IN_PROGRESS | PDMACFILECACHE_ENTRY_IS_DEPRECATED)),
+              ("Invalid flags set for entry %#p\n", pEntry));
+
+    pdmacFileCacheWriteToEndpoint(pEntry);
+}
+
+/**
+ * Commit all dirty entries for a single endpoint.
+ *
+ * @returns nothing.
+ * @param   pEndpointCache    The endpoint cache to commit.
+ */
+static void pdmacFileCacheEndpointCommit(PPDMACFILEENDPOINTCACHE pEndpointCache)
+{
+    uint32_t cbCommitted = 0;
+    RTSemRWRequestWrite(pEndpointCache->SemRWEntries, RT_INDEFINITE_WAIT);
+
+    if (!RTListIsEmpty(&pEndpointCache->ListDirtyNotCommitted))
+    {
+        PPDMACFILECACHEENTRY pEntry = RTListNodeGetFirst(&pEndpointCache->ListDirtyNotCommitted,
+                                                         PDMACFILECACHEENTRY,
+                                                         NodeNotCommitted);
+
+        while (!RTListNodeIsLast(&pEndpointCache->ListDirtyNotCommitted, &pEntry->NodeNotCommitted))
+        {
+            PPDMACFILECACHEENTRY pNext = RTListNodeGetNext(&pEntry->NodeNotCommitted, PDMACFILECACHEENTRY,
+                                                           NodeNotCommitted);
+            pdmacFileCacheEntryCommit(pEndpointCache, pEntry);
+            cbCommitted += pEntry->cbData;
+            RTListNodeRemove(&pEntry->NodeNotCommitted);
+            pEntry = pNext;
+        }
+
+        /* Commit the last endpoint */
+        Assert(RTListNodeIsLast(&pEndpointCache->ListDirtyNotCommitted, &pEntry->NodeNotCommitted));
+        pdmacFileCacheEntryCommit(pEndpointCache, pEntry);
+        RTListNodeRemove(&pEntry->NodeNotCommitted);
+        AssertMsg(RTListIsEmpty(&pEndpointCache->ListDirtyNotCommitted),
+                  ("Committed all entries but list is not empty\n"));
+    }
+
+    RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
+    AssertMsg(pEndpointCache->pCache->cbDirty >= cbCommitted,
+              ("Number of committed bytes exceeds number of dirty bytes\n"));
+    ASMAtomicSubU32(&pEndpointCache->pCache->cbDirty, cbCommitted);
+}
+
+/**
+ * Commit all dirty entries in the cache.
+ *
+ * @returns nothing.
+ * @param   pCache    The global cache instance.
+ */
+static void pdmacFileCacheCommitDirtyEntries(PPDMACFILECACHEGLOBAL pCache)
+{
+    bool fCommitInProgress = ASMAtomicXchgBool(&pCache->fCommitInProgress, true);
+
+    if (!fCommitInProgress)
+    {
+        pdmacFileCacheLockEnter(pCache);
+        Assert(!RTListIsEmpty(&pCache->ListEndpoints));
+
+        PPDMACFILEENDPOINTCACHE pEndpointCache = RTListNodeGetFirst(&pCache->ListEndpoints,
+                                                                    PDMACFILEENDPOINTCACHE,
+                                                                    NodeCacheEndpoint);
+        AssertPtr(pEndpointCache);
+
+        while (!RTListNodeIsLast(&pCache->ListEndpoints, &pEndpointCache->NodeCacheEndpoint))
+        {
+            pdmacFileCacheEndpointCommit(pEndpointCache);
+
+            pEndpointCache = RTListNodeGetNext(&pEndpointCache->NodeCacheEndpoint, PDMACFILEENDPOINTCACHE,
+                                               NodeCacheEndpoint);
+        }
+
+        /* Commit the last endpoint */
+        Assert(RTListNodeIsLast(&pCache->ListEndpoints, &pEndpointCache->NodeCacheEndpoint));
+        pdmacFileCacheEndpointCommit(pEndpointCache);
+
+        pdmacFileCacheLockLeave(pCache);
+        ASMAtomicWriteBool(&pCache->fCommitInProgress, false);
+    }
+}
+
+/**
+ * Adds the given entry as a dirty to the cache.
+ *
+ * @returns Flag whether the amount of dirty bytes in the cache exceeds the threshold
+ * @param   pEndpointCache    The endpoint cache the entry belongs to.
+ * @param   pEntry            The entry to add.
+ */
+static bool pdmacFileCacheAddDirtyEntry(PPDMACFILEENDPOINTCACHE pEndpointCache, PPDMACFILECACHEENTRY pEntry)
+{
+    bool fDirtyBytesExceeded = false;
+    PPDMACFILECACHEGLOBAL pCache = pEndpointCache->pCache;
+
+    /* If the commit timer is disabled we commit right away. */
+    if (pCache->u32CommitTimeoutMs == 0)
+    {
+        pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
+        pdmacFileCacheEntryCommit(pEndpointCache, pEntry);
+    }
+    else if (!(pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY))
+    {
+        pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
+        RTListAppend(&pEndpointCache->ListDirtyNotCommitted, &pEntry->NodeNotCommitted);
+        uint32_t cbDirty = ASMAtomicAddU32(&pCache->cbDirty, pEntry->cbData);
+
+        fDirtyBytesExceeded = (cbDirty >= pCache->cbCommitDirtyThreshold);
+    }
+
+    return fDirtyBytesExceeded;
+}
+
+/**
  * Completes a task segment freeing all ressources and completes the task handle
  * if everything was transfered.
@@ -638 +780 @@
 
     /* Process waiting segment list. The data in entry might have changed inbetween. */
+    bool fDirty = false;
     PPDMACFILETASKSEG pCurr = pEntry->pWaitingHead;
 
@@ -649 +792 @@
         AssertMsg(pEndpointCache->cWritesOutstanding > 0, ("Completed write request but outstanding task count is 0\n"));
         ASMAtomicDecU32(&pEndpointCache->cWritesOutstanding);
+
+        pEntry->fFlags &= ~PDMACFILECACHE_ENTRY_IS_DIRTY;
 
         if (pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DEPRECATED)
@@ -661 +806 @@
         else
         {
-            pEntry->fFlags &= ~PDMACFILECACHE_ENTRY_IS_DIRTY;
-
             while (pCurr)
             {
@@ -668 +811 @@
 
                 memcpy(pEntry->pbData + pCurr->uBufOffset, pCurr->pvBuf, pCurr->cbTransfer);
-                pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
+                fDirty = true;
 
                 pCurr = pdmacFileCacheTaskComplete(pEndpointCache, pCurr);
@@ -677 +820 @@
     {
         AssertMsg(pTask->enmTransferType == PDMACTASKFILETRANSFER_READ, ("Invalid transfer type\n"));
-        AssertMsg(!(pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY),("Invalid flags set\n"));
+        AssertMsg(!(pEntry->fFlags & (PDMACFILECACHE_ENTRY_IS_DIRTY | PDMACFILECACHE_ENTRY_IS_DEPRECATED)),
+                  ("Invalid flags set\n"));
 
         while (pCurr)
@@ -684 +828 @@
             {
                 memcpy(pEntry->pbData + pCurr->uBufOffset, pCurr->pvBuf, pCurr->cbTransfer);
-                pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
+                fDirty = true;
             }
             else
@@ -693 +837 @@
     }
 
-    if (pEntry->fFlags & PDMACFILECACHE_ENTRY_IS_DIRTY)
-        pdmacFileCacheWriteToEndpoint(pEntry);
+    bool fCommit = false;
+    if (fDirty)
+        fCommit = pdmacFileCacheAddDirtyEntry(pEndpointCache, pEntry);
 
     /* Complete a pending flush if all writes have completed */
@@ -708 +853 @@
     /* Dereference so that it isn't protected anymore except we issued anyother write for it. */
     pdmacFileEpCacheEntryRelease(pEntry);
+
+    if (fCommit)
+        pdmacFileCacheCommitDirtyEntries(pCache);
+}
+
+/**
+ * Commit timer callback.
+ */
+static void pdmacFileCacheCommitTimerCallback(PVM pVM, PTMTIMER pTimer, void *pvUser)
+{
+    PPDMASYNCCOMPLETIONEPCLASSFILE pClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pvUser;
+    PPDMACFILECACHEGLOBAL pCache = &pClassFile->Cache;
+
+    LogFlowFunc(("Commit interval expired, commiting dirty entries\n"));
+
+    if (ASMAtomicReadU32(&pCache->cbDirty) > 0)
+        pdmacFileCacheCommitDirtyEntries(pCache);
+
+    TMTimerSetMillies(pTimer, pCache->u32CommitTimeoutMs);
+    LogFlowFunc(("Entries committed, going to sleep\n"));
 }
 
@@ -725 +890 @@
     AssertLogRelRCReturn(rc, rc);
 
+    RTListInit(&pCache->ListEndpoints);
+    pCache->cRefs = 0;
     pCache->cbCached = 0;
+    pCache->fCommitInProgress = 0;
     LogFlowFunc((": Maximum number of bytes cached %u\n", pCache->cbMax));
 
@@ -744 +912 @@
     pCache->cbRecentlyUsedOutMax = (pCache->cbMax / 100) * 50; /* 50% of the buffer size */
     LogFlowFunc((": cbRecentlyUsedInMax=%u cbRecentlyUsedOutMax=%u\n", pCache->cbRecentlyUsedInMax, pCache->cbRecentlyUsedOutMax));
+
+    /** @todo r=aeichner: Experiment to find optimal default values */
+    rc = CFGMR3QueryU32Def(pCfgNode, "CacheCommitIntervalMs", &pCache->u32CommitTimeoutMs, 10000 /* 10sec */);
+    AssertLogRelRCReturn(rc, rc);
+    rc = CFGMR3QueryU32(pCfgNode, "CacheCommitThreshold", &pCache->cbCommitDirtyThreshold);
+    if (   rc == VERR_CFGM_VALUE_NOT_FOUND
+        || rc == VERR_CFGM_NO_PARENT)
+    {
+        /* Start committing after 50% of the cache are dirty */
+        pCache->cbCommitDirtyThreshold = pCache->cbMax / 2;
+    }
+    else
+        return rc;
 
     STAMR3Register(pClassFile->Core.pVM, &pCache->cbMax,
@@ -814 +995 @@
 
     if (RT_SUCCESS(rc))
-        LogRel(("AIOMgr: Cache successfully initialised. Cache size is %u bytes\n", pCache->cbMax));
+    {
+        /* Create the commit timer */
+        if (pCache->u32CommitTimeoutMs > 0)
+            rc = TMR3TimerCreateInternal(pClassFile->Core.pVM, TMCLOCK_REAL,
+                                         pdmacFileCacheCommitTimerCallback,
+                                         pClassFile,
+                                         "Cache-Commit",
+                                         &pClassFile->Cache.pTimerCommit);
+
+        if (RT_SUCCESS(rc))
+        {
+            LogRel(("AIOMgr: Cache successfully initialised. Cache size is %u bytes\n", pCache->cbMax));
+            LogRel(("AIOMgr: Cache commit interval is %u ms\n", pCache->u32CommitTimeoutMs));
+            LogRel(("AIOMgr: Cache commit threshold is %u bytes\n", pCache->cbCommitDirtyThreshold));
+            return VINF_SUCCESS;
+        }
+
+        RTCritSectDelete(&pCache->CritSect);
+    }
 
     return rc;
@@ -855 +1054 @@
 
     pEndpointCache->pCache = &pClassFile->Cache;
+    RTListInit(&pEndpointCache->ListDirtyNotCommitted);
 
     int rc = RTSemRWCreate(&pEndpointCache->SemRWEntries);
     if (RT_SUCCESS(rc))
     {
         pEndpointCache->pTree = (PAVLRFOFFTREE)RTMemAllocZ(sizeof(AVLRFOFFTREE));
-        if (!pEndpointCache->pTree)
+        if (pEndpointCache->pTree)
         {
+            pClassFile->Cache.cRefs++;
+            RTListAppend(&pClassFile->Cache.ListEndpoints, &pEndpointCache->NodeCacheEndpoint);
+
+            /* Arm the timer if this is the first endpoint. */
+            if (   pClassFile->Cache.cRefs == 1
+                && pClassFile->Cache.u32CommitTimeoutMs > 0)
+                rc = TMTimerSetMillies(pClassFile->Cache.pTimerCommit, pClassFile->Cache.u32CommitTimeoutMs);
+        }
+        else
             rc = VERR_NO_MEMORY;
+
+        if (RT_FAILURE(rc))
             RTSemRWDestroy(pEndpointCache->SemRWEntries);
-        }
     }
 
@@ -877 +1087 @@
 #endif
 
+    LogFlowFunc(("Leave rc=%Rrc\n", rc));
     return rc;
 }
@@ -933 +1144 @@
     RTAvlrFileOffsetDestroy(pEndpointCache->pTree, pdmacFileEpCacheEntryDestroy, pCache);
     RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
+
+    pCache->cRefs--;
+    RTListNodeRemove(&pEndpointCache->NodeCacheEndpoint);
+
+    if (   !pCache->cRefs
+        && pCache->u32CommitTimeoutMs > 0)
+        TMTimerStop(pCache->pTimerCommit);
+
     pdmacFileCacheLockLeave(pCache);
 
@@ -1592 +1811 @@
             else /* Deprecated flag not set */
             {
-                /* If the entry is dirty it must be also in progress now and we have to defer updating it again. */
+                /* Check if the entry is dirty. */
                 if(pdmacFileEpCacheEntryFlagIsSetClearAcquireLock(pEndpointCache, pEntry,
                                                                   PDMACFILECACHE_ENTRY_IS_DIRTY,
                                                                   0))
                 {
-                    AssertMsg(pEntry->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS,
-                              ("Entry is dirty but not in progress\n"));
-                    Assert(!pEntry->pbDataReplace);
-
-                    /* Deprecate the current buffer. */
-                    if (!pEntry->pWaitingHead)
-                        pEntry->pbDataReplace = (uint8_t *)RTMemPageAlloc(pEntry->cbData);
-
-                    /* If we are out of memory or have waiting segments
-                     * defer the write. */
-                    if (!pEntry->pbDataReplace || pEntry->pWaitingHead)
+                    /* If it is dirty but not in progrss just update the data. */
+                    if (!(pEntry->fFlags & PDMACFILECACHE_ENTRY_IO_IN_PROGRESS))
                     {
-                        /* The data isn't written to the file yet */
-                        pdmacFileEpCacheEntryWaitersAdd(pEntry, pTask,
-                                                        &IoMemCtx,
-                                                        OffDiff, cbToWrite,
-                                                        true /* fWrite */);
-                        STAM_COUNTER_INC(&pEndpointCache->StatWriteDeferred);
+                        pdmacFileEpCacheCopyFromIoMemCtx(&IoMemCtx,
+                                                         pEntry->pbData + OffDiff,
+                                                         cbToWrite);
                     }
-                    else /* Deprecate buffer */
+                    else
                     {
-                        LogFlow(("Deprecating buffer for entry %#p\n", pEntry));
-                        pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DEPRECATED;
-
-                        /* Copy the data before the update. */
-                        if (OffDiff)
-                            memcpy(pEntry->pbDataReplace, pEntry->pbData, OffDiff);
-
-                        /* Copy data behind the update. */
-                        if ((pEntry->cbData - OffDiff - cbToWrite) > 0)
-                            memcpy(pEntry->pbDataReplace + OffDiff + cbToWrite,
-                                   pEntry->pbData + OffDiff + cbToWrite,
-                                   (pEntry->cbData - OffDiff - cbToWrite));
-
-                        /* Update the data from the write. */
-                        pdmacFileEpCacheCopyFromIoMemCtx(&IoMemCtx,
-                                                         pEntry->pbDataReplace + OffDiff,
-                                                         cbToWrite);
-                        ASMAtomicSubS32(&pTask->cbTransferLeft, cbToWrite);
-
-                        /* We are done here. A new write is initiated if the current request completes. */
+                        Assert(!pEntry->pbDataReplace);
+
+                        /* Deprecate the current buffer. */
+                        if (!pEntry->pWaitingHead)
+                            pEntry->pbDataReplace = (uint8_t *)RTMemPageAlloc(pEntry->cbData);
+
+                        /* If we are out of memory or have waiting segments
+                         * defer the write. */
+                        if (!pEntry->pbDataReplace || pEntry->pWaitingHead)
+                        {
+                            /* The data isn't written to the file yet */
+                            pdmacFileEpCacheEntryWaitersAdd(pEntry, pTask,
+                                                            &IoMemCtx,
+                                                            OffDiff, cbToWrite,
+                                                            true /* fWrite */);
+                            STAM_COUNTER_INC(&pEndpointCache->StatWriteDeferred);
+                        }
+                        else /* Deprecate buffer */
+                        {
+                            LogFlow(("Deprecating buffer for entry %#p\n", pEntry));
+                            pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DEPRECATED;
+
+                            /* Copy the data before the update. */
+                            if (OffDiff)
+                                memcpy(pEntry->pbDataReplace, pEntry->pbData, OffDiff);
+
+                            /* Copy data behind the update. */
+                            if ((pEntry->cbData - OffDiff - cbToWrite) > 0)
+                                memcpy(pEntry->pbDataReplace + OffDiff + cbToWrite,
+                                       pEntry->pbData + OffDiff + cbToWrite,
+                                       (pEntry->cbData - OffDiff - cbToWrite));
+
+                            /* Update the data from the write. */
+                            pdmacFileEpCacheCopyFromIoMemCtx(&IoMemCtx,
+                                                             pEntry->pbDataReplace + OffDiff,
+                                                             cbToWrite);
+                            /* We are done here. A new write is initiated if the current request completes. */
+                        }
                     }
 
+                    ASMAtomicSubS32(&pTask->cbTransferLeft, cbToWrite);
                     RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
                 }
@@ -1659 +1885 @@
                     RTSemRWReleaseWrite(pEndpointCache->SemRWEntries);
                 }
-                else /* I/O in progres flag not set */
+                else /* I/O in progress flag not set */
                 {
                     /* Write as much as we can into the entry and update the file. */
@@ -1667 +1893 @@
                     ASMAtomicSubS32(&pTask->cbTransferLeft, cbToWrite);
 
-                    pEntry->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
-                    pdmacFileCacheWriteToEndpoint(pEntry);
+                    bool fCommit = pdmacFileCacheAddDirtyEntry(pEndpointCache, pEntry);
+                    if (fCommit)
+                        pdmacFileCacheCommitDirtyEntries(pCache);
                 }
             } /* Dirty bit not set */
@@ -1787 +2014 @@
             ASMAtomicSubS32(&pTask->cbTransferLeft, cbToWrite);
 
-            pEntryNew->fFlags |= PDMACFILECACHE_ENTRY_IS_DIRTY;
-            pdmacFileCacheWriteToEndpoint(pEntryNew);
-            pdmacFileEpCacheEntryRelease(pEntryNew); /* it is protected by the I/O in progress flag now. */
+            bool fCommit = pdmacFileCacheAddDirtyEntry(pEndpointCache, pEntryNew);
+            if (fCommit)
+                pdmacFileCacheCommitDirtyEntries(pCache);
+            pdmacFileEpCacheEntryRelease(pEntryNew);
             STAM_COUNTER_ADD(&pCache->StatWritten, cbToWrite);
         }
@@ -1835 +2063 @@
     else
     {
+        /* Check for dirty entries in the cache. */
+        pdmacFileCacheEndpointCommit(&pEndpoint->DataCache);
         if (ASMAtomicReadU32(&pEndpoint->DataCache.cWritesOutstanding) > 0)
         {
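The diff above replaces the old behaviour, where every dirty entry was written to the endpoint immediately, with a write-back scheme: dirty entries are queued per endpoint in ListDirtyNotCommitted, the global cbDirty counter is updated, and a commit is triggered either by the periodic Cache-Commit timer (CacheCommitIntervalMs, default 10000 ms) or once cbDirty reaches cbCommitDirtyThreshold (default: half of cbMax). The following standalone C sketch illustrates that pattern in a heavily simplified, single-threaded form; the names (DemoEntry, DemoCache, demoCacheAddDirtyEntry, demoCacheCommitDirtyEntries) are invented for illustration and are not the VirtualBox/IPRT APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of a cache entry that can be marked dirty (not the VBox type). */
typedef struct DemoEntry
{
    uint32_t          cbData;  /* size of the cached data */
    bool              fDirty;  /* set while the entry waits for a commit */
    struct DemoEntry *pNext;   /* singly linked "dirty, not committed" list */
} DEMOENTRY;

/* Simplified model of the global dirty-tracking state this changeset introduces. */
typedef struct DemoCache
{
    DEMOENTRY *pDirtyHead;             /* list of dirty entries */
    uint32_t   cbDirty;                /* bytes waiting to be written back */
    uint32_t   cbCommitDirtyThreshold; /* commit once this many bytes are dirty */
} DEMOCACHE;

/* Write back every queued entry and reset the dirty accounting. */
static void demoCacheCommitDirtyEntries(DEMOCACHE *pCache)
{
    while (pCache->pDirtyHead)
    {
        DEMOENTRY *pEntry = pCache->pDirtyHead;
        pCache->pDirtyHead = pEntry->pNext;
        pEntry->pNext  = NULL;
        pEntry->fDirty = false;
        printf("committing %u bytes\n", (unsigned)pEntry->cbData); /* stands in for the real write */
    }
    pCache->cbDirty = 0;
}

/* Queue an entry as dirty; returns true when the threshold was crossed. */
static bool demoCacheAddDirtyEntry(DEMOCACHE *pCache, DEMOENTRY *pEntry)
{
    if (!pEntry->fDirty)
    {
        pEntry->fDirty = true;
        pEntry->pNext = pCache->pDirtyHead;
        pCache->pDirtyHead = pEntry;
        pCache->cbDirty += pEntry->cbData;
    }
    return pCache->cbDirty >= pCache->cbCommitDirtyThreshold;
}

int main(void)
{
    DEMOCACHE Cache = { NULL, 0, 64 * 1024 /* commit after 64 KiB are dirty */ };
    DEMOENTRY aEntries[4] = { { 16 * 1024 }, { 16 * 1024 }, { 16 * 1024 }, { 16 * 1024 } };

    for (unsigned i = 0; i < 4; i++)
        if (demoCacheAddDirtyEntry(&Cache, &aEntries[i]))   /* threshold path */
            demoCacheCommitDirtyEntries(&Cache);

    demoCacheCommitDirtyEntries(&Cache); /* what the periodic commit timer path would do */
    return 0;
}

In the actual code the lists are RTLISTNODE based, cbDirty is maintained with atomic operations, commits are serialized through fCommitInProgress and the per-endpoint RW semaphore, and the periodic path is driven by a TMCLOCK_REAL timer created with TMR3TimerCreateInternal, as shown in the diff.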
trunk/src/VBox/VMM/PDMAsyncCompletionFileInternal.h
r26689 → r26812

@@ -32 +32 @@
 #include <iprt/critsect.h>
 #include <iprt/avl.h>
+#include <iprt/list.h>
 
 #include "PDMAsyncCompletionInternal.h"
@@ -282 +283 @@
     /** Tail of list of tasks waiting for this one to finish. */
     PPDMACFILETASKSEG pWaitingTail;
+    /** Node for dirty but not yet committed entries list per endpoint. */
+    RTLISTNODE NodeNotCommitted;
 } PDMACFILECACHEENTRY, *PPDMACFILECACHEENTRY;
 /** I/O is still in progress for this entry. This entry is not evictable. */
@@ -295 +298 @@
 #define PDMACFILECACHE_ENTRY_IS_DEPRECATED RT_BIT(3)
 /** Entry is not evictable. */
-#define PDMACFILECACHE_NOT_EVICTABLE (PDMACFILECACHE_ENTRY_LOCKED | PDMACFILECACHE_IO_IN_PROGRESS | PDMACFILECACHE_ENTRY_IS_DEPRECATED)
+#define PDMACFILECACHE_NOT_EVICTABLE (PDMACFILECACHE_ENTRY_LOCKED | PDMACFILECACHE_ENTRY_IO_IN_PROGRESS | PDMACFILECACHE_ENTRY_IS_DIRTY)
 
 /**
@@ -321 +324 @@
     /** Critical section protecting the cache. */
     RTCRITSECT CritSect;
+    /** Maximum number of bytes cached. */
     uint32_t cbRecentlyUsedInMax;
+    /** Maximum number of bytes in the paged out list .*/
     uint32_t cbRecentlyUsedOutMax;
+    /** Recently used cache entries list */
     PDMACFILELRULIST LruRecentlyUsedIn;
+    /** Scorecard cache entry list. */
     PDMACFILELRULIST LruRecentlyUsedOut;
+    /** List of frequently used cache entries */
     PDMACFILELRULIST LruFrequentlyUsed;
+    /** Commit timeout in milli seconds */
+    uint32_t u32CommitTimeoutMs;
+    /** Number of dirty bytes needed to start a commit of the data to the disk. */
+    uint32_t cbCommitDirtyThreshold;
+    /** Current number of dirty bytes in the cache. */
+    volatile uint32_t cbDirty;
+    /** Flag whether a commit is currently in progress. */
+    volatile bool fCommitInProgress;
+    /** Commit interval timer */
+    PTMTIMERR3 pTimerCommit;
+    /** Number of endpoints using the cache. */
+    uint32_t cRefs;
+    /** List of all endpoints using this cache. */
+    RTLISTNODE ListEndpoints;
 #ifdef VBOX_WITH_STATISTICS
     /** Hit counter. */
@@ -363 +385 @@
     /** Handle of the flush request if one is active */
     volatile PPDMASYNCCOMPLETIONTASKFILE pTaskFlush;
+    /** List of dirty but not committed entries for this endpoint. */
+    RTLISTNODE ListDirtyNotCommitted;
+    /** Node of the cache endpoint list. */
+    RTLISTNODE NodeCacheEndpoint;
 #ifdef VBOX_WITH_STATISTICS
     /** Number of times a write was deferred because the cache entry was still in progress */