VirtualBox

source: vbox/trunk/src/VBox/VMM/TM.cpp@ 5505

Last change on this file since 5505 was 5505, checked in by vboxsync, 18 years ago

Hooked up the new IPRT time code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 82.7 KB
Line 
1/* $Id: TM.cpp 5505 2007-10-25 23:47:19Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 innotek GmbH
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License as published by the Free Software Foundation,
13 * in version 2 as it comes in the "COPYING" file of the VirtualBox OSE
14 * distribution. VirtualBox OSE is distributed in the hope that it will
15 * be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_tm TM - The Time Manager
20 *
21 * The Time Manager abstracts the CPU clocks and manages timers used by the VMM,
22 * devices and drivers.
23 *
24 *
25 * @section sec_tm_clocks Clocks
26 *
27 * There are currently 4 clocks:
28 * - Virtual (guest).
29 * - Synchronous virtual (guest).
30 * - CPU Tick (TSC) (guest). Only current use is rdtsc emulation. Usually a
31 * function of the virtual clock.
32 * - Real (host). The only current use is display updates for not real
33 * good reason...
34 *
35 * The interesting clocks are the first two, the virtual and the synchronous virtual
36 * clock. The synchronous virtual clock is tied to the virtual clock except that
37 * it will take into account timer delivery lag caused by host scheduling. It will
38 * normally never advance beyond the head timer, and when lagging too far behind
39 * it will gradually speed up to catch up with the virtual clock.
40 *
41 * The CPU tick (TSC) is normally virtualized as a function of the virtual time,
42 * where the frequency defaults to the host cpu frequency (as we measure it). It
43 * can also use the host TSC as source and either present it with an offset or
44 * unmodified. It is of course possible to configure the TSC frequency and mode
45 * of operation.
46 *
47 * @subsection subsec_tm_timesync Guest Time Sync / UTC time
48 *
49 * Guest time syncing is primarily taken care of by the VMM device. The principle
50 * is very simple, the guest additions periodically asks the VMM device what the
51 * current UTC time is and makes adjustments accordingly. Now, because the
52 * synchronous virtual clock might be doing catchups and we would therefore
53 * deliver more than the normal rate for a little while, some adjusting of the
54 * UTC time is required before passing it on to the guest. This is why TM provides
55 * an API for querying the current UTC time.
56 *
57 *
58 * @section sec_tm_timers Timers
59 *
60 * The timers can use any of the TM clocks described in the previous section. Each
61 * clock has its own scheduling facility, or timer queue if you like. There are
62 * a few factors which makes it a bit complex. First there is the usual R0 vs R3
63 * vs. GC thing. Then there is multiple threads, and then there is the timer thread
64 * that periodically checks whether any timers have expired without EMT noticing. On
65 * the API level, all but the create and save APIs must be multithreaded. EMT will
66 * always run the timers.
67 *
68 * The design is using a doubly linked list of active timers which is ordered
69 * by expire date. This list is only modified by the EMT thread. Updates to the
70 * list are batched in a singly linked list, which is then processed by the EMT
71 * thread at the first opportunity (immediately, next time EMT modifies a timer
72 * on that clock, or next timer timeout). Both lists are offset based and all
73 * the elements therefore allocated from the hyper heap.
74 *
75 * For figuring out when there is need to schedule and run timers TM will:
76 * - Poll whenever somebody queries the virtual clock.
77 * - Poll the virtual clocks from the EM and REM loops.
78 * - Poll the virtual clocks from trap exit path.
79 * - Poll the virtual clocks and calculate first timeout from the halt loop.
80 * - Employ a thread which periodically (100Hz) polls all the timer queues.
81 *
82 *
83 * @section sec_tm_timer Logging
84 *
85 * Level 2: Logs most of the timer state transitions and queue servicing.
86 * Level 3: Logs a few oddments.
87 * Level 4: Logs TMCLOCK_VIRTUAL_SYNC catch-up events.
88 *
89 */
90
91
92
93
94/*******************************************************************************
95* Header Files *
96*******************************************************************************/
97#define LOG_GROUP LOG_GROUP_TM
98#include <VBox/tm.h>
99#include <VBox/vmm.h>
100#include <VBox/mm.h>
101#include <VBox/ssm.h>
102#include <VBox/dbgf.h>
103#include <VBox/rem.h>
104#include <VBox/pdm.h>
105#include "TMInternal.h"
106#include <VBox/vm.h>
107
108#include <VBox/param.h>
109#include <VBox/err.h>
110
111#include <VBox/log.h>
112#include <iprt/asm.h>
113#include <iprt/assert.h>
114#include <iprt/thread.h>
115#include <iprt/time.h>
116#include <iprt/timer.h>
117#include <iprt/semaphore.h>
118#include <iprt/string.h>
119#include <iprt/env.h>
120
121
122/*******************************************************************************
123* Defined Constants And Macros *
124*******************************************************************************/
125/** The current saved state version.*/
126#define TM_SAVED_STATE_VERSION 3
127
128
129/*******************************************************************************
130* Internal Functions *
131*******************************************************************************/
132static bool tmR3HasFixedTSC(void);
133static uint64_t tmR3CalibrateTSC(void);
134static DECLCALLBACK(int) tmR3Save(PVM pVM, PSSMHANDLE pSSM);
135static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
136static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser);
137static void tmR3TimerQueueRun(PVM pVM, PTMTIMERQUEUE pQueue);
138static void tmR3TimerQueueRunVirtualSync(PVM pVM);
139static DECLCALLBACK(void) tmR3TimerInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
140static DECLCALLBACK(void) tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
141static DECLCALLBACK(void) tmR3InfoClocks(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
142
143
144/**
145 * Internal function for getting the clock time.
146 *
147 * @returns clock time.
148 * @param pVM The VM handle.
149 * @param enmClock The clock.
150 */
151DECLINLINE(uint64_t) tmClock(PVM pVM, TMCLOCK enmClock)
152{
153 switch (enmClock)
154 {
155 case TMCLOCK_VIRTUAL: return TMVirtualGet(pVM);
156 case TMCLOCK_VIRTUAL_SYNC: return TMVirtualSyncGet(pVM);
157 case TMCLOCK_REAL: return TMRealGet(pVM);
158 case TMCLOCK_TSC: return TMCpuTickGet(pVM);
159 default:
160 AssertMsgFailed(("enmClock=%d\n", enmClock));
161 return ~(uint64_t)0;
162 }
163}
164
165
166/**
167 * Initializes the TM.
168 *
169 * @returns VBox status code.
170 * @param pVM The VM to operate on.
171 */
172TMR3DECL(int) TMR3Init(PVM pVM)
173{
174 LogFlow(("TMR3Init:\n"));
175
176 /*
177 * Assert alignment and sizes.
178 */
179 AssertRelease(!(RT_OFFSETOF(VM, tm.s) & 31));
180 AssertRelease(sizeof(pVM->tm.s) <= sizeof(pVM->tm.padding));
181
182 /*
183 * Init the structure.
184 */
185 void *pv;
186 int rc = MMHyperAlloc(pVM, sizeof(pVM->tm.s.paTimerQueuesR3[0]) * TMCLOCK_MAX, 0, MM_TAG_TM, &pv);
187 AssertRCReturn(rc, rc);
188 pVM->tm.s.paTimerQueuesR3 = (PTMTIMERQUEUE)pv;
189
190 pVM->tm.s.offVM = RT_OFFSETOF(VM, tm.s);
191 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].enmClock = TMCLOCK_VIRTUAL;
192 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].u64Expire = INT64_MAX;
193 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].enmClock = TMCLOCK_VIRTUAL_SYNC;
194 pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].u64Expire = INT64_MAX;
195 pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].enmClock = TMCLOCK_REAL;
196 pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].u64Expire = INT64_MAX;
197 pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].enmClock = TMCLOCK_TSC;
198 pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].u64Expire = INT64_MAX;
199
200 /*
201 * We directly use the GIP to calculate the virtual time. We map the
202 * the GIP into the guest context so we can do this calculation there
203 * as well and save costly world switches.
204 */
205 pVM->tm.s.pvGIPR3 = (void *)g_pSUPGlobalInfoPage;
206 AssertMsgReturn(pVM->tm.s.pvGIPR3, ("GIP support is now required!\n"), VERR_INTERNAL_ERROR);
207 RTHCPHYS HCPhysGIP;
208 rc = SUPGipGetPhys(&HCPhysGIP);
209 AssertMsgRCReturn(rc, ("Failed to get GIP physical address!\n"), rc);
210
211 rc = MMR3HyperMapHCPhys(pVM, pVM->tm.s.pvGIPR3, HCPhysGIP, PAGE_SIZE, "GIP", &pVM->tm.s.pvGIPGC);
212 if (VBOX_FAILURE(rc))
213 {
214 AssertMsgFailed(("Failed to map GIP into GC, rc=%Vrc!\n", rc));
215 return rc;
216 }
217 LogFlow(("TMR3Init: HCPhysGIP=%RHp at %VGv\n", HCPhysGIP, pVM->tm.s.pvGIPGC));
218 MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
219
220 /* Check assumptions made in TMAllVirtual.cpp about the GIP update interval. */
221 if ( g_pSUPGlobalInfoPage->u32Magic == SUPGLOBALINFOPAGE_MAGIC
222 && g_pSUPGlobalInfoPage->u32UpdateIntervalNS >= 250000000 /* 0.25s */)
223 return VMSetError(pVM, VERR_INTERNAL_ERROR, RT_SRC_POS,
224 N_("The GIP update interval is too big. u32UpdateIntervalNS=%RU32 (u32UpdateHz=%RU32)\n"),
225 g_pSUPGlobalInfoPage->u32UpdateIntervalNS, g_pSUPGlobalInfoPage->u32UpdateHz);
226
227 /*
228 * Setup the VirtualGetRaw backend.
229 */
230 pVM->tm.s.VirtualGetRawDataR3.pu64Prev = &pVM->tm.s.u64VirtualRawPrev;
231 pVM->tm.s.VirtualGetRawDataR3.pfnBad = tmVirtualNanoTSBad;
232 pVM->tm.s.VirtualGetRawDataR3.pfnRediscover = tmVirtualNanoTSRediscover;
233
234 pVM->tm.s.VirtualGetRawDataGC.pu64Prev = MMHyperR3ToGC(pVM, (void *)&pVM->tm.s.u64VirtualRawPrev);
235#if 0 /* too early */
236 rc = PDMR3GetSymbolGCLazy(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataGC.pfnBad);
237 AssertRCReturn(rc, rc);
238 rc = PDMR3GetSymbolGCLazy(pVM, NULL, "tmVirtualNanoTSRediscover", &pVM->tm.s.VirtualGetRawDataGC.pfnRediscover);
239 AssertRCReturn(rc, rc);
240#endif
241
242 pVM->tm.s.VirtualGetRawDataR0.pu64Prev = MMHyperR3ToR0(pVM, (void *)&pVM->tm.s.u64VirtualRawPrev);
243 AssertReturn(pVM->tm.s.VirtualGetRawDataR0.pu64Prev, VERR_INTERNAL_ERROR);
244 rc = PDMR3GetSymbolR0Lazy(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataR0.pfnBad);
245 AssertRCReturn(rc, rc);
246 rc = PDMR3GetSymbolR0Lazy(pVM, NULL, "tmVirtualNanoTSRediscover", &pVM->tm.s.VirtualGetRawDataR0.pfnRediscover);
247 AssertRCReturn(rc, rc);
248
249 if (ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2)
250 {
251 if (g_pSUPGlobalInfoPage->u32Mode == SUPGIPMODE_SYNC_TSC)
252 {
253 pVM->tm.s.pfnVirtualGetRawR3 = RTTimeNanoTSLFenceSync;
254 rc = PDMR3GetSymbolR0Lazy(pVM, NULL, "RTTimeNanoTSLFenceSync", &pVM->tm.s.pfnVirtualGetRawR0);
255 AssertRCReturn(rc, rc);
256 }
257 else
258 {
259 pVM->tm.s.pfnVirtualGetRawR3 = RTTimeNanoTSLFenceAsync;
260 rc = PDMR3GetSymbolR0Lazy(pVM, NULL, "RTTimeNanoTSLFenceAsync", &pVM->tm.s.pfnVirtualGetRawR0);
261 AssertRCReturn(rc, rc);
262 }
263 }
264 else
265 {
266 if (g_pSUPGlobalInfoPage->u32Mode == SUPGIPMODE_SYNC_TSC)
267 {
268 pVM->tm.s.pfnVirtualGetRawR3 = RTTimeNanoTSLegacySync;
269 rc = PDMR3GetSymbolR0Lazy(pVM, NULL, "RTTimeNanoTSLegacySync", &pVM->tm.s.pfnVirtualGetRawR0);
270 AssertRCReturn(rc, rc);
271 }
272 else
273 {
274 pVM->tm.s.pfnVirtualGetRawR3 = RTTimeNanoTSLegacyAsync;
275 rc = PDMR3GetSymbolR0Lazy(pVM, NULL, "RTTimeNanoTSLegacyAsync", &pVM->tm.s.pfnVirtualGetRawR0);
276 AssertRCReturn(rc, rc);
277 }
278 }
279
280 /*
281 * Get our CFGM node, create it if necessary.
282 */
283 PCFGMNODE pCfgHandle = CFGMR3GetChild(CFGMR3GetRoot(pVM), "TM");
284 if (!pCfgHandle)
285 {
286 rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "TM", &pCfgHandle);
287 AssertRCReturn(rc, rc);
288 }
289
290 /*
291 * Determin the TSC configuration and frequency.
292 */
293 /* mode */
294 rc = CFGMR3QueryBool(pCfgHandle, "TSCVirtualized", &pVM->tm.s.fTSCVirtualized);
295 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
296 pVM->tm.s.fTSCVirtualized = true; /* trap rdtsc */
297 else if (VBOX_FAILURE(rc))
298 return VMSetError(pVM, rc, RT_SRC_POS,
299 N_("Configuration error: Failed to querying bool value \"UseRealTSC\". (%Vrc)"), rc);
300
301 /* source */
302 rc = CFGMR3QueryBool(pCfgHandle, "UseRealTSC", &pVM->tm.s.fTSCUseRealTSC);
303 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
304 pVM->tm.s.fTSCUseRealTSC = false; /* use virtual time */
305 else if (VBOX_FAILURE(rc))
306 return VMSetError(pVM, rc, RT_SRC_POS,
307 N_("Configuration error: Failed to querying bool value \"UseRealTSC\". (%Vrc)"), rc);
308 if (!pVM->tm.s.fTSCUseRealTSC)
309 pVM->tm.s.fTSCVirtualized = true;
310
311 /* TSC reliability */
312 rc = CFGMR3QueryBool(pCfgHandle, "MaybeUseOffsettedHostTSC", &pVM->tm.s.fMaybeUseOffsettedHostTSC);
313 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
314 {
315 if (!pVM->tm.s.fTSCUseRealTSC)
316 pVM->tm.s.fMaybeUseOffsettedHostTSC = tmR3HasFixedTSC();
317 else
318 pVM->tm.s.fMaybeUseOffsettedHostTSC = true;
319 }
320
321 /* frequency */
322 rc = CFGMR3QueryU64(pCfgHandle, "TSCTicksPerSecond", &pVM->tm.s.cTSCTicksPerSecond);
323 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
324 {
325 pVM->tm.s.cTSCTicksPerSecond = tmR3CalibrateTSC();
326 if ( !pVM->tm.s.fTSCUseRealTSC
327 && pVM->tm.s.cTSCTicksPerSecond >= _4G)
328 {
329 pVM->tm.s.cTSCTicksPerSecond = _4G - 1; /* (A limitation of our math code) */
330 pVM->tm.s.fMaybeUseOffsettedHostTSC = false;
331 }
332 }
333 else if (VBOX_FAILURE(rc))
334 return VMSetError(pVM, rc, RT_SRC_POS,
335 N_("Configuration error: Failed to querying uint64_t value \"TSCTicksPerSecond\". (%Vrc)"), rc);
336 else if ( pVM->tm.s.cTSCTicksPerSecond < _1M
337 || pVM->tm.s.cTSCTicksPerSecond >= _4G)
338 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
339 N_("Configuration error: \"TSCTicksPerSecond\" = %RI64 is not in the range 1MHz..4GHz-1!"),
340 pVM->tm.s.cTSCTicksPerSecond);
341 else
342 {
343 pVM->tm.s.fTSCUseRealTSC = pVM->tm.s.fMaybeUseOffsettedHostTSC = false;
344 pVM->tm.s.fTSCVirtualized = true;
345 }
346
347 /* setup and report */
348 if (pVM->tm.s.fTSCVirtualized)
349 CPUMR3SetCR4Feature(pVM, X86_CR4_TSD, ~X86_CR4_TSD);
350 else
351 CPUMR3SetCR4Feature(pVM, 0, ~X86_CR4_TSD);
352 LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool fMaybeUseOffsettedHostTSC=%RTbool\n",
353 pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized,
354 pVM->tm.s.fTSCUseRealTSC, pVM->tm.s.fMaybeUseOffsettedHostTSC));
355
356 /*
357 * Configure the timer synchronous virtual time.
358 */
359 rc = CFGMR3QueryU32(pCfgHandle, "ScheduleSlack", &pVM->tm.s.u32VirtualSyncScheduleSlack);
360 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
361 pVM->tm.s.u32VirtualSyncScheduleSlack = 100000; /* 0.100ms (ASSUMES virtual time is nanoseconds) */
362 else if (VBOX_FAILURE(rc))
363 return VMSetError(pVM, rc, RT_SRC_POS,
364 N_("Configuration error: Failed to querying 32-bit integer value \"ScheduleSlack\". (%Vrc)"), rc);
365
366 rc = CFGMR3QueryU64(pCfgHandle, "CatchUpStopThreshold", &pVM->tm.s.u64VirtualSyncCatchUpStopThreshold);
367 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
368 pVM->tm.s.u64VirtualSyncCatchUpStopThreshold = 500000; /* 0.5ms */
369 else if (VBOX_FAILURE(rc))
370 return VMSetError(pVM, rc, RT_SRC_POS,
371 N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpStopThreshold\". (%Vrc)"), rc);
372
373 rc = CFGMR3QueryU64(pCfgHandle, "CatchUpGiveUpThreshold", &pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold);
374 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
375 pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold = UINT64_C(60000000000); /* 60 sec */
376 else if (VBOX_FAILURE(rc))
377 return VMSetError(pVM, rc, RT_SRC_POS,
378 N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpGiveUpThreshold\". (%Vrc)"), rc);
379
380
381#define TM_CFG_PERIOD(iPeriod, DefStart, DefPct) \
382 do \
383 { \
384 uint64_t u64; \
385 rc = CFGMR3QueryU64(pCfgHandle, "CatchUpStartThreshold" #iPeriod, &u64); \
386 if (rc == VERR_CFGM_VALUE_NOT_FOUND) \
387 u64 = UINT64_C(DefStart); \
388 else if (VBOX_FAILURE(rc)) \
389 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying 64-bit integer value \"CatchUpThreshold" #iPeriod "\". (%Vrc)"), rc); \
390 if ( (iPeriod > 0 && u64 <= pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod - 1].u64Start) \
391 || u64 >= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold) \
392 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("Configuration error: Invalid start of period #" #iPeriod ": %RU64"), u64); \
393 pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u64Start = u64; \
394 rc = CFGMR3QueryU32(pCfgHandle, "CatchUpPrecentage" #iPeriod, &pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u32Percentage); \
395 if (rc == VERR_CFGM_VALUE_NOT_FOUND) \
396 pVM->tm.s.aVirtualSyncCatchUpPeriods[iPeriod].u32Percentage = (DefPct); \
397 else if (VBOX_FAILURE(rc)) \
398 return VMSetError(pVM, rc, RT_SRC_POS, N_("Configuration error: Failed to querying 32-bit integer value \"CatchUpPrecentage" #iPeriod "\". (%Vrc)"), rc); \
399 } while (0)
400 /* This needs more tuning. Not sure if we really need so many period and be so gentle. */
401 TM_CFG_PERIOD(0, 750000, 5); /* 0.75ms at 1.05x */
402 TM_CFG_PERIOD(1, 1500000, 10); /* 1.50ms at 1.10x */
403 TM_CFG_PERIOD(2, 8000000, 25); /* 8ms at 1.25x */
404 TM_CFG_PERIOD(3, 30000000, 50); /* 30ms at 1.50x */
405 TM_CFG_PERIOD(4, 75000000, 75); /* 75ms at 1.75x */
406 TM_CFG_PERIOD(5, 175000000, 100); /* 175ms at 2x */
407 TM_CFG_PERIOD(6, 500000000, 200); /* 500ms at 3x */
408 TM_CFG_PERIOD(7, 3000000000, 300); /* 3s at 4x */
409 TM_CFG_PERIOD(8,30000000000, 400); /* 30s at 5x */
410 TM_CFG_PERIOD(9,55000000000, 500); /* 55s at 6x */
411 AssertCompile(RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods) == 10);
412#undef TM_CFG_PERIOD
413
414 /*
415 * Configure real world time (UTC).
416 */
417 rc = CFGMR3QueryS64(pCfgHandle, "UTCOffset", &pVM->tm.s.offUTC);
418 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
419 pVM->tm.s.offUTC = 0; /* ns */
420 else if (VBOX_FAILURE(rc))
421 return VMSetError(pVM, rc, RT_SRC_POS,
422 N_("Configuration error: Failed to querying 64-bit integer value \"UTCOffset\". (%Vrc)"), rc);
423
424 /*
425 * Setup the warp drive.
426 */
427 rc = CFGMR3QueryU32(pCfgHandle, "WarpDrivePercentage", &pVM->tm.s.u32VirtualWarpDrivePercentage);
428 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
429 rc = CFGMR3QueryU32(CFGMR3GetRoot(pVM), "WarpDrivePercentage", &pVM->tm.s.u32VirtualWarpDrivePercentage); /* legacy */
430 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
431 pVM->tm.s.u32VirtualWarpDrivePercentage = 100;
432 else if (VBOX_FAILURE(rc))
433 return VMSetError(pVM, rc, RT_SRC_POS,
434 N_("Configuration error: Failed to querying uint32_t value \"WarpDrivePercent\". (%Vrc)"), rc);
435 else if ( pVM->tm.s.u32VirtualWarpDrivePercentage < 2
436 || pVM->tm.s.u32VirtualWarpDrivePercentage > 20000)
437 return VMSetError(pVM, VERR_INVALID_PARAMETER, RT_SRC_POS,
438 N_("Configuration error: \"WarpDrivePercent\" = %RI32 is not in the range 2..20000!"),
439 pVM->tm.s.u32VirtualWarpDrivePercentage);
440 pVM->tm.s.fVirtualWarpDrive = pVM->tm.s.u32VirtualWarpDrivePercentage != 100;
441 if (pVM->tm.s.fVirtualWarpDrive)
442 LogRel(("TM: u32VirtualWarpDrivePercentage=%RI32\n", pVM->tm.s.u32VirtualWarpDrivePercentage));
443
444 /*
445 * Start the timer (guard against REM not yielding).
446 */
447 uint32_t u32Millies;
448 rc = CFGMR3QueryU32(pCfgHandle, "TimerMillies", &u32Millies);
449 if (rc == VERR_CFGM_VALUE_NOT_FOUND)
450 u32Millies = 10;
451 else if (VBOX_FAILURE(rc))
452 return VMSetError(pVM, rc, RT_SRC_POS,
453 N_("Configuration error: Failed to query uint32_t value \"TimerMillies\", rc=%Vrc.\n"), rc);
454 rc = RTTimerCreate(&pVM->tm.s.pTimer, u32Millies, tmR3TimerCallback, pVM);
455 if (VBOX_FAILURE(rc))
456 {
457 AssertMsgFailed(("Failed to create timer, u32Millies=%d rc=%Vrc.\n", u32Millies, rc));
458 return rc;
459 }
460 Log(("TM: Created timer %p firing every %d millieseconds\n", pVM->tm.s.pTimer, u32Millies));
461 pVM->tm.s.u32TimerMillies = u32Millies;
462
463 /*
464 * Register saved state.
465 */
466 rc = SSMR3RegisterInternal(pVM, "tm", 1, TM_SAVED_STATE_VERSION, sizeof(uint64_t) * 8,
467 NULL, tmR3Save, NULL,
468 NULL, tmR3Load, NULL);
469 if (VBOX_FAILURE(rc))
470 return rc;
471
472 /*
473 * Register statistics.
474 */
475 STAM_REL_REG_USED(pVM, (void *)&pVM->tm.s.VirtualGetRawDataR3.c1nsSteps, STAMTYPE_U32, "/TM/R3/1nsSteps", STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
476 STAM_REL_REG_USED(pVM, (void *)&pVM->tm.s.VirtualGetRawDataR3.cBadPrev, STAMTYPE_U32, "/TM/R3/cBadPrev", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
477 STAM_REL_REG_USED(pVM, (void *)&pVM->tm.s.VirtualGetRawDataR0.c1nsSteps, STAMTYPE_U32, "/TM/R0/1nsSteps", STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
478 STAM_REL_REG_USED(pVM, (void *)&pVM->tm.s.VirtualGetRawDataR0.cBadPrev, STAMTYPE_U32, "/TM/R0/cBadPrev", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
479 STAM_REL_REG_USED(pVM, (void *)&pVM->tm.s.VirtualGetRawDataGC.c1nsSteps, STAMTYPE_U32, "/TM/GC/1nsSteps", STAMUNIT_OCCURENCES, "Virtual time 1ns steps (due to TSC / GIP variations).");
480 STAM_REL_REG_USED(pVM, (void *)&pVM->tm.s.VirtualGetRawDataGC.cBadPrev, STAMTYPE_U32, "/TM/GC/cBadPrev", STAMUNIT_OCCURENCES, "Times the previous virtual time was considered erratic (shouldn't ever happen).");
481 STAM_REL_REG( pVM, (void *)&pVM->tm.s.offVirtualSync, STAMTYPE_U64, "/TM/VirtualSync/CurrentOffset", STAMUNIT_NS, "The current offset. (subtract GivenUp to get the lag)");
482 STAM_REL_REG_USED(pVM, (void *)&pVM->tm.s.offVirtualSyncGivenUp, STAMTYPE_U64, "/TM/VirtualSync/GivenUp", STAMUNIT_NS, "Nanoseconds of the 'CurrentOffset' that's been given up and won't ever be attemted caught up with.");
483
484#ifdef VBOX_WITH_STATISTICS
485 STAM_REG_USED( pVM, (void *)&pVM->tm.s.VirtualGetRawDataR3.cExpired, STAMTYPE_U32, "/TM/R3/cExpired", STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
486 STAM_REG_USED( pVM, (void *)&pVM->tm.s.VirtualGetRawDataR3.cUpdateRaces,STAMTYPE_U32, "/TM/R3/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
487 STAM_REG_USED( pVM, (void *)&pVM->tm.s.VirtualGetRawDataR0.cExpired, STAMTYPE_U32, "/TM/R0/cExpired", STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
488 STAM_REG_USED( pVM, (void *)&pVM->tm.s.VirtualGetRawDataR0.cUpdateRaces,STAMTYPE_U32, "/TM/R0/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
489 STAM_REG_USED( pVM, (void *)&pVM->tm.s.VirtualGetRawDataGC.cExpired, STAMTYPE_U32, "/TM/GC/cExpired", STAMUNIT_OCCURENCES, "Times the TSC interval expired (overlaps 1ns steps).");
490 STAM_REG_USED( pVM, (void *)&pVM->tm.s.VirtualGetRawDataGC.cUpdateRaces,STAMTYPE_U32, "/TM/GC/cUpdateRaces", STAMUNIT_OCCURENCES, "Thread races when updating the previous timestamp.");
491
492 STAM_REG(pVM, &pVM->tm.s.StatDoQueues, STAMTYPE_PROFILE, "/TM/DoQueues", STAMUNIT_TICKS_PER_CALL, "Profiling timer TMR3TimerQueuesDo.");
493 STAM_REG(pVM, &pVM->tm.s.StatDoQueuesSchedule, STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Schedule",STAMUNIT_TICKS_PER_CALL, "The scheduling part.");
494 STAM_REG(pVM, &pVM->tm.s.StatDoQueuesRun, STAMTYPE_PROFILE_ADV, "/TM/DoQueues/Run", STAMUNIT_TICKS_PER_CALL, "The run part.");
495
496 STAM_REG(pVM, &pVM->tm.s.StatPollAlreadySet, STAMTYPE_COUNTER, "/TM/PollAlreadySet", STAMUNIT_OCCURENCES, "TMTimerPoll calls where the FF was already set.");
497 STAM_REG(pVM, &pVM->tm.s.StatPollVirtual, STAMTYPE_COUNTER, "/TM/PollHitsVirtual", STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL queue.");
498 STAM_REG(pVM, &pVM->tm.s.StatPollVirtualSync, STAMTYPE_COUNTER, "/TM/PollHitsVirtualSync",STAMUNIT_OCCURENCES, "The number of times TMTimerPoll found an expired TMCLOCK_VIRTUAL_SYNC queue.");
499 STAM_REG(pVM, &pVM->tm.s.StatPollMiss, STAMTYPE_COUNTER, "/TM/PollMiss", STAMUNIT_OCCURENCES, "TMTimerPoll calls where nothing had expired.");
500
501 STAM_REG(pVM, &pVM->tm.s.StatPostponedR3, STAMTYPE_COUNTER, "/TM/PostponedR3", STAMUNIT_OCCURENCES, "Postponed due to unschedulable state, in ring-3.");
502 STAM_REG(pVM, &pVM->tm.s.StatPostponedR0, STAMTYPE_COUNTER, "/TM/PostponedR0", STAMUNIT_OCCURENCES, "Postponed due to unschedulable state, in ring-0.");
503 STAM_REG(pVM, &pVM->tm.s.StatPostponedGC, STAMTYPE_COUNTER, "/TM/PostponedGC", STAMUNIT_OCCURENCES, "Postponed due to unschedulable state, in GC.");
504
505 STAM_REG(pVM, &pVM->tm.s.StatScheduleOneGC, STAMTYPE_PROFILE, "/TM/ScheduleOneGC", STAMUNIT_TICKS_PER_CALL, "Profiling the scheduling of one queue during a TMTimer* call in EMT.\n");
506 STAM_REG(pVM, &pVM->tm.s.StatScheduleOneR0, STAMTYPE_PROFILE, "/TM/ScheduleOneR0", STAMUNIT_TICKS_PER_CALL, "Profiling the scheduling of one queue during a TMTimer* call in EMT.\n");
507 STAM_REG(pVM, &pVM->tm.s.StatScheduleOneR3, STAMTYPE_PROFILE, "/TM/ScheduleOneR3", STAMUNIT_TICKS_PER_CALL, "Profiling the scheduling of one queue during a TMTimer* call in EMT.\n");
508 STAM_REG(pVM, &pVM->tm.s.StatScheduleSetFF, STAMTYPE_COUNTER, "/TM/ScheduleSetFF", STAMUNIT_OCCURENCES, "The number of times the timer FF was set instead of doing scheduling.");
509
510 STAM_REG(pVM, &pVM->tm.s.StatTimerSetGC, STAMTYPE_PROFILE, "/TM/TimerSetGC", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSet calls made in GC.");
511 STAM_REG(pVM, &pVM->tm.s.StatTimerSetR0, STAMTYPE_PROFILE, "/TM/TimerSetR0", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSet calls made in ring-0.");
512 STAM_REG(pVM, &pVM->tm.s.StatTimerSetR3, STAMTYPE_PROFILE, "/TM/TimerSetR3", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerSet calls made in ring-3.");
513
514 STAM_REG(pVM, &pVM->tm.s.StatTimerStopGC, STAMTYPE_PROFILE, "/TM/TimerStopGC", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerStop calls made in GC.");
515 STAM_REG(pVM, &pVM->tm.s.StatTimerStopR0, STAMTYPE_PROFILE, "/TM/TimerStopR0", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerStop calls made in ring-0.");
516 STAM_REG(pVM, &pVM->tm.s.StatTimerStopR3, STAMTYPE_PROFILE, "/TM/TimerStopR3", STAMUNIT_TICKS_PER_CALL, "Profiling TMTimerStop calls made in ring-3.");
517
518 STAM_REG(pVM, &pVM->tm.s.StatVirtualGet, STAMTYPE_COUNTER, "/TM/VirtualGet", STAMUNIT_OCCURENCES, "The number of times TMTimerGet was called when the clock was running.");
519 STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSetFF, STAMTYPE_COUNTER, "/TM/VirtualGetSetFF", STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGet.");
520 STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSync, STAMTYPE_COUNTER, "/TM/VirtualGetSync", STAMUNIT_OCCURENCES, "The number of times TMTimerGetSync was called when the clock was running.");
521 STAM_REG(pVM, &pVM->tm.s.StatVirtualGetSyncSetFF,STAMTYPE_COUNTER, "/TM/VirtualGetSyncSetFF",STAMUNIT_OCCURENCES, "Times we set the FF when calling TMTimerGetSync.");
522 STAM_REG(pVM, &pVM->tm.s.StatVirtualPause, STAMTYPE_COUNTER, "/TM/VirtualPause", STAMUNIT_OCCURENCES, "The number of times TMR3TimerPause was called.");
523 STAM_REG(pVM, &pVM->tm.s.StatVirtualResume, STAMTYPE_COUNTER, "/TM/VirtualResume", STAMUNIT_OCCURENCES, "The number of times TMR3TimerResume was called.");
524
525 STAM_REG(pVM, &pVM->tm.s.StatTimerCallbackSetFF,STAMTYPE_COUNTER, "/TM/CallbackSetFF", STAMUNIT_OCCURENCES, "The number of times the timer callback set FF.");
526
527
528 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncCatchup, STAMTYPE_PROFILE_ADV, "/TM/VirtualSync/CatchUp", STAMUNIT_TICKS_PER_OCCURENCE, "Counting and measuring the times spent catching up.");
529 STAM_REG(pVM, (void *)&pVM->tm.s.fVirtualSyncCatchUp, STAMTYPE_U8, "/TM/VirtualSync/CatchUpActive", STAMUNIT_NONE, "Catch-Up active indicator.");
530 STAM_REG(pVM, (void *)&pVM->tm.s.u32VirtualSyncCatchUpPercentage, STAMTYPE_U32, "/TM/VirtualSync/CatchUpPercentage", STAMUNIT_PCT, "The catch-up percentage. (+100/100 to get clock multiplier)");
531 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUp, STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUp", STAMUNIT_OCCURENCES, "Times the catch-up was abandoned.");
532 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting,STAMTYPE_COUNTER, "/TM/VirtualSync/GiveUpBeforeStarting", STAMUNIT_OCCURENCES, "Times the catch-up was abandoned before even starting. (Typically debugging++.)");
533 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRun, STAMTYPE_COUNTER, "/TM/VirtualSync/Run", STAMUNIT_OCCURENCES, "Times the virtual sync timer queue was considered.");
534 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunRestart, STAMTYPE_COUNTER, "/TM/VirtualSync/Run/Restarts", STAMUNIT_OCCURENCES, "Times the clock was restarted after a run.");
535 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunStop, STAMTYPE_COUNTER, "/TM/VirtualSync/Run/Stop", STAMUNIT_OCCURENCES, "Times the clock was stopped when calculating the current time before examining the timers.");
536 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunStoppedAlready, STAMTYPE_COUNTER, "/TM/VirtualSync/Run/StoppedAlready", STAMUNIT_OCCURENCES, "Times the clock was already stopped elsewhere (TMVirtualSyncGet).");
537 STAM_REG(pVM, &pVM->tm.s.StatVirtualSyncRunSlack, STAMTYPE_PROFILE, "/TM/VirtualSync/Run/Slack", STAMUNIT_NS_PER_OCCURENCE, "The scheduling slack. (Catch-up handed out when running timers.)");
538 for (unsigned i = 0; i < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods); i++)
539 {
540 STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_PCT, "The catch-up percentage.", "/TM/VirtualSync/Periods/%u", i);
541 STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupAdjust[i], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times adjusted to this period.", "/TM/VirtualSync/Periods/%u/Adjust", i);
542 STAMR3RegisterF(pVM, &pVM->tm.s.aStatVirtualSyncCatchupInitial[i], STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Times started in this period.", "/TM/VirtualSync/Periods/%u/Initial", i);
543 STAMR3RegisterF(pVM, &pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u64Start, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_NS, "Start of this period (lag).", "/TM/VirtualSync/Periods/%u/Start", i);
544 }
545
546#endif /* VBOX_WITH_STATISTICS */
547
548 /*
549 * Register info handlers.
550 */
551 DBGFR3InfoRegisterInternalEx(pVM, "timers", "Dumps all timers. No arguments.", tmR3TimerInfo, DBGFINFO_FLAGS_RUN_ON_EMT);
552 DBGFR3InfoRegisterInternalEx(pVM, "activetimers", "Dumps active all timers. No arguments.", tmR3TimerInfoActive, DBGFINFO_FLAGS_RUN_ON_EMT);
553 DBGFR3InfoRegisterInternalEx(pVM, "clocks", "Display the time of the various clocks.", tmR3InfoClocks, DBGFINFO_FLAGS_RUN_ON_EMT);
554
555 return VINF_SUCCESS;
556}
557
558
559/**
560 * Checks if the host CPU has a fixed TSC frequency.
561 *
562 * @returns true if it has, false if it hasn't.
563 *
564 * @remark This test doesn't bother with very old CPUs that doesn't do power
565 * management or any other stuff that might influence the TSC rate.
566 * This isn't currently relevant.
567 */
568static bool tmR3HasFixedTSC(void)
569{
570 if (ASMHasCpuId())
571 {
572 uint32_t uEAX, uEBX, uECX, uEDX;
573 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
574 if ( uEAX >= 1
575 && uEBX == X86_CPUID_VENDOR_AMD_EBX
576 && uECX == X86_CPUID_VENDOR_AMD_ECX
577 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
578 {
579 /*
580 * AuthenticAMD - Check for APM support and that TscInvariant is set.
581 *
582 * This test isn't correct with respect to fixed/non-fixed TSC and
583 * older models, but this isn't relevant since the result is currently
584 * only used for making a descision on AMD-V models.
585 */
586 ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
587 if (uEAX >= 0x80000007)
588 {
589 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
590 if (uEDX & BIT(8) /* TscInvariant */)
591 return true;
592 }
593 }
594 else if ( uEAX >= 1
595 && uEBX == X86_CPUID_VENDOR_INTEL_EBX
596 && uECX == X86_CPUID_VENDOR_INTEL_ECX
597 && uEDX == X86_CPUID_VENDOR_INTEL_EDX)
598 {
599 /*
600 * GenuineIntel - Check the model number.
601 *
602 * This test is lacking in the same way and for the same reasons
603 * as the AMD test above.
604 */
605 ASMCpuId(1, &uEAX, &uEBX, &uECX, &uEDX);
606 unsigned uModel = (uEAX >> 4) & 0x0f;
607 unsigned uFamily = (uEAX >> 8) & 0x0f;
608 if (uFamily == 0x0f)
609 uFamily += (uEAX >> 20) & 0xff;
610 if (uFamily >= 0x06)
611 uModel += ((uEAX >> 16) & 0x0f) << 4;
612 if ( (uFamily == 0x0f /*P4*/ && uModel >= 0x03)
613 || (uFamily == 0x06 /*P2/P3*/ && uModel >= 0x0e))
614 return true;
615 }
616 }
617 return false;
618}
619
620
621/**
622 * Calibrate the CPU tick.
623 *
624 * @returns Number of ticks per second.
625 */
626static uint64_t tmR3CalibrateTSC(void)
627{
628 /*
629 * Use GIP when available present.
630 */
631 uint64_t u64Hz;
632 PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
633 if ( pGip
634 && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC)
635 {
636 unsigned iCpu = pGip->u32Mode != SUPGIPMODE_ASYNC_TSC ? 0 : ASMGetApicId();
637 if (iCpu >= RT_ELEMENTS(pGip->aCPUs))
638 AssertReleaseMsgFailed(("iCpu=%d - the ApicId is too high. send VBox.log and hardware specs!\n", iCpu));
639 else
640 {
641 if (tmR3HasFixedTSC())
642 /* Sleep a bit to get a more reliable CpuHz value. */
643 RTThreadSleep(32);
644 else
645 {
646 /* Spin for 40ms to try push up the CPU frequency and get a more reliable CpuHz value. */
647 const uint64_t u64 = RTTimeMilliTS();
648 while ((RTTimeMilliTS() - u64) < 40 /*ms*/)
649 /* nothing */;
650 }
651
652 pGip = g_pSUPGlobalInfoPage;
653 if ( pGip
654 && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC
655 && (u64Hz = pGip->aCPUs[iCpu].u64CpuHz)
656 && u64Hz != ~(uint64_t)0)
657 return u64Hz;
658 }
659 }
660
661 /* call this once first to make sure it's initialized. */
662 RTTimeNanoTS();
663
664 /*
665 * Yield the CPU to increase our chances of getting
666 * a correct value.
667 */
668 RTThreadYield(); /* Try avoid interruptions between TSC and NanoTS samplings. */
669 static const unsigned s_auSleep[5] = { 50, 30, 30, 40, 40 };
670 uint64_t au64Samples[5];
671 unsigned i;
672 for (i = 0; i < ELEMENTS(au64Samples); i++)
673 {
674 unsigned cMillies;
675 int cTries = 5;
676 uint64_t u64Start = ASMReadTSC();
677 uint64_t u64End;
678 uint64_t StartTS = RTTimeNanoTS();
679 uint64_t EndTS;
680 do
681 {
682 RTThreadSleep(s_auSleep[i]);
683 u64End = ASMReadTSC();
684 EndTS = RTTimeNanoTS();
685 cMillies = (unsigned)((EndTS - StartTS + 500000) / 1000000);
686 } while ( cMillies == 0 /* the sleep may be interrupted... */
687 || (cMillies < 20 && --cTries > 0));
688 uint64_t u64Diff = u64End - u64Start;
689
690 au64Samples[i] = (u64Diff * 1000) / cMillies;
691 AssertMsg(cTries > 0, ("cMillies=%d i=%d\n", cMillies, i));
692 }
693
694 /*
695 * Discard the highest and lowest results and calculate the average.
696 */
697 unsigned iHigh = 0;
698 unsigned iLow = 0;
699 for (i = 1; i < ELEMENTS(au64Samples); i++)
700 {
701 if (au64Samples[i] < au64Samples[iLow])
702 iLow = i;
703 if (au64Samples[i] > au64Samples[iHigh])
704 iHigh = i;
705 }
706 au64Samples[iLow] = 0;
707 au64Samples[iHigh] = 0;
708
709 u64Hz = au64Samples[0];
710 for (i = 1; i < ELEMENTS(au64Samples); i++)
711 u64Hz += au64Samples[i];
712 u64Hz /= ELEMENTS(au64Samples) - 2;
713
714 return u64Hz;
715}
716
717
718/**
719 * Applies relocations to data and code managed by this
720 * component. This function will be called at init and
721 * whenever the VMM need to relocate it self inside the GC.
722 *
723 * @param pVM The VM.
724 * @param offDelta Relocation delta relative to old location.
725 */
726TMR3DECL(void) TMR3Relocate(PVM pVM, RTGCINTPTR offDelta)
727{
728 int rc;
729 LogFlow(("TMR3Relocate\n"));
730
731 pVM->tm.s.pvGIPGC = MMHyperR3ToGC(pVM, pVM->tm.s.pvGIPR3);
732 pVM->tm.s.paTimerQueuesGC = MMHyperR3ToGC(pVM, pVM->tm.s.paTimerQueuesR3);
733 pVM->tm.s.paTimerQueuesR0 = MMHyperR3ToR0(pVM, pVM->tm.s.paTimerQueuesR3);
734
735 pVM->tm.s.VirtualGetRawDataGC.pu64Prev = MMHyperR3ToGC(pVM, (void *)&pVM->tm.s.u64VirtualRawPrev);
736 AssertFatal(pVM->tm.s.VirtualGetRawDataGC.pu64Prev);
737 rc = PDMR3GetSymbolGCLazy(pVM, NULL, "tmVirtualNanoTSBad", &pVM->tm.s.VirtualGetRawDataGC.pfnBad);
738 AssertFatalRC(rc);
739 rc = PDMR3GetSymbolGCLazy(pVM, NULL, "tmVirtualNanoTSRediscover", &pVM->tm.s.VirtualGetRawDataGC.pfnRediscover);
740 AssertFatalRC(rc);
741
742 if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceSync)
743 rc = PDMR3GetSymbolGCLazy(pVM, NULL, "RTTimeNanoTSLFenceSync", &pVM->tm.s.pfnVirtualGetRawGC);
744 else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLFenceAsync)
745 rc = PDMR3GetSymbolGCLazy(pVM, NULL, "RTTimeNanoTSLFenceAsync", &pVM->tm.s.pfnVirtualGetRawGC);
746 else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacySync)
747 rc = PDMR3GetSymbolGCLazy(pVM, NULL, "RTTimeNanoTSLegacySync", &pVM->tm.s.pfnVirtualGetRawGC);
748 else if (pVM->tm.s.pfnVirtualGetRawR3 == RTTimeNanoTSLegacyAsync)
749 rc = PDMR3GetSymbolGCLazy(pVM, NULL, "RTTimeNanoTSLegacyAsync", &pVM->tm.s.pfnVirtualGetRawGC);
750 else
751 AssertFatalFailed();
752
753 /*
754 * Iterate the timers updating the pVMGC pointers.
755 */
756 for (PTMTIMER pTimer = pVM->tm.s.pCreated; pTimer; pTimer = pTimer->pBigNext)
757 {
758 pTimer->pVMGC = pVM->pVMGC;
759 pTimer->pVMR0 = pVM->pVMR0;
760 }
761}
762
763
764/**
765 * Terminates the TM.
766 *
767 * Termination means cleaning up and freeing all resources,
768 * the VM it self is at this point powered off or suspended.
769 *
770 * @returns VBox status code.
771 * @param pVM The VM to operate on.
772 */
773TMR3DECL(int) TMR3Term(PVM pVM)
774{
775 AssertMsg(pVM->tm.s.offVM, ("bad init order!\n"));
776 if (pVM->tm.s.pTimer)
777 {
778 int rc = RTTimerDestroy(pVM->tm.s.pTimer);
779 AssertRC(rc);
780 pVM->tm.s.pTimer = NULL;
781 }
782
783 return VINF_SUCCESS;
784}
785
786
787/**
788 * The VM is being reset.
789 *
790 * For the TM component this means that a rescheduling is preformed,
791 * the FF is cleared and but without running the queues. We'll have to
792 * check if this makes sense or not, but it seems like a good idea now....
793 *
794 * @param pVM VM handle.
795 */
796TMR3DECL(void) TMR3Reset(PVM pVM)
797{
798 LogFlow(("TMR3Reset:\n"));
799 VM_ASSERT_EMT(pVM);
800
801 /*
802 * Abort any pending catch up.
803 * This isn't perfect,
804 */
805 if (pVM->tm.s.fVirtualSyncCatchUp)
806 {
807 const uint64_t offVirtualNow = TMVirtualGetEx(pVM, false /* don't check timers */);
808 const uint64_t offVirtualSyncNow = TMVirtualSyncGetEx(pVM, false /* don't check timers */);
809 if (pVM->tm.s.fVirtualSyncCatchUp)
810 {
811 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
812
813 const uint64_t offOld = pVM->tm.s.offVirtualSyncGivenUp;
814 const uint64_t offNew = offVirtualNow - offVirtualSyncNow;
815 Assert(offOld <= offNew);
816 ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
817 ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSync, offNew);
818 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
819 LogRel(("TM: Aborting catch-up attempt on reset with a %RU64 ns lag on reset; new total: %RU64 ns\n", offNew - offOld, offNew));
820 }
821 }
822
823 /*
824 * Process the queues.
825 */
826 for (int i = 0; i < TMCLOCK_MAX; i++)
827 tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[i]);
828#ifdef VBOX_STRICT
829 tmTimerQueuesSanityChecks(pVM, "TMR3Reset");
830#endif
831 VM_FF_CLEAR(pVM, VM_FF_TIMER);
832}
833
834
835/**
836 * Resolve a builtin GC symbol.
837 * Called by PDM when loading or relocating GC modules.
838 *
839 * @returns VBox status
840 * @param pVM VM Handle.
841 * @param pszSymbol Symbol to resolv
842 * @param pGCPtrValue Where to store the symbol value.
843 * @remark This has to work before TMR3Relocate() is called.
844 */
845TMR3DECL(int) TMR3GetImportGC(PVM pVM, const char *pszSymbol, PRTGCPTR pGCPtrValue)
846{
847 if (!strcmp(pszSymbol, "g_pSUPGlobalInfoPage"))
848 *pGCPtrValue = MMHyperHC2GC(pVM, &pVM->tm.s.pvGIPGC);
849 //else if (..)
850 else
851 return VERR_SYMBOL_NOT_FOUND;
852 return VINF_SUCCESS;
853}
854
855
856/**
857 * Execute state save operation.
858 *
859 * @returns VBox status code.
860 * @param pVM VM Handle.
861 * @param pSSM SSM operation handle.
862 */
863static DECLCALLBACK(int) tmR3Save(PVM pVM, PSSMHANDLE pSSM)
864{
865 LogFlow(("tmR3Save:\n"));
866 Assert(!pVM->tm.s.fTSCTicking);
867 Assert(!pVM->tm.s.fVirtualTicking);
868 Assert(!pVM->tm.s.fVirtualSyncTicking);
869
870 /*
871 * Save the virtual clocks.
872 */
873 /* the virtual clock. */
874 SSMR3PutU64(pSSM, TMCLOCK_FREQ_VIRTUAL);
875 SSMR3PutU64(pSSM, pVM->tm.s.u64Virtual);
876
877 /* the virtual timer synchronous clock. */
878 SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSync);
879 SSMR3PutU64(pSSM, pVM->tm.s.offVirtualSync);
880 SSMR3PutU64(pSSM, pVM->tm.s.offVirtualSyncGivenUp);
881 SSMR3PutU64(pSSM, pVM->tm.s.u64VirtualSyncCatchUpPrev);
882 SSMR3PutBool(pSSM, pVM->tm.s.fVirtualSyncCatchUp);
883
884 /* real time clock */
885 SSMR3PutU64(pSSM, TMCLOCK_FREQ_REAL);
886
887 /* the cpu tick clock. */
888 SSMR3PutU64(pSSM, TMCpuTickGet(pVM));
889 return SSMR3PutU64(pSSM, pVM->tm.s.cTSCTicksPerSecond);
890}
891
892
893/**
894 * Execute state load operation.
895 *
896 * @returns VBox status code.
897 * @param pVM VM Handle.
898 * @param pSSM SSM operation handle.
899 * @param u32Version Data layout version.
900 */
901static DECLCALLBACK(int) tmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
902{
903 LogFlow(("tmR3Load:\n"));
904 Assert(!pVM->tm.s.fTSCTicking);
905 Assert(!pVM->tm.s.fVirtualTicking);
906 Assert(!pVM->tm.s.fVirtualSyncTicking);
907
908 /*
909 * Validate version.
910 */
911 if (u32Version != TM_SAVED_STATE_VERSION)
912 {
913 Log(("tmR3Load: Invalid version u32Version=%d!\n", u32Version));
914 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
915 }
916
917 /*
918 * Load the virtual clock.
919 */
920 pVM->tm.s.fVirtualTicking = false;
921 /* the virtual clock. */
922 uint64_t u64Hz;
923 int rc = SSMR3GetU64(pSSM, &u64Hz);
924 if (VBOX_FAILURE(rc))
925 return rc;
926 if (u64Hz != TMCLOCK_FREQ_VIRTUAL)
927 {
928 AssertMsgFailed(("The virtual clock frequency differs! Saved: %RU64 Binary: %RU64\n",
929 u64Hz, TMCLOCK_FREQ_VIRTUAL));
930 return VERR_SSM_VIRTUAL_CLOCK_HZ;
931 }
932 SSMR3GetU64(pSSM, &pVM->tm.s.u64Virtual);
933 pVM->tm.s.u64VirtualOffset = 0;
934
935 /* the virtual timer synchronous clock. */
936 pVM->tm.s.fVirtualSyncTicking = false;
937 uint64_t u64;
938 SSMR3GetU64(pSSM, &u64);
939 pVM->tm.s.u64VirtualSync = u64;
940 SSMR3GetU64(pSSM, &u64);
941 pVM->tm.s.offVirtualSync = u64;
942 SSMR3GetU64(pSSM, &u64);
943 pVM->tm.s.offVirtualSyncGivenUp = u64;
944 SSMR3GetU64(pSSM, &u64);
945 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64;
946 bool f;
947 SSMR3GetBool(pSSM, &f);
948 pVM->tm.s.fVirtualSyncCatchUp = f;
949
950 /* the real clock */
951 rc = SSMR3GetU64(pSSM, &u64Hz);
952 if (VBOX_FAILURE(rc))
953 return rc;
954 if (u64Hz != TMCLOCK_FREQ_REAL)
955 {
956 AssertMsgFailed(("The real clock frequency differs! Saved: %RU64 Binary: %RU64\n",
957 u64Hz, TMCLOCK_FREQ_REAL));
958 return VERR_SSM_VIRTUAL_CLOCK_HZ; /* missleading... */
959 }
960
961 /* the cpu tick clock. */
962 pVM->tm.s.fTSCTicking = false;
963 SSMR3GetU64(pSSM, &pVM->tm.s.u64TSC);
964 rc = SSMR3GetU64(pSSM, &u64Hz);
965 if (VBOX_FAILURE(rc))
966 return rc;
967 if (pVM->tm.s.fTSCUseRealTSC)
968 pVM->tm.s.u64TSCOffset = 0; /** @todo TSC restore stuff and HWACC. */
969 else
970 pVM->tm.s.cTSCTicksPerSecond = u64Hz;
971 LogRel(("TM: cTSCTicksPerSecond=%#RX64 (%RU64) fTSCVirtualized=%RTbool fTSCUseRealTSC=%RTbool (state load)\n",
972 pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.cTSCTicksPerSecond, pVM->tm.s.fTSCVirtualized, pVM->tm.s.fTSCUseRealTSC));
973
974 /*
975 * Make sure timers get rescheduled immediately.
976 */
977 VM_FF_SET(pVM, VM_FF_TIMER);
978
979 return VINF_SUCCESS;
980}
981
982
983/**
984 * Internal TMR3TimerCreate worker.
985 *
986 * @returns VBox status code.
987 * @param pVM The VM handle.
988 * @param enmClock The timer clock.
989 * @param pszDesc The timer description.
990 * @param ppTimer Where to store the timer pointer on success.
991 */
992static int tmr3TimerCreate(PVM pVM, TMCLOCK enmClock, const char *pszDesc, PPTMTIMERR3 ppTimer)
993{
994 VM_ASSERT_EMT(pVM);
995
996 /*
997 * Allocate the timer.
998 */
999 PTMTIMERR3 pTimer = NULL;
1000 if (pVM->tm.s.pFree && VM_IS_EMT(pVM))
1001 {
1002 pTimer = pVM->tm.s.pFree;
1003 pVM->tm.s.pFree = pTimer->pBigNext;
1004 Log3(("TM: Recycling timer %p, new free head %p.\n", pTimer, pTimer->pBigNext));
1005 }
1006
1007 if (!pTimer)
1008 {
1009 int rc = MMHyperAlloc(pVM, sizeof(*pTimer), 0, MM_TAG_TM, (void **)&pTimer);
1010 if (VBOX_FAILURE(rc))
1011 return rc;
1012 Log3(("TM: Allocated new timer %p\n", pTimer));
1013 }
1014
1015 /*
1016 * Initialize it.
1017 */
1018 pTimer->u64Expire = 0;
1019 pTimer->enmClock = enmClock;
1020 pTimer->pVMR3 = pVM;
1021 pTimer->pVMR0 = pVM->pVMR0;
1022 pTimer->pVMGC = pVM->pVMGC;
1023 pTimer->enmState = TMTIMERSTATE_STOPPED;
1024 pTimer->offScheduleNext = 0;
1025 pTimer->offNext = 0;
1026 pTimer->offPrev = 0;
1027 pTimer->pszDesc = pszDesc;
1028
1029 /* insert into the list of created timers. */
1030 pTimer->pBigPrev = NULL;
1031 pTimer->pBigNext = pVM->tm.s.pCreated;
1032 pVM->tm.s.pCreated = pTimer;
1033 if (pTimer->pBigNext)
1034 pTimer->pBigNext->pBigPrev = pTimer;
1035#ifdef VBOX_STRICT
1036 tmTimerQueuesSanityChecks(pVM, "tmR3TimerCreate");
1037#endif
1038
1039 *ppTimer = pTimer;
1040 return VINF_SUCCESS;
1041}
1042
1043
1044/**
1045 * Creates a device timer.
1046 *
1047 * @returns VBox status.
1048 * @param pVM The VM to create the timer in.
1049 * @param pDevIns Device instance.
1050 * @param enmClock The clock to use on this timer.
1051 * @param pfnCallback Callback function.
1052 * @param pszDesc Pointer to description string which must stay around
1053 * until the timer is fully destroyed (i.e. a bit after TMTimerDestroy()).
1054 * @param ppTimer Where to store the timer on success.
1055 */
1056TMR3DECL(int) TMR3TimerCreateDevice(PVM pVM, PPDMDEVINS pDevIns, TMCLOCK enmClock, PFNTMTIMERDEV pfnCallback, const char *pszDesc, PPTMTIMERR3 ppTimer)
1057{
1058 /*
1059 * Allocate and init stuff.
1060 */
1061 int rc = tmr3TimerCreate(pVM, enmClock, pszDesc, ppTimer);
1062 if (VBOX_SUCCESS(rc))
1063 {
1064 (*ppTimer)->enmType = TMTIMERTYPE_DEV;
1065 (*ppTimer)->u.Dev.pfnTimer = pfnCallback;
1066 (*ppTimer)->u.Dev.pDevIns = pDevIns;
1067 Log(("TM: Created device timer %p clock %d callback %p '%s'\n", (*ppTimer), enmClock, pfnCallback, pszDesc));
1068 }
1069
1070 return rc;
1071}
1072
1073
1074/**
1075 * Creates a driver timer.
1076 *
1077 * @returns VBox status.
1078 * @param pVM The VM to create the timer in.
1079 * @param pDrvIns Driver instance.
1080 * @param enmClock The clock to use on this timer.
1081 * @param pfnCallback Callback function.
1082 * @param pszDesc Pointer to description string which must stay around
1083 * until the timer is fully destroyed (i.e. a bit after TMTimerDestroy()).
1084 * @param ppTimer Where to store the timer on success.
1085 */
1086TMR3DECL(int) TMR3TimerCreateDriver(PVM pVM, PPDMDRVINS pDrvIns, TMCLOCK enmClock, PFNTMTIMERDRV pfnCallback, const char *pszDesc, PPTMTIMERR3 ppTimer)
1087{
1088 /*
1089 * Allocate and init stuff.
1090 */
1091 int rc = tmr3TimerCreate(pVM, enmClock, pszDesc, ppTimer);
1092 if (VBOX_SUCCESS(rc))
1093 {
1094 (*ppTimer)->enmType = TMTIMERTYPE_DRV;
1095 (*ppTimer)->u.Drv.pfnTimer = pfnCallback;
1096 (*ppTimer)->u.Drv.pDrvIns = pDrvIns;
1097 Log(("TM: Created device timer %p clock %d callback %p '%s'\n", (*ppTimer), enmClock, pfnCallback, pszDesc));
1098 }
1099
1100 return rc;
1101}
1102
1103
1104/**
1105 * Creates an internal timer.
1106 *
1107 * @returns VBox status.
1108 * @param pVM The VM to create the timer in.
1109 * @param enmClock The clock to use on this timer.
1110 * @param pfnCallback Callback function.
1111 * @param pvUser User argument to be passed to the callback.
1112 * @param pszDesc Pointer to description string which must stay around
1113 * until the timer is fully destroyed (i.e. a bit after TMTimerDestroy()).
1114 * @param ppTimer Where to store the timer on success.
1115 */
1116TMR3DECL(int) TMR3TimerCreateInternal(PVM pVM, TMCLOCK enmClock, PFNTMTIMERINT pfnCallback, void *pvUser, const char *pszDesc, PPTMTIMERR3 ppTimer)
1117{
1118 /*
1119 * Allocate and init stuff.
1120 */
1121 PTMTIMER pTimer;
1122 int rc = tmr3TimerCreate(pVM, enmClock, pszDesc, &pTimer);
1123 if (VBOX_SUCCESS(rc))
1124 {
1125 pTimer->enmType = TMTIMERTYPE_INTERNAL;
1126 pTimer->u.Internal.pfnTimer = pfnCallback;
1127 pTimer->u.Internal.pvUser = pvUser;
1128 *ppTimer = pTimer;
1129 Log(("TM: Created internal timer %p clock %d callback %p '%s'\n", pTimer, enmClock, pfnCallback, pszDesc));
1130 }
1131
1132 return rc;
1133}
1134
1135/**
1136 * Creates an external timer.
1137 *
1138 * @returns Timer handle on success.
1139 * @returns NULL on failure.
1140 * @param pVM The VM to create the timer in.
1141 * @param enmClock The clock to use on this timer.
1142 * @param pfnCallback Callback function.
1143 * @param pvUser User argument.
1144 * @param pszDesc Pointer to description string which must stay around
1145 * until the timer is fully destroyed (i.e. a bit after TMTimerDestroy()).
1146 */
1147TMR3DECL(PTMTIMERR3) TMR3TimerCreateExternal(PVM pVM, TMCLOCK enmClock, PFNTMTIMEREXT pfnCallback, void *pvUser, const char *pszDesc)
1148{
1149 /*
1150 * Allocate and init stuff.
1151 */
1152 PTMTIMERR3 pTimer;
1153 int rc = tmr3TimerCreate(pVM, enmClock, pszDesc, &pTimer);
1154 if (VBOX_SUCCESS(rc))
1155 {
1156 pTimer->enmType = TMTIMERTYPE_EXTERNAL;
1157 pTimer->u.External.pfnTimer = pfnCallback;
1158 pTimer->u.External.pvUser = pvUser;
1159 Log(("TM: Created external timer %p clock %d callback %p '%s'\n", pTimer, enmClock, pfnCallback, pszDesc));
1160 return pTimer;
1161 }
1162
1163 return NULL;
1164}
1165
1166
1167/**
1168 * Destroy all timers owned by a device.
1169 *
1170 * @returns VBox status.
1171 * @param pVM VM handle.
1172 * @param pDevIns Device which timers should be destroyed.
1173 */
1174TMR3DECL(int) TMR3TimerDestroyDevice(PVM pVM, PPDMDEVINS pDevIns)
1175{
1176 LogFlow(("TMR3TimerDestroyDevice: pDevIns=%p\n", pDevIns));
1177 if (!pDevIns)
1178 return VERR_INVALID_PARAMETER;
1179
1180 PTMTIMER pCur = pVM->tm.s.pCreated;
1181 while (pCur)
1182 {
1183 PTMTIMER pDestroy = pCur;
1184 pCur = pDestroy->pBigNext;
1185 if ( pDestroy->enmType == TMTIMERTYPE_DEV
1186 && pDestroy->u.Dev.pDevIns == pDevIns)
1187 {
1188 int rc = TMTimerDestroy(pDestroy);
1189 AssertRC(rc);
1190 }
1191 }
1192 LogFlow(("TMR3TimerDestroyDevice: returns VINF_SUCCESS\n"));
1193 return VINF_SUCCESS;
1194}
1195
1196
1197/**
1198 * Destroy all timers owned by a driver.
1199 *
1200 * @returns VBox status.
1201 * @param pVM VM handle.
1202 * @param pDrvIns Driver which timers should be destroyed.
1203 */
1204TMR3DECL(int) TMR3TimerDestroyDriver(PVM pVM, PPDMDRVINS pDrvIns)
1205{
1206 LogFlow(("TMR3TimerDestroyDriver: pDrvIns=%p\n", pDrvIns));
1207 if (!pDrvIns)
1208 return VERR_INVALID_PARAMETER;
1209
1210 PTMTIMER pCur = pVM->tm.s.pCreated;
1211 while (pCur)
1212 {
1213 PTMTIMER pDestroy = pCur;
1214 pCur = pDestroy->pBigNext;
1215 if ( pDestroy->enmType == TMTIMERTYPE_DRV
1216 && pDestroy->u.Drv.pDrvIns == pDrvIns)
1217 {
1218 int rc = TMTimerDestroy(pDestroy);
1219 AssertRC(rc);
1220 }
1221 }
1222 LogFlow(("TMR3TimerDestroyDriver: returns VINF_SUCCESS\n"));
1223 return VINF_SUCCESS;
1224}
1225
1226
1227/**
1228 * Checks if the sync queue has one or more expired timers.
1229 *
1230 * @returns true / false.
1231 *
1232 * @param pVM The VM handle.
1233 * @param enmClock The queue.
1234 */
1235DECLINLINE(bool) tmR3HasExpiredTimer(PVM pVM, TMCLOCK enmClock)
1236{
1237 const uint64_t u64Expire = pVM->tm.s.CTXALLSUFF(paTimerQueues)[enmClock].u64Expire;
1238 return u64Expire != INT64_MAX && u64Expire <= tmClock(pVM, enmClock);
1239}
1240
1241
1242/**
1243 * Checks for expired timers in all the queues.
1244 *
1245 * @returns true / false.
1246 * @param pVM The VM handle.
1247 */
1248DECLINLINE(bool) tmR3AnyExpiredTimers(PVM pVM)
1249{
1250 /*
1251 * Combine the time calculation for the first two since we're not on EMT
1252 * TMVirtualSyncGet only permits EMT.
1253 */
1254 uint64_t u64Now = TMVirtualGet(pVM);
1255 if (pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire <= u64Now)
1256 return true;
1257 u64Now = pVM->tm.s.fVirtualSyncTicking
1258 ? u64Now - pVM->tm.s.offVirtualSync
1259 : pVM->tm.s.u64VirtualSync;
1260 if (pVM->tm.s.CTXALLSUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire <= u64Now)
1261 return true;
1262
1263 /*
1264 * The remaining timers.
1265 */
1266 if (tmR3HasExpiredTimer(pVM, TMCLOCK_REAL))
1267 return true;
1268 if (tmR3HasExpiredTimer(pVM, TMCLOCK_TSC))
1269 return true;
1270 return false;
1271}
1272
1273
1274/**
1275 * Schedulation timer callback.
1276 *
1277 * @param pTimer Timer handle.
1278 * @param pvUser VM handle.
1279 * @thread Timer thread.
1280 *
1281 * @remark We cannot do the scheduling and queues running from a timer handler
1282 * since it's not executing in EMT, and even if it was it would be async
1283 * and we wouldn't know the state of the affairs.
1284 * So, we'll just raise the timer FF and force any REM execution to exit.
1285 */
1286static DECLCALLBACK(void) tmR3TimerCallback(PRTTIMER pTimer, void *pvUser)
1287{
1288 PVM pVM = (PVM)pvUser;
1289 AssertCompile(TMCLOCK_MAX == 4);
1290#ifdef DEBUG_Sander /* very annoying, keep it private. */
1291 if (VM_FF_ISSET(pVM, VM_FF_TIMER))
1292 Log(("tmR3TimerCallback: timer event still pending!!\n"));
1293#endif
1294 if ( !VM_FF_ISSET(pVM, VM_FF_TIMER)
1295 && ( pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC].offSchedule
1296 || pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL].offSchedule
1297 || pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL].offSchedule
1298 || pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC].offSchedule
1299 || tmR3AnyExpiredTimers(pVM)
1300 )
1301 && !VM_FF_ISSET(pVM, VM_FF_TIMER)
1302 )
1303 {
1304 VM_FF_SET(pVM, VM_FF_TIMER);
1305 REMR3NotifyTimerPending(pVM);
1306 VMR3NotifyFF(pVM, true);
1307 STAM_COUNTER_INC(&pVM->tm.s.StatTimerCallbackSetFF);
1308 }
1309}
1310
1311
1312/**
1313 * Schedules and runs any pending timers.
1314 *
1315 * This is normally called from a forced action handler in EMT.
1316 *
1317 * @param pVM The VM to run the timers for.
1318 */
1319TMR3DECL(void) TMR3TimerQueuesDo(PVM pVM)
1320{
1321 STAM_PROFILE_START(&pVM->tm.s.StatDoQueues, a);
1322 Log2(("TMR3TimerQueuesDo:\n"));
1323
1324 /*
1325 * Process the queues.
1326 */
1327 AssertCompile(TMCLOCK_MAX == 4);
1328
1329 /* TMCLOCK_VIRTUAL_SYNC */
1330 STAM_PROFILE_ADV_START(&pVM->tm.s.StatDoQueuesSchedule, s1);
1331 tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC]);
1332 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s1);
1333 STAM_PROFILE_ADV_START(&pVM->tm.s.StatDoQueuesRun, r1);
1334 tmR3TimerQueueRunVirtualSync(pVM);
1335 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r1);
1336
1337 /* TMCLOCK_VIRTUAL */
1338 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s1);
1339 tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL]);
1340 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s2);
1341 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r1);
1342 tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL]);
1343 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r2);
1344
1345#if 0 /** @todo if ever used, remove this and fix the stam prefixes on TMCLOCK_REAL below. */
1346 /* TMCLOCK_TSC */
1347 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s2);
1348 tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
1349 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesSchedule, s3);
1350 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r2);
1351 tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_TSC]);
1352 STAM_PROFILE_ADV_SUSPEND(&pVM->tm.s.StatDoQueuesRun, r3);
1353#endif
1354
1355 /* TMCLOCK_REAL */
1356 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesSchedule, s2);
1357 tmTimerQueueSchedule(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL]);
1358 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatDoQueuesSchedule, s3);
1359 STAM_PROFILE_ADV_RESUME(&pVM->tm.s.StatDoQueuesRun, r2);
1360 tmR3TimerQueueRun(pVM, &pVM->tm.s.paTimerQueuesR3[TMCLOCK_REAL]);
1361 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatDoQueuesRun, r3);
1362
1363 /* done. */
1364 VM_FF_CLEAR(pVM, VM_FF_TIMER);
1365
1366#ifdef VBOX_STRICT
1367 /* check that we didn't screwup. */
1368 tmTimerQueuesSanityChecks(pVM, "TMR3TimerQueuesDo");
1369#endif
1370
1371 Log2(("TMR3TimerQueuesDo: returns void\n"));
1372 STAM_PROFILE_STOP(&pVM->tm.s.StatDoQueues, a);
1373}
1374
1375
1376/**
1377 * Schedules and runs any pending times in the specified queue.
1378 *
1379 * This is normally called from a forced action handler in EMT.
1380 *
1381 * @param pVM The VM to run the timers for.
1382 * @param pQueue The queue to run.
1383 */
1384static void tmR3TimerQueueRun(PVM pVM, PTMTIMERQUEUE pQueue)
1385{
1386 VM_ASSERT_EMT(pVM);
1387
1388 /*
1389 * Run timers.
1390 *
1391 * We check the clock once and run all timers which are ACTIVE
1392 * and have an expire time less or equal to the time we read.
1393 *
1394 * N.B. A generic unlink must be applied since other threads
1395 * are allowed to mess with any active timer at any time.
1396 * However, we only allow EMT to handle EXPIRED_PENDING
1397 * timers, thus enabling the timer handler function to
1398 * arm the timer again.
1399 */
1400 PTMTIMER pNext = TMTIMER_GET_HEAD(pQueue);
1401 if (!pNext)
1402 return;
1403 const uint64_t u64Now = tmClock(pVM, pQueue->enmClock);
1404 while (pNext && pNext->u64Expire <= u64Now)
1405 {
1406 PTMTIMER pTimer = pNext;
1407 pNext = TMTIMER_GET_NEXT(pTimer);
1408 Log2(("tmR3TimerQueueRun: pTimer=%p:{.enmState=%s, .enmClock=%d, .enmType=%d, u64Expire=%llx (now=%llx) .pszDesc=%s}\n",
1409 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->pszDesc));
1410 bool fRc;
1411 TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED, TMTIMERSTATE_ACTIVE, fRc);
1412 if (fRc)
1413 {
1414 Assert(!pTimer->offScheduleNext); /* this can trigger falsely */
1415
1416 /* unlink */
1417 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1418 if (pPrev)
1419 TMTIMER_SET_NEXT(pPrev, pNext);
1420 else
1421 {
1422 TMTIMER_SET_HEAD(pQueue, pNext);
1423 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1424 }
1425 if (pNext)
1426 TMTIMER_SET_PREV(pNext, pPrev);
1427 pTimer->offNext = 0;
1428 pTimer->offPrev = 0;
1429
1430
1431 /* fire */
1432 switch (pTimer->enmType)
1433 {
1434 case TMTIMERTYPE_DEV: pTimer->u.Dev.pfnTimer(pTimer->u.Dev.pDevIns, pTimer); break;
1435 case TMTIMERTYPE_DRV: pTimer->u.Drv.pfnTimer(pTimer->u.Drv.pDrvIns, pTimer); break;
1436 case TMTIMERTYPE_INTERNAL: pTimer->u.Internal.pfnTimer(pVM, pTimer, pTimer->u.Internal.pvUser); break;
1437 case TMTIMERTYPE_EXTERNAL: pTimer->u.External.pfnTimer(pTimer->u.External.pvUser); break;
1438 default:
1439 AssertMsgFailed(("Invalid timer type %d (%s)\n", pTimer->enmType, pTimer->pszDesc));
1440 break;
1441 }
1442
1443 /* change the state if it wasn't changed already in the handler. */
1444 TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED, fRc);
1445 Log2(("tmR3TimerQueueRun: new state %s\n", tmTimerState(pTimer->enmState)));
1446 }
1447 } /* run loop */
1448}
1449
1450
1451/**
1452 * Schedules and runs any pending times in the timer queue for the
1453 * synchronous virtual clock.
1454 *
1455 * This scheduling is a bit different from the other queues as it need
1456 * to implement the special requirements of the timer synchronous virtual
1457 * clock, thus this 2nd queue run funcion.
1458 *
1459 * @param pVM The VM to run the timers for.
1460 */
1461static void tmR3TimerQueueRunVirtualSync(PVM pVM)
1462{
1463 PTMTIMERQUEUE const pQueue = &pVM->tm.s.paTimerQueuesR3[TMCLOCK_VIRTUAL_SYNC];
1464 VM_ASSERT_EMT(pVM);
1465
1466 /*
1467 * Any timers?
1468 */
1469 PTMTIMER pNext = TMTIMER_GET_HEAD(pQueue);
1470 if (RT_UNLIKELY(!pNext))
1471 {
1472 Assert(pVM->tm.s.fVirtualSyncTicking || !pVM->tm.s.fVirtualTicking);
1473 return;
1474 }
1475 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRun);
1476
1477 /*
1478 * Calculate the time frame for which we will dispatch timers.
1479 *
1480 * We use a time frame ranging from the current sync time (which is most likely the
1481 * same as the head timer) and some configurable period (100000ns) up towards the
1482 * current virtual time. This period might also need to be restricted by the catch-up
1483 * rate so frequent calls to this function won't accelerate the time too much, however
1484 * this will be implemented at a later point if neccessary.
1485 *
1486 * Without this frame we would 1) having to run timers much more frequently
1487 * and 2) lag behind at a steady rate.
1488 */
1489 const uint64_t u64VirtualNow = TMVirtualGetEx(pVM, false /* don't check timers */);
1490 uint64_t u64Now;
1491 if (!pVM->tm.s.fVirtualSyncTicking)
1492 {
1493 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStoppedAlready);
1494 u64Now = pVM->tm.s.u64VirtualSync;
1495 Assert(u64Now <= pNext->u64Expire);
1496 }
1497 else
1498 {
1499 /* Calc 'now'. (update order doesn't really matter here) */
1500 uint64_t off = pVM->tm.s.offVirtualSync;
1501 if (pVM->tm.s.fVirtualSyncCatchUp)
1502 {
1503 uint64_t u64Delta = u64VirtualNow - pVM->tm.s.u64VirtualSyncCatchUpPrev;
1504 if (RT_LIKELY(!(u64Delta >> 32)))
1505 {
1506 uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, pVM->tm.s.u32VirtualSyncCatchUpPercentage, 100);
1507 if (off > u64Sub + pVM->tm.s.offVirtualSyncGivenUp)
1508 {
1509 off -= u64Sub;
1510 Log4(("TM: %RU64/%RU64: sub %RU64 (run)\n", u64VirtualNow - off, off - pVM->tm.s.offVirtualSyncGivenUp, u64Sub));
1511 }
1512 else
1513 {
1514 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
1515 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
1516 off = pVM->tm.s.offVirtualSyncGivenUp;
1517 Log4(("TM: %RU64/0: caught up (run)\n", u64VirtualNow));
1518 }
1519 }
1520 ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, off);
1521 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow;
1522 }
1523 u64Now = u64VirtualNow - off;
1524
1525 /* Check if stopped by expired timer. */
1526 if (u64Now >= pNext->u64Expire)
1527 {
1528 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunStop);
1529 u64Now = pNext->u64Expire;
1530 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, u64Now);
1531 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, false);
1532 Log4(("TM: %RU64/%RU64: exp tmr (run)\n", u64Now, u64VirtualNow - u64Now - pVM->tm.s.offVirtualSyncGivenUp));
1533
1534 }
1535 }
1536
1537 /* calc end of frame. */
1538 uint64_t u64Max = u64Now + pVM->tm.s.u32VirtualSyncScheduleSlack;
1539 if (u64Max > u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp)
1540 u64Max = u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp;
1541
1542 /* assert sanity */
1543 Assert(u64Now <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp);
1544 Assert(u64Max <= u64VirtualNow - pVM->tm.s.offVirtualSyncGivenUp);
1545 Assert(u64Now <= u64Max);
1546
1547 /*
1548 * Process the expired timers moving the clock along as we progress.
1549 */
1550#ifdef VBOX_STRICT
1551 uint64_t u64Prev = u64Now; NOREF(u64Prev);
1552#endif
1553 while (pNext && pNext->u64Expire <= u64Max)
1554 {
1555 PTMTIMER pTimer = pNext;
1556 pNext = TMTIMER_GET_NEXT(pTimer);
1557 Log2(("tmR3TimerQueueRun: pTimer=%p:{.enmState=%s, .enmClock=%d, .enmType=%d, u64Expire=%llx (now=%llx) .pszDesc=%s}\n",
1558 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, pTimer->u64Expire, u64Now, pTimer->pszDesc));
1559 bool fRc;
1560 TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_EXPIRED, TMTIMERSTATE_ACTIVE, fRc);
1561 if (fRc)
1562 {
1563 /* unlink */
1564 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1565 if (pPrev)
1566 TMTIMER_SET_NEXT(pPrev, pNext);
1567 else
1568 {
1569 TMTIMER_SET_HEAD(pQueue, pNext);
1570 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1571 }
1572 if (pNext)
1573 TMTIMER_SET_PREV(pNext, pPrev);
1574 pTimer->offNext = 0;
1575 pTimer->offPrev = 0;
1576
1577 /* advance the clock - don't permit timers to be out of order or armed in the 'past'. */
1578#ifdef VBOX_STRICT
1579 AssertMsg(pTimer->u64Expire >= u64Prev, ("%RU64 < %RU64 %s\n", pTimer->u64Expire, u64Prev, pTimer->pszDesc));
1580 u64Prev = pTimer->u64Expire;
1581#endif
1582 ASMAtomicXchgSize(&pVM->tm.s.fVirtualSyncTicking, false);
1583 ASMAtomicXchgU64(&pVM->tm.s.u64VirtualSync, pTimer->u64Expire);
1584
1585 /* fire */
1586 switch (pTimer->enmType)
1587 {
1588 case TMTIMERTYPE_DEV: pTimer->u.Dev.pfnTimer(pTimer->u.Dev.pDevIns, pTimer); break;
1589 case TMTIMERTYPE_DRV: pTimer->u.Drv.pfnTimer(pTimer->u.Drv.pDrvIns, pTimer); break;
1590 case TMTIMERTYPE_INTERNAL: pTimer->u.Internal.pfnTimer(pVM, pTimer, pTimer->u.Internal.pvUser); break;
1591 case TMTIMERTYPE_EXTERNAL: pTimer->u.External.pfnTimer(pTimer->u.External.pvUser); break;
1592 default:
1593 AssertMsgFailed(("Invalid timer type %d (%s)\n", pTimer->enmType, pTimer->pszDesc));
1594 break;
1595 }
1596
1597 /* change the state if it wasn't changed already in the handler. */
1598 TM_TRY_SET_STATE(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_EXPIRED, fRc);
1599 Log2(("tmR3TimerQueueRun: new state %s\n", tmTimerState(pTimer->enmState)));
1600 }
1601 } /* run loop */
1602
1603 /*
1604 * Restart the clock if it was stopped to serve any timers,
1605 * and start/adjust catch-up if necessary.
1606 */
1607 if ( !pVM->tm.s.fVirtualSyncTicking
1608 && pVM->tm.s.fVirtualTicking)
1609 {
1610 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncRunRestart);
1611
1612 /* calc the slack we've handed out. */
1613 const uint64_t u64VirtualNow2 = TMVirtualGetEx(pVM, false /* don't check timers */);
1614 Assert(u64VirtualNow2 >= u64VirtualNow);
1615 AssertMsg(pVM->tm.s.u64VirtualSync >= u64Now, ("%RU64 < %RU64\n", pVM->tm.s.u64VirtualSync, u64Now));
1616 const uint64_t offSlack = pVM->tm.s.u64VirtualSync - u64Now;
1617 STAM_STATS({
1618 if (offSlack)
1619 {
1620 PSTAMPROFILE p = &pVM->tm.s.StatVirtualSyncRunSlack;
1621 p->cPeriods++;
1622 p->cTicks += offSlack;
1623 if (p->cTicksMax < offSlack) p->cTicksMax = offSlack;
1624 if (p->cTicksMin > offSlack) p->cTicksMin = offSlack;
1625 }
1626 });
1627
1628 /* Let the time run a little bit while we were busy running timers(?). */
1629 uint64_t u64Elapsed;
1630#define MAX_ELAPSED 30000 /* ns */
1631 if (offSlack > MAX_ELAPSED)
1632 u64Elapsed = 0;
1633 else
1634 {
1635 u64Elapsed = u64VirtualNow2 - u64VirtualNow;
1636 if (u64Elapsed > MAX_ELAPSED)
1637 u64Elapsed = MAX_ELAPSED;
1638 u64Elapsed = u64Elapsed > offSlack ? u64Elapsed - offSlack : 0;
1639 }
1640#undef MAX_ELAPSED
1641
1642 /* Calc the current offset. */
1643 uint64_t offNew = u64VirtualNow2 - pVM->tm.s.u64VirtualSync - u64Elapsed;
1644 Assert(!(offNew & RT_BIT_64(63)));
1645 uint64_t offLag = offNew - pVM->tm.s.offVirtualSyncGivenUp;
1646 Assert(!(offLag & RT_BIT_64(63)));
1647
1648 /*
1649 * Deal with starting, adjusting and stopping catchup.
1650 */
1651 if (pVM->tm.s.fVirtualSyncCatchUp)
1652 {
1653 if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpStopThreshold)
1654 {
1655 /* stop */
1656 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
1657 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
1658 Log4(("TM: %RU64/%RU64: caught up\n", u64VirtualNow2 - offNew, offLag));
1659 }
1660 else if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
1661 {
1662 /* adjust */
1663 unsigned i = 0;
1664 while ( i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods)
1665 && offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
1666 i++;
1667 if (pVM->tm.s.u32VirtualSyncCatchUpPercentage < pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage)
1668 {
1669 STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupAdjust[i]);
1670 ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
1671 Log4(("TM: %RU64/%RU64: adj %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
1672 }
1673 pVM->tm.s.u64VirtualSyncCatchUpPrev = u64VirtualNow2;
1674 }
1675 else
1676 {
1677 /* give up */
1678 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUp);
1679 STAM_PROFILE_ADV_STOP(&pVM->tm.s.StatVirtualSyncCatchup, c);
1680 ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
1681 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, false);
1682 Log4(("TM: %RU64/%RU64: give up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
1683 LogRel(("TM: Giving up catch-up attempt at a %RU64 ns lag; new total: %RU64 ns\n", offLag, offNew));
1684 }
1685 }
1686 else if (offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[0].u64Start)
1687 {
1688 if (offLag <= pVM->tm.s.u64VirtualSyncCatchUpGiveUpThreshold)
1689 {
1690 /* start */
1691 STAM_PROFILE_ADV_START(&pVM->tm.s.StatVirtualSyncCatchup, c);
1692 unsigned i = 0;
1693 while ( i + 1 < RT_ELEMENTS(pVM->tm.s.aVirtualSyncCatchUpPeriods)
1694 && offLag >= pVM->tm.s.aVirtualSyncCatchUpPeriods[i + 1].u64Start)
1695 i++;
1696 STAM_COUNTER_INC(&pVM->tm.s.aStatVirtualSyncCatchupInitial[i]);
1697 ASMAtomicXchgU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage, pVM->tm.s.aVirtualSyncCatchUpPeriods[i].u32Percentage);
1698 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncCatchUp, true);
1699 Log4(("TM: %RU64/%RU64: catch-up %u%%\n", u64VirtualNow2 - offNew, offLag, pVM->tm.s.u32VirtualSyncCatchUpPercentage));
1700 }
1701 else
1702 {
1703 /* don't bother */
1704 STAM_COUNTER_INC(&pVM->tm.s.StatVirtualSyncGiveUpBeforeStarting);
1705 ASMAtomicXchgU64((uint64_t volatile *)&pVM->tm.s.offVirtualSyncGivenUp, offNew);
1706 Log4(("TM: %RU64/%RU64: give up\n", u64VirtualNow2 - offNew, offLag));
1707 LogRel(("TM: Not bothering to attempt catching up a %RU64 ns lag; new total: %RU64\n", offLag, offNew));
1708 }
1709 }
1710
1711 /*
1712 * Update the offset and restart the clock.
1713 */
1714 Assert(!(offNew & RT_BIT_64(63)));
1715 ASMAtomicXchgU64(&pVM->tm.s.offVirtualSync, offNew);
1716 ASMAtomicXchgBool(&pVM->tm.s.fVirtualSyncTicking, true);
1717 }
1718}
1719
1720
/**
 * Saves the state of a timer to a saved state.
 *
 * Only two things are persisted: whether the timer is armed, and (if armed)
 * its expiration time. All pending states are collapsed into either
 * TMTIMERSTATE_PENDING_STOP or TMTIMERSTATE_PENDING_SCHEDULE so the load
 * side only has to deal with those two.
 *
 * @returns VBox status.
 * @param pTimer Timer to save.
 * @param pSSM Save State Manager handle.
 */
TMR3DECL(int) TMR3TimerSave(PTMTIMERR3 pTimer, PSSMHANDLE pSSM)
{
    LogFlow(("TMR3TimerSave: pTimer=%p:{enmState=%s, .pszDesc={%s}} pSSM=%p\n", pTimer, tmTimerState(pTimer->enmState), pTimer->pszDesc, pSSM));
    switch (pTimer->enmState)
    {
        /* Stopped or about to be stopped: persist just the stop. */
        case TMTIMERSTATE_STOPPED:
        case TMTIMERSTATE_PENDING_STOP:
        case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            return SSMR3PutU8(pSSM, (uint8_t)TMTIMERSTATE_PENDING_STOP);

        /* Another thread is updating u64Expire right now - this shouldn't
           happen during a save. Complain, give the writer a chance to finish,
           then fall through and save whatever expire value we read. */
        case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
        case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            AssertMsgFailed(("u64Expire is being updated! (%s)\n", pTimer->pszDesc));
            if (!RTThreadYield())
                RTThreadSleep(1);
            /* fall thru */
        /* Armed (or about to be armed): persist the schedule request
           together with the expiration time. */
        case TMTIMERSTATE_ACTIVE:
        case TMTIMERSTATE_PENDING_SCHEDULE:
        case TMTIMERSTATE_PENDING_RESCHEDULE:
            SSMR3PutU8(pSSM, (uint8_t)TMTIMERSTATE_PENDING_SCHEDULE);
            return SSMR3PutU64(pSSM, pTimer->u64Expire);

        /* Transient/destroyed states that must never be seen at save time;
           flag the saved state as broken. */
        case TMTIMERSTATE_EXPIRED:
        case TMTIMERSTATE_PENDING_DESTROY:
        case TMTIMERSTATE_PENDING_STOP_DESTROY:
        case TMTIMERSTATE_FREE:
            AssertMsgFailed(("Invalid timer state %d %s (%s)\n", pTimer->enmState, tmTimerState(pTimer->enmState), pTimer->pszDesc));
            return SSMR3HandleSetStatus(pSSM, VERR_TM_INVALID_STATE);
    }

    /* Value outside the known enum range - corrupt state. */
    AssertMsgFailed(("Unknown timer state %d (%s)\n", pTimer->enmState, pTimer->pszDesc));
    return SSMR3HandleSetStatus(pSSM, VERR_TM_UNKNOWN_STATE);
}
1761
1762
1763/**
1764 * Loads the state of a timer from a saved state.
1765 *
1766 * @returns VBox status.
1767 * @param pTimer Timer to restore.
1768 * @param pSSM Save State Manager handle.
1769 */
1770TMR3DECL(int) TMR3TimerLoad(PTMTIMERR3 pTimer, PSSMHANDLE pSSM)
1771{
1772 Assert(pTimer); Assert(pSSM); VM_ASSERT_EMT(pTimer->pVMR3);
1773 LogFlow(("TMR3TimerLoad: pTimer=%p:{enmState=%s, .pszDesc={%s}} pSSM=%p\n", pTimer, tmTimerState(pTimer->enmState), pTimer->pszDesc, pSSM));
1774
1775 /*
1776 * Load the state and validate it.
1777 */
1778 uint8_t u8State;
1779 int rc = SSMR3GetU8(pSSM, &u8State);
1780 if (VBOX_FAILURE(rc))
1781 return rc;
1782 TMTIMERSTATE enmState = (TMTIMERSTATE)u8State;
1783 if ( enmState != TMTIMERSTATE_PENDING_STOP
1784 && enmState != TMTIMERSTATE_PENDING_SCHEDULE
1785 && enmState != TMTIMERSTATE_PENDING_STOP_SCHEDULE)
1786 {
1787 AssertMsgFailed(("enmState=%d %s\n", enmState, tmTimerState(enmState)));
1788 return SSMR3HandleSetStatus(pSSM, VERR_TM_LOAD_STATE);
1789 }
1790
1791 if (enmState == TMTIMERSTATE_PENDING_SCHEDULE)
1792 {
1793 /*
1794 * Load the expire time.
1795 */
1796 uint64_t u64Expire;
1797 rc = SSMR3GetU64(pSSM, &u64Expire);
1798 if (VBOX_FAILURE(rc))
1799 return rc;
1800
1801 /*
1802 * Set it.
1803 */
1804 Log(("enmState=%d %s u64Expire=%llu\n", enmState, tmTimerState(enmState), u64Expire));
1805 rc = TMTimerSet(pTimer, u64Expire);
1806 }
1807 else
1808 {
1809 /*
1810 * Stop it.
1811 */
1812 Log(("enmState=%d %s\n", enmState, tmTimerState(enmState)));
1813 rc = TMTimerStop(pTimer);
1814 }
1815
1816 /*
1817 * On failure set SSM status.
1818 */
1819 if (VBOX_FAILURE(rc))
1820 rc = SSMR3HandleSetStatus(pSSM, rc);
1821 return rc;
1822}
1823
1824
1825/**
1826 * Get the real world UTC time adjusted for VM lag.
1827 *
1828 * @returns pTime.
1829 * @param pVM The VM instance.
1830 * @param pTime Where to store the time.
1831 */
1832TMR3DECL(PRTTIMESPEC) TMR3UTCNow(PVM pVM, PRTTIMESPEC pTime)
1833{
1834 RTTimeNow(pTime);
1835 RTTimeSpecSubNano(pTime, pVM->tm.s.offVirtualSync - pVM->tm.s.offVirtualSyncGivenUp);
1836 RTTimeSpecAddNano(pTime, pVM->tm.s.offUTC);
1837 return pTime;
1838}
1839
1840
1841/**
1842 * Display all timers.
1843 *
1844 * @param pVM VM Handle.
1845 * @param pHlp The info helpers.
1846 * @param pszArgs Arguments, ignored.
1847 */
1848static DECLCALLBACK(void) tmR3TimerInfo(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1849{
1850 NOREF(pszArgs);
1851 pHlp->pfnPrintf(pHlp,
1852 "Timers (pVM=%p)\n"
1853 "%.*s %.*s %.*s %.*s Clock %-18s %-18s %-25s Description\n",
1854 pVM,
1855 sizeof(RTR3PTR) * 2, "pTimerR3 ",
1856 sizeof(int32_t) * 2, "offNext ",
1857 sizeof(int32_t) * 2, "offPrev ",
1858 sizeof(int32_t) * 2, "offSched ",
1859 "Time",
1860 "Expire",
1861 "State");
1862 for (PTMTIMERR3 pTimer = pVM->tm.s.pCreated; pTimer; pTimer = pTimer->pBigNext)
1863 {
1864 pHlp->pfnPrintf(pHlp,
1865 "%p %08RX32 %08RX32 %08RX32 %s %18RU64 %18RU64 %-25s %s\n",
1866 pTimer,
1867 pTimer->offNext,
1868 pTimer->offPrev,
1869 pTimer->offScheduleNext,
1870 pTimer->enmClock == TMCLOCK_REAL ? "Real " : "Virt ",
1871 TMTimerGet(pTimer),
1872 pTimer->u64Expire,
1873 tmTimerState(pTimer->enmState),
1874 pTimer->pszDesc);
1875 }
1876}
1877
1878
1879/**
1880 * Display all active timers.
1881 *
1882 * @param pVM VM Handle.
1883 * @param pHlp The info helpers.
1884 * @param pszArgs Arguments, ignored.
1885 */
1886static DECLCALLBACK(void) tmR3TimerInfoActive(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1887{
1888 NOREF(pszArgs);
1889 pHlp->pfnPrintf(pHlp,
1890 "Active Timers (pVM=%p)\n"
1891 "%.*s %.*s %.*s %.*s Clock %-18s %-18s %-25s Description\n",
1892 pVM,
1893 sizeof(RTR3PTR) * 2, "pTimerR3 ",
1894 sizeof(int32_t) * 2, "offNext ",
1895 sizeof(int32_t) * 2, "offPrev ",
1896 sizeof(int32_t) * 2, "offSched ",
1897 "Time",
1898 "Expire",
1899 "State");
1900 for (unsigned iQueue = 0; iQueue < TMCLOCK_MAX; iQueue++)
1901 {
1902 for (PTMTIMERR3 pTimer = TMTIMER_GET_HEAD(&pVM->tm.s.paTimerQueuesR3[iQueue]);
1903 pTimer;
1904 pTimer = TMTIMER_GET_NEXT(pTimer))
1905 {
1906 pHlp->pfnPrintf(pHlp,
1907 "%p %08RX32 %08RX32 %08RX32 %s %18RU64 %18RU64 %-25s %s\n",
1908 pTimer,
1909 pTimer->offNext,
1910 pTimer->offPrev,
1911 pTimer->offScheduleNext,
1912 pTimer->enmClock == TMCLOCK_REAL
1913 ? "Real "
1914 : pTimer->enmClock == TMCLOCK_VIRTUAL
1915 ? "Virt "
1916 : pTimer->enmClock == TMCLOCK_VIRTUAL_SYNC
1917 ? "VrSy "
1918 : "TSC ",
1919 TMTimerGet(pTimer),
1920 pTimer->u64Expire,
1921 tmTimerState(pTimer->enmState),
1922 pTimer->pszDesc);
1923 }
1924 }
1925}
1926
1927
1928/**
1929 * Display all clocks.
1930 *
1931 * @param pVM VM Handle.
1932 * @param pHlp The info helpers.
1933 * @param pszArgs Arguments, ignored.
1934 */
1935static DECLCALLBACK(void) tmR3InfoClocks(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs)
1936{
1937 NOREF(pszArgs);
1938
1939 /*
1940 * Read the times first to avoid more than necessary time variation.
1941 */
1942 const uint64_t u64TSC = TMCpuTickGet(pVM);
1943 const uint64_t u64Virtual = TMVirtualGet(pVM);
1944 const uint64_t u64VirtualSync = TMVirtualSyncGet(pVM);
1945 const uint64_t u64Real = TMRealGet(pVM);
1946
1947 /*
1948 * TSC
1949 */
1950 pHlp->pfnPrintf(pHlp,
1951 "Cpu Tick: %18RU64 (%#016RX64) %RU64Hz %s%s",
1952 u64TSC, u64TSC, TMCpuTicksPerSecond(pVM),
1953 pVM->tm.s.fTSCTicking ? "ticking" : "paused",
1954 pVM->tm.s.fTSCVirtualized ? " - virtualized" : "");
1955 if (pVM->tm.s.fTSCUseRealTSC)
1956 {
1957 pHlp->pfnPrintf(pHlp, " - real tsc");
1958 if (pVM->tm.s.u64TSCOffset)
1959 pHlp->pfnPrintf(pHlp, "\n offset %RU64", pVM->tm.s.u64TSCOffset);
1960 }
1961 else
1962 pHlp->pfnPrintf(pHlp, " - virtual clock");
1963 pHlp->pfnPrintf(pHlp, "\n");
1964
1965 /*
1966 * virtual
1967 */
1968 pHlp->pfnPrintf(pHlp,
1969 " Virtual: %18RU64 (%#016RX64) %RU64Hz %s",
1970 u64Virtual, u64Virtual, TMVirtualGetFreq(pVM),
1971 pVM->tm.s.fVirtualTicking ? "ticking" : "paused");
1972 if (pVM->tm.s.fVirtualWarpDrive)
1973 pHlp->pfnPrintf(pHlp, " WarpDrive %RU32 %%", pVM->tm.s.u32VirtualWarpDrivePercentage);
1974 pHlp->pfnPrintf(pHlp, "\n");
1975
1976 /*
1977 * virtual sync
1978 */
1979 pHlp->pfnPrintf(pHlp,
1980 "VirtSync: %18RU64 (%#016RX64) %s%s",
1981 u64VirtualSync, u64VirtualSync,
1982 pVM->tm.s.fVirtualSyncTicking ? "ticking" : "paused",
1983 pVM->tm.s.fVirtualSyncCatchUp ? " - catchup" : "");
1984 if (pVM->tm.s.offVirtualSync)
1985 {
1986 pHlp->pfnPrintf(pHlp, "\n offset %RU64", pVM->tm.s.offVirtualSync);
1987 if (pVM->tm.s.u32VirtualSyncCatchUpPercentage)
1988 pHlp->pfnPrintf(pHlp, " catch-up rate %u %%", pVM->tm.s.u32VirtualSyncCatchUpPercentage);
1989 }
1990 pHlp->pfnPrintf(pHlp, "\n");
1991
1992 /*
1993 * real
1994 */
1995 pHlp->pfnPrintf(pHlp,
1996 " Real: %18RU64 (%#016RX64) %RU64Hz\n",
1997 u64Real, u64Real, TMRealGetFreq(pVM));
1998}
1999
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette