VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/TMAll.cpp@ 30581

Last change on this file since 30581 was 30581, checked in by vboxsync, 15 years ago

TM: Added simple CPU time accounting. Accessible thru the statistics and TMR3GetCpuLoadTimes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 75.1 KB
Line 
1/* $Id: TMAll.cpp 30581 2010-07-02 16:02:57Z vboxsync $ */
2/** @file
3 * TM - Timeout Manager, all contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_TM
23#include <VBox/tm.h>
24#include <VBox/mm.h>
25#ifdef IN_RING3
26# include <VBox/rem.h>
27#endif
28#include "TMInternal.h"
29#include <VBox/vm.h>
30
31#include <VBox/param.h>
32#include <VBox/err.h>
33#include <VBox/log.h>
34#include <VBox/sup.h>
35#include <iprt/time.h>
36#include <iprt/assert.h>
37#include <iprt/asm.h>
38#include <iprt/asm-math.h>
39#ifdef IN_RING3
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Defined Constants And Macros *
46*******************************************************************************/
/** @def TMTIMER_ASSERT_CRITSECT
 * Checks that the caller owns the critical section if one is associated with
 * the timer.
 * Only active in strict builds; expands to a no-op statement otherwise. */
#ifdef VBOX_STRICT
# define TMTIMER_ASSERT_CRITSECT(pTimer) \
    do { \
        if ((pTimer)->pCritSect) \
        { \
            /* The stored pointer is an R3 address; translate it for the current context. */ \
            PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC((pTimer)->CTX_SUFF(pVM), (pTimer)->pCritSect); \
            AssertMsg(pCritSect && PDMCritSectIsOwner(pCritSect), \
                      ("pTimer=%p (%s) pCritSect=%p\n", pTimer, R3STRING(pTimer->pszDesc), (pTimer)->pCritSect)); \
        } \
    } while (0)
#else
# define TMTIMER_ASSERT_CRITSECT(pTimer) do { } while (0)
#endif
63
64
65#ifndef tmTimerLock
66
67/**
68 * Try take the timer lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
69 *
70 * @retval VINF_SUCCESS on success (always in ring-3).
71 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
72 *
73 * @param pVM The VM handle.
74 *
75 * @thread EMTs for the time being.
76 */
77int tmTimerLock(PVM pVM)
78{
79 VM_ASSERT_EMT(pVM);
80 int rc = PDMCritSectEnter(&pVM->tm.s.TimerCritSect, VERR_SEM_BUSY);
81 return rc;
82}
83
84
85/**
86 * Try take the timer lock, no waiting.
87 *
88 * @retval VINF_SUCCESS on success.
89 * @retval VERR_SEM_BUSY if busy.
90 *
91 * @param pVM The VM handle.
92 */
93int tmTimerTryLock(PVM pVM)
94{
95 int rc = PDMCritSectTryEnter(&pVM->tm.s.TimerCritSect);
96 return rc;
97}
98
99
100/**
101 * Release the EMT/TM lock.
102 *
103 * @param pVM The VM handle.
104 */
105void tmTimerUnlock(PVM pVM)
106{
107 PDMCritSectLeave(&pVM->tm.s.TimerCritSect);
108}
109
110
111/**
112 * Try take the VirtualSync lock, wait in ring-3 return VERR_SEM_BUSY in R0/RC.
113 *
114 * @retval VINF_SUCCESS on success (always in ring-3).
115 * @retval VERR_SEM_BUSY in RC and R0 if the semaphore is busy.
116 *
117 * @param pVM The VM handle.
118 */
119int tmVirtualSyncLock(PVM pVM)
120{
121 VM_ASSERT_EMT(pVM);
122 int rc = PDMCritSectEnter(&pVM->tm.s.VirtualSyncLock, VERR_SEM_BUSY);
123 return rc;
124}
125
126
127/**
128 * Try take the VirtualSync lock, no waiting.
129 *
130 * @retval VINF_SUCCESS on success.
131 * @retval VERR_SEM_BUSY if busy.
132 *
133 * @param pVM The VM handle.
134 */
135int tmVirtualSyncTryLock(PVM pVM)
136{
137 VM_ASSERT_EMT(pVM);
138 int rc = PDMCritSectTryEnter(&pVM->tm.s.VirtualSyncLock);
139 return rc;
140}
141
142
143/**
144 * Release the VirtualSync lock.
145 *
146 * @param pVM The VM handle.
147 */
148void tmVirtualSyncUnlock(PVM pVM)
149{
150 PDMCritSectLeave(&pVM->tm.s.VirtualSyncLock);
151}
152
153#endif /* ! macros */
154
155/**
156 * Notification that execution is about to start.
157 *
158 * This call must always be paired with a TMNotifyEndOfExecution call.
159 *
160 * The function may, depending on the configuration, resume the TSC and future
161 * clocks that only ticks when we're executing guest code.
162 *
163 * @param pVCpu The VMCPU to operate on.
164 */
165VMMDECL(void) TMNotifyStartOfExecution(PVMCPU pVCpu)
166{
167 PVM pVM = pVCpu->CTX_SUFF(pVM);
168
169#ifndef VBOX_WITHOUT_NS_ACCOUNTING
170 pVCpu->tm.s.u64NsTsStartExecuting = RTTimeNanoTS();
171#endif
172 if (pVM->tm.s.fTSCTiedToExecution)
173 tmCpuTickResume(pVM, pVCpu);
174}
175
176
/**
 * Notification that execution has come to an end.
 *
 * This call must always be paired with a TMNotifyStartOfExecution call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're executing guest code.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMMDECL(void) TMNotifyEndOfExecution(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (pVM->tm.s.fTSCTiedToExecution)
        tmCpuTickPause(pVM, pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Seqlock-style update: bump the generation to an odd value while the
       cNs* fields below are in flux, then back to even when done. */
    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    uint64_t u64NsTs = RTTimeNanoTS();
    pVCpu->tm.s.cNsExecuting += u64NsTs - pVCpu->tm.s.u64NsTsStartExecuting;
    pVCpu->tm.s.cNsTotal = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    /* 'Other' is whatever of the total wasn't spent executing or halted. */
    pVCpu->tm.s.cNsOther = pVCpu->tm.s.cNsTotal - pVCpu->tm.s.cNsExecuting - pVCpu->tm.s.cNsHalted;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}
203
204
/**
 * Notification that the cpu is entering the halt state
 *
 * This call must always be paired with a TMNotifyEndOfHalt call.
 *
 * The function may, depending on the configuration, resume the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyStartOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Stamp the start of the halt interval for CPU time accounting. */
    pVCpu->tm.s.u64NsTsStartHalting = RTTimeNanoTS();
#endif

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickResume(pVM, pVCpu);
}
227
228
/**
 * Notification that the cpu is leaving the halt state
 *
 * This call must always be paired with a TMNotifyStartOfHalt call.
 *
 * The function may, depending on the configuration, suspend the TSC and future
 * clocks that only ticks when we're halted.
 *
 * @param   pVCpu       The VMCPU to operate on.
 */
VMM_INT_DECL(void) TMNotifyEndOfHalt(PVMCPU pVCpu)
{
    PVM pVM = pVCpu->CTX_SUFF(pVM);

    if (    pVM->tm.s.fTSCTiedToExecution
        &&  !pVM->tm.s.fTSCNotTiedToHalt)
        tmCpuTickPause(pVM, pVCpu);

#ifndef VBOX_WITHOUT_NS_ACCOUNTING
    /* Seqlock-style update: bump the generation to an odd value while the
       cNs* fields below are in flux, then back to even when done. */
    uint32_t uGen = ASMAtomicIncU32(&pVCpu->tm.s.uTimesGen); Assert(uGen & 1);
    uint64_t u64NsTs = RTTimeNanoTS();
    pVCpu->tm.s.cNsHalted += u64NsTs - pVCpu->tm.s.u64NsTsStartHalting;
    pVCpu->tm.s.cNsTotal = u64NsTs - pVCpu->tm.s.u64NsTsStartTotal;
    /* 'Other' is whatever of the total wasn't spent executing or halted. */
    pVCpu->tm.s.cNsOther = pVCpu->tm.s.cNsTotal - pVCpu->tm.s.cNsExecuting - pVCpu->tm.s.cNsHalted;
    ASMAtomicWriteU32(&pVCpu->tm.s.uTimesGen, (uGen | 1) + 1);
#endif
}
256
257
258/**
259 * Raise the timer force action flag and notify the dedicated timer EMT.
260 *
261 * @param pVM The VM handle.
262 */
263DECLINLINE(void) tmScheduleNotify(PVM pVM)
264{
265 PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
266 if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
267 {
268 Log5(("TMAll(%u): FF: 0 -> 1\n", __LINE__));
269 VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
270#ifdef IN_RING3
271 REMR3NotifyTimerPending(pVM, pVCpuDst);
272 VMR3NotifyCpuFFU(pVCpuDst->pUVCpu, VMNOTIFYFF_FLAGS_DONE_REM);
273#endif
274 STAM_COUNTER_INC(&pVM->tm.s.StatScheduleSetFF);
275 }
276}
277
278
279/**
280 * Schedule the queue which was changed.
281 */
282DECLINLINE(void) tmSchedule(PTMTIMER pTimer)
283{
284 PVM pVM = pTimer->CTX_SUFF(pVM);
285 if ( VM_IS_EMT(pVM)
286 && RT_SUCCESS(tmTimerTryLock(pVM)))
287 {
288 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
289 Log3(("tmSchedule: tmTimerQueueSchedule\n"));
290 tmTimerQueueSchedule(pVM, &pVM->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock]);
291#ifdef VBOX_STRICT
292 tmTimerQueuesSanityChecks(pVM, "tmSchedule");
293#endif
294 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatScheduleOne), a);
295 tmTimerUnlock(pVM);
296 }
297 else
298 {
299 TMTIMERSTATE enmState = pTimer->enmState;
300 if (TMTIMERSTATE_IS_PENDING_SCHEDULING(enmState))
301 tmScheduleNotify(pVM);
302 }
303}
304
305
306/**
307 * Try change the state to enmStateNew from enmStateOld
308 * and link the timer into the scheduling queue.
309 *
310 * @returns Success indicator.
311 * @param pTimer Timer in question.
312 * @param enmStateNew The new timer state.
313 * @param enmStateOld The old timer state.
314 */
315DECLINLINE(bool) tmTimerTry(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
316{
317 /*
318 * Attempt state change.
319 */
320 bool fRc;
321 TM_TRY_SET_STATE(pTimer, enmStateNew, enmStateOld, fRc);
322 return fRc;
323}
324
325
326/**
327 * Links the timer onto the scheduling queue.
328 *
329 * @param pQueue The timer queue the timer belongs to.
330 * @param pTimer The timer.
331 *
332 * @todo FIXME: Look into potential race with the thread running the queues
333 * and stuff.
334 */
335DECLINLINE(void) tmTimerLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
336{
337 Assert(!pTimer->offScheduleNext);
338 const int32_t offHeadNew = (intptr_t)pTimer - (intptr_t)pQueue;
339 int32_t offHead;
340 do
341 {
342 offHead = pQueue->offSchedule;
343 if (offHead)
344 pTimer->offScheduleNext = ((intptr_t)pQueue + offHead) - (intptr_t)pTimer;
345 else
346 pTimer->offScheduleNext = 0;
347 } while (!ASMAtomicCmpXchgS32(&pQueue->offSchedule, offHeadNew, offHead));
348}
349
350
351/**
352 * Try change the state to enmStateNew from enmStateOld
353 * and link the timer into the scheduling queue.
354 *
355 * @returns Success indicator.
356 * @param pTimer Timer in question.
357 * @param enmStateNew The new timer state.
358 * @param enmStateOld The old timer state.
359 */
360DECLINLINE(bool) tmTimerTryWithLink(PTMTIMER pTimer, TMTIMERSTATE enmStateNew, TMTIMERSTATE enmStateOld)
361{
362 if (tmTimerTry(pTimer, enmStateNew, enmStateOld))
363 {
364 tmTimerLink(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF(paTimerQueues)[pTimer->enmClock], pTimer);
365 return true;
366 }
367 return false;
368}
369
370
371#ifdef VBOX_HIGH_RES_TIMERS_HACK
372
373/**
374 * Worker for tmTimerPollInternal that handles misses when the decidate timer
375 * EMT is polling.
376 *
377 * @returns See tmTimerPollInternal.
378 * @param pVM Pointer to the shared VM structure.
379 * @param u64Now Current virtual clock timestamp.
380 * @param u64Delta The delta to the next even in ticks of the
381 * virtual clock.
382 * @param pu64Delta Where to return the delta.
383 * @param pCounter The statistics counter to update.
384 */
385DECLINLINE(uint64_t) tmTimerPollReturnMiss(PVM pVM, uint64_t u64Now, uint64_t u64Delta, uint64_t *pu64Delta)
386{
387 Assert(!(u64Delta & RT_BIT_64(63)));
388
389 if (!pVM->tm.s.fVirtualWarpDrive)
390 {
391 *pu64Delta = u64Delta;
392 return u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
393 }
394
395 /*
396 * Warp drive adjustments - this is the reverse of what tmVirtualGetRaw is doing.
397 */
398 uint64_t const u64Start = pVM->tm.s.u64VirtualWarpDriveStart;
399 uint32_t const u32Pct = pVM->tm.s.u32VirtualWarpDrivePercentage;
400
401 uint64_t u64GipTime = u64Delta + u64Now + pVM->tm.s.u64VirtualOffset;
402 u64GipTime -= u64Start; /* the start is GIP time. */
403 if (u64GipTime >= u64Delta)
404 {
405 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
406 ASMMultU64ByU32DivByU32(u64Delta, 100, u32Pct);
407 }
408 else
409 {
410 u64Delta -= u64GipTime;
411 ASMMultU64ByU32DivByU32(u64GipTime, 100, u32Pct);
412 u64Delta += u64GipTime;
413 }
414 *pu64Delta = u64Delta;
415 u64GipTime += u64Start;
416 return u64GipTime;
417}
418
419
420/**
421 * Worker for tmTimerPollInternal dealing with returns on virtual CPUs other
422 * than the one dedicated to timer work.
423 *
424 * @returns See tmTimerPollInternal.
425 * @param pVM Pointer to the shared VM structure.
426 * @param u64Now Current virtual clock timestamp.
427 * @param pu64Delta Where to return the delta.
428 */
429DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnOtherCpu(PVM pVM, uint64_t u64Now, uint64_t *pu64Delta)
430{
431 static const uint64_t s_u64OtherRet = 500000000; /* 500 ms for non-timer EMTs. */
432 *pu64Delta = s_u64OtherRet;
433 return u64Now + pVM->tm.s.u64VirtualOffset + s_u64OtherRet;
434}
435
436
437/**
438 * Worker for tmTimerPollInternal.
439 *
440 * @returns See tmTimerPollInternal.
441 * @param pVM Pointer to the shared VM structure.
442 * @param pVCpu Pointer to the shared VMCPU structure of the
443 * caller.
444 * @param pVCpuDst Pointer to the shared VMCPU structure of the
445 * dedicated timer EMT.
446 * @param u64Now Current virtual clock timestamp.
447 * @param pu64Delta Where to return the delta.
448 * @param pCounter The statistics counter to update.
449 */
450DECL_FORCE_INLINE(uint64_t) tmTimerPollReturnHit(PVM pVM, PVMCPU pVCpu, PVMCPU pVCpuDst, uint64_t u64Now,
451 uint64_t *pu64Delta, PSTAMCOUNTER pCounter)
452{
453 STAM_COUNTER_INC(pCounter);
454 if (pVCpuDst != pVCpu)
455 return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
456 *pu64Delta = 0;
457 return 0;
458}
459
/**
 * Common worker for TMTimerPollGIP and TMTimerPoll.
 *
 * This function is called before FFs are checked in the inner execution EM loops.
 *
 * @returns The GIP timestamp of the next event.
 *          0 if the next event has already expired.
 *
 * @param   pVM         Pointer to the shared VM structure.
 * @param   pVCpu       Pointer to the shared VMCPU structure of the caller.
 * @param   pu64Delta   Where to store the delta.
 *
 * @thread  The emulation thread.
 *
 * @remarks GIP uses ns ticks.
 */
DECL_FORCE_INLINE(uint64_t) tmTimerPollInternal(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
{
    PVMCPU pVCpuDst = &pVM->aCpus[pVM->tm.s.idTimerCpu];
    const uint64_t u64Now = TMVirtualGetNoCheck(pVM);
    STAM_COUNTER_INC(&pVM->tm.s.StatPoll);

    /*
     * Return straight away if the timer FF is already set ...
     */
    if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);

    /*
     * ... or if timers are being run.
     */
    if (ASMAtomicReadBool(&pVM->tm.s.fRunningQueues))
    {
        STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
        return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
    }

    /*
     * Check for TMCLOCK_VIRTUAL expiration.
     */
    const uint64_t u64Expire1 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL].u64Expire);
    const int64_t i64Delta1 = u64Expire1 - u64Now;
    if (i64Delta1 <= 0)
    {
        if (!VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        LogFlow(("TMTimerPoll: expire1=%'RU64 <= now=%'RU64\n", u64Expire1, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtual);
    }

    /*
     * Check for TMCLOCK_VIRTUAL_SYNC expiration.
     * This isn't quite as straightforward if in a catch-up: not only do
     * we have to adjust the 'now' but we have to adjust the delta as well.
     */

    /*
     * Optimistic lockless approach.
     */
    uint64_t u64VirtualSyncNow;
    uint64_t u64Expire2 = ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
    if (ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
    {
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
        {
            u64VirtualSyncNow = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
            /* Re-read all inputs; only trust the snapshot if nothing changed
               while we were reading it. */
            if (RT_LIKELY(   ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                          && !ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                          && u64VirtualSyncNow == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                          && u64Expire2 == ASMAtomicUoReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)))
            {
                u64VirtualSyncNow = u64Now - u64VirtualSyncNow;
                int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
                if (i64Delta2 > 0)
                {
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);

                    if (pVCpu == pVCpuDst)
                        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
                    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
                }

                if (    !pVM->tm.s.fRunningQueues
                    &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
                {
                    Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
                    VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
                    REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
                }

                STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
                LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
                return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
            }
        }
    }
    else
    {
        /* Virtual sync clock is stopped; treat as a hit. */
        STAM_COUNTER_INC(&pVM->tm.s.StatPollSimple);
        LogFlow(("TMTimerPoll: stopped\n"));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Complicated lockless approach.
     */
    uint64_t off;
    uint32_t u32Pct = 0;
    bool fCatchUp;
    int cOuterTries = 42;
    for (;; cOuterTries--)
    {
        fCatchUp = ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp);
        off = ASMAtomicReadU64(&pVM->tm.s.offVirtualSync);
        u64Expire2 = ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire);
        if (fCatchUp)
        {
            /* No changes allowed, try get a consistent set of parameters. */
            uint64_t const u64Prev = ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev);
            uint64_t const offGivenUp = ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp);
            u32Pct = ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage);
            if (    (   u64Prev == ASMAtomicReadU64(&pVM->tm.s.u64VirtualSyncCatchUpPrev)
                     && offGivenUp == ASMAtomicReadU64(&pVM->tm.s.offVirtualSyncGivenUp)
                     && u32Pct == ASMAtomicReadU32(&pVM->tm.s.u32VirtualSyncCatchUpPercentage)
                     && off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                     && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                     && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
                ||  cOuterTries <= 0)
            {
                uint64_t u64Delta = u64Now - u64Prev;
                if (RT_LIKELY(!(u64Delta >> 32)))
                {
                    uint64_t u64Sub = ASMMultU64ByU32DivByU32(u64Delta, u32Pct, 100);
                    if (off > u64Sub + offGivenUp)
                        off -= u64Sub;
                    else /* we've completely caught up. */
                        off = offGivenUp;
                }
                else
                    /* More than 4 seconds since last time (or negative), ignore it. */
                    Log(("TMVirtualGetSync: u64Delta=%RX64 (NoLock)\n", u64Delta));

                /* Check that we're still running and in catch up. */
                if (    ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking)
                    &&  ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp))
                    break;
            }
        }
        else if (   off == ASMAtomicReadU64(&pVM->tm.s.offVirtualSync)
                 && u64Expire2 == ASMAtomicReadU64(&pVM->tm.s.CTX_SUFF(paTimerQueues)[TMCLOCK_VIRTUAL_SYNC].u64Expire)
                 && !ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncCatchUp)
                 && ASMAtomicReadBool(&pVM->tm.s.fVirtualSyncTicking))
            break; /* Got a consistent offset */

        /* Repeat the initial checks before iterating. */
        if (VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollAlreadySet);
        if (ASMAtomicUoReadBool(&pVM->tm.s.fRunningQueues))
        {
            STAM_COUNTER_INC(&pVM->tm.s.StatPollRunning);
            return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
        }
        if (!ASMAtomicUoReadBool(&pVM->tm.s.fVirtualSyncTicking))
        {
            LogFlow(("TMTimerPoll: stopped\n"));
            return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
        }
        if (cOuterTries <= 0)
            break; /* that's enough */
    }
    if (cOuterTries <= 0)
        STAM_COUNTER_INC(&pVM->tm.s.StatPollELoop);
    u64VirtualSyncNow = u64Now - off;

    /* Calc delta and see if we've got a virtual sync hit. */
    int64_t i64Delta2 = u64Expire2 - u64VirtualSyncNow;
    if (i64Delta2 <= 0)
    {
        if (    !pVM->tm.s.fRunningQueues
            &&  !VMCPU_FF_ISSET(pVCpuDst, VMCPU_FF_TIMER))
        {
            Log5(("TMAll(%u): FF: %d -> 1\n", __LINE__, VMCPU_FF_ISPENDING(pVCpuDst, VMCPU_FF_TIMER)));
            VMCPU_FF_SET(pVCpuDst, VMCPU_FF_TIMER);
#ifdef IN_RING3
            REMR3NotifyTimerPending(pVM, pVCpuDst);
#endif
        }
        STAM_COUNTER_INC(&pVM->tm.s.StatPollVirtualSync);
        LogFlow(("TMTimerPoll: expire2=%'RU64 <= now=%'RU64\n", u64Expire2, u64Now));
        return tmTimerPollReturnHit(pVM, pVCpu, pVCpuDst, u64Now, pu64Delta, &pVM->tm.s.StatPollVirtualSync);
    }

    /*
     * Return the time left to the next event.
     */
    STAM_COUNTER_INC(&pVM->tm.s.StatPollMiss);
    if (pVCpu == pVCpuDst)
    {
        if (fCatchUp)
            i64Delta2 = ASMMultU64ByU32DivByU32(i64Delta2, 100, u32Pct + 100);
        return tmTimerPollReturnMiss(pVM, u64Now, RT_MIN(i64Delta1, i64Delta2), pu64Delta);
    }
    return tmTimerPollReturnOtherCpu(pVM, u64Now, pu64Delta);
}
674
675
676/**
677 * Set FF if we've passed the next virtual event.
678 *
679 * This function is called before FFs are checked in the inner execution EM loops.
680 *
681 * @returns true if timers are pending, false if not.
682 *
683 * @param pVM Pointer to the shared VM structure.
684 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
685 * @thread The emulation thread.
686 */
687VMMDECL(bool) TMTimerPollBool(PVM pVM, PVMCPU pVCpu)
688{
689 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
690 uint64_t off = 0;
691 tmTimerPollInternal(pVM, pVCpu, &off);
692 return off == 0;
693}
694
695
696/**
697 * Set FF if we've passed the next virtual event.
698 *
699 * This function is called before FFs are checked in the inner execution EM loops.
700 *
701 * @param pVM Pointer to the shared VM structure.
702 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
703 * @thread The emulation thread.
704 */
705VMM_INT_DECL(void) TMTimerPollVoid(PVM pVM, PVMCPU pVCpu)
706{
707 uint64_t off;
708 tmTimerPollInternal(pVM, pVCpu, &off);
709}
710
711
712/**
713 * Set FF if we've passed the next virtual event.
714 *
715 * This function is called before FFs are checked in the inner execution EM loops.
716 *
717 * @returns The GIP timestamp of the next event.
718 * 0 if the next event has already expired.
719 * @param pVM Pointer to the shared VM structure.
720 * @param pVCpu Pointer to the shared VMCPU structure of the caller.
721 * @param pu64Delta Where to store the delta.
722 * @thread The emulation thread.
723 */
724VMM_INT_DECL(uint64_t) TMTimerPollGIP(PVM pVM, PVMCPU pVCpu, uint64_t *pu64Delta)
725{
726 return tmTimerPollInternal(pVM, pVCpu, pu64Delta);
727}
728
729#endif /* VBOX_HIGH_RES_TIMERS_HACK */
730
731/**
732 * Gets the host context ring-3 pointer of the timer.
733 *
734 * @returns HC R3 pointer.
735 * @param pTimer Timer handle as returned by one of the create functions.
736 */
737VMMDECL(PTMTIMERR3) TMTimerR3Ptr(PTMTIMER pTimer)
738{
739 return (PTMTIMERR3)MMHyperCCToR3(pTimer->CTX_SUFF(pVM), pTimer);
740}
741
742
743/**
744 * Gets the host context ring-0 pointer of the timer.
745 *
746 * @returns HC R0 pointer.
747 * @param pTimer Timer handle as returned by one of the create functions.
748 */
749VMMDECL(PTMTIMERR0) TMTimerR0Ptr(PTMTIMER pTimer)
750{
751 return (PTMTIMERR0)MMHyperCCToR0(pTimer->CTX_SUFF(pVM), pTimer);
752}
753
754
755/**
756 * Gets the RC pointer of the timer.
757 *
758 * @returns RC pointer.
759 * @param pTimer Timer handle as returned by one of the create functions.
760 */
761VMMDECL(PTMTIMERRC) TMTimerRCPtr(PTMTIMER pTimer)
762{
763 return (PTMTIMERRC)MMHyperCCToRC(pTimer->CTX_SUFF(pVM), pTimer);
764}
765
766
767/**
768 * Links a timer into the active list of a timer queue.
769 *
770 * The caller must have taken the TM semaphore before calling this function.
771 *
772 * @param pQueue The queue.
773 * @param pTimer The timer.
774 * @param u64Expire The timer expiration time.
775 */
776DECL_FORCE_INLINE(void) tmTimerActiveLink(PTMTIMERQUEUE pQueue, PTMTIMER pTimer, uint64_t u64Expire)
777{
778 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
779 if (pCur)
780 {
781 for (;; pCur = TMTIMER_GET_NEXT(pCur))
782 {
783 if (pCur->u64Expire > u64Expire)
784 {
785 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
786 TMTIMER_SET_NEXT(pTimer, pCur);
787 TMTIMER_SET_PREV(pTimer, pPrev);
788 if (pPrev)
789 TMTIMER_SET_NEXT(pPrev, pTimer);
790 else
791 {
792 TMTIMER_SET_HEAD(pQueue, pTimer);
793 pQueue->u64Expire = u64Expire;
794 }
795 TMTIMER_SET_PREV(pCur, pTimer);
796 return;
797 }
798 if (!pCur->offNext)
799 {
800 TMTIMER_SET_NEXT(pCur, pTimer);
801 TMTIMER_SET_PREV(pTimer, pCur);
802 return;
803 }
804 }
805 }
806 else
807 {
808 TMTIMER_SET_HEAD(pQueue, pTimer);
809 pQueue->u64Expire = u64Expire;
810 }
811}
812
813
/**
 * Optimized TMTimerSet code path for starting an inactive timer.
 *
 * The caller must own the timer lock (tmTimerLock/tmTimerTryLock) and must
 * already have moved the timer to TMTIMERSTATE_ACTIVE; this function
 * releases the timer lock before returning (see the tmTimerUnlock below).
 *
 * @returns VBox status code.
 *
 * @param   pVM             The VM handle.
 * @param   pTimer          The timer handle.
 * @param   u64Expire       The new expire time.
 */
static int tmTimerSetOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t u64Expire)
{
    Assert(!pTimer->offPrev);
    Assert(!pTimer->offNext);
    Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);

    /*
     * Calculate and set the expiration time.
     */
    pTimer->u64Expire = u64Expire;
    Log2(("tmTimerSetOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64}\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire));

    /*
     * Link the timer into the active list.
     */
    TMCLOCK const enmClock = pTimer->enmClock;
    tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);

    STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetOpt);
    tmTimerUnlock(pVM);
    return VINF_SUCCESS;
}
845
846
847
848
849
850/**
851 * Arm a timer with a (new) expire time.
852 *
853 * @returns VBox status.
854 * @param pTimer Timer handle as returned by one of the create functions.
855 * @param u64Expire New expire time.
856 */
857VMMDECL(int) TMTimerSet(PTMTIMER pTimer, uint64_t u64Expire)
858{
859 PVM pVM = pTimer->CTX_SUFF(pVM);
860 STAM_PROFILE_START(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
861 TMTIMER_ASSERT_CRITSECT(pTimer);
862
863#ifdef VBOX_WITH_STATISTICS
864 /* Gather optimization info. */
865 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSet);
866 TMTIMERSTATE enmOrgState = pTimer->enmState;
867 switch (enmOrgState)
868 {
869 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStStopped); break;
870 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStExpDeliver); break;
871 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStActive); break;
872 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStop); break;
873 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendStopSched); break;
874 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendSched); break;
875 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStPendResched); break;
876 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetStOther); break;
877 }
878#endif
879
880 /*
881 * The most common case is setting the timer again during the callback.
882 * The second most common case is starting a timer at some other time.
883 */
884#if 1
885 TMTIMERSTATE enmState1 = pTimer->enmState;
886 if ( enmState1 == TMTIMERSTATE_EXPIRED_DELIVER
887 || ( enmState1 == TMTIMERSTATE_STOPPED
888 && pTimer->pCritSect))
889 {
890 /* Try take the TM lock and check the state again. */
891 if (RT_SUCCESS_NP(tmTimerTryLock(pVM)))
892 {
893 if (RT_LIKELY(tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState1)))
894 {
895 tmTimerSetOptimizedStart(pVM, pTimer, u64Expire);
896 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
897 return VINF_SUCCESS;
898 }
899 tmTimerUnlock(pVM);
900 }
901 }
902#endif
903
904 /*
905 * Unoptimized code path.
906 */
907 int cRetries = 1000;
908 do
909 {
910 /*
911 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
912 */
913 TMTIMERSTATE enmState = pTimer->enmState;
914 Log2(("TMTimerSet: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d u64Expire=%'RU64\n",
915 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries, u64Expire));
916 switch (enmState)
917 {
918 case TMTIMERSTATE_EXPIRED_DELIVER:
919 case TMTIMERSTATE_STOPPED:
920 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
921 {
922 Assert(!pTimer->offPrev);
923 Assert(!pTimer->offNext);
924 AssertMsg( pTimer->enmClock != TMCLOCK_VIRTUAL_SYNC
925 || pVM->tm.s.fVirtualSyncTicking
926 || u64Expire >= pVM->tm.s.u64VirtualSync,
927 ("%'RU64 < %'RU64 %s\n", u64Expire, pVM->tm.s.u64VirtualSync, R3STRING(pTimer->pszDesc)));
928 pTimer->u64Expire = u64Expire;
929 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
930 tmSchedule(pTimer);
931 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
932 return VINF_SUCCESS;
933 }
934 break;
935
936 case TMTIMERSTATE_PENDING_SCHEDULE:
937 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
938 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
939 {
940 pTimer->u64Expire = u64Expire;
941 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
942 tmSchedule(pTimer);
943 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
944 return VINF_SUCCESS;
945 }
946 break;
947
948
949 case TMTIMERSTATE_ACTIVE:
950 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
951 {
952 pTimer->u64Expire = u64Expire;
953 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
954 tmSchedule(pTimer);
955 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
956 return VINF_SUCCESS;
957 }
958 break;
959
960 case TMTIMERSTATE_PENDING_RESCHEDULE:
961 case TMTIMERSTATE_PENDING_STOP:
962 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
963 {
964 pTimer->u64Expire = u64Expire;
965 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
966 tmSchedule(pTimer);
967 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
968 return VINF_SUCCESS;
969 }
970 break;
971
972
973 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
974 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
975 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
976#ifdef IN_RING3
977 if (!RTThreadYield())
978 RTThreadSleep(1);
979#else
980/** @todo call host context and yield after a couple of iterations */
981#endif
982 break;
983
984 /*
985 * Invalid states.
986 */
987 case TMTIMERSTATE_DESTROY:
988 case TMTIMERSTATE_FREE:
989 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
990 return VERR_TM_INVALID_STATE;
991 default:
992 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
993 return VERR_TM_UNKNOWN_STATE;
994 }
995 } while (cRetries-- > 0);
996
997 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
998 STAM_PROFILE_STOP(&pVM->tm.s.CTX_SUFF_Z(StatTimerSet), a);
999 return VERR_INTERNAL_ERROR;
1000}
1001
1002
1003/**
1004 * Return the current time for the specified clock, setting pu64Now if not NULL.
1005 *
1006 * @returns Current time.
1007 * @param pVM The VM handle.
1008 * @param enmClock The clock to query.
1009 * @param pu64Now Optional pointer where to store the return time
1010 */
1011DECL_FORCE_INLINE(uint64_t) tmTimerSetRelativeNowWorker(PVM pVM, TMCLOCK enmClock, uint64_t *pu64Now)
1012{
1013 uint64_t u64Now;
1014 switch (enmClock)
1015 {
1016 case TMCLOCK_VIRTUAL_SYNC:
1017 u64Now = TMVirtualSyncGet(pVM);
1018 break;
1019 case TMCLOCK_VIRTUAL:
1020 u64Now = TMVirtualGet(pVM);
1021 break;
1022 case TMCLOCK_REAL:
1023 u64Now = TMRealGet(pVM);
1024 break;
1025 default:
1026 AssertFatalMsgFailed(("%d\n", enmClock));
1027 }
1028
1029 if (pu64Now)
1030 *pu64Now = u64Now;
1031 return u64Now;
1032}
1033
1034
1035/**
1036 * Optimized TMTimerSetRelative code path.
1037 *
1038 * @returns VBox status code.
1039 *
1040 * @param pVM The VM handle.
1041 * @param pTimer The timer handle.
1042 * @param cTicksToNext Clock ticks until the next time expiration.
1043 * @param pu64Now Where to return the current time stamp used.
1044 * Optional.
1045 */
1046static int tmTimerSetRelativeOptimizedStart(PVM pVM, PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1047{
1048 Assert(!pTimer->offPrev);
1049 Assert(!pTimer->offNext);
1050 Assert(pTimer->enmState == TMTIMERSTATE_ACTIVE);
1051
1052 /*
1053 * Calculate and set the expiration time.
1054 */
1055 TMCLOCK const enmClock = pTimer->enmClock;
1056 uint64_t const u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1057 pTimer->u64Expire = u64Expire;
1058 Log2(("tmTimerSetRelativeOptimizedStart: %p:{.pszDesc='%s', .u64Expire=%'RU64} cTicksToNext=%'RU64\n", pTimer, R3STRING(pTimer->pszDesc), u64Expire, cTicksToNext));
1059
1060 /*
1061 * Link the timer into the active list.
1062 */
1063 tmTimerActiveLink(&pVM->tm.s.CTX_SUFF(paTimerQueues)[enmClock], pTimer, u64Expire);
1064
1065 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeOpt);
1066 tmTimerUnlock(pVM);
1067 return VINF_SUCCESS;
1068}
1069
1070
1071/**
1072 * Arm a timer with a expire time relative to the current time.
1073 *
1074 * @returns VBox status.
1075 * @param pTimer Timer handle as returned by one of the create functions.
1076 * @param cTicksToNext Clock ticks until the next time expiration.
1077 * @param pu64Now Where to return the current time stamp used.
1078 * Optional.
1079 */
1080VMMDECL(int) TMTimerSetRelative(PTMTIMER pTimer, uint64_t cTicksToNext, uint64_t *pu64Now)
1081{
1082 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1083 TMTIMER_ASSERT_CRITSECT(pTimer);
1084 PVM pVM = pTimer->CTX_SUFF(pVM);
1085 int rc;
1086
1087#ifdef VBOX_WITH_STATISTICS
1088 /* Gather optimization info. */
1089 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelative);
1090 TMTIMERSTATE enmOrgState = pTimer->enmState;
1091 switch (enmOrgState)
1092 {
1093 case TMTIMERSTATE_STOPPED: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStStopped); break;
1094 case TMTIMERSTATE_EXPIRED_DELIVER: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStExpDeliver); break;
1095 case TMTIMERSTATE_ACTIVE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStActive); break;
1096 case TMTIMERSTATE_PENDING_STOP: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStop); break;
1097 case TMTIMERSTATE_PENDING_STOP_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendStopSched); break;
1098 case TMTIMERSTATE_PENDING_SCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendSched); break;
1099 case TMTIMERSTATE_PENDING_RESCHEDULE: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStPendResched); break;
1100 default: STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeStOther); break;
1101 }
1102#endif
1103
1104 /*
1105 * Try to take the TM lock and optimize the common cases.
1106 *
1107 * With the TM lock we can safely make optimizations like immediate
1108 * scheduling and we can also be 100% sure that we're not racing the
1109 * running of the timer queues. As an additional restraint we require the
1110 * timer to have a critical section associated with to be 100% there aren't
1111 * concurrent operations on the timer. (This latter isn't necessary any
1112 * longer as this isn't supported for any timers, critsect or not.)
1113 *
1114 * Note! Lock ordering doesn't apply when we only tries to
1115 * get the innermost locks.
1116 */
1117 bool fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
1118#if 1
1119 if ( fOwnTMLock
1120 && pTimer->pCritSect)
1121 {
1122 TMTIMERSTATE enmState = pTimer->enmState;
1123 if (RT_LIKELY( ( enmState == TMTIMERSTATE_EXPIRED_DELIVER
1124 || enmState == TMTIMERSTATE_STOPPED)
1125 && tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, enmState)))
1126 {
1127 tmTimerSetRelativeOptimizedStart(pVM, pTimer, cTicksToNext, pu64Now);
1128 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1129 return VINF_SUCCESS;
1130 }
1131
1132 /* Optimize other states when it becomes necessary. */
1133 }
1134#endif
1135
1136 /*
1137 * Unoptimized path.
1138 */
1139 TMCLOCK const enmClock = pTimer->enmClock;
1140 bool fOwnVirtSyncLock;
1141 fOwnVirtSyncLock = !fOwnTMLock
1142 && enmClock == TMCLOCK_VIRTUAL_SYNC
1143 && RT_SUCCESS(tmVirtualSyncTryLock(pVM));
1144 for (int cRetries = 1000; ; cRetries--)
1145 {
1146 /*
1147 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1148 */
1149 TMTIMERSTATE enmState = pTimer->enmState;
1150 switch (enmState)
1151 {
1152 case TMTIMERSTATE_STOPPED:
1153 if (enmClock == TMCLOCK_VIRTUAL_SYNC)
1154 {
1155 /** @todo To fix assertion in tmR3TimerQueueRunVirtualSync:
1156 * Figure a safe way of activating this timer while the queue is
1157 * being run.
1158 * (99.9% sure this that the assertion is caused by DevAPIC.cpp
1159 * re-starting the timer in respons to a initial_count write.) */
1160 }
1161 /* fall thru */
1162 case TMTIMERSTATE_EXPIRED_DELIVER:
1163 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1164 {
1165 Assert(!pTimer->offPrev);
1166 Assert(!pTimer->offNext);
1167 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1168 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [EXP/STOP]\n",
1169 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1170 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1171 tmSchedule(pTimer);
1172 rc = VINF_SUCCESS;
1173 break;
1174 }
1175 rc = VERR_TRY_AGAIN;
1176 break;
1177
1178 case TMTIMERSTATE_PENDING_SCHEDULE:
1179 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1180 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE, enmState))
1181 {
1182 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1183 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_SCHED]\n",
1184 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1185 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_SCHEDULE);
1186 tmSchedule(pTimer);
1187 rc = VINF_SUCCESS;
1188 break;
1189 }
1190 rc = VERR_TRY_AGAIN;
1191 break;
1192
1193
1194 case TMTIMERSTATE_ACTIVE:
1195 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1196 {
1197 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1198 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [ACTIVE]\n",
1199 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1200 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1201 tmSchedule(pTimer);
1202 rc = VINF_SUCCESS;
1203 break;
1204 }
1205 rc = VERR_TRY_AGAIN;
1206 break;
1207
1208 case TMTIMERSTATE_PENDING_RESCHEDULE:
1209 case TMTIMERSTATE_PENDING_STOP:
1210 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE, enmState))
1211 {
1212 pTimer->u64Expire = cTicksToNext + tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1213 Log2(("TMTimerSetRelative: %p:{.enmState=%s, .pszDesc='%s', .u64Expire=%'RU64} cRetries=%d [PEND_RESCH/STOP]\n",
1214 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), pTimer->u64Expire, cRetries));
1215 TM_SET_STATE(pTimer, TMTIMERSTATE_PENDING_RESCHEDULE);
1216 tmSchedule(pTimer);
1217 rc = VINF_SUCCESS;
1218 break;
1219 }
1220 rc = VERR_TRY_AGAIN;
1221 break;
1222
1223
1224 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1225 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1226 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1227#ifdef IN_RING3
1228 if (!RTThreadYield())
1229 RTThreadSleep(1);
1230#else
1231/** @todo call host context and yield after a couple of iterations */
1232#endif
1233 rc = VERR_TRY_AGAIN;
1234 break;
1235
1236 /*
1237 * Invalid states.
1238 */
1239 case TMTIMERSTATE_DESTROY:
1240 case TMTIMERSTATE_FREE:
1241 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1242 rc = VERR_TM_INVALID_STATE;
1243 break;
1244
1245 default:
1246 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1247 rc = VERR_TM_UNKNOWN_STATE;
1248 break;
1249 }
1250
1251 /* switch + loop is tedious to break out of. */
1252 if (rc == VINF_SUCCESS)
1253 break;
1254
1255 if (rc != VERR_TRY_AGAIN)
1256 {
1257 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1258 break;
1259 }
1260 if (cRetries <= 0)
1261 {
1262 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1263 rc = VERR_INTERNAL_ERROR;
1264 tmTimerSetRelativeNowWorker(pVM, enmClock, pu64Now);
1265 break;
1266 }
1267
1268 /*
1269 * Retry to gain locks.
1270 */
1271 if (!fOwnTMLock)
1272 {
1273 fOwnTMLock = RT_SUCCESS_NP(tmTimerTryLock(pVM));
1274 if ( !fOwnTMLock
1275 && enmClock == TMCLOCK_VIRTUAL_SYNC
1276 && !fOwnVirtSyncLock)
1277 fOwnVirtSyncLock = RT_SUCCESS_NP(tmVirtualSyncTryLock(pVM));
1278 }
1279
1280 } /* for (;;) */
1281
1282 /*
1283 * Clean up and return.
1284 */
1285 if (fOwnVirtSyncLock)
1286 tmVirtualSyncUnlock(pVM);
1287 if (fOwnTMLock)
1288 tmTimerUnlock(pVM);
1289
1290 if ( !fOwnTMLock
1291 && !fOwnVirtSyncLock
1292 && enmClock == TMCLOCK_VIRTUAL_SYNC)
1293 STAM_COUNTER_INC(&pVM->tm.s.StatTimerSetRelativeRacyVirtSync);
1294
1295 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerSetRelative), a);
1296 return rc;
1297}
1298
1299
1300/**
1301 * Arm a timer with a (new) expire time relative to current time.
1302 *
1303 * @returns VBox status.
1304 * @param pTimer Timer handle as returned by one of the create functions.
1305 * @param cMilliesToNext Number of millieseconds to the next tick.
1306 */
1307VMMDECL(int) TMTimerSetMillies(PTMTIMER pTimer, uint32_t cMilliesToNext)
1308{
1309 PVM pVM = pTimer->CTX_SUFF(pVM);
1310 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1311
1312 switch (pTimer->enmClock)
1313 {
1314 case TMCLOCK_VIRTUAL:
1315 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1316 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1317
1318 case TMCLOCK_VIRTUAL_SYNC:
1319 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1320 return TMTimerSetRelative(pTimer, cMilliesToNext * UINT64_C(1000000), NULL);
1321
1322 case TMCLOCK_REAL:
1323 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1324 return TMTimerSetRelative(pTimer, cMilliesToNext, NULL);
1325
1326 default:
1327 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1328 return VERR_INTERNAL_ERROR;
1329 }
1330}
1331
1332
1333/**
1334 * Arm a timer with a (new) expire time relative to current time.
1335 *
1336 * @returns VBox status.
1337 * @param pTimer Timer handle as returned by one of the create functions.
1338 * @param cMicrosToNext Number of microseconds to the next tick.
1339 */
1340VMMDECL(int) TMTimerSetMicro(PTMTIMER pTimer, uint64_t cMicrosToNext)
1341{
1342 PVM pVM = pTimer->CTX_SUFF(pVM);
1343 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1344
1345 switch (pTimer->enmClock)
1346 {
1347 case TMCLOCK_VIRTUAL:
1348 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1349 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1350
1351 case TMCLOCK_VIRTUAL_SYNC:
1352 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1353 return TMTimerSetRelative(pTimer, cMicrosToNext * 1000, NULL);
1354
1355 case TMCLOCK_REAL:
1356 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1357 return TMTimerSetRelative(pTimer, cMicrosToNext / 1000, NULL);
1358
1359 default:
1360 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1361 return VERR_INTERNAL_ERROR;
1362 }
1363}
1364
1365
1366/**
1367 * Arm a timer with a (new) expire time relative to current time.
1368 *
1369 * @returns VBox status.
1370 * @param pTimer Timer handle as returned by one of the create functions.
1371 * @param cNanosToNext Number of nanoseconds to the next tick.
1372 */
1373VMMDECL(int) TMTimerSetNano(PTMTIMER pTimer, uint64_t cNanosToNext)
1374{
1375 PVM pVM = pTimer->CTX_SUFF(pVM);
1376 PVMCPU pVCpu = &pVM->aCpus[0]; /* just take the first VCPU */
1377
1378 switch (pTimer->enmClock)
1379 {
1380 case TMCLOCK_VIRTUAL:
1381 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1382 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1383
1384 case TMCLOCK_VIRTUAL_SYNC:
1385 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1386 return TMTimerSetRelative(pTimer, cNanosToNext, NULL);
1387
1388 case TMCLOCK_REAL:
1389 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1390 return TMTimerSetRelative(pTimer, cNanosToNext / 1000000, NULL);
1391
1392 default:
1393 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1394 return VERR_INTERNAL_ERROR;
1395 }
1396}
1397
1398
1399/**
1400 * Stop the timer.
1401 * Use TMR3TimerArm() to "un-stop" the timer.
1402 *
1403 * @returns VBox status.
1404 * @param pTimer Timer handle as returned by one of the create functions.
1405 */
1406VMMDECL(int) TMTimerStop(PTMTIMER pTimer)
1407{
1408 STAM_PROFILE_START(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1409 TMTIMER_ASSERT_CRITSECT(pTimer);
1410
1411 /** @todo see if this function needs optimizing. */
1412 int cRetries = 1000;
1413 do
1414 {
1415 /*
1416 * Change to any of the SET_EXPIRE states if valid and then to SCHEDULE or RESCHEDULE.
1417 */
1418 TMTIMERSTATE enmState = pTimer->enmState;
1419 Log2(("TMTimerStop: %p:{.enmState=%s, .pszDesc='%s'} cRetries=%d\n",
1420 pTimer, tmTimerState(enmState), R3STRING(pTimer->pszDesc), cRetries));
1421 switch (enmState)
1422 {
1423 case TMTIMERSTATE_EXPIRED_DELIVER:
1424 //AssertMsgFailed(("You don't stop an expired timer dude!\n"));
1425 return VERR_INVALID_PARAMETER;
1426
1427 case TMTIMERSTATE_STOPPED:
1428 case TMTIMERSTATE_PENDING_STOP:
1429 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1430 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1431 return VINF_SUCCESS;
1432
1433 case TMTIMERSTATE_PENDING_SCHEDULE:
1434 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, enmState))
1435 {
1436 tmSchedule(pTimer);
1437 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1438 return VINF_SUCCESS;
1439 }
1440
1441 case TMTIMERSTATE_PENDING_RESCHEDULE:
1442 if (tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1443 {
1444 tmSchedule(pTimer);
1445 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1446 return VINF_SUCCESS;
1447 }
1448 break;
1449
1450 case TMTIMERSTATE_ACTIVE:
1451 if (tmTimerTryWithLink(pTimer, TMTIMERSTATE_PENDING_STOP, enmState))
1452 {
1453 tmSchedule(pTimer);
1454 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1455 return VINF_SUCCESS;
1456 }
1457 break;
1458
1459 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1460 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1461 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1462#ifdef IN_RING3
1463 if (!RTThreadYield())
1464 RTThreadSleep(1);
1465#else
1466/**@todo call host and yield cpu after a while. */
1467#endif
1468 break;
1469
1470 /*
1471 * Invalid states.
1472 */
1473 case TMTIMERSTATE_DESTROY:
1474 case TMTIMERSTATE_FREE:
1475 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1476 return VERR_TM_INVALID_STATE;
1477 default:
1478 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1479 return VERR_TM_UNKNOWN_STATE;
1480 }
1481 } while (cRetries-- > 0);
1482
1483 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1484 STAM_PROFILE_STOP(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatTimerStop), a);
1485 return VERR_INTERNAL_ERROR;
1486}
1487
1488
1489/**
1490 * Get the current clock time.
1491 * Handy for calculating the new expire time.
1492 *
1493 * @returns Current clock time.
1494 * @param pTimer Timer handle as returned by one of the create functions.
1495 */
1496VMMDECL(uint64_t) TMTimerGet(PTMTIMER pTimer)
1497{
1498 uint64_t u64;
1499 PVM pVM = pTimer->CTX_SUFF(pVM);
1500
1501 switch (pTimer->enmClock)
1502 {
1503 case TMCLOCK_VIRTUAL:
1504 u64 = TMVirtualGet(pVM);
1505 break;
1506 case TMCLOCK_VIRTUAL_SYNC:
1507 u64 = TMVirtualSyncGet(pVM);
1508 break;
1509 case TMCLOCK_REAL:
1510 u64 = TMRealGet(pVM);
1511 break;
1512 default:
1513 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1514 return ~(uint64_t)0;
1515 }
1516 //Log2(("TMTimerGet: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1517 // u64, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1518 return u64;
1519}
1520
1521
1522/**
1523 * Get the freqency of the timer clock.
1524 *
1525 * @returns Clock frequency (as Hz of course).
1526 * @param pTimer Timer handle as returned by one of the create functions.
1527 */
1528VMMDECL(uint64_t) TMTimerGetFreq(PTMTIMER pTimer)
1529{
1530 switch (pTimer->enmClock)
1531 {
1532 case TMCLOCK_VIRTUAL:
1533 case TMCLOCK_VIRTUAL_SYNC:
1534 return TMCLOCK_FREQ_VIRTUAL;
1535
1536 case TMCLOCK_REAL:
1537 return TMCLOCK_FREQ_REAL;
1538
1539 default:
1540 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1541 return 0;
1542 }
1543}
1544
1545
1546/**
1547 * Get the current clock time as nanoseconds.
1548 *
1549 * @returns The timer clock as nanoseconds.
1550 * @param pTimer Timer handle as returned by one of the create functions.
1551 */
1552VMMDECL(uint64_t) TMTimerGetNano(PTMTIMER pTimer)
1553{
1554 return TMTimerToNano(pTimer, TMTimerGet(pTimer));
1555}
1556
1557
1558/**
1559 * Get the current clock time as microseconds.
1560 *
1561 * @returns The timer clock as microseconds.
1562 * @param pTimer Timer handle as returned by one of the create functions.
1563 */
1564VMMDECL(uint64_t) TMTimerGetMicro(PTMTIMER pTimer)
1565{
1566 return TMTimerToMicro(pTimer, TMTimerGet(pTimer));
1567}
1568
1569
1570/**
1571 * Get the current clock time as milliseconds.
1572 *
1573 * @returns The timer clock as milliseconds.
1574 * @param pTimer Timer handle as returned by one of the create functions.
1575 */
1576VMMDECL(uint64_t) TMTimerGetMilli(PTMTIMER pTimer)
1577{
1578 return TMTimerToMilli(pTimer, TMTimerGet(pTimer));
1579}
1580
1581
1582/**
1583 * Converts the specified timer clock time to nanoseconds.
1584 *
1585 * @returns nanoseconds.
1586 * @param pTimer Timer handle as returned by one of the create functions.
1587 * @param u64Ticks The clock ticks.
1588 * @remark There could be rounding errors here. We just do a simple integere divide
1589 * without any adjustments.
1590 */
1591VMMDECL(uint64_t) TMTimerToNano(PTMTIMER pTimer, uint64_t u64Ticks)
1592{
1593 switch (pTimer->enmClock)
1594 {
1595 case TMCLOCK_VIRTUAL:
1596 case TMCLOCK_VIRTUAL_SYNC:
1597 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1598 return u64Ticks;
1599
1600 case TMCLOCK_REAL:
1601 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1602 return u64Ticks * 1000000;
1603
1604 default:
1605 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1606 return 0;
1607 }
1608}
1609
1610
1611/**
1612 * Converts the specified timer clock time to microseconds.
1613 *
1614 * @returns microseconds.
1615 * @param pTimer Timer handle as returned by one of the create functions.
1616 * @param u64Ticks The clock ticks.
1617 * @remark There could be rounding errors here. We just do a simple integere divide
1618 * without any adjustments.
1619 */
1620VMMDECL(uint64_t) TMTimerToMicro(PTMTIMER pTimer, uint64_t u64Ticks)
1621{
1622 switch (pTimer->enmClock)
1623 {
1624 case TMCLOCK_VIRTUAL:
1625 case TMCLOCK_VIRTUAL_SYNC:
1626 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1627 return u64Ticks / 1000;
1628
1629 case TMCLOCK_REAL:
1630 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1631 return u64Ticks * 1000;
1632
1633 default:
1634 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1635 return 0;
1636 }
1637}
1638
1639
1640/**
1641 * Converts the specified timer clock time to milliseconds.
1642 *
1643 * @returns milliseconds.
1644 * @param pTimer Timer handle as returned by one of the create functions.
1645 * @param u64Ticks The clock ticks.
1646 * @remark There could be rounding errors here. We just do a simple integere divide
1647 * without any adjustments.
1648 */
1649VMMDECL(uint64_t) TMTimerToMilli(PTMTIMER pTimer, uint64_t u64Ticks)
1650{
1651 switch (pTimer->enmClock)
1652 {
1653 case TMCLOCK_VIRTUAL:
1654 case TMCLOCK_VIRTUAL_SYNC:
1655 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1656 return u64Ticks / 1000000;
1657
1658 case TMCLOCK_REAL:
1659 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1660 return u64Ticks;
1661
1662 default:
1663 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1664 return 0;
1665 }
1666}
1667
1668
1669/**
1670 * Converts the specified nanosecond timestamp to timer clock ticks.
1671 *
1672 * @returns timer clock ticks.
1673 * @param pTimer Timer handle as returned by one of the create functions.
1674 * @param u64NanoTS The nanosecond value ticks to convert.
1675 * @remark There could be rounding and overflow errors here.
1676 */
1677VMMDECL(uint64_t) TMTimerFromNano(PTMTIMER pTimer, uint64_t u64NanoTS)
1678{
1679 switch (pTimer->enmClock)
1680 {
1681 case TMCLOCK_VIRTUAL:
1682 case TMCLOCK_VIRTUAL_SYNC:
1683 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1684 return u64NanoTS;
1685
1686 case TMCLOCK_REAL:
1687 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1688 return u64NanoTS / 1000000;
1689
1690 default:
1691 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1692 return 0;
1693 }
1694}
1695
1696
1697/**
1698 * Converts the specified microsecond timestamp to timer clock ticks.
1699 *
1700 * @returns timer clock ticks.
1701 * @param pTimer Timer handle as returned by one of the create functions.
1702 * @param u64MicroTS The microsecond value ticks to convert.
1703 * @remark There could be rounding and overflow errors here.
1704 */
1705VMMDECL(uint64_t) TMTimerFromMicro(PTMTIMER pTimer, uint64_t u64MicroTS)
1706{
1707 switch (pTimer->enmClock)
1708 {
1709 case TMCLOCK_VIRTUAL:
1710 case TMCLOCK_VIRTUAL_SYNC:
1711 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1712 return u64MicroTS * 1000;
1713
1714 case TMCLOCK_REAL:
1715 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1716 return u64MicroTS / 1000;
1717
1718 default:
1719 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1720 return 0;
1721 }
1722}
1723
1724
1725/**
1726 * Converts the specified millisecond timestamp to timer clock ticks.
1727 *
1728 * @returns timer clock ticks.
1729 * @param pTimer Timer handle as returned by one of the create functions.
1730 * @param u64MilliTS The millisecond value ticks to convert.
1731 * @remark There could be rounding and overflow errors here.
1732 */
1733VMMDECL(uint64_t) TMTimerFromMilli(PTMTIMER pTimer, uint64_t u64MilliTS)
1734{
1735 switch (pTimer->enmClock)
1736 {
1737 case TMCLOCK_VIRTUAL:
1738 case TMCLOCK_VIRTUAL_SYNC:
1739 AssertCompile(TMCLOCK_FREQ_VIRTUAL == 1000000000);
1740 return u64MilliTS * 1000000;
1741
1742 case TMCLOCK_REAL:
1743 AssertCompile(TMCLOCK_FREQ_REAL == 1000);
1744 return u64MilliTS;
1745
1746 default:
1747 AssertMsgFailed(("Invalid enmClock=%d\n", pTimer->enmClock));
1748 return 0;
1749 }
1750}
1751
1752
1753/**
1754 * Get the expire time of the timer.
1755 * Only valid for active timers.
1756 *
1757 * @returns Expire time of the timer.
1758 * @param pTimer Timer handle as returned by one of the create functions.
1759 */
1760VMMDECL(uint64_t) TMTimerGetExpire(PTMTIMER pTimer)
1761{
1762 TMTIMER_ASSERT_CRITSECT(pTimer);
1763 int cRetries = 1000;
1764 do
1765 {
1766 TMTIMERSTATE enmState = pTimer->enmState;
1767 switch (enmState)
1768 {
1769 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1770 case TMTIMERSTATE_EXPIRED_DELIVER:
1771 case TMTIMERSTATE_STOPPED:
1772 case TMTIMERSTATE_PENDING_STOP:
1773 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1774 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1775 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1776 return ~(uint64_t)0;
1777
1778 case TMTIMERSTATE_ACTIVE:
1779 case TMTIMERSTATE_PENDING_RESCHEDULE:
1780 case TMTIMERSTATE_PENDING_SCHEDULE:
1781 Log2(("TMTimerGetExpire: returns %'RU64 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1782 pTimer->u64Expire, pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1783 return pTimer->u64Expire;
1784
1785 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1786 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1787#ifdef IN_RING3
1788 if (!RTThreadYield())
1789 RTThreadSleep(1);
1790#endif
1791 break;
1792
1793 /*
1794 * Invalid states.
1795 */
1796 case TMTIMERSTATE_DESTROY:
1797 case TMTIMERSTATE_FREE:
1798 AssertMsgFailed(("Invalid timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1799 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1800 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1801 return ~(uint64_t)0;
1802 default:
1803 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1804 return ~(uint64_t)0;
1805 }
1806 } while (cRetries-- > 0);
1807
1808 AssertMsgFailed(("Failed waiting for stable state. state=%d (%s)\n", pTimer->enmState, R3STRING(pTimer->pszDesc)));
1809 Log2(("TMTimerGetExpire: returns ~0 (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1810 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1811 return ~(uint64_t)0;
1812}
1813
1814
1815/**
1816 * Checks if a timer is active or not.
1817 *
1818 * @returns True if active.
1819 * @returns False if not active.
1820 * @param pTimer Timer handle as returned by one of the create functions.
1821 */
1822VMMDECL(bool) TMTimerIsActive(PTMTIMER pTimer)
1823{
1824 TMTIMERSTATE enmState = pTimer->enmState;
1825 switch (enmState)
1826 {
1827 case TMTIMERSTATE_STOPPED:
1828 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
1829 case TMTIMERSTATE_EXPIRED_DELIVER:
1830 case TMTIMERSTATE_PENDING_STOP:
1831 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
1832 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1833 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1834 return false;
1835
1836 case TMTIMERSTATE_ACTIVE:
1837 case TMTIMERSTATE_PENDING_RESCHEDULE:
1838 case TMTIMERSTATE_PENDING_SCHEDULE:
1839 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
1840 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
1841 Log2(("TMTimerIsActive: returns true (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1842 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1843 return true;
1844
1845 /*
1846 * Invalid states.
1847 */
1848 case TMTIMERSTATE_DESTROY:
1849 case TMTIMERSTATE_FREE:
1850 AssertMsgFailed(("Invalid timer state %s (%s)\n", tmTimerState(enmState), R3STRING(pTimer->pszDesc)));
1851 Log2(("TMTimerIsActive: returns false (pTimer=%p:{.enmState=%s, .pszDesc='%s'})\n",
1852 pTimer, tmTimerState(pTimer->enmState), R3STRING(pTimer->pszDesc)));
1853 return false;
1854 default:
1855 AssertMsgFailed(("Unknown timer state %d (%s)\n", enmState, R3STRING(pTimer->pszDesc)));
1856 return false;
1857 }
1858}
1859
1860
1861/**
1862 * Convert state to string.
1863 *
1864 * @returns Readonly status name.
1865 * @param enmState State.
1866 */
1867const char *tmTimerState(TMTIMERSTATE enmState)
1868{
1869 switch (enmState)
1870 {
1871#define CASE(num, state) \
1872 case TMTIMERSTATE_##state: \
1873 AssertCompile(TMTIMERSTATE_##state == (num)); \
1874 return #num "-" #state
1875 CASE( 1,STOPPED);
1876 CASE( 2,ACTIVE);
1877 CASE( 3,EXPIRED_GET_UNLINK);
1878 CASE( 4,EXPIRED_DELIVER);
1879 CASE( 5,PENDING_STOP);
1880 CASE( 6,PENDING_STOP_SCHEDULE);
1881 CASE( 7,PENDING_SCHEDULE_SET_EXPIRE);
1882 CASE( 8,PENDING_SCHEDULE);
1883 CASE( 9,PENDING_RESCHEDULE_SET_EXPIRE);
1884 CASE(10,PENDING_RESCHEDULE);
1885 CASE(11,DESTROY);
1886 CASE(12,FREE);
1887 default:
1888 AssertMsgFailed(("Invalid state enmState=%d\n", enmState));
1889 return "Invalid state!";
1890#undef CASE
1891 }
1892}
1893
1894
1895/**
1896 * Schedules the given timer on the given queue.
1897 *
1898 * @param pQueue The timer queue.
1899 * @param pTimer The timer that needs scheduling.
1900 *
1901 * @remarks Called while owning the lock.
1902 */
1903DECLINLINE(void) tmTimerQueueScheduleOne(PTMTIMERQUEUE pQueue, PTMTIMER pTimer)
1904{
1905 /*
1906 * Processing.
1907 */
1908 unsigned cRetries = 2;
1909 do
1910 {
1911 TMTIMERSTATE enmState = pTimer->enmState;
1912 switch (enmState)
1913 {
1914 /*
1915 * Reschedule timer (in the active list).
1916 */
1917 case TMTIMERSTATE_PENDING_RESCHEDULE:
1918 {
1919 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_SCHEDULE, TMTIMERSTATE_PENDING_RESCHEDULE)))
1920 break; /* retry */
1921
1922 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1923 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
1924 if (pPrev)
1925 TMTIMER_SET_NEXT(pPrev, pNext);
1926 else
1927 {
1928 TMTIMER_SET_HEAD(pQueue, pNext);
1929 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
1930 }
1931 if (pNext)
1932 TMTIMER_SET_PREV(pNext, pPrev);
1933 pTimer->offNext = 0;
1934 pTimer->offPrev = 0;
1935 /* fall thru */
1936 }
1937
1938 /*
1939 * Schedule timer (insert into the active list).
1940 */
1941 case TMTIMERSTATE_PENDING_SCHEDULE:
1942 {
1943 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
1944 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_ACTIVE, TMTIMERSTATE_PENDING_SCHEDULE)))
1945 break; /* retry */
1946
1947 PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue);
1948 if (pCur)
1949 {
1950 const uint64_t u64Expire = pTimer->u64Expire;
1951 for (;; pCur = TMTIMER_GET_NEXT(pCur))
1952 {
1953 if (pCur->u64Expire > u64Expire)
1954 {
1955 const PTMTIMER pPrev = TMTIMER_GET_PREV(pCur);
1956 TMTIMER_SET_NEXT(pTimer, pCur);
1957 TMTIMER_SET_PREV(pTimer, pPrev);
1958 if (pPrev)
1959 TMTIMER_SET_NEXT(pPrev, pTimer);
1960 else
1961 {
1962 TMTIMER_SET_HEAD(pQueue, pTimer);
1963 pQueue->u64Expire = u64Expire;
1964 }
1965 TMTIMER_SET_PREV(pCur, pTimer);
1966 return;
1967 }
1968 if (!pCur->offNext)
1969 {
1970 TMTIMER_SET_NEXT(pCur, pTimer);
1971 TMTIMER_SET_PREV(pTimer, pCur);
1972 return;
1973 }
1974 }
1975 }
1976 else
1977 {
1978 TMTIMER_SET_HEAD(pQueue, pTimer);
1979 pQueue->u64Expire = pTimer->u64Expire;
1980 }
1981 return;
1982 }
1983
1984 /*
1985 * Stop the timer in active list.
1986 */
1987 case TMTIMERSTATE_PENDING_STOP:
1988 {
1989 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_PENDING_STOP_SCHEDULE, TMTIMERSTATE_PENDING_STOP)))
1990 break; /* retry */
1991
1992 const PTMTIMER pPrev = TMTIMER_GET_PREV(pTimer);
1993 const PTMTIMER pNext = TMTIMER_GET_NEXT(pTimer);
1994 if (pPrev)
1995 TMTIMER_SET_NEXT(pPrev, pNext);
1996 else
1997 {
1998 TMTIMER_SET_HEAD(pQueue, pNext);
1999 pQueue->u64Expire = pNext ? pNext->u64Expire : INT64_MAX;
2000 }
2001 if (pNext)
2002 TMTIMER_SET_PREV(pNext, pPrev);
2003 pTimer->offNext = 0;
2004 pTimer->offPrev = 0;
2005 /* fall thru */
2006 }
2007
2008 /*
2009 * Stop the timer (not on the active list).
2010 */
2011 case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
2012 Assert(!pTimer->offNext); Assert(!pTimer->offPrev);
2013 if (RT_UNLIKELY(!tmTimerTry(pTimer, TMTIMERSTATE_STOPPED, TMTIMERSTATE_PENDING_STOP_SCHEDULE)))
2014 break;
2015 return;
2016
2017 /*
2018 * The timer is pending destruction by TMR3TimerDestroy, our caller.
2019 * Nothing to do here.
2020 */
2021 case TMTIMERSTATE_DESTROY:
2022 break;
2023
2024 /*
2025 * Postpone these until they get into the right state.
2026 */
2027 case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
2028 case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
2029 tmTimerLink(pQueue, pTimer);
2030 STAM_COUNTER_INC(&pTimer->CTX_SUFF(pVM)->tm.s.CTX_SUFF_Z(StatPostponed));
2031 return;
2032
2033 /*
2034 * None of these can be in the schedule.
2035 */
2036 case TMTIMERSTATE_FREE:
2037 case TMTIMERSTATE_STOPPED:
2038 case TMTIMERSTATE_ACTIVE:
2039 case TMTIMERSTATE_EXPIRED_GET_UNLINK:
2040 case TMTIMERSTATE_EXPIRED_DELIVER:
2041 default:
2042 AssertMsgFailed(("Timer (%p) in the scheduling list has an invalid state %s (%d)!",
2043 pTimer, tmTimerState(pTimer->enmState), pTimer->enmState));
2044 return;
2045 }
2046 } while (cRetries-- > 0);
2047}
2048
2049
2050/**
2051 * Schedules the specified timer queue.
2052 *
2053 * @param pVM The VM to run the timers for.
2054 * @param pQueue The queue to schedule.
2055 *
2056 * @remarks Called while owning the lock.
2057 */
2058void tmTimerQueueSchedule(PVM pVM, PTMTIMERQUEUE pQueue)
2059{
2060 TM_ASSERT_LOCK(pVM);
2061
2062 /*
2063 * Dequeue the scheduling list and iterate it.
2064 */
2065 int32_t offNext = ASMAtomicXchgS32(&pQueue->offSchedule, 0);
2066 Log2(("tmTimerQueueSchedule: pQueue=%p:{.enmClock=%d, offNext=%RI32, .u64Expired=%'RU64}\n", pQueue, pQueue->enmClock, offNext, pQueue->u64Expire));
2067 if (!offNext)
2068 return;
2069 PTMTIMER pNext = (PTMTIMER)((intptr_t)pQueue + offNext);
2070 while (pNext)
2071 {
2072 /*
2073 * Unlink the head timer and find the next one.
2074 */
2075 PTMTIMER pTimer = pNext;
2076 pNext = pNext->offScheduleNext ? (PTMTIMER)((intptr_t)pNext + pNext->offScheduleNext) : NULL;
2077 pTimer->offScheduleNext = 0;
2078
2079 /*
2080 * Do the scheduling.
2081 */
2082 Log2(("tmTimerQueueSchedule: %p:{.enmState=%s, .enmClock=%d, .enmType=%d, .pszDesc=%s}\n",
2083 pTimer, tmTimerState(pTimer->enmState), pTimer->enmClock, pTimer->enmType, R3STRING(pTimer->pszDesc)));
2084 tmTimerQueueScheduleOne(pQueue, pTimer);
2085 Log2(("tmTimerQueueSchedule: %p: new %s\n", pTimer, tmTimerState(pTimer->enmState)));
2086 } /* foreach timer in current schedule batch. */
2087 Log2(("tmTimerQueueSchedule: u64Expired=%'RU64\n", pQueue->u64Expire));
2088}
2089
2090
2091#ifdef VBOX_STRICT
/**
 * Checks that the timer queues are sane.
 *
 * Strict builds only. Walks each clock's active list verifying the
 * doubly-linked structure and timer states, and (in ring-3) cross checks
 * the big created-timers list against the active lists.
 *
 * @param pVM VM handle.
 * @param pszWhere Caller supplied string identifying the call site; only
 *                 used in the assertion messages.
 *
 * @remarks Called while owning the lock.
 */
void tmTimerQueuesSanityChecks(PVM pVM, const char *pszWhere)
{
    TM_ASSERT_LOCK(pVM);

    /*
     * Check the linking of the active lists.
     */
    for (int i = 0; i < TMCLOCK_MAX; i++)
    {
        PTMTIMERQUEUE pQueue = &pVM->tm.s.CTX_SUFF(paTimerQueues)[i];
        Assert((int)pQueue->enmClock == i);
        PTMTIMER pPrev = NULL;
        for (PTMTIMER pCur = TMTIMER_GET_HEAD(pQueue); pCur; pPrev = pCur, pCur = TMTIMER_GET_NEXT(pCur))
        {
            /* Each node must belong to this clock and point back at its predecessor. */
            AssertMsg((int)pCur->enmClock == i, ("%s: %d != %d\n", pszWhere, pCur->enmClock, i));
            AssertMsg(TMTIMER_GET_PREV(pCur) == pPrev, ("%s: %p != %p\n", pszWhere, TMTIMER_GET_PREV(pCur), pPrev));
            TMTIMERSTATE enmState = pCur->enmState;
            switch (enmState)
            {
                case TMTIMERSTATE_ACTIVE:
                    /* An ACTIVE timer must not be on the scheduling list; the
                       state is deliberately re-read here since it may have
                       changed concurrently since enmState was sampled, in
                       which case a non-zero offScheduleNext is acceptable. */
                    AssertMsg(   !pCur->offScheduleNext
                              || pCur->enmState != TMTIMERSTATE_ACTIVE,
                              ("%s: %RI32\n", pszWhere, pCur->offScheduleNext));
                    break;
                case TMTIMERSTATE_PENDING_STOP:
                case TMTIMERSTATE_PENDING_RESCHEDULE:
                case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
                    /* These transitional states are legitimately still linked. */
                    break;
                default:
                    AssertMsgFailed(("%s: Invalid state enmState=%d %s\n", pszWhere, enmState, tmTimerState(enmState)));
                    break;
            }
        }
    }


# ifdef IN_RING3
    /*
     * Do the big list and check that active timers all are in the active lists.
     */
    PTMTIMERR3 pPrev = NULL;
    for (PTMTIMERR3 pCur = pVM->tm.s.pCreated; pCur; pPrev = pCur, pCur = pCur->pBigNext)
    {
        Assert(pCur->pBigPrev == pPrev);
        Assert((unsigned)pCur->enmClock < (unsigned)TMCLOCK_MAX);

        TMTIMERSTATE enmState = pCur->enmState;
        switch (enmState)
        {
            /* States that must be found on the corresponding active list. */
            case TMTIMERSTATE_ACTIVE:
            case TMTIMERSTATE_PENDING_STOP:
            case TMTIMERSTATE_PENDING_RESCHEDULE:
            case TMTIMERSTATE_PENDING_RESCHEDULE_SET_EXPIRE:
            {
                PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                Assert(pCur->offPrev || pCur == pCurAct);
                while (pCurAct && pCurAct != pCur)
                    pCurAct = TMTIMER_GET_NEXT(pCurAct);
                Assert(pCurAct == pCur);
                break;
            }

            /* States that must NOT be on (or referenced by) any active list. */
            case TMTIMERSTATE_PENDING_SCHEDULE:
            case TMTIMERSTATE_PENDING_STOP_SCHEDULE:
            case TMTIMERSTATE_STOPPED:
            case TMTIMERSTATE_EXPIRED_DELIVER:
            {
                Assert(!pCur->offNext);
                Assert(!pCur->offPrev);
                for (PTMTIMERR3 pCurAct = TMTIMER_GET_HEAD(&pVM->tm.s.CTX_SUFF(paTimerQueues)[pCur->enmClock]);
                     pCurAct;
                     pCurAct = TMTIMER_GET_NEXT(pCurAct))
                {
                    Assert(pCurAct != pCur);
                    Assert(TMTIMER_GET_NEXT(pCurAct) != pCur);
                    Assert(TMTIMER_GET_PREV(pCurAct) != pCur);
                }
                break;
            }

            /* ignore */
            case TMTIMERSTATE_PENDING_SCHEDULE_SET_EXPIRE:
                break;

            /* shouldn't get here! */
            case TMTIMERSTATE_EXPIRED_GET_UNLINK:
            case TMTIMERSTATE_DESTROY:
            default:
                AssertMsgFailed(("Invalid state enmState=%d %s\n", enmState, tmTimerState(enmState)));
                break;
        }
    }
# endif /* IN_RING3 */
}
2193#endif /* !VBOX_STRICT */
2194
2195
2196/**
2197 * Gets the current warp drive percent.
2198 *
2199 * @returns The warp drive percent.
2200 * @param pVM The VM handle.
2201 */
2202VMMDECL(uint32_t) TMGetWarpDrive(PVM pVM)
2203{
2204 return pVM->tm.s.u32VirtualWarpDrivePercentage;
2205}
2206
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette