VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@ 91816

Last change on this file since 91816 was 91816, checked in by vboxsync, 4 years ago

VMM/PDMCritSect: comments. bugref:10124

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 49.2 KB
1/* $Id: PDMAllCritSect.cpp 91816 2021-10-18 09:47:59Z vboxsync $ */
2/** @file
3 * PDM - Write-Only Critical Section, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM_CRITSECT
23#include "PDMInternal.h"
24#include <VBox/vmm/pdmcritsect.h>
25#include <VBox/vmm/mm.h>
26#include <VBox/vmm/vmm.h>
27#include <VBox/vmm/vmcc.h>
28#include <VBox/err.h>
29#include <VBox/vmm/hm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37#endif
38#if defined(IN_RING3) || defined(IN_RING0)
39# include <iprt/semaphore.h>
40#endif
41#ifdef IN_RING0
42# include <iprt/time.h>
43#endif
44#if defined(IN_RING3) || defined(IN_RING0)
45# include <iprt/thread.h>
46#endif
47
48
49/*********************************************************************************************************************************
50* Defined Constants And Macros *
51*********************************************************************************************************************************/
52/** The number of loops to spin for in ring-3. */
53#define PDMCRITSECT_SPIN_COUNT_R3 20
54/** The number of loops to spin for in ring-0. */
55#define PDMCRITSECT_SPIN_COUNT_R0 256
56/** The number of loops to spin for in the raw-mode context. */
57#define PDMCRITSECT_SPIN_COUNT_RC 256
58
59
60/** Skips some of the overly paranoid atomic updates.
61 * Makes some assumptions about cache coherence, though not brave enough to
62 * skip the concluding atomic update. */
63#define PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
64
65/* Undefine the automatic VBOX_STRICT API mappings. */
66#undef PDMCritSectEnter
67#undef PDMCritSectTryEnter
68
69
70/**
71 * Gets the ring-3 native thread handle of the calling thread.
72 *
73 * @returns native thread handle (ring-3).
74 * @param pVM The cross context VM structure.
75 * @param pCritSect The critical section. This is used in R0 and RC.
76 */
77DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PVMCC pVM, PCPDMCRITSECT pCritSect)
78{
79#ifdef IN_RING3
80 RT_NOREF(pVM, pCritSect);
81 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
82#else
83 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
84 NIL_RTNATIVETHREAD);
85 PVMCPUCC pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
86 RTNATIVETHREAD hNativeSelf = pVCpu ? pVCpu->hNativeThread : NIL_RTNATIVETHREAD; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
87#endif
88 return hNativeSelf;
89}
90
91
92#ifdef IN_RING0
93/**
94 * Marks the critical section as corrupted.
95 */
96DECL_NO_INLINE(static, int) pdmCritSectCorrupted(PPDMCRITSECT pCritSect, const char *pszMsg)
97{
98 ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_CORRUPTED);
99 LogRel(("PDMCritSect: %s pCritSect=%p\n", pszMsg, pCritSect));
100 return VERR_PDM_CRITSECT_IPE;
101}
102#endif
103
104
105/**
106 * Tail code called when we've won the battle for the lock.
107 *
108 * @returns VINF_SUCCESS.
109 *
110 * @param pCritSect The critical section.
111 * @param hNativeSelf The native handle of this thread.
112 * @param pSrcPos The source position of the lock operation.
113 */
114DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
115{
116 Assert(hNativeSelf != NIL_RTNATIVETHREAD);
117 AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
118 Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
119
120# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
121 pCritSect->s.Core.cNestings = 1;
122# else
123 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
124# endif
125 Assert(pCritSect->s.Core.cNestings == 1);
126 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
127
128# ifdef PDMCRITSECT_STRICT
129 RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
130# else
131 NOREF(pSrcPos);
132# endif
133 if (pSrcPos)
134 Log12Func(("%p: uId=%p ln=%u fn=%s\n", pCritSect, pSrcPos->uId, pSrcPos->uLine, pSrcPos->pszFunction));
135 else
136 Log12Func(("%p\n", pCritSect));
137
138 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
139 return VINF_SUCCESS;
140}
141
142
143#if defined(IN_RING3) || defined(IN_RING0)
144/**
145 * Deals with the contended case in ring-3 and ring-0.
146 *
147 * @retval VINF_SUCCESS on success.
148 * @retval VERR_SEM_DESTROYED if destroyed.
149 *
150 * @param pVM The cross context VM structure.
151 * @param pVCpu The cross context virtual CPU structure if ring-0 and on
152 * an EMT, otherwise NULL.
153 * @param pCritSect The critsect.
154 * @param hNativeSelf The native thread handle.
155 * @param pSrcPos The source position of the lock operation.
156 * @param rcBusy The status code to return when we're in RC or R0 and the section is busy.
157 */
158static int pdmR3R0CritSectEnterContended(PVMCC pVM, PVMCPU pVCpu, PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf,
159 PCRTLOCKVALSRCPOS pSrcPos, int rcBusy)
160{
161# ifdef IN_RING0
162 /*
163 * If we've got queued critical section leave operations and rcBusy isn't
164 * VINF_SUCCESS, return to ring-3 immediately to avoid deadlocks.
165 */
166 if ( !pVCpu
167 || !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT)
168 || rcBusy == VINF_SUCCESS )
169 { /* likely */ }
170 else
171 {
172 /** @todo statistics. */
173 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
174 return rcBusy;
175 }
176# endif
177
178 /*
179 * Start waiting.
180 */
181 if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
182 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
183# ifdef IN_RING3
184 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
185# else
186 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
187# endif
188
189 /*
190 * The wait loop.
191 *
192 * This handles VERR_TIMEOUT and VERR_INTERRUPTED.
193 */
194 STAM_REL_PROFILE_START(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
195 PSUPDRVSESSION const pSession = pVM->pSession;
196 SUPSEMEVENT const hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
197# ifdef IN_RING3
198# ifdef PDMCRITSECT_STRICT
199 RTTHREAD const hThreadSelf = RTThreadSelfAutoAdopt();
200 int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
201 if (RT_FAILURE(rc2))
202 return rc2;
203# else
204 RTTHREAD const hThreadSelf = RTThreadSelf();
205# endif
206# else /* IN_RING0 */
207 uint64_t const tsStart = RTTimeNanoTS();
208 uint64_t const cNsMaxTotalDef = RT_NS_5MIN;
209 uint64_t cNsMaxTotal = cNsMaxTotalDef;
210 uint64_t const cNsMaxRetry = RT_NS_15SEC;
211 uint32_t cMsMaxOne = RT_MS_5SEC;
212 bool fNonInterruptible = false;
213# endif
214 for (;;)
215 {
216 /*
217 * Do the wait.
218 *
219 * In ring-3 this gets cluttered by lock validation and thread state
220 * maintenance.
221 *
222 * In ring-0 we have to deal with the possibility that the thread has
223 * been signalled and the interruptible wait function returning
224 * immediately. In that case we do normal R0/RC rcBusy handling.
225 *
226 * We always do a timed wait here, so the event handle is revalidated
227 * regularly and we won't end up stuck waiting for a destroyed critsect.
228 */
229 /** @todo Make SUPSemEventClose wake up all waiters. */
230# ifdef IN_RING3
231# ifdef PDMCRITSECT_STRICT
232 int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
233 !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
234 RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
235 if (RT_FAILURE(rc9))
236 return rc9;
237# else
238 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
239# endif
240 int const rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_MS_5SEC);
241 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
242# else /* IN_RING0 */
243 int const rc = !fNonInterruptible
244 ? SUPSemEventWaitNoResume(pSession, hEvent, cMsMaxOne)
245 : SUPSemEventWait(pSession, hEvent, cMsMaxOne);
246 Log11Func(("%p: rc=%Rrc %'RU64 ns (cMsMaxOne=%RU64 hOwner=%p)\n",
247 pCritSect, rc, RTTimeNanoTS() - tsStart, cMsMaxOne, pCritSect->s.Core.NativeThreadOwner));
248# endif /* IN_RING0 */
249
250 /*
251 * Make sure the critical section hasn't been deleted before continuing.
252 */
253 if (RT_LIKELY(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC))
254 { /* likely */ }
255 else
256 {
257 LogRel(("PDMCritSectEnter: Destroyed while waiting; pCritSect=%p rc=%Rrc\n", pCritSect, rc));
258 return VERR_SEM_DESTROYED;
259 }
260
261 /*
262 * Most likely we're here because we got signalled.
263 */
264 if (rc == VINF_SUCCESS)
265 {
266 STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
267 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
268 }
269
270 /*
271 * Timeout and interrupted waits need careful handling in ring-0
272 * because we're cooperating with ring-3 on this critical section
273 * and thus need to make absolutely sure we won't get stuck here.
274 *
275 * The r0 interrupted case means something is pending (termination,
276 * signal, APC, debugger, whatever), so we must try our best to
277 * return to the caller and to ring-3 so it can be dealt with.
278 */
279 if (RT_LIKELY(rc == VERR_TIMEOUT || rc == VERR_INTERRUPTED))
280 {
281# ifdef IN_RING0
282 uint64_t const cNsElapsed = RTTimeNanoTS() - tsStart;
283 int const rcTerm = RTThreadQueryTerminationStatus(NIL_RTTHREAD);
284 AssertMsg(rcTerm == VINF_SUCCESS || rcTerm == VERR_NOT_SUPPORTED || rcTerm == VINF_THREAD_IS_TERMINATING,
285 ("rcTerm=%Rrc\n", rcTerm));
286 if (rcTerm == VERR_NOT_SUPPORTED && cNsMaxTotal == cNsMaxTotalDef)
287 cNsMaxTotal = RT_NS_1MIN;
288
289 if (rc == VERR_TIMEOUT)
290 {
291 /* Try to get out of here with a non-VINF_SUCCESS status if
292 the thread is terminating or if the timeout has been exceeded. */
293 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrTimeout);
294 if ( rcTerm != VINF_THREAD_IS_TERMINATING
295 && cNsElapsed <= cNsMaxTotal)
296 continue;
297 }
298 else
299 {
300 /* For interrupt cases, we must return if we can. If rcBusy is VINF_SUCCESS,
301 we will try a non-interruptible sleep for a while to help resolve the issue
302 w/o guru'ing. */
303 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectVerrInterrupted);
304 if ( rcTerm != VINF_THREAD_IS_TERMINATING
305 && rcBusy == VINF_SUCCESS
306 && pVCpu != NULL
307 && cNsElapsed <= cNsMaxTotal)
308 {
309 if (!fNonInterruptible)
310 {
311 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectNonInterruptibleWaits);
312 fNonInterruptible = true;
313 cMsMaxOne = 32;
314 uint64_t cNsLeft = cNsMaxTotal - cNsElapsed;
315 if (cNsLeft > RT_NS_10SEC)
316 cNsMaxTotal = cNsElapsed + RT_NS_10SEC;
317 }
318 continue;
319 }
320 }
321
322 /*
323 * Let's try to get out of here. We must very carefully undo the
324 * cLockers increment we did using compare-and-exchange so that
325 * we don't race the semaphore signalling in PDMCritSectLeave
326 * and end up with spurious wakeups and two owners at once.
327 */
328 uint32_t cNoIntWaits = 0;
329 uint32_t cCmpXchgs = 0;
330 int32_t cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
331 for (;;)
332 {
333 if (pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC)
334 {
335 if (cLockers > 0 && cCmpXchgs < _64M)
336 {
337 bool fRc = ASMAtomicCmpXchgExS32(&pCritSect->s.Core.cLockers, cLockers - 1, cLockers, &cLockers);
338 if (fRc)
339 {
340 LogFunc(("Aborting wait on %p (rc=%Rrc rcTerm=%Rrc cNsElapsed=%'RU64) -> %Rrc\n", pCritSect,
341 rc, rcTerm, cNsElapsed, rcBusy != VINF_SUCCESS ? rcBusy : rc));
342 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatAbortedCritSectEnters);
343 return rcBusy != VINF_SUCCESS ? rcBusy : rc;
344 }
345 cCmpXchgs++;
346 if ((cCmpXchgs & 0xffff) == 0)
347 Log11Func(("%p: cLockers=%d cCmpXchgs=%u (hOwner=%p)\n",
348 pCritSect, cLockers, cCmpXchgs, pCritSect->s.Core.NativeThreadOwner));
349 ASMNopPause();
350 continue;
351 }
352
353 if (cLockers == 0)
354 {
355 /*
356 * We are racing someone in PDMCritSectLeave.
357 *
358 * For the VERR_TIMEOUT case we'll just retry taking it the normal
359 * way for a while. For VERR_INTERRUPTED we're in for more fun as
360 * the previous owner might not have signalled the semaphore yet,
361 * so we'll do a short non-interruptible wait instead and then guru.
362 */
363 if ( rc == VERR_TIMEOUT
364 && RTTimeNanoTS() - tsStart <= cNsMaxTotal + cNsMaxRetry)
365 break;
366
367 if ( rc == VERR_INTERRUPTED
368 && ( cNoIntWaits == 0
369 || RTTimeNanoTS() - (tsStart + cNsElapsed) < RT_NS_100MS))
370 {
371 int const rc2 = SUPSemEventWait(pSession, hEvent, 1 /*ms*/);
372 if (rc2 == VINF_SUCCESS)
373 {
374 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatCritSectEntersWhileAborting);
375 STAM_REL_PROFILE_STOP(&pCritSect->s.CTX_MID_Z(StatContention,Wait), a);
376 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
377 }
378 cNoIntWaits++;
379 cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
380 continue;
381 }
382 }
383 else
384 LogFunc(("Critical section %p has a broken cLockers count. Aborting.\n", pCritSect));
385
386 /* Sabotage the critical section and return error to caller. */
387 ASMAtomicWriteU32(&pCritSect->s.Core.u32Magic, PDMCRITSECT_MAGIC_FAILED_ABORT);
388 LogRel(("PDMCritSectEnter: Failed to abort wait on pCritSect=%p (rc=%Rrc rcTerm=%Rrc)\n",
389 pCritSect, rc, rcTerm));
390 return VERR_PDM_CRITSECT_ABORT_FAILED;
391 }
392 LogRel(("PDMCritSectEnter: Destroyed while aborting wait; pCritSect=%p/%#x rc=%Rrc rcTerm=%Rrc\n",
393 pCritSect, pCritSect->s.Core.u32Magic, rc, rcTerm));
394 return VERR_SEM_DESTROYED;
395 }
396
397 /* We get here if we timed out. Just retry now that it
398 appears someone left already. */
399 Assert(rc == VERR_TIMEOUT);
400 cMsMaxOne = 10 /*ms*/;
401
402# else /* IN_RING3 */
403 RT_NOREF(pVM, pVCpu, rcBusy);
404# endif /* IN_RING3 */
405 }
406 /*
407 * Any other return code is fatal.
408 */
409 else
410 {
411 AssertMsgFailed(("rc=%Rrc\n", rc));
412 return RT_FAILURE_NP(rc) ? rc : -rc;
413 }
414 }
415 /* won't get here */
416}
417#endif /* IN_RING3 || IN_RING0 */
418
419
420#if defined(VMM_R0_SWITCH_STACK) && defined(IN_RING0)
421/**
422 * We must be on the kernel stack before disabling preemption, thus this wrapper.
423 */
424DECLASM(int) StkBack_pdmR0CritSectEnterContendedOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,
425 RTNATIVETHREAD hNativeSelf, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
426{
427 VMMR0EMTBLOCKCTX Ctx;
428 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
429 if (rc == VINF_SUCCESS)
430 {
431 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
432
433 rc = pdmR3R0CritSectEnterContended(pVM, pVCpu, pCritSect, hNativeSelf, pSrcPos, rcBusy);
434
435 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
436 }
437 else
438 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
439 return rc;
440}
441decltype(StkBack_pdmR0CritSectEnterContendedOnKrnlStk) pdmR0CritSectEnterContendedOnKrnlStk;
442#endif
443
444
445/**
446 * Common worker for the debug and normal APIs.
447 *
448 * @returns VINF_SUCCESS if entered successfully.
449 * @returns rcBusy when encountering a busy critical section in RC/R0.
450 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
451 * during the operation.
452 *
453 * @param pVM The cross context VM structure.
454 * @param pCritSect The PDM critical section to enter.
456 * @param rcBusy The status code to return when we're in RC or R0 and the section is busy.
456 * @param pSrcPos The source position of the lock operation.
457 */
458DECL_FORCE_INLINE(int) pdmCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
459{
460 Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
461 Assert(pCritSect->s.Core.cNestings >= 0);
462#if defined(VBOX_STRICT) && defined(IN_RING0)
463 /* Hope we're not messing with critical sections while in the no-block
464 zone, that would complicate things a lot. */
465 PVMCPUCC pVCpuAssert = VMMGetCpu(pVM);
466 Assert(pVCpuAssert && VMMRZCallRing3IsEnabled(pVCpuAssert));
467#endif
468
469 /*
470 * If the critical section has already been destroyed, then inform the caller.
471 */
472 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
473 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
474 VERR_SEM_DESTROYED);
475
476 /*
477 * See if we're lucky.
478 */
479 /* NOP ... */
480 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
481 { /* We're more likely to end up here with real critsects than a NOP one. */ }
482 else
483 return VINF_SUCCESS;
484
485 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
486 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
487 /* ... not owned ... */
488 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
489 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
490
491 /* ... or nested. */
492 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
493 {
494 Assert(pCritSect->s.Core.cNestings >= 1);
495# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
496 pCritSect->s.Core.cNestings += 1;
497# else
498 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
499# endif
500 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
501 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
502 return VINF_SUCCESS;
503 }
504
505 /*
506 * Spin for a bit without incrementing the counter.
507 */
508 /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
509 * cpu systems. */
510 int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
511 while (cSpinsLeft-- > 0)
512 {
513 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
514 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
515 ASMNopPause();
516 /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
517 cli'ed pendingpreemption check up front using sti w/ instruction fusing
518 for avoiding races. Hmm ... This is assuming the other party is actually
519 executing code on another CPU ... which we could keep track of if we
520 wanted. */
521 }
522
523#ifdef IN_RING3
524 /*
525 * Take the slow path.
526 */
527 NOREF(rcBusy);
528 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);
529
530#elif defined(IN_RING0)
531# if 1 /* new code */
532 /*
533 * In ring-0 context we have to take the special VT-x/AMD-V HM context into
534 * account when waiting on contended locks.
535 *
536 * While we usually (rcBusy can be VINF_SUCCESS) have the option of returning
537 * rcBusy and forcing the caller to go back to ring-3 to restart the work
538 * there, it's almost always more efficient to try to wait for the lock here.
539 * The rcBusy will be used if we encounter a VERR_INTERRUPTED situation
540 * though.
541 */
542 PVMCPUCC pVCpu = VMMGetCpu(pVM);
543 if (pVCpu)
544 {
545# ifndef VMM_R0_SWITCH_STACK
546 VMMR0EMTBLOCKCTX Ctx;
547 int rc = VMMR0EmtPrepareToBlock(pVCpu, rcBusy, __FUNCTION__, pCritSect, &Ctx);
548 if (rc == VINF_SUCCESS)
549 {
550 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
551
552 rc = pdmR3R0CritSectEnterContended(pVM, pVCpu, pCritSect, hNativeSelf, pSrcPos, rcBusy);
553
554 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
555 }
556 else
557 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
558 return rc;
559# else
560 return pdmR0CritSectEnterContendedOnKrnlStk(pVM, pVCpu, pCritSect, hNativeSelf, rcBusy, pSrcPos);
561# endif
562 }
563
564 /* Non-EMT. */
565 Assert(RTThreadPreemptIsEnabled(NIL_RTTHREAD));
566 return pdmR3R0CritSectEnterContended(pVM, NULL, pCritSect, hNativeSelf, pSrcPos, rcBusy);
567
568# else /* old code: */
569 /*
570 * If preemption hasn't been disabled, we can block here in ring-0.
571 */
572 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
573 && ASMIntAreEnabled())
574 return pdmR3R0CritSectEnterContended(pVM, VMMGetCpu(pVM), pCritSect, hNativeSelf, pSrcPos, rcBusy);
575
576 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
577
578 /*
579 * Call ring-3 to acquire the critical section?
580 */
581 if (rcBusy == VINF_SUCCESS)
582 {
583 PVMCPUCC pVCpu = VMMGetCpu(pVM);
584 AssertReturn(pVCpu, VERR_PDM_CRITSECT_IPE);
585 return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
586 }
587
588 /*
589 * Return busy.
590 */
591 LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
592 return rcBusy;
593# endif /* old code */
594#else
595# error "Unsupported context"
596#endif
597}
598
599
600/**
601 * Enters a PDM critical section.
602 *
603 * @returns VINF_SUCCESS if entered successfully.
604 * @returns rcBusy when encountering a busy critical section in RC/R0.
605 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
606 * during the operation.
607 *
608 * @param pVM The cross context VM structure.
609 * @param pCritSect The PDM critical section to enter.
610 * @param rcBusy The status code to return when we're in RC or R0
611 * and the section is busy. Pass VINF_SUCCESS to
612 * acquire the critical section through a ring-3
613 * call if necessary.
614 *
615 * @note Even callers setting @a rcBusy to VINF_SUCCESS must either handle
616 * possible failures in ring-0 or apply
617 * PDM_CRITSECT_RELEASE_ASSERT_RC(),
618 * PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(),
619 * PDM_CRITSECT_RELEASE_ASSERT_RC_DRV() or
620 * PDM_CRITSECT_RELEASE_ASSERT_RC_USB() to the return value of this
621 * function.
622 */
623VMMDECL(DECL_CHECK_RETURN_NOT_R3(int)) PDMCritSectEnter(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy)
624{
625#ifndef PDMCRITSECT_STRICT
626 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
627#else
628 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
629 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
630#endif
631}
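
/*
 * Editorial sketch (not part of PDMAllCritSect.cpp): a typical PDMCritSectEnter /
 * PDMCritSectLeave pairing honouring the rcBusy contract documented above.  In
 * ring-0/RC a contended section may return rcBusy instead of blocking, so the
 * caller must be ready to defer the work to ring-3.  The function name and the
 * VINF_IOM_R3_MMIO_WRITE rcBusy choice are illustrative assumptions only.
 */
static int exampleMmioWriteWorker(PVMCC pVM, PPDMCRITSECT pCritSect)
{
    /* Ask for VINF_IOM_R3_MMIO_WRITE back if the section is contended in R0/RC. */
    int rc = PDMCritSectEnter(pVM, pCritSect, VINF_IOM_R3_MMIO_WRITE);
    if (rc != VINF_SUCCESS)
        return rc;                          /* defer the access to ring-3 (or fail). */

    /* ... access the state guarded by pCritSect ... */

    PDMCritSectLeave(pVM, pCritSect);
    return VINF_SUCCESS;
}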
632
633
634/**
635 * Enters a PDM critical section, with location information for debugging.
636 *
637 * @returns VINF_SUCCESS if entered successfully.
638 * @returns rcBusy when encountering a busy critical section in RC/R0.
639 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
640 * during the operation.
641 *
642 * @param pVM The cross context VM structure.
643 * @param pCritSect The PDM critical section to enter.
644 * @param rcBusy The status code to return when we're in RC or R0
645 * and the section is busy. Pass VINF_SUCCESS to
646 * acquire the critical section through a ring-3
647 * call if necessary.
648 * @param uId Some kind of locking location ID. Typically a
649 * return address up the stack. Optional (0).
650 * @param SRC_POS The source position where the lock is being
651 * acquired from. Optional.
652 */
653VMMDECL(DECL_CHECK_RETURN_NOT_R3(int))
654PDMCritSectEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
655{
656#ifdef PDMCRITSECT_STRICT
657 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
658 return pdmCritSectEnter(pVM, pCritSect, rcBusy, &SrcPos);
659#else
660 NOREF(uId); RT_SRC_POS_NOREF();
661 return pdmCritSectEnter(pVM, pCritSect, rcBusy, NULL);
662#endif
663}
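
/*
 * Editorial sketch (not in the original file): the debug variant feeds the lock
 * validator in PDMCRITSECT_STRICT builds and otherwise behaves like the plain
 * API, as shown above.  Passing 0 for uId and RT_SRC_POS for the position
 * follows the parameter notes; the function name is an assumption.
 */
static void exampleEnterWithSourcePos(PVMCC pVM, PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectEnterDebug(pVM, pCritSect, VERR_IGNORED, 0 /*uId*/, RT_SRC_POS);
    if (rc == VINF_SUCCESS)
    {
        /* ... guarded work ... */
        PDMCritSectLeave(pVM, pCritSect);
    }
}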
664
665
666/**
667 * Common worker for the debug and normal APIs.
668 *
669 * @retval VINF_SUCCESS on success.
670 * @retval VERR_SEM_BUSY if the critsect was owned.
671 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
672 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
673 * during the operation.
674 *
675 * @param pVM The cross context VM structure.
676 * @param pCritSect The critical section.
677 * @param pSrcPos The source position of the lock operation.
678 */
679static int pdmCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
680{
681 /*
682 * If the critical section has already been destroyed, then inform the caller.
683 */
684 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
685 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
686 VERR_SEM_DESTROYED);
687
688 /*
689 * See if we're lucky.
690 */
691 /* NOP ... */
692 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
693 { /* We're more likely to end up here with real critsects than a NOP one. */ }
694 else
695 return VINF_SUCCESS;
696
697 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
698 AssertReturn(hNativeSelf != NIL_RTNATIVETHREAD, VERR_VM_THREAD_NOT_EMT);
699 /* ... not owned ... */
700 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
701 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
702
703 /* ... or nested. */
704 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
705 {
706 Assert(pCritSect->s.Core.cNestings >= 1);
707# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
708 pCritSect->s.Core.cNestings += 1;
709# else
710 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
711# endif
712 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
713 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, pCritSect->s.Core.cNestings, pCritSect->s.Core.cLockers));
714 return VINF_SUCCESS;
715 }
716
717 /* no spinning */
718
719 /*
720 * Return busy.
721 */
722#ifdef IN_RING3
723 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
724#else
725 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLockBusy);
726#endif
727 LogFlow(("PDMCritSectTryEnter: locked\n"));
728 return VERR_SEM_BUSY;
729}
730
731
732/**
733 * Try enter a critical section.
734 *
735 * @retval VINF_SUCCESS on success.
736 * @retval VERR_SEM_BUSY if the critsect was owned.
737 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
738 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
739 * during the operation.
740 *
741 * @param pVM The cross context VM structure.
742 * @param pCritSect The critical section.
743 */
744VMMDECL(DECL_CHECK_RETURN(int)) PDMCritSectTryEnter(PVMCC pVM, PPDMCRITSECT pCritSect)
745{
746#ifndef PDMCRITSECT_STRICT
747 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
748#else
749 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
750 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
751#endif
752}
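
/*
 * Editorial sketch (not in the original file): the non-blocking try-enter
 * pattern.  On VERR_SEM_BUSY the caller defers the work rather than waiting;
 * the function name and the deferral comment are assumptions.
 */
static bool exampleTryUpdate(PVMCC pVM, PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectTryEnter(pVM, pCritSect);
    if (rc == VINF_SUCCESS)
    {
        /* ... a short piece of guarded work ... */
        PDMCritSectLeave(pVM, pCritSect);
        return true;
    }
    Assert(rc == VERR_SEM_BUSY || rc == VERR_SEM_DESTROYED);
    return false; /* someone else owns it; the caller retries or defers. */
}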
753
754
755/**
756 * Try enter a critical section, with location information for debugging.
757 *
758 * @retval VINF_SUCCESS on success.
759 * @retval VERR_SEM_BUSY if the critsect was owned.
760 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
761 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
762 * during the operation.
763 *
764 * @param pVM The cross context VM structure.
765 * @param pCritSect The critical section.
766 * @param uId Some kind of locking location ID. Typically a
767 * return address up the stack. Optional (0).
768 * @param SRC_POS The source position where the lock is being
769 * acquired from. Optional.
770 */
771VMMDECL(DECL_CHECK_RETURN(int))
772PDMCritSectTryEnterDebug(PVMCC pVM, PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
773{
774#ifdef PDMCRITSECT_STRICT
775 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
776 return pdmCritSectTryEnter(pVM, pCritSect, &SrcPos);
777#else
778 NOREF(uId); RT_SRC_POS_NOREF();
779 return pdmCritSectTryEnter(pVM, pCritSect, NULL);
780#endif
781}
782
783
784#ifdef IN_RING3
785/**
786 * Enters a PDM critical section.
787 *
788 * @returns VINF_SUCCESS if entered successfully.
789 * @returns rcBusy when encountering a busy critical section in GC/R0.
790 * @retval VERR_SEM_DESTROYED if the critical section is deleted before or
791 * during the operation.
792 *
793 * @param pVM The cross context VM structure.
794 * @param pCritSect The PDM critical section to enter.
795 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
796 */
797VMMR3DECL(int) PDMR3CritSectEnterEx(PVM pVM, PPDMCRITSECT pCritSect, bool fCallRing3)
798{
799 int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
800 if ( rc == VINF_SUCCESS
801 && fCallRing3
802 && pCritSect->s.Core.pValidatorRec
803 && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
804 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
805 return rc;
806}
807#endif /* IN_RING3 */
808
809
810#if defined(VMM_R0_SWITCH_STACK) && defined(IN_RING0)
811/**
812 * We must be on kernel stack before disabling preemption, thus this wrapper.
813 */
814DECLASM(int) StkBack_pdmR0CritSectLeaveSignallingOnKrnlStk(PVMCC pVM, PVMCPUCC pVCpu, PPDMCRITSECT pCritSect,
815 int32_t const cLockers, SUPSEMEVENT const hEventToSignal)
816{
817 VMMR0EMTBLOCKCTX Ctx;
818 bool fLeaveCtx = false;
819 if (cLockers < 0)
820 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
821 else
822 {
823 /* Someone is waiting, wake up one of them. */
824 Assert(cLockers < _8K);
825 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
826 if (!RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
827 {
828 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
829 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
830 fLeaveCtx = true;
831 }
832 int rc = SUPSemEventSignal(pVM->pSession, hEvent);
833 AssertRC(rc);
834 }
835
836 /*
837 * Signal exit event.
838 */
839 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
840 { /* likely */ }
841 else
842 {
843 if (!fLeaveCtx && pVCpu != NULL && !RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
844 {
845 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
846 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
847 fLeaveCtx = true;
848 }
849 Log8(("Signalling %#p\n", hEventToSignal));
850 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
851 AssertRC(rc);
852 }
853
854 /*
855 * Restore HM context if needed.
856 */
857 if (!fLeaveCtx)
858 { /* contention should be unlikely */ }
859 else
860 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
861
862# ifdef DEBUG_bird
863 VMMTrashVolatileXMMRegs();
864# endif
865 return VINF_SUCCESS;
866}
867decltype(StkBack_pdmR0CritSectLeaveSignallingOnKrnlStk) pdmR0CritSectLeaveSignallingOnKrnlStk;
868#endif
869
870/**
871 * Leaves a critical section entered with PDMCritSectEnter().
872 *
873 * @returns Indication whether we really exited the critical section.
874 * @retval VINF_SUCCESS if we really exited.
875 * @retval VINF_SEM_NESTED if we only reduced the nesting count.
876 * @retval VERR_NOT_OWNER if you somehow ignore release assertions.
877 *
878 * @param pVM The cross context VM structure.
879 * @param pCritSect The PDM critical section to leave.
880 *
881 * @remarks Can be called from no-ring-3-call context in ring-0 (TM/VirtualSync)
882 * where we'll queue leaving operation for ring-3 processing.
883 */
884VMMDECL(int) PDMCritSectLeave(PVMCC pVM, PPDMCRITSECT pCritSect)
885{
886 AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
887 Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
888
889 /*
890 * Check for NOP sections before asserting ownership.
891 */
892 if (!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP))
893 { /* We're more likely to end up here with real critsects than a NOP one. */ }
894 else
895 return VINF_SUCCESS;
896
897 /*
898 * Always check that the caller is the owner (screw performance).
899 */
900 RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pVM, pCritSect);
901 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, pCritSect->s.Core.NativeThreadOwner == hNativeSelf && hNativeSelf != NIL_RTNATIVETHREAD,
902 ("%p %s: %p != %p; cLockers=%d cNestings=%d\n", pCritSect, R3STRING(pCritSect->s.pszName),
903 pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
904 pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings),
905 VERR_NOT_OWNER);
906
907 /*
908 * Nested leave.
909 */
910 int32_t const cNestings = pCritSect->s.Core.cNestings;
911 Assert(cNestings >= 1);
912 if (cNestings > 1)
913 {
914#ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
915 pCritSect->s.Core.cNestings = cNestings - 1;
916#else
917 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, cNestings - 1);
918#endif
919 int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
920 Assert(cLockers >= 0); RT_NOREF(cLockers);
921 Log12Func(("%p: cNestings=%d cLockers=%d\n", pCritSect, cNestings - 1, cLockers));
922 return VINF_SEM_NESTED;
923 }
924
925 Log12Func(("%p: cNestings=%d cLockers=%d hOwner=%p - leave for real\n",
926 pCritSect, cNestings, pCritSect->s.Core.cLockers, pCritSect->s.Core.NativeThreadOwner));
927
928#ifdef IN_RING3
929 /*
930 * Ring-3: Leave for real.
931 */
932 SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
933 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
934
935# if defined(PDMCRITSECT_STRICT)
936 if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
937 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
938# endif
939 Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
940
941# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
942 //pCritSect->s.Core.cNestings = 0; /* not really needed */
943 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
944# else
945 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
946 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
947# endif
948 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
949
950 /* Stop profiling and decrement lockers. */
951 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
952 ASMCompilerBarrier();
953 int32_t const cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
954 if (cLockers < 0)
955 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
956 else
957 {
958 /* Someone is waiting, wake up one of them. */
959 Assert(cLockers < _8K);
960 Log8(("PDMCritSectLeave: Waking up %p (cLockers=%u)\n", pCritSect, cLockers));
961 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
962 int rc = SUPSemEventSignal(pVM->pSession, hEvent);
963 AssertRC(rc);
964 }
965
966 /* Signal exit event. */
967 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
968 { /* likely */ }
969 else
970 {
971 Log8(("PDMCritSectLeave: Signalling %#p (%p)\n", hEventToSignal, pCritSect));
972 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
973 AssertRC(rc);
974 }
975
976 return VINF_SUCCESS;
977
978
979#elif defined(IN_RING0)
980 /*
981 * Ring-0: Try leave for real, depends on host and context.
982 */
983 SUPSEMEVENT const hEventToSignal = pCritSect->s.hEventToSignal;
984 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
985 PVMCPUCC pVCpu = VMMGetCpu(pVM);
986 bool fQueueOnTrouble = false; /* Set this to true to test queueing. */
987 if ( pVCpu == NULL /* non-EMT access, if we implement it must be able to block */
988 || VMMRZCallRing3IsEnabled(pVCpu)
989 || RTSemEventIsSignalSafe()
990 || ( VMMR0ThreadCtxHookIsEnabled(pVCpu) /* Doesn't matter if Signal() blocks if we have hooks, ... */
991 && RTThreadPreemptIsEnabled(NIL_RTTHREAD) /* ... and preemption is still enabled, */
992 && ASMIntAreEnabled()) /* ... and interrupts haven't yet been disabled. Special pre-GC HM env. */
993 || (fQueueOnTrouble = ( hEventToSignal == NIL_SUPSEMEVENT
994 && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers) == 0)) )
995 {
996 pCritSect->s.hEventToSignal = NIL_SUPSEMEVENT;
997
998# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
999 //pCritSect->s.Core.cNestings = 0; /* not really needed */
1000 pCritSect->s.Core.NativeThreadOwner = NIL_RTNATIVETHREAD;
1001# else
1002 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
1003 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
1004# endif
1005 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
1006
1007 /*
1008 * Stop profiling and decrement lockers.
1009 */
1010 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
1011 ASMCompilerBarrier();
1012
1013 bool fQueueIt = false;
1014 int32_t cLockers;
1015 if (!fQueueOnTrouble)
1016 cLockers = ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
1017 else
1018 {
1019 cLockers = -1;
1020 if (!ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
1021 fQueueIt = true;
1022 }
1023 if (!fQueueIt)
1024 {
1025# ifndef VMM_R0_SWITCH_STACK
1026 VMMR0EMTBLOCKCTX Ctx;
1027 bool fLeaveCtx = false;
1028 if (cLockers < 0)
1029 AssertMsg(cLockers == -1, ("cLockers=%d\n", cLockers));
1030 else
1031 {
1032 /* Someone is waiting, wake up one of them. */
1033 Assert(cLockers < _8K);
1034 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
1035 if (!RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
1036 {
1037 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
1038 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1039 fLeaveCtx = true;
1040 }
1041 int rc = SUPSemEventSignal(pVM->pSession, hEvent);
1042 AssertRC(rc);
1043 }
1044
1045 /*
1046 * Signal exit event.
1047 */
1048 if (RT_LIKELY(hEventToSignal == NIL_SUPSEMEVENT))
1049 { /* likely */ }
1050 else
1051 {
1052 if (!fLeaveCtx && pVCpu != NULL && !RTSemEventIsSignalSafe() && (pVCpu = VMMGetCpu(pVM)) != NULL)
1053 {
1054 int rc = VMMR0EmtPrepareToBlock(pVCpu, VINF_SUCCESS, __FUNCTION__, pCritSect, &Ctx);
1055 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, RT_SUCCESS(rc), ("rc=%Rrc\n", rc), rc);
1056 fLeaveCtx = true;
1057 }
1058 Log8(("Signalling %#p\n", hEventToSignal));
1059 int rc = SUPSemEventSignal(pVM->pSession, hEventToSignal);
1060 AssertRC(rc);
1061 }
1062
1063 /*
1064 * Restore HM context if needed.
1065 */
1066 if (!fLeaveCtx)
1067 { /* contention should be unlikely */ }
1068 else
1069 VMMR0EmtResumeAfterBlocking(pVCpu, &Ctx);
1070
1071# ifdef DEBUG_bird
1072 VMMTrashVolatileXMMRegs();
1073# endif
1074 return VINF_SUCCESS;
1075# else /* VMM_R0_SWITCH_STACK */
1076 return pdmR0CritSectLeaveSignallingOnKrnlStk(pVM, pVCpu, pCritSect, cLockers, hEventToSignal);
1077# endif /* VMM_R0_SWITCH_STACK */
1078 }
1079
1080 /*
1081 * Darn, someone raced in on us. Restore the state (this works only
1082 * because the semaphore is effectively controlling ownership).
1083 */
1084 bool fRc;
1085 RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
1086 ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
1087 AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
1088 pdmCritSectCorrupted(pCritSect, "owner race"));
1089 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
1090# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1091 //pCritSect->s.Core.cNestings = 1;
1092 Assert(pCritSect->s.Core.cNestings == 1);
1093# else
1094 //Assert(pCritSect->s.Core.cNestings == 0);
1095 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
1096# endif
1097 Assert(hEventToSignal == NIL_SUPSEMEVENT);
1098 }
1099
1100
1101#else /* IN_RC */
1102 /*
1103 * Raw-mode: Try leave it.
1104 */
1105# error "This context is not use..."
1106 if (pCritSect->s.Core.cLockers == 0)
1107 {
1108# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1109 //pCritSect->s.Core.cNestings = 0; /* not really needed */
1110# else
1111 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
1112# endif
1113 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
1114 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
1115
1116 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
1117 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
1118 return VINF_SUCCESS;
1119
1120 /*
1121 * Darn, someone raced in on us. Restore the state (this works only
1122 * because the semaphore is effectively controlling ownership).
1123 */
1124 bool fRc;
1125 RTNATIVETHREAD hMessOwner = NIL_RTNATIVETHREAD;
1126 ASMAtomicCmpXchgExHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf, NIL_RTNATIVETHREAD, fRc, &hMessOwner);
1127 AssertLogRelMsgReturn(fRc, ("pCritSect=%p hMessOwner=%p\n", pCritSect, hMessOwner),
1128 pdmCritSectCorrupted(pCritSect, "owner race"));
1129 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
1130# ifdef PDMCRITSECT_WITH_LESS_ATOMIC_STUFF
1131 //pCritSect->s.Core.cNestings = 1;
1132 Assert(pCritSect->s.Core.cNestings == 1);
1133# else
1134 //Assert(pCritSect->s.Core.cNestings == 0);
1135 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
1136# endif
1137 }
1138#endif /* IN_RC */
1139
1140
1141#ifndef IN_RING3
1142 /*
1143 * Ring-0/raw-mode: Unable to leave. Queue the leave for ring-3.
1144 */
1145 ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
1146# ifndef IN_RING0
1147 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1148# endif
1149 uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
1150 LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
1151 VMM_ASSERT_RELEASE_MSG_RETURN(pVM, i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectLeaves), ("%d\n", i), VERR_PDM_CRITSECT_IPE);
1152 pVCpu->pdm.s.apQueuedCritSectLeaves[i] = pCritSect->s.pSelfR3;
1153 VMM_ASSERT_RELEASE_MSG_RETURN(pVM,
1154 RT_VALID_PTR(pVCpu->pdm.s.apQueuedCritSectLeaves[i])
1155 && ((uintptr_t)pVCpu->pdm.s.apQueuedCritSectLeaves[i] & PAGE_OFFSET_MASK)
1156 == ((uintptr_t)pCritSect & PAGE_OFFSET_MASK),
1157 ("%p vs %p\n", pVCpu->pdm.s.apQueuedCritSectLeaves[i], pCritSect),
1158 pdmCritSectCorrupted(pCritSect, "Invalid pSelfR3 value"));
1159 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT); /** @todo handle VMCPU_FF_PDM_CRITSECT in ring-0 outside the no-call-ring-3 part. */
1160 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* unnecessary paranoia */
1161 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
1162 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
1163
1164 return VINF_SUCCESS;
1165#endif /* IN_RING3 */
1166}
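
/*
 * Editorial sketch (not in the original file, ring-3 caller assumed): the
 * nesting behaviour documented above -- the inner leave only drops the nesting
 * count (VINF_SEM_NESTED), while the outer leave really exits (VINF_SUCCESS).
 */
static void exampleNestedEnterLeave(PVMCC pVM, PPDMCRITSECT pCritSect)
{
    int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);   AssertRC(rc);
    rc     = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);   AssertRC(rc);  /* recursion */

    rc = PDMCritSectLeave(pVM, pCritSect);  Assert(rc == VINF_SEM_NESTED);    /* still owner */
    rc = PDMCritSectLeave(pVM, pCritSect);  Assert(rc == VINF_SUCCESS);       /* really left */
}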
1167
1168
1169#if defined(IN_RING0) || defined(IN_RING3)
1170/**
1171 * Schedules an event semaphore for signalling upon critsect exit.
1172 *
1173 * @returns VINF_SUCCESS on success.
1174 * @returns VERR_TOO_MANY_SEMAPHORES if an event was already scheduled.
1175 * @returns VERR_NOT_OWNER if we're not the critsect owner (ring-3 only).
1176 * @returns VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
1177 *
1178 * @param pCritSect The critical section.
1179 * @param hEventToSignal The support driver event semaphore that should be
1180 * signalled.
1181 */
1182VMMDECL(int) PDMHCCritSectScheduleExitEvent(PPDMCRITSECT pCritSect, SUPSEMEVENT hEventToSignal)
1183{
1184 AssertPtr(pCritSect);
1185 Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));
1186 Assert(hEventToSignal != NIL_SUPSEMEVENT);
1187# ifdef IN_RING3
1188 if (RT_UNLIKELY(!RTCritSectIsOwner(&pCritSect->s.Core)))
1189 return VERR_NOT_OWNER;
1190# endif
1191 if (RT_LIKELY( pCritSect->s.hEventToSignal == NIL_RTSEMEVENT
1192 || pCritSect->s.hEventToSignal == hEventToSignal))
1193 {
1194 pCritSect->s.hEventToSignal = hEventToSignal;
1195 return VINF_SUCCESS;
1196 }
1197 return VERR_TOO_MANY_SEMAPHORES;
1198}
1199#endif /* IN_RING0 || IN_RING3 */
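
/*
 * Editorial sketch (not in the original file, ring-3/ring-0 only): scheduling a
 * support driver event to be signalled as part of the final leave, as handled
 * by the hEventToSignal code in PDMCritSectLeave above.  The event handle is
 * assumed to have been created elsewhere (e.g. with SUPSemEventCreate).
 */
static void exampleSignalOnExit(PVMCC pVM, PPDMCRITSECT pCritSect, SUPSEMEVENT hEvent)
{
    int rc = PDMCritSectEnter(pVM, pCritSect, VERR_IGNORED);
    if (rc == VINF_SUCCESS)
    {
        /* ... update state a waiter on hEvent is interested in ... */
        rc = PDMHCCritSectScheduleExitEvent(pCritSect, hEvent);
        AssertRC(rc);                       /* VERR_TOO_MANY_SEMAPHORES if another event is pending. */
        PDMCritSectLeave(pVM, pCritSect);   /* hEvent gets signalled on the way out. */
    }
}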
1200
1201
1202/**
1203 * Checks whether the caller is the owner of the critical section.
1204 *
1205 * @returns true if owner.
1206 * @returns false if not owner.
1207 * @param pVM The cross context VM structure.
1208 * @param pCritSect The critical section.
1209 */
1210VMMDECL(bool) PDMCritSectIsOwner(PVMCC pVM, PCPDMCRITSECT pCritSect)
1211{
1212#ifdef IN_RING3
1213 RT_NOREF(pVM);
1214 return RTCritSectIsOwner(&pCritSect->s.Core);
1215#else
1216 PVMCPUCC pVCpu = VMMGetCpu(pVM);
1217 if ( !pVCpu
1218 || pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
1219 return false;
1220 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
1221 || pCritSect->s.Core.cNestings > 1;
1222#endif
1223}
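
/*
 * Editorial sketch (not in the original file): the usual ownership assertion in
 * a helper that requires its caller to already hold the section.  The helper
 * name is an assumption.
 */
static void exampleGuardedHelper(PVMCC pVM, PPDMCRITSECT pCritSect)
{
    Assert(PDMCritSectIsOwner(pVM, pCritSect));
    /* ... safe to touch the data guarded by pCritSect ... */
    RT_NOREF(pVM, pCritSect); /* keep non-strict builds quiet. */
}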
1224
1225
1226/**
1227 * Checks whether the specified VCPU is the owner of the critical section.
1228 *
1229 * @returns true if owner.
1230 * @returns false if not owner.
1231 * @param pVCpu The cross context virtual CPU structure.
1232 * @param pCritSect The critical section.
1233 */
1234VMMDECL(bool) PDMCritSectIsOwnerEx(PVMCPUCC pVCpu, PCPDMCRITSECT pCritSect)
1235{
1236#ifdef IN_RING3
1237 NOREF(pVCpu);
1238 return RTCritSectIsOwner(&pCritSect->s.Core);
1239#else
1240 Assert(VMCC_GET_CPU(pVCpu->CTX_SUFF(pVM), pVCpu->idCpu) == pVCpu);
1241 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
1242 return false;
1243 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0
1244 || pCritSect->s.Core.cNestings > 1;
1245#endif
1246}
1247
1248
1249/**
1250 * Checks if anyone is waiting on the critical section we own.
1251 *
1252 * @returns true if someone is waiting.
1253 * @returns false if no one is waiting.
1254 * @param pVM The cross context VM structure.
1255 * @param pCritSect The critical section.
1256 */
1257VMMDECL(bool) PDMCritSectHasWaiters(PVMCC pVM, PCPDMCRITSECT pCritSect)
1258{
1259 AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
1260 Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pVM, pCritSect)); RT_NOREF(pVM);
1261 return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
1262}
1263
1264
1265/**
1266 * Checks if a critical section is initialized or not.
1267 *
1268 * @returns true if initialized.
1269 * @returns false if not initialized.
1270 * @param pCritSect The critical section.
1271 */
1272VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
1273{
1274 return RTCritSectIsInitialized(&pCritSect->s.Core);
1275}
1276
1277
1278/**
1279 * Gets the recursion depth.
1280 *
1281 * @returns The recursion depth.
1282 * @param pCritSect The critical section.
1283 */
1284VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
1285{
1286 return RTCritSectGetRecursion(&pCritSect->s.Core);
1287}
1288