VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp@ 31392

Last change on this file since 31392 was 31392, checked in by vboxsync, 15 years ago

PDMCritSectEnter: Wait for critical sections in ring-0 when preemption and interrupts are enabled. Sketches for how we can wait from VT-x/AMD-V context.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 24.7 KB
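
The change described above affects the contended path of PDMCritSectEnter in ring-0: when both preemption and interrupts are enabled, the caller now blocks on the section's event semaphore instead of immediately bailing out with rcBusy. The sketch below shows the usage pattern this API is built around, as seen from device code; the MYDEVICE structure, the handler shape and the choice of VINF_IOM_HC_MMIO_WRITE as the busy status are illustrative assumptions, not taken from this file, while the PDM calls are the ones declared in VBox/pdmcritsect.h and defined below.

/* Illustrative usage sketch only -- not part of PDMAllCritSect.cpp.
 * MYDEVICE, pThis->CritSect and the MMIO handler shape are assumptions made for
 * the example. */
typedef struct MYDEVICE
{
    PDMCRITSECT CritSect;               /* protects the device state below */
    uint32_t    uLastValue;
} MYDEVICE;
typedef MYDEVICE *PMYDEVICE;

static int myDeviceMmioWrite(PMYDEVICE pThis, uint32_t uValue)
{
    /* Ring-3 blocks until the section is free.  In ring-0/raw-mode a busy section
       returns the rcBusy status (here assumed to be VINF_IOM_HC_MMIO_WRITE) so the
       access can be retried in ring-3 -- unless the ring-0 path can block, which is
       what this revision enables when preemption and interrupts are enabled. */
    int rc = PDMCritSectEnter(&pThis->CritSect, VINF_IOM_HC_MMIO_WRITE);
    if (rc != VINF_SUCCESS)
        return rc;                      /* rcBusy or VERR_SEM_DESTROYED */

    pThis->uLastValue = uValue;         /* device state is only touched while owning the lock */

    PDMCritSectLeave(&pThis->CritSect);
    return VINF_SUCCESS;
}

The file contents at revision 31392 follow.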
1/* $Id: PDMAllCritSect.cpp 31392 2010-08-05 11:41:45Z vboxsync $ */
2/** @file
3 * PDM - Critical Sections, All Contexts.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
23#include "../PDMInternal.h"
24#include <VBox/pdmcritsect.h>
25#include <VBox/mm.h>
26#include <VBox/vmm.h>
27#include <VBox/vm.h>
28#include <VBox/err.h>
29#include <VBox/hwaccm.h>
30
31#include <VBox/log.h>
32#include <iprt/asm.h>
33#include <iprt/asm-amd64-x86.h>
34#include <iprt/assert.h>
35#ifdef IN_RING3
36# include <iprt/lockvalidator.h>
37# include <iprt/semaphore.h>
38#endif
39
40
41/*******************************************************************************
42* Defined Constants And Macros *
43*******************************************************************************/
44/** The number of loops to spin for in ring-3. */
45#define PDMCRITSECT_SPIN_COUNT_R3 20
46/** The number of loops to spin for in ring-0. */
47#define PDMCRITSECT_SPIN_COUNT_R0 256
48/** The number of loops to spin for in the raw-mode context. */
49#define PDMCRITSECT_SPIN_COUNT_RC 256
50
51
52/* Undefine the automatic VBOX_STRICT API mappings. */
53#undef PDMCritSectEnter
54#undef PDMCritSectTryEnter
55
56
57/**
58 * Gets the ring-3 native thread handle of the calling thread.
59 *
60 * @returns native thread handle (ring-3).
61 * @param pCritSect The critical section. This is used in R0 and RC.
62 */
63DECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
64{
65#ifdef IN_RING3
66 NOREF(pCritSect);
67 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
68#else
69 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
70 NIL_RTNATIVETHREAD);
71 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
72 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
73 RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
74#endif
75 return hNativeSelf;
76}
77
78
79/**
80 * Tail code called when we've won the battle for the lock.
81 *
82 * @returns VINF_SUCCESS.
83 *
84 * @param pCritSect The critical section.
85 * @param hNativeSelf The native handle of this thread.
86 */
87DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
88{
89 AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
90 Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));
91
92 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
93 Assert(pCritSect->s.Core.cNestings == 1);
94 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);
95
96# ifdef PDMCRITSECT_STRICT
97 RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
98# endif
99
100 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
101 return VINF_SUCCESS;
102}
103
104
105#if defined(IN_RING3) || defined(IN_RING0)
106/**
107 * Deals with the contended case in ring-3 and ring-0.
108 *
109 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
110 * @param pCritSect The critsect.
111 * @param hNativeSelf The native thread handle.
112 */
113static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
114{
115 /*
116 * Start waiting.
117 */
118 if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
119 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
120# ifdef IN_RING3
121 STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);
122# else
123 STAM_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
124# endif
125
126 /*
127 * The wait loop.
128 */
129 PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
130 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
131# ifdef IN_RING3
132# ifdef PDMCRITSECT_STRICT
133 RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
134 int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
135 if (RT_FAILURE(rc2))
136 return rc2;
137# else
138 RTTHREAD hThreadSelf = RTThreadSelf();
139# endif
140# endif
141 for (;;)
142 {
143# ifdef PDMCRITSECT_STRICT
144 int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
145 !(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NO_NESTING),
146 RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
147 if (RT_FAILURE(rc9))
148 return rc9;
149# elif defined(IN_RING3)
150 RTThreadBlocking(hThreadSelf, RTTHREADSTATE_CRITSECT, true);
151# endif
152 int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
153# ifdef IN_RING3
154 RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_CRITSECT);
155# endif
156
157 if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
158 return VERR_SEM_DESTROYED;
159 if (rc == VINF_SUCCESS)
160 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
161 AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
162 }
163 /* won't get here */
164}
165#endif /* IN_RING3 || IN_RING0 */
166
167
168/**
169 * Common worker for the debug and normal APIs.
170 *
171 * @returns VINF_SUCCESS if entered successfully.
172 * @returns rcBusy when encountering a busy critical section in GC/R0.
173 * @returns VERR_SEM_DESTROYED if the critical section is dead.
174 *
175 * @param pCritSect The PDM critical section to enter.
176 * @param rcBusy The status code to return when we're in GC or R0
177 * and the section is busy.
178 */
179DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
180{
181 Assert(pCritSect->s.Core.cNestings < 8); /* useful to catch incorrect locking */
182 Assert(pCritSect->s.Core.cNestings >= 0);
183
184 /*
185 * If the critical section has already been destroyed, then inform the caller.
186 */
187 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
188 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
189 VERR_SEM_DESTROYED);
190
191 /*
192 * See if we're lucky.
193 */
194 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
195 /* Not owned ... */
196 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
197 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
198
199 /* ... or nested. */
200 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
201 {
202 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
203 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
204 Assert(pCritSect->s.Core.cNestings > 1);
205 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
206 return VINF_SUCCESS;
207 }
208
209 /*
210 * Spin for a bit without incrementing the counter.
211 */
212 /** @todo Move this to cfgm variables since it doesn't make sense to spin on
213 * uniprocessor systems. */
214 int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
215 while (cSpinsLeft-- > 0)
216 {
217 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
218 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
219 ASMNopPause();
220 /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
221 cli'ed pendingpreemption check up front using sti w/ instruction fusing
222 for avoiding races. Hmm ... This is assuming the other party is actually
223 executing code on another CPU ... which we could keep track of if we
224 wanted. */
225 }
226
227#ifdef IN_RING3
228 /*
229 * Take the slow path.
230 */
231 return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
232
233#elif defined(IN_RING0)
234 /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
235 * and would be better off switching out of that while waiting for
236 * the lock. Several of the locks jump back to ring-3 just to
237 * get the lock, the ring-3 code will then call the kernel to do
238 * the lock wait and when the call returns it will call ring-0
239 * again and resume in setjmp style. Not very efficient. */
240# if 0
241 if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
242 * callers not prepared for longjmp/blocking to
243 * use PDMCritSectTryEnter. */
244 {
245 /*
246 * Leave HWACCM context while waiting if necessary.
247 */
248 int rc;
249 if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
250 {
251 STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
252 rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
253 }
254 else
255 {
256 STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
257 PVM pVM = pCritSect->s.CTX_SUFF(pVM);
258 PVMCPU pVCpu = VMMGetCpu(pVM);
259 HWACCMR0Leave(pVM, pVCpu);
260 RTThreadPreemptRestore(NIL_RTTHREAD, ????);
261
262 rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
263
264 RTThreadPreemptDisable(NIL_RTTHREAD, ????);
265 HWACCMR0Enter(pVM, pVCpu);
266 }
267 return rc;
268 }
269# else
270 /*
271 * If preemption hasn't been disabled, we can block here in ring-0.
272 */
273 if ( RTThreadPreemptIsEnabled(NIL_RTTHREAD)
274 && ASMIntAreEnabled())
275 return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
276# endif
277
278 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
279 LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
280 return rcBusy;
281
282#else /* IN_RC */
283 /*
284 * Return busy.
285 */
286 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
287 LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
288 return rcBusy;
289#endif /* IN_RC */
290}
291
292
293/**
294 * Enters a PDM critical section.
295 *
296 * @returns VINF_SUCCESS if entered successfully.
297 * @returns rcBusy when encountering a busy critical section in GC/R0.
298 * @returns VERR_SEM_DESTROYED if the critical section is dead.
299 *
300 * @param pCritSect The PDM critical section to enter.
301 * @param rcBusy The status code to return when we're in GC or R0
302 * and the section is busy.
303 */
304VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
305{
306#ifndef PDMCRITSECT_STRICT
307 return pdmCritSectEnter(pCritSect, rcBusy, NULL);
308#else
309 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
310 return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
311#endif
312}
313
314
315/**
316 * Enters a PDM critical section, with location information for debugging.
317 *
318 * @returns VINF_SUCCESS if entered successfully.
319 * @returns rcBusy when encountering a busy critical section in GC/R0.
320 * @returns VERR_SEM_DESTROYED if the critical section is dead.
321 *
322 * @param pCritSect The PDM critical section to enter.
323 * @param rcBusy The status code to return when we're in GC or R0
324 * and the section is busy.
325 * @param uId Some kind of locking location ID. Typically a
326 * return address up the stack. Optional (0).
327 * @param pszFile The file where the lock is being acquired from.
328 * Optional.
329 * @param iLine The line number in that file. Optional (0).
330 * @param pszFunction The function where the lock is being acquired
331 * from. Optional.
332 */
333VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
334{
335#ifdef PDMCRITSECT_STRICT
336 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
337 return pdmCritSectEnter(pCritSect, rcBusy, &SrcPos);
338#else
339 return pdmCritSectEnter(pCritSect, rcBusy, NULL);
340#endif
341}
342
343
344/**
345 * Common worker for the debug and normal APIs.
346 *
347 * @retval VINF_SUCCESS on success.
348 * @retval VERR_SEM_BUSY if the critsect was owned.
349 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
350 * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
351 *
352 * @param pCritSect The critical section.
353 */
354static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PCRTLOCKVALSRCPOS pSrcPos)
355{
356 /*
357 * If the critical section has already been destroyed, then inform the caller.
358 */
359 AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
360 ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
361 VERR_SEM_DESTROYED);
362
363 /*
364 * See if we're lucky.
365 */
366 RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
367 /* Not owned ... */
368 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
369 return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
370
371 /* ... or nested. */
372 if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
373 {
374 ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
375 ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
376 Assert(pCritSect->s.Core.cNestings > 1);
377 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
378 return VINF_SUCCESS;
379 }
380
381 /* no spinning */
382
383 /*
384 * Return busy.
385 */
386#ifdef IN_RING3
387 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
388#else
389 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
390#endif
391 LogFlow(("PDMCritSectTryEnter: locked\n"));
392 return VERR_SEM_BUSY;
393}
394
395
396/**
397 * Try enter a critical section.
398 *
399 * @retval VINF_SUCCESS on success.
400 * @retval VERR_SEM_BUSY if the critsect was owned.
401 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
402 * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
403 *
404 * @param pCritSect The critical section.
405 */
406VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
407{
408#ifndef PDMCRITSECT_STRICT
409 return pdmCritSectTryEnter(pCritSect, NULL);
410#else
411 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_NORMAL_API();
412 return pdmCritSectTryEnter(pCritSect, &SrcPos);
413#endif
414}
415
416
417/**
418 * Try enter a critical section, with location information for debugging.
419 *
420 * @retval VINF_SUCCESS on success.
421 * @retval VERR_SEM_BUSY if the critsect was owned.
422 * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
423 * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
424 *
425 * @param pCritSect The critical section.
426 * @param uId Some kind of locking location ID. Typically a
427 * return address up the stack. Optional (0).
428 * @param pszFile The file where the lock is being acquired from.
429 * Optional.
430 * @param iLine The line number in that file. Optional (0).
431 * @param pszFunction The function where the lock is being acquired
432 * from. Optional.
433 */
434VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
435{
436#ifdef PDMCRITSECT_STRICT
437 RTLOCKVALSRCPOS SrcPos = RTLOCKVALSRCPOS_INIT_DEBUG_API();
438 return pdmCritSectTryEnter(pCritSect, &SrcPos);
439#else
440 return pdmCritSectTryEnter(pCritSect, NULL);
441#endif
442}
443
444
445#ifdef IN_RING3
446/**
447 * Enters a PDM critical section.
448 *
449 * @returns VINF_SUCCESS if entered successfully.
450 * @returns rcBusy when encountering a busy critical section in GC/R0.
451 * @returns VERR_SEM_DESTROYED if the critical section is dead.
452 *
453 * @param pCritSect The PDM critical section to enter.
454 * @param fCallRing3 Whether this is a VMMRZCallRing3() request.
455 */
456VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
457{
458 int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
459 if ( rc == VINF_SUCCESS
460 && fCallRing3
461 && pCritSect->s.Core.pValidatorRec
462 && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
463 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
464 return rc;
465}
466#endif /* IN_RING3 */
467
468
469/**
470 * Leaves a critical section entered with PDMCritSectEnter().
471 *
472 * @param pCritSect The PDM critical section to leave.
473 */
474VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
475{
476 AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
477 Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
478 Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
479 Assert(pCritSect->s.Core.cNestings >= 1);
480
481 /*
482 * Nested leave.
483 */
484 if (pCritSect->s.Core.cNestings > 1)
485 {
486 ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
487 Assert(pCritSect->s.Core.cNestings >= 1);
488 ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
489 return;
490 }
491
492#ifdef IN_RING0
493# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
494 if (1) /* SUPSemEventSignal is safe */
495# else
496 if (ASMIntAreEnabled())
497# endif
498#endif
499#if defined(IN_RING3) || defined(IN_RING0)
500 {
501 /*
502 * Leave for real.
503 */
504 /* update members. */
505# ifdef IN_RING3
506 RTSEMEVENT hEventToSignal = pCritSect->s.EventToSignal;
507 pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
508# if defined(PDMCRITSECT_STRICT)
509 if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
510 RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
511# endif
512 Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
513# endif
514 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
515 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
516 ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
517 Assert(pCritSect->s.Core.cNestings == 0);
518
519 /* stop and decrement lockers. */
520 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
521 ASMCompilerBarrier();
522 if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
523 {
524 /* Someone is waiting, wake up one of them. */
525 SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
526 PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
527 int rc = SUPSemEventSignal(pSession, hEvent);
528 AssertRC(rc);
529 }
530
531# ifdef IN_RING3
532 /* Signal exit event. */
533 if (hEventToSignal != NIL_RTSEMEVENT)
534 {
535 LogBird(("Signalling %#x\n", hEventToSignal));
536 int rc = RTSemEventSignal(hEventToSignal);
537 AssertRC(rc);
538 }
539# endif
540
541# if defined(DEBUG_bird) && defined(IN_RING0)
542 VMMTrashVolatileXMMRegs();
543# endif
544 }
545#endif /* IN_RING3 || IN_RING0 */
546#ifdef IN_RING0
547 else
548#endif
549#if defined(IN_RING0) || defined(IN_RC)
550 {
551 /*
552 * Try leave it.
553 */
554 if (pCritSect->s.Core.cLockers == 0)
555 {
556 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
557 RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
558 ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
559 STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
560
561 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
562 if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
563 return;
564
565 /* darn, someone raced in on us. */
566 ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
567 STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
568 Assert(pCritSect->s.Core.cNestings == 0);
569 ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
570 }
571 ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);
572
573 /*
574 * Queue the request.
575 */
576 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
577 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
578 uint32_t i = pVCpu->pdm.s.cQueuedCritSectLeaves++;
579 LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
580 AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
581 pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
582 VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
583 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
584 STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
585 STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
586 }
587#endif /* IN_RING0 || IN_RC */
588}
589
590
591#if defined(IN_RING3) || defined(IN_RING0)
592/**
593 * Process the critical sections queued for ring-3 'leave'.
594 *
595 * @param pVCpu The VMCPU handle.
596 */
597VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
598{
599 Assert(pVCpu->pdm.s.cQueuedCritSectLeaves > 0);
600
601 const RTUINT c = pVCpu->pdm.s.cQueuedCritSectLeaves;
602 for (RTUINT i = 0; i < c; i++)
603 {
604# ifdef IN_RING3
605 PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[i];
606# else
607 PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
608# endif
609
610 PDMCritSectLeave(pCritSect);
611 LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
612 }
613
614 pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
615 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
616}
617#endif /* IN_RING3 || IN_RING0 */
618
619
620/**
621 * Checks the caller is the owner of the critical section.
622 *
623 * @returns true if owner.
624 * @returns false if not owner.
625 * @param pCritSect The critical section.
626 */
627VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
628{
629#ifdef IN_RING3
630 return RTCritSectIsOwner(&pCritSect->s.Core);
631#else
632 PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
633 PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
634 if (pCritSect->s.Core.NativeThreadOwner != pVCpu->hNativeThread)
635 return false;
636 return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
637#endif
638}
639
640
641/**
642 * Checks the specified VCPU is the owner of the critical section.
643 *
644 * @returns true if owner.
645 * @returns false if not owner.
646 * @param pCritSect The critical section.
647 * @param idCpu VCPU id
648 */
649VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
650{
651#ifdef IN_RING3
652 NOREF(idCpu);
653 return RTCritSectIsOwner(&pCritSect->s.Core);
654#else
655 PVM pVM = pCritSect->s.CTX_SUFF(pVM);
656 AssertPtr(pVM);
657 Assert(idCpu < pVM->cCpus);
658 return pCritSect->s.Core.NativeThreadOwner == pVM->aCpus[idCpu].hNativeThread
659 && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
660#endif
661}
662
663
664/**
665 * Checks if somebody currently owns the critical section.
666 *
667 * @returns true if locked.
668 * @returns false if not locked.
669 *
670 * @param pCritSect The critical section.
671 *
672 * @remarks This doesn't prove that no deadlocks will occur later on; it's
673 * just a debugging tool.
674 */
675VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
676{
677 return pCritSect->s.Core.NativeThreadOwner != NIL_RTNATIVETHREAD
678 && (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
679}
680
681
682/**
683 * Checks if anyone is waiting on the critical section we own.
684 *
685 * @returns true if someone is waiting.
686 * @returns false if no one is waiting.
687 * @param pCritSect The critical section.
688 */
689VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
690{
691 AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
692 Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
693 return pCritSect->s.Core.cLockers >= pCritSect->s.Core.cNestings;
694}
695
696
697/**
698 * Checks if a critical section is initialized or not.
699 *
700 * @returns true if initialized.
701 * @returns false if not initialized.
702 * @param pCritSect The critical section.
703 */
704VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
705{
706 return RTCritSectIsInitialized(&pCritSect->s.Core);
707}
708
709
710/**
711 * Gets the recursion depth.
712 *
713 * @returns The recursion depth.
714 * @param pCritSect The critical section.
715 */
716VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
717{
718 return RTCritSectGetRecursion(&pCritSect->s.Core);
719}
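
For reference, the lock state that the fast paths above manipulate is encoded entirely in cLockers: -1 means the section is free, 0 means owned with nobody waiting, and a positive value counts the enter calls queued on the event semaphore. The following is a minimal, self-contained model of that hand-off protocol, a sketch for illustration only, not the PDM implementation; C11 atomics stand in for the IPRT ASMAtomic* helpers, and lock validation, nesting, statistics and the actual semaphore wait are omitted.

/* Minimal model of the cLockers protocol, for illustration only.
 *   cLockers == -1  => section is free
 *   cLockers ==  0  => owned, nobody waiting
 *   cLockers  >  0  => owned, that many enter calls queued on the event semaphore */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct MODELCRITSECT
{
    atomic_int cLockers;                /* initialise to -1 */
} MODELCRITSECT;

/* Fast path: claim the section if and only if it is free (cLockers -1 -> 0),
 * mirroring ASMAtomicCmpXchgS32(&cLockers, 0, -1). */
static bool modelTryEnter(MODELCRITSECT *pModel)
{
    int iFree = -1;
    return atomic_compare_exchange_strong(&pModel->cLockers, &iFree, 0);
}

/* Contended path: register as a waiter; if the increment lands on 0 the previous
 * owner left in the meantime and the caller owns the section without sleeping. */
static bool modelEnterContended(MODELCRITSECT *pModel)
{
    if (atomic_fetch_add(&pModel->cLockers, 1) + 1 == 0)
        return true;                    /* we are the owner now */
    return false;                       /* caller must block on the event semaphore */
}

/* Leave: if the value after the decrement is still >= 0, at least one waiter is
 * registered and the event semaphore must be signalled to wake one of them. */
static bool modelLeave(MODELCRITSECT *pModel)
{
    return atomic_fetch_sub(&pModel->cLockers, 1) - 1 >= 0;
}

This encoding is also why the ring-0/raw-mode PDMCritSectLeave above can only take the lock-free exit when cLockers is 0; with waiters present it must either signal the semaphore or, when that is not safe, queue the leave for ring-3 via VMCPU_FF_PDM_CRITSECT and VMCPU_FF_TO_R3.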