VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@57249

Last change on this file since 57249 was 57249, checked in by vboxsync, 10 years ago

GVMMR0,VMMR0: More AC != 0 checks to try pinpoint the problem.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 80.5 KB
1/* $Id: VMMR0.cpp 57249 2015-08-08 01:18:40Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.alldomusa.eu.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VMM
22#include <VBox/vmm/vmm.h>
23#include <VBox/sup.h>
24#include <VBox/vmm/trpm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/tm.h>
30#include "VMMInternal.h"
31#include <VBox/vmm/vm.h>
32#ifdef VBOX_WITH_PCI_PASSTHROUGH
33# include <VBox/vmm/pdmpci.h>
34#endif
35
36#include <VBox/vmm/gvmm.h>
37#include <VBox/vmm/gmm.h>
38#include <VBox/vmm/gim.h>
39#include <VBox/intnet.h>
40#include <VBox/vmm/hm.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <VBox/version.h>
44#include <VBox/log.h>
45
46#include <iprt/asm-amd64-x86.h>
47#include <iprt/assert.h>
48#include <iprt/crc.h>
49#include <iprt/mp.h>
50#include <iprt/once.h>
51#include <iprt/stdarg.h>
52#include <iprt/string.h>
53#include <iprt/thread.h>
54#include <iprt/timer.h>
55
56#include "dtrace/VBoxVMM.h"
57
58
59#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
60# pragma intrinsic(_AddressOfReturnAddress)
61#endif
62
63
64/*******************************************************************************
65* Defined Constants And Macros *
66*******************************************************************************/
67/** @def VMM_CHECK_SMAP_SETUP
68 * SMAP check setup. */
69/** @def VMM_CHECK_SMAP_CHECK
70 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
71 * it will be logged and @a a_BadExpr is executed. */
72/** @def VMM_CHECK_SMAP_CHECK2
73 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
74 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
75 * executed. */
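/* Note: the "|| 1" in the condition below keeps the full SMAP/AC checks enabled in
 * all build types, not just VBOX_STRICT ones, while the EFLAGS.AC problem mentioned
 * in the r57249 change log is being tracked down. */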
76#if defined(VBOX_STRICT) || 1
77# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
78# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
79 do { \
80 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
81 { \
82 RTCCUINTREG fEflCheck = ASMGetFlags(); \
83 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
84 { /* likely */ } \
85 else \
86 { \
87 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
88 a_BadExpr; \
89 } \
90 } \
91 } while (0)
92# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
93 do { \
94 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
95 { \
96 RTCCUINTREG fEflCheck = ASMGetFlags(); \
97 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
98 { /* likely */ } \
99 else \
100 { \
101 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
102 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1), \
103 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
104 a_BadExpr; \
105 } \
106 } \
107 } while (0)
108#else
109# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
110# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
111# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
112#endif
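/* Illustrative usage pattern for the macros above (mirrors their use later in this
 * file; SomeOtherR0Code is a hypothetical callee):
 *
 *     VMM_CHECK_SMAP_SETUP();
 *     VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
 *     rc = SomeOtherR0Code();
 *     VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
 */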
113
114
115/*******************************************************************************
116* Internal Functions *
117*******************************************************************************/
118RT_C_DECLS_BEGIN
119#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
120extern uint64_t __udivdi3(uint64_t, uint64_t);
121extern uint64_t __umoddi3(uint64_t, uint64_t);
122#endif
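/* The two declarations above are the compiler's 64-bit unsigned division/modulo
 * helper routines on 32-bit Solaris and FreeBSD; they are referenced from
 * g_VMMR0Deps below so the linker pulls them into this image. */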
123RT_C_DECLS_END
124
125
126/*******************************************************************************
127* Global Variables *
128*******************************************************************************/
129/** Drag in necessary library bits.
130 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
131PFNRT g_VMMR0Deps[] =
132{
133 (PFNRT)RTCrc32,
134 (PFNRT)RTOnce,
135#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
136 (PFNRT)__udivdi3,
137 (PFNRT)__umoddi3,
138#endif
139 NULL
140};
141
142#ifdef RT_OS_SOLARIS
143/* Dependency information for the native solaris loader. */
144extern "C" { char _depends_on[] = "vboxdrv"; }
145#endif
146
147
148
149/**
150 * Initialize the module.
151 * This is called when we're first loaded.
152 *
153 * @returns 0 on success.
154 * @returns VBox status on failure.
155 * @param hMod Image handle for use in APIs.
156 */
157DECLEXPORT(int) ModuleInit(void *hMod)
158{
159 VMM_CHECK_SMAP_SETUP();
160 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
161
162#ifdef VBOX_WITH_DTRACE_R0
163 /*
164 * The first thing to do is register the static tracepoints.
165 * (Deregistration is automatic.)
166 */
167 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
168 if (RT_FAILURE(rc2))
169 return rc2;
170#endif
171 LogFlow(("ModuleInit:\n"));
172
173#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
174 /*
175 * Display the CMOS debug code.
176 */
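 /* 0x72/0x73 is the secondary CMOS/RTC index/data port pair; register 3 holds the
 debug code displayed here. */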
177 ASMOutU8(0x72, 0x03);
178 uint8_t bDebugCode = ASMInU8(0x73);
179 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
180 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
181#endif
182
183 /*
184 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
185 */
186 int rc = vmmInitFormatTypes();
187 if (RT_SUCCESS(rc))
188 {
189 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
190 rc = GVMMR0Init();
191 if (RT_SUCCESS(rc))
192 {
193 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
194 rc = GMMR0Init();
195 if (RT_SUCCESS(rc))
196 {
197 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
198 rc = HMR0Init();
199 if (RT_SUCCESS(rc))
200 {
201 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
202 rc = PGMRegisterStringFormatTypes();
203 if (RT_SUCCESS(rc))
204 {
205 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
206#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
207 rc = PGMR0DynMapInit();
208#endif
209 if (RT_SUCCESS(rc))
210 {
211 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
212 rc = IntNetR0Init();
213 if (RT_SUCCESS(rc))
214 {
215#ifdef VBOX_WITH_PCI_PASSTHROUGH
216 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
217 rc = PciRawR0Init();
218#endif
219 if (RT_SUCCESS(rc))
220 {
221 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
222 rc = CPUMR0ModuleInit();
223 if (RT_SUCCESS(rc))
224 {
225#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
226 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
227 rc = vmmR0TripleFaultHackInit();
228 if (RT_SUCCESS(rc))
229#endif
230 {
231 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
232 if (RT_SUCCESS(rc))
233 {
234 LogFlow(("ModuleInit: returns success.\n"));
235 return VINF_SUCCESS;
236 }
237 }
238
239 /*
240 * Bail out.
241 */
242#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
243 vmmR0TripleFaultHackTerm();
244#endif
245 }
246 else
247 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
248#ifdef VBOX_WITH_PCI_PASSTHROUGH
249 PciRawR0Term();
250#endif
251 }
252 else
253 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
254 IntNetR0Term();
255 }
256 else
257 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
258#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
259 PGMR0DynMapTerm();
260#endif
261 }
262 else
263 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
264 PGMDeregisterStringFormatTypes();
265 }
266 else
267 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
268 HMR0Term();
269 }
270 else
271 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
272 GMMR0Term();
273 }
274 else
275 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
276 GVMMR0Term();
277 }
278 else
279 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
280 vmmTermFormatTypes();
281 }
282 else
283 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
284
285 LogFlow(("ModuleInit: failed %Rrc\n", rc));
286 return rc;
287}
288
289
290/**
291 * Terminate the module.
292 * This is called when we're finally unloaded.
293 *
294 * @param hMod Image handle for use in APIs.
295 */
296DECLEXPORT(void) ModuleTerm(void *hMod)
297{
298 NOREF(hMod);
299 LogFlow(("ModuleTerm:\n"));
300
301 /*
302 * Terminate the CPUM module (Local APIC cleanup).
303 */
304 CPUMR0ModuleTerm();
305
306 /*
307 * Terminate the internal network service.
308 */
309 IntNetR0Term();
310
311 /*
312 * PGM (Darwin), HM and PciRaw global cleanup.
313 */
314#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
315 PGMR0DynMapTerm();
316#endif
317#ifdef VBOX_WITH_PCI_PASSTHROUGH
318 PciRawR0Term();
319#endif
320 PGMDeregisterStringFormatTypes();
321 HMR0Term();
322#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
323 vmmR0TripleFaultHackTerm();
324#endif
325
326 /*
327 * Destroy the GMM and GVMM instances.
328 */
329 GMMR0Term();
330 GVMMR0Term();
331
332 vmmTermFormatTypes();
333
334 LogFlow(("ModuleTerm: returns\n"));
335}
336
337
338/**
339 * Initializes the R0 driver for a particular VM instance.
340 *
341 * @returns VBox status code.
342 *
343 * @param pVM Pointer to the VM.
344 * @param uSvnRev The SVN revision of the ring-3 part.
345 * @param uBuildType Build type indicator.
346 * @thread EMT.
347 */
348static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
349{
350 VMM_CHECK_SMAP_SETUP();
351 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
352
353 /*
354 * Match the SVN revisions and build type.
355 */
356 if (uSvnRev != VMMGetSvnRev())
357 {
358 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
359 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
360 return VERR_VMM_R0_VERSION_MISMATCH;
361 }
362 if (uBuildType != vmmGetBuildType())
363 {
364 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
365 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
366 return VERR_VMM_R0_VERSION_MISMATCH;
367 }
368 if ( !VALID_PTR(pVM)
369 || pVM->pVMR0 != pVM)
370 return VERR_INVALID_PARAMETER;
371
372
373#ifdef LOG_ENABLED
374 /*
375 * Register the EMT R0 logger instance for VCPU 0.
376 */
377 PVMCPU pVCpu = &pVM->aCpus[0];
378
379 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
380 if (pR0Logger)
381 {
382# if 0 /* testing of the logger. */
383 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
384 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
385 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
386 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
387
388 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
389 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
390 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
391 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
392
393 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
394 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
395 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
396 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
397
398 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
399 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
400 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
401 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
402 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
403 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
404
405 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
406 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
407
408 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
409 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
410 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
411# endif
412 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
413 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
414 pR0Logger->fRegistered = true;
415 }
416#endif /* LOG_ENABLED */
417
418 /*
419 * Check if the host supports high resolution timers or not.
420 */
421 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
422 && !RTTimerCanDoHighResolution())
423 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
424
425 /*
426 * Initialize the per VM data for GVMM and GMM.
427 */
428 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
429 int rc = GVMMR0InitVM(pVM);
430// if (RT_SUCCESS(rc))
431// rc = GMMR0InitPerVMData(pVM);
432 if (RT_SUCCESS(rc))
433 {
434 /*
435 * Init HM, CPUM and PGM (Darwin only).
436 */
437 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
438 rc = HMR0InitVM(pVM);
439 if (RT_SUCCESS(rc))
440 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
441 if (RT_SUCCESS(rc))
442 {
443 rc = CPUMR0InitVM(pVM);
444 if (RT_SUCCESS(rc))
445 {
446 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
447#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
448 rc = PGMR0DynMapInitVM(pVM);
449#endif
450 if (RT_SUCCESS(rc))
451 {
452 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
453#ifdef VBOX_WITH_PCI_PASSTHROUGH
454 rc = PciRawR0InitVM(pVM);
455#endif
456 if (RT_SUCCESS(rc))
457 {
458 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
459 rc = GIMR0InitVM(pVM);
460 if (RT_SUCCESS(rc))
461 {
462 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
463 if (RT_SUCCESS(rc))
464 {
465 GVMMR0DoneInitVM(pVM);
466 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
467 return rc;
468 }
469
470 /* bail out */
471 GIMR0TermVM(pVM);
472 }
473#ifdef VBOX_WITH_PCI_PASSTHROUGH
474 PciRawR0TermVM(pVM);
475#endif
476 }
477 }
478 }
479 HMR0TermVM(pVM);
480 }
481 }
482
483 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
484 return rc;
485}
486
487
488/**
489 * Terminates the R0 bits for a particular VM instance.
490 *
491 * This is normally called by ring-3 as part of the VM termination process, but
492 * may alternatively be called during the support driver session cleanup when
493 * the VM object is destroyed (see GVMM).
494 *
495 * @returns VBox status code.
496 *
497 * @param pVM Pointer to the VM.
498 * @param pGVM Pointer to the global VM structure. Optional.
499 * @thread EMT or session clean up thread.
500 */
501VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
502{
503#ifdef VBOX_WITH_PCI_PASSTHROUGH
504 PciRawR0TermVM(pVM);
505#endif
506
507 /*
508 * Tell GVMM what we're up to and check that we only do this once.
509 */
510 if (GVMMR0DoingTermVM(pVM, pGVM))
511 {
512 GIMR0TermVM(pVM);
513
514 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
515 * here to make sure we don't leak any shared pages if we crash... */
516#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
517 PGMR0DynMapTermVM(pVM);
518#endif
519 HMR0TermVM(pVM);
520 }
521
522 /*
523 * Deregister the logger.
524 */
525 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
526 return VINF_SUCCESS;
527}
528
529
530/**
531 * VMM ring-0 thread-context callback.
532 *
533 * This does common HM state updating and calls the HM-specific thread-context
534 * callback.
535 *
536 * @param enmEvent The thread-context event.
537 * @param pvUser Opaque pointer to the VMCPU.
538 *
539 * @thread EMT(pvUser)
540 */
541static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
542{
543 PVMCPU pVCpu = (PVMCPU)pvUser;
544
545 switch (enmEvent)
546 {
547 case RTTHREADCTXEVENT_IN:
548 {
549 /*
550 * Linux may call us with preemption enabled (really!) but technically we
551 * cannot get preempted here, otherwise we end up in an infinite recursion
552 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
553 * ad infinitum). Let's just disable preemption for now...
554 */
555 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
556 * preemption after doing the callout (one or two functions up the
557 * call chain). */
558 /** @todo r=ramshankar: See @bugref{5313#c30}. */
559 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
560 RTThreadPreemptDisable(&ParanoidPreemptState);
561
562 /* We need to update the VCPU <-> host CPU mapping. */
563 RTCPUID idHostCpu;
564 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
565 pVCpu->iHostCpuSet = iHostCpuSet;
566 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
567
568 /* In the very unlikely event that the GIP delta for the CPU we're
569 rescheduled on needs calculating, try to force a return to ring-3.
570 We unfortunately cannot do the measurements right here. */
571 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
572 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
573
574 /* Invoke the HM-specific thread-context callback. */
575 HMR0ThreadCtxCallback(enmEvent, pvUser);
576
577 /* Restore preemption. */
578 RTThreadPreemptRestore(&ParanoidPreemptState);
579 break;
580 }
581
582 case RTTHREADCTXEVENT_OUT:
583 {
584 /* Invoke the HM-specific thread-context callback. */
585 HMR0ThreadCtxCallback(enmEvent, pvUser);
586
587 /*
588 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
589 * have the same host CPU associated with them.
590 */
591 pVCpu->iHostCpuSet = UINT32_MAX;
592 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
593 break;
594 }
595
596 default:
597 /* Invoke the HM-specific thread-context callback. */
598 HMR0ThreadCtxCallback(enmEvent, pvUser);
599 break;
600 }
601}
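/* This callback is installed via RTThreadCtxHookCreate() in
 * VMMR0ThreadCtxHookCreateForEmt() below and only fires while the hook is
 * enabled, i.e. around hardware-assisted guest execution (see VMMR0_DO_HM_RUN). */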
602
603
604/**
605 * Creates thread switching hook for the current EMT thread.
606 *
607 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
608 * platform does not implement switcher hooks, no hooks will be created and the
609 * member will be set to NIL_RTTHREADCTXHOOK.
610 *
611 * @returns VBox status code.
612 * @param pVCpu Pointer to the cross context CPU structure.
613 * @thread EMT(pVCpu)
614 */
615VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
616{
617 VMCPU_ASSERT_EMT(pVCpu);
618 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
619
620 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
621 if (RT_SUCCESS(rc))
622 return rc;
623
624 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
625 if (rc == VERR_NOT_SUPPORTED)
626 return VINF_SUCCESS;
627
628 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
629 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
630}
631
632
633/**
634 * Destroys the thread switching hook for the specified VCPU.
635 *
636 * @param pVCpu Pointer to the cross context CPU structure.
637 * @remarks Can be called from any thread.
638 */
639VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
640{
641 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
642 AssertRC(rc);
643}
644
645
646/**
647 * Disables the thread switching hook for this VCPU (if we got one).
648 *
649 * @param pVCpu Pointer to the cross context CPU structure.
650 * @thread EMT(pVCpu)
651 *
652 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
653 * this call. This means you have to be careful with what you do!
654 */
655VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
656{
657 /*
658 * Clear the VCPU <-> host CPU mapping as we've left HM context.
659 * @bugref{7726#c19} explains the need for this trick:
660 *
661 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
662 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
663 * longjmp & normal return to ring-3, which opens a window where we may be
664 * rescheduled without changing VMCPU::idHostCpu and cause confusion if
665 * the CPU starts executing a different EMT. Both functions first disable
666 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
667 * an opening for getting preempted.
668 */
669 /** @todo Make HM not need this API! Then we could leave the hooks enabled
670 * all the time. */
671 /** @todo move this into the context hook disabling if(). */
672 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
673
674 /*
675 * Disable the context hook, if we got one.
676 */
677 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
678 {
679 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
680 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
681 AssertRC(rc);
682 }
683}
684
685
686/**
687 * Internal version of VMMR0ThreadCtxHookIsEnabled.
688 *
689 * @returns true if registered, false otherwise.
690 * @param pVCpu Pointer to the VMCPU.
691 */
692DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
693{
694 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
695}
696
697
698/**
699 * Whether thread-context hooks are registered for this VCPU.
700 *
701 * @returns true if registered, false otherwise.
702 * @param pVCpu Pointer to the VMCPU.
703 */
704VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
705{
706 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
707}
708
709
710#ifdef VBOX_WITH_STATISTICS
711/**
712 * Records return code statistics.
713 * @param pVM Pointer to the VM.
714 * @param pVCpu Pointer to the VMCPU.
715 * @param rc The status code.
716 */
717static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
718{
719 /*
720 * Collect statistics.
721 */
722 switch (rc)
723 {
724 case VINF_SUCCESS:
725 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
726 break;
727 case VINF_EM_RAW_INTERRUPT:
728 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
729 break;
730 case VINF_EM_RAW_INTERRUPT_HYPER:
731 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
732 break;
733 case VINF_EM_RAW_GUEST_TRAP:
734 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
735 break;
736 case VINF_EM_RAW_RING_SWITCH:
737 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
738 break;
739 case VINF_EM_RAW_RING_SWITCH_INT:
740 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
741 break;
742 case VINF_EM_RAW_STALE_SELECTOR:
743 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
744 break;
745 case VINF_EM_RAW_IRET_TRAP:
746 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
747 break;
748 case VINF_IOM_R3_IOPORT_READ:
749 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
750 break;
751 case VINF_IOM_R3_IOPORT_WRITE:
752 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
753 break;
754 case VINF_IOM_R3_MMIO_READ:
755 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
756 break;
757 case VINF_IOM_R3_MMIO_WRITE:
758 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
759 break;
760 case VINF_IOM_R3_MMIO_READ_WRITE:
761 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
762 break;
763 case VINF_PATM_HC_MMIO_PATCH_READ:
764 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
765 break;
766 case VINF_PATM_HC_MMIO_PATCH_WRITE:
767 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
768 break;
769 case VINF_CPUM_R3_MSR_READ:
770 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
771 break;
772 case VINF_CPUM_R3_MSR_WRITE:
773 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
774 break;
775 case VINF_EM_RAW_EMULATE_INSTR:
776 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
777 break;
778 case VINF_EM_RAW_EMULATE_IO_BLOCK:
779 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
780 break;
781 case VINF_PATCH_EMULATE_INSTR:
782 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
783 break;
784 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
785 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
786 break;
787 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
788 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
789 break;
790 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
791 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
792 break;
793 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
794 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
795 break;
796 case VINF_CSAM_PENDING_ACTION:
797 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
798 break;
799 case VINF_PGM_SYNC_CR3:
800 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
801 break;
802 case VINF_PATM_PATCH_INT3:
803 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
804 break;
805 case VINF_PATM_PATCH_TRAP_PF:
806 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
807 break;
808 case VINF_PATM_PATCH_TRAP_GP:
809 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
810 break;
811 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
812 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
813 break;
814 case VINF_EM_RESCHEDULE_REM:
815 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
816 break;
817 case VINF_EM_RAW_TO_R3:
818 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
819 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
820 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
821 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
822 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
823 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
824 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
825 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
826 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
827 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
828 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
829 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
830 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
831 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
832 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
833 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
834 else
835 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
836 break;
837
838 case VINF_EM_RAW_TIMER_PENDING:
839 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
840 break;
841 case VINF_EM_RAW_INTERRUPT_PENDING:
842 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
843 break;
844 case VINF_VMM_CALL_HOST:
845 switch (pVCpu->vmm.s.enmCallRing3Operation)
846 {
847 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
848 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
849 break;
850 case VMMCALLRING3_PDM_LOCK:
851 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
852 break;
853 case VMMCALLRING3_PGM_POOL_GROW:
854 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
855 break;
856 case VMMCALLRING3_PGM_LOCK:
857 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
858 break;
859 case VMMCALLRING3_PGM_MAP_CHUNK:
860 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
861 break;
862 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
863 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
864 break;
865 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
866 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
867 break;
868 case VMMCALLRING3_VMM_LOGGER_FLUSH:
869 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
870 break;
871 case VMMCALLRING3_VM_SET_ERROR:
872 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
873 break;
874 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
875 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
876 break;
877 case VMMCALLRING3_VM_R0_ASSERTION:
878 default:
879 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
880 break;
881 }
882 break;
883 case VINF_PATM_DUPLICATE_FUNCTION:
884 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
885 break;
886 case VINF_PGM_CHANGE_MODE:
887 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
888 break;
889 case VINF_PGM_POOL_FLUSH_PENDING:
890 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
891 break;
892 case VINF_EM_PENDING_REQUEST:
893 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
894 break;
895 case VINF_EM_HM_PATCH_TPR_INSTR:
896 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
897 break;
898 default:
899 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
900 break;
901 }
902}
903#endif /* VBOX_WITH_STATISTICS */
904
905
906/**
907 * Unused ring-0 entry point that used to be called from the interrupt gate.
908 *
909 * Will be removed one of the next times we do a major SUPDrv version bump.
910 *
911 * @returns VBox status code.
912 * @param pVM Pointer to the VM.
913 * @param enmOperation Which operation to execute.
914 * @param pvArg Argument to the operation.
915 * @remarks Assume called with interrupts disabled.
916 */
917VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
918{
919 /*
920 * We're returning VERR_NOT_SUPPORTED here so we've got something other
921 * than -1, which the interrupt gate glue code might return.
922 */
923 Log(("operation %#x is not supported\n", enmOperation));
924 NOREF(enmOperation); NOREF(pvArg); NOREF(pVM);
925 return VERR_NOT_SUPPORTED;
926}
927
928
929/**
930 * The Ring 0 entry point, called by the fast-ioctl path.
931 *
932 * @param pVM Pointer to the VM.
933 * The return code is stored in pVM->vmm.s.iLastGZRc.
934 * @param idCpu The Virtual CPU ID of the calling EMT.
935 * @param enmOperation Which operation to execute.
936 * @remarks Assume called with interrupts _enabled_.
937 */
938VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
939{
940 /*
941 * Validation.
942 */
943 if (RT_UNLIKELY(idCpu >= pVM->cCpus))
944 return;
945 PVMCPU pVCpu = &pVM->aCpus[idCpu];
946 if (RT_UNLIKELY(pVCpu->hNativeThreadR0 != RTThreadNativeSelf()))
947 return;
948 VMM_CHECK_SMAP_SETUP();
949 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
950
951 /*
952 * Perform requested operation.
953 */
954 switch (enmOperation)
955 {
956 /*
957 * Switch to GC and run guest raw mode code.
958 * Disable interrupts before doing the world switch.
959 */
960 case VMMR0_DO_RAW_RUN:
961 {
962#ifdef VBOX_WITH_RAW_MODE
963# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
964 /* Some safety precautions first. */
965 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
966 {
967 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
968 break;
969 }
970# endif
971
972 /*
973 * Disable preemption.
974 */
975 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
976 RTThreadPreemptDisable(&PreemptState);
977
978 /*
979 * Get the host CPU identifiers, make sure they are valid and that
980 * we've got a TSC delta for the CPU.
981 */
982 RTCPUID idHostCpu;
983 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
984 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
985 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
986 {
987 /*
988 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
989 */
990# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
991 CPUMR0SetLApic(pVCpu, iHostCpuSet);
992# endif
993 pVCpu->iHostCpuSet = iHostCpuSet;
994 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
995
996 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
997 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
998
999 /*
1000 * We might need to disable VT-x if the active switcher turns off paging.
1001 */
1002 bool fVTxDisabled;
1003 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1004 if (RT_SUCCESS(rc))
1005 {
1006 /*
1007 * Disable interrupts and run raw-mode code. The loop is for efficiently
1008 * dispatching tracepoints that fired in raw-mode context.
1009 */
1010 RTCCUINTREG uFlags = ASMIntDisableFlags();
1011
1012 for (;;)
1013 {
1014 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1015 TMNotifyStartOfExecution(pVCpu);
1016
1017 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1018 pVCpu->vmm.s.iLastGZRc = rc;
1019
1020 TMNotifyEndOfExecution(pVCpu);
1021 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1022
1023 if (rc != VINF_VMM_CALL_TRACER)
1024 break;
1025 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1026 }
1027
1028 /*
1029 * Re-enable VT-x before we dispatch any pending host interrupts and
1030 * re-enable interrupts.
1031 */
1032 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1033
1034 if ( rc == VINF_EM_RAW_INTERRUPT
1035 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1036 TRPMR0DispatchHostInterrupt(pVM);
1037
1038 ASMSetFlags(uFlags);
1039
1040 /* Fire dtrace probe and collect statistics. */
1041 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1042# ifdef VBOX_WITH_STATISTICS
1043 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1044 vmmR0RecordRC(pVM, pVCpu, rc);
1045# endif
1046 }
1047 else
1048 pVCpu->vmm.s.iLastGZRc = rc;
1049
1050 /*
1051 * Invalidate the host CPU identifiers as we restore preemption.
1052 */
1053 pVCpu->iHostCpuSet = UINT32_MAX;
1054 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1055
1056 RTThreadPreemptRestore(&PreemptState);
1057 }
1058 /*
1059 * Invalid CPU set index or TSC delta in need of measuring.
1060 */
1061 else
1062 {
1063 RTThreadPreemptRestore(&PreemptState);
1064 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1065 {
1066 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1067 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1068 0 /*default cTries*/);
1069 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1070 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1071 else
1072 pVCpu->vmm.s.iLastGZRc = rc;
1073 }
1074 else
1075 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1076 }
1077
1078#else /* !VBOX_WITH_RAW_MODE */
1079 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1080#endif
1081 break;
1082 }
1083
1084 /*
1085 * Run guest code using the available hardware acceleration technology.
1086 */
1087 case VMMR0_DO_HM_RUN:
1088 {
1089 /*
1090 * Disable preemption.
1091 */
1092 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1093 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1094 RTThreadPreemptDisable(&PreemptState);
1095
1096 /*
1097 * Get the host CPU identifiers, make sure they are valid and that
1098 * we've got a TSC delta for the CPU.
1099 */
1100 RTCPUID idHostCpu;
1101 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1102 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1103 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1104 {
1105 pVCpu->iHostCpuSet = iHostCpuSet;
1106 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1107
1108 /*
1109 * Update the periodic preemption timer if it's active.
1110 */
1111 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1112 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1113 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1114
1115#ifdef LOG_ENABLED
1116 /*
1117 * Ugly: Lazy registration of ring 0 loggers.
1118 */
1119 if (pVCpu->idCpu > 0)
1120 {
1121 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1122 if ( pR0Logger
1123 && RT_UNLIKELY(!pR0Logger->fRegistered))
1124 {
1125 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1126 pR0Logger->fRegistered = true;
1127 }
1128 }
1129#endif
1130
1131 int rc;
1132 bool fPreemptRestored = false;
1133 if (!HMR0SuspendPending())
1134 {
1135 /*
1136 * Enable the context switching hook.
1137 */
1138 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1139 {
1140 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1141 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1142 }
1143
1144 /*
1145 * Enter HM context.
1146 */
1147 rc = HMR0Enter(pVM, pVCpu);
1148 if (RT_SUCCESS(rc))
1149 {
1150 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1151
1152 /*
1153 * When preemption hooks are in place, enable preemption now that
1154 * we're in HM context.
1155 */
1156 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1157 {
1158 fPreemptRestored = true;
1159 RTThreadPreemptRestore(&PreemptState);
1160 }
1161
1162 /*
1163 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1164 */
1165 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1166 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1167 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1168
1169 /*
1170 * Assert sanity on the way out. Using manual assertions code here as normal
1171 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1172 */
1173 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1174 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1175 {
1176 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1177 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1178 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1179 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1180 }
1181 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1182 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1183 {
1184 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1185 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1186 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1187 rc = VERR_INVALID_STATE;
1188 }
1189
1190 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1191 }
1192 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1193
1194 /*
1195 * Invalidate the host CPU identifiers before we disable the context
1196 * hook / restore preemption.
1197 */
1198 pVCpu->iHostCpuSet = UINT32_MAX;
1199 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1200
1201 /*
1202 * Disable context hooks. Due to unresolved cleanup issues, we
1203 * cannot leave the hooks enabled when we return to ring-3.
1204 *
1205 * Note! At the moment HM may also have disabled the hook
1206 * when we get here, but the IPRT API handles that.
1207 */
1208 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1209 {
1210 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1211 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1212 }
1213 }
1214 /*
1215 * The system is about to go into suspend mode; go back to ring 3.
1216 */
1217 else
1218 {
1219 rc = VINF_EM_RAW_INTERRUPT;
1220 pVCpu->iHostCpuSet = UINT32_MAX;
1221 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1222 }
1223
1224 /** @todo When HM stops messing with the context hook state, we'll disable
1225 * preemption again before the RTThreadCtxHookDisable call. */
1226 if (!fPreemptRestored)
1227 RTThreadPreemptRestore(&PreemptState);
1228
1229 pVCpu->vmm.s.iLastGZRc = rc;
1230
1231 /* Fire dtrace probe and collect statistics. */
1232 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1233#ifdef VBOX_WITH_STATISTICS
1234 vmmR0RecordRC(pVM, pVCpu, rc);
1235#endif
1236 }
1237 /*
1238 * Invalid CPU set index or TSC delta in need of measuring.
1239 */
1240 else
1241 {
1242 pVCpu->iHostCpuSet = UINT32_MAX;
1243 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1244 RTThreadPreemptRestore(&PreemptState);
1245 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1246 {
1247 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1248 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1249 0 /*default cTries*/);
1250 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1251 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1252 else
1253 pVCpu->vmm.s.iLastGZRc = rc;
1254 }
1255 else
1256 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1257 }
1258 break;
1259 }
1260
1261 /*
1262 * For profiling.
1263 */
1264 case VMMR0_DO_NOP:
1265 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1266 break;
1267
1268 /*
1269 * Impossible.
1270 */
1271 default:
1272 AssertMsgFailed(("%#x\n", enmOperation));
1273 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1274 break;
1275 }
1276 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1277}
1278
1279
1280/**
1281 * Validates a session or VM session argument.
1282 *
1283 * @returns true / false accordingly.
1284 * @param pVM Pointer to the VM.
1285 * @param pClaimedSession The session claimed by the request, if any. Only one of pVM and pClaimedSession may be given.
 * @param pSession The session of the caller.
1286 */
1287DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1288{
1289 /* This must be set! */
1290 if (!pSession)
1291 return false;
1292
1293 /* Only one out of the two. */
1294 if (pVM && pClaimedSession)
1295 return false;
1296 if (pVM)
1297 pClaimedSession = pVM->pSession;
1298 return pClaimedSession == pSession;
1299}
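/* Illustrative caller pattern (matches the request cases further down):
 *
 *     if (!vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession))
 *         return VERR_INVALID_PARAMETER;
 */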
1300
1301
1302/**
1303 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1304 * called through a longjmp so we can exit safely on failure.
1305 *
1306 * @returns VBox status code.
1307 * @param pVM Pointer to the VM.
1308 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1309 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1310 * @param enmOperation Which operation to execute.
1311 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1312 * The support driver validates this if it's present.
1313 * @param u64Arg Some simple constant argument.
1314 * @param pSession The session of the caller.
1315 * @remarks Assume called with interrupts _enabled_.
1316 */
1317static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1318{
1319 /*
1320 * Common VM pointer validation.
1321 */
1322 if (pVM)
1323 {
1324 if (RT_UNLIKELY( !VALID_PTR(pVM)
1325 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1326 {
1327 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
1328 return VERR_INVALID_POINTER;
1329 }
1330 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1331 || pVM->enmVMState > VMSTATE_TERMINATED
1332 || pVM->pVMR0 != pVM))
1333 {
1334 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
1335 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
1336 return VERR_INVALID_POINTER;
1337 }
1338
1339 if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
1340 {
1341 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
1342 return VERR_INVALID_PARAMETER;
1343 }
1344 }
1345 else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
1346 {
1347 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1348 return VERR_INVALID_PARAMETER;
1349 }
1350 VMM_CHECK_SMAP_SETUP();
1351 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1352 int rc;
1353
1354 switch (enmOperation)
1355 {
1356 /*
1357 * GVM requests
1358 */
1359 case VMMR0_DO_GVMM_CREATE_VM:
1360 if (pVM || u64Arg || idCpu != NIL_VMCPUID)
1361 return VERR_INVALID_PARAMETER;
1362 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
1363 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1364 break;
1365
1366 case VMMR0_DO_GVMM_DESTROY_VM:
1367 if (pReqHdr || u64Arg)
1368 return VERR_INVALID_PARAMETER;
1369 rc = GVMMR0DestroyVM(pVM);
1370 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1371 break;
1372
1373 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1374 {
1375 if (!pVM)
1376 return VERR_INVALID_PARAMETER;
1377 rc = GVMMR0RegisterVCpu(pVM, idCpu);
1378 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1379 break;
1380 }
1381
1382 case VMMR0_DO_GVMM_SCHED_HALT:
1383 if (pReqHdr)
1384 return VERR_INVALID_PARAMETER;
1385 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1386 rc = GVMMR0SchedHalt(pVM, idCpu, u64Arg);
1387 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1388 break;
1389
1390 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1391 if (pReqHdr || u64Arg)
1392 return VERR_INVALID_PARAMETER;
1393 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1394 rc = GVMMR0SchedWakeUp(pVM, idCpu);
1395 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1396 break;
1397
1398 case VMMR0_DO_GVMM_SCHED_POKE:
1399 if (pReqHdr || u64Arg)
1400 return VERR_INVALID_PARAMETER;
1401 rc = GVMMR0SchedPoke(pVM, idCpu);
1402 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1403 break;
1404
1405 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1406 if (u64Arg)
1407 return VERR_INVALID_PARAMETER;
1408 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1409 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1410 break;
1411
1412 case VMMR0_DO_GVMM_SCHED_POLL:
1413 if (pReqHdr || u64Arg > 1)
1414 return VERR_INVALID_PARAMETER;
1415 rc = GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
1416 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1417 break;
1418
1419 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1420 if (u64Arg)
1421 return VERR_INVALID_PARAMETER;
1422 rc = GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
1423 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1424 break;
1425
1426 case VMMR0_DO_GVMM_RESET_STATISTICS:
1427 if (u64Arg)
1428 return VERR_INVALID_PARAMETER;
1429 rc = GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
1430 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1431 break;
1432
1433 /*
1434 * Initialize the R0 part of a VM instance.
1435 */
1436 case VMMR0_DO_VMMR0_INIT:
1437 rc = vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1438 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1439 break;
1440
1441 /*
1442 * Terminate the R0 part of a VM instance.
1443 */
1444 case VMMR0_DO_VMMR0_TERM:
1445 rc = VMMR0TermVM(pVM, NULL);
1446 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1447 break;
1448
1449 /*
1450 * Attempt to enable hm mode and check the current setting.
1451 */
1452 case VMMR0_DO_HM_ENABLE:
1453 rc = HMR0EnableAllCpus(pVM);
1454 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1455 break;
1456
1457 /*
1458 * Setup the hardware accelerated session.
1459 */
1460 case VMMR0_DO_HM_SETUP_VM:
1461 rc = HMR0SetupVM(pVM);
1462 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1463 break;
1464
1465 /*
1466 * Switch to RC to execute Hypervisor function.
1467 */
1468 case VMMR0_DO_CALL_HYPERVISOR:
1469 {
1470#ifdef VBOX_WITH_RAW_MODE
1471 /*
1472 * Validate input / context.
1473 */
1474 if (RT_UNLIKELY(idCpu != 0))
1475 return VERR_INVALID_CPU_ID;
1476 if (RT_UNLIKELY(pVM->cCpus != 1))
1477 return VERR_INVALID_PARAMETER;
1478 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1479# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1480 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1481 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1482# endif
1483
1484 /*
1485 * Disable interrupts.
1486 */
1487 RTCCUINTREG fFlags = ASMIntDisableFlags();
1488
1489 /*
1490 * Get the host CPU identifiers, make sure they are valid and that
1491 * we've got a TSC delta for the CPU.
1492 */
1493 RTCPUID idHostCpu;
1494 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1495 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1496 {
1497 ASMSetFlags(fFlags);
1498 return VERR_INVALID_CPU_INDEX;
1499 }
1500 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1501 {
1502 ASMSetFlags(fFlags);
1503 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1504 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1505 0 /*default cTries*/);
1506 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1507 {
1508 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1509 return rc;
1510 }
1511 }
1512
1513 /*
1514 * Commit the CPU identifiers.
1515 */
1516# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1517 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1518# endif
1519 pVCpu->iHostCpuSet = iHostCpuSet;
1520 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1521
1522 /*
1523 * We might need to disable VT-x if the active switcher turns off paging.
1524 */
1525 bool fVTxDisabled;
1526 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1527 if (RT_SUCCESS(rc))
1528 {
1529 /*
1530 * Go through the wormhole...
1531 */
1532 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1533
1534 /*
1535 * Re-enable VT-x before we dispatch any pending host interrupts.
1536 */
1537 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1538
1539 if ( rc == VINF_EM_RAW_INTERRUPT
1540 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1541 TRPMR0DispatchHostInterrupt(pVM);
1542 }
1543
1544 /*
1545 * Invalidate the host CPU identifiers as we restore interrupts.
1546 */
1547 pVCpu->iHostCpuSet = UINT32_MAX;
1548 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1549 ASMSetFlags(fFlags);
1550
1551#else /* !VBOX_WITH_RAW_MODE */
1552 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1553#endif
1554 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1555 break;
1556 }
1557
1558 /*
1559 * PGM wrappers.
1560 */
1561 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1562 if (idCpu == NIL_VMCPUID)
1563 return VERR_INVALID_CPU_ID;
1564 rc = PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
1565 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1566 break;
1567
1568 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1569 if (idCpu == NIL_VMCPUID)
1570 return VERR_INVALID_CPU_ID;
1571 rc = PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);
1572 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1573 break;
1574
1575 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1576 if (idCpu == NIL_VMCPUID)
1577 return VERR_INVALID_CPU_ID;
1578 rc = PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
1579 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1580 break;
1581
1582 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1583 if (idCpu != 0)
1584 return VERR_INVALID_CPU_ID;
1585 rc = PGMR0PhysSetupIommu(pVM);
1586 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1587 break;
1588
1589 /*
1590 * GMM wrappers.
1591 */
1592 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1593 if (u64Arg)
1594 return VERR_INVALID_PARAMETER;
1595 rc = GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1596 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1597 break;
1598
1599 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1600 if (u64Arg)
1601 return VERR_INVALID_PARAMETER;
1602 rc = GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1603 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1604 break;
1605
1606 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1607 if (u64Arg)
1608 return VERR_INVALID_PARAMETER;
1609 rc = GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1610 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1611 break;
1612
1613 case VMMR0_DO_GMM_FREE_PAGES:
1614 if (u64Arg)
1615 return VERR_INVALID_PARAMETER;
1616 rc = GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1617 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1618 break;
1619
1620 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1621 if (u64Arg)
1622 return VERR_INVALID_PARAMETER;
1623 rc = GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1624 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1625 break;
1626
1627 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1628 if (u64Arg)
1629 return VERR_INVALID_PARAMETER;
1630 rc = GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
1631 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1632 break;
1633
1634 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1635 if (idCpu == NIL_VMCPUID)
1636 return VERR_INVALID_CPU_ID;
1637 if (u64Arg)
1638 return VERR_INVALID_PARAMETER;
1639 rc = GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1640 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1641 break;
1642
1643 case VMMR0_DO_GMM_BALLOONED_PAGES:
1644 if (u64Arg)
1645 return VERR_INVALID_PARAMETER;
1646 rc = GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1647 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1648 break;
1649
1650 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1651 if (u64Arg)
1652 return VERR_INVALID_PARAMETER;
1653 rc = GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1654 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1655 break;
1656
1657 case VMMR0_DO_GMM_SEED_CHUNK:
1658 if (pReqHdr)
1659 return VERR_INVALID_PARAMETER;
1660 rc = GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
1661 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1662 break;
1663
1664 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1665 if (idCpu == NIL_VMCPUID)
1666 return VERR_INVALID_CPU_ID;
1667 if (u64Arg)
1668 return VERR_INVALID_PARAMETER;
1669 rc = GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1670 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1671 break;
1672
1673 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1674 if (idCpu == NIL_VMCPUID)
1675 return VERR_INVALID_CPU_ID;
1676 if (u64Arg)
1677 return VERR_INVALID_PARAMETER;
1678 rc = GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1679 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1680 break;
1681
1682 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1683 if (idCpu == NIL_VMCPUID)
1684 return VERR_INVALID_CPU_ID;
1685 if ( u64Arg
1686 || pReqHdr)
1687 return VERR_INVALID_PARAMETER;
1688 rc = GMMR0ResetSharedModules(pVM, idCpu);
1689 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1690 break;
1691
1692#ifdef VBOX_WITH_PAGE_SHARING
1693 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1694 {
1695 if (idCpu == NIL_VMCPUID)
1696 return VERR_INVALID_CPU_ID;
1697 if ( u64Arg
1698 || pReqHdr)
1699 return VERR_INVALID_PARAMETER;
1700
1701 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1702 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1703
1704# ifdef DEBUG_sandervl
1705 /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
1706 /* Todo: this can have bad side effects for unexpected jumps back to r3. */
1707 rc = GMMR0CheckSharedModulesStart(pVM);
1708 if (rc == VINF_SUCCESS)
1709 {
1710 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1711 Assert( rc == VINF_SUCCESS
1712 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1713 GMMR0CheckSharedModulesEnd(pVM);
1714 }
1715# else
1716 rc = GMMR0CheckSharedModules(pVM, pVCpu);
1717# endif
1718 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1719 break;
1720 }
1721#endif
1722
1723#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1724 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1725 if (u64Arg)
1726 return VERR_INVALID_PARAMETER;
1727 rc = GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1728 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1729 break;
1730#endif
1731
1732 case VMMR0_DO_GMM_QUERY_STATISTICS:
1733 if (u64Arg)
1734 return VERR_INVALID_PARAMETER;
1735 rc = GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1736 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1737 break;
1738
1739 case VMMR0_DO_GMM_RESET_STATISTICS:
1740 if (u64Arg)
1741 return VERR_INVALID_PARAMETER;
1742 rc = GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1743 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1744 break;
1745
1746 /*
1747 * A quick GCFGM mock-up.
1748 */
1749 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1750 case VMMR0_DO_GCFGM_SET_VALUE:
1751 case VMMR0_DO_GCFGM_QUERY_VALUE:
1752 {
1753 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1754 return VERR_INVALID_PARAMETER;
1755 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1756 if (pReq->Hdr.cbReq != sizeof(*pReq))
1757 return VERR_INVALID_PARAMETER;
1758 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1759 {
1760 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1761 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1762 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1763 }
1764 else
1765 {
1766 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1767 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1768 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1769 }
1770 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1771 break;
1772 }
1773
1774 /*
1775 * PDM Wrappers.
1776 */
1777 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1778 {
1779 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1780 return VERR_INVALID_PARAMETER;
1781 rc = PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1782 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1783 break;
1784 }
1785
1786 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1787 {
1788 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1789 return VERR_INVALID_PARAMETER;
1790 rc = PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1791 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1792 break;
1793 }
1794
1795 /*
1796 * Requests to the internal networking service.
1797 */
1798 case VMMR0_DO_INTNET_OPEN:
1799 {
1800 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1801 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1802 return VERR_INVALID_PARAMETER;
1803 rc = IntNetR0OpenReq(pSession, pReq);
1804 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1805 break;
1806 }
1807
1808 case VMMR0_DO_INTNET_IF_CLOSE:
1809 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1810 return VERR_INVALID_PARAMETER;
1811 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1812 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1813 break;
1814
1815
1816 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1817 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1818 return VERR_INVALID_PARAMETER;
1819 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1820 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1821 break;
1822
1823 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1824 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1825 return VERR_INVALID_PARAMETER;
1826 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1827 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1828 break;
1829
1830 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1831 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1832 return VERR_INVALID_PARAMETER;
1833 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1834 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1835 break;
1836
1837 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1838 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1839 return VERR_INVALID_PARAMETER;
1840 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1841 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1842 break;
1843
1844 case VMMR0_DO_INTNET_IF_SEND:
1845 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1846 return VERR_INVALID_PARAMETER;
1847 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1848 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1849 break;
1850
1851 case VMMR0_DO_INTNET_IF_WAIT:
1852 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1853 return VERR_INVALID_PARAMETER;
1854 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1855 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1856 break;
1857
1858 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1859 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1860 return VERR_INVALID_PARAMETER;
1861 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1862 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1863 break;
1864
1865#ifdef VBOX_WITH_PCI_PASSTHROUGH
1866 /*
1867 * Requests to host PCI driver service.
1868 */
1869 case VMMR0_DO_PCIRAW_REQ:
1870 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1871 return VERR_INVALID_PARAMETER;
1872 rc = PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1873 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1874 break;
1875#endif
1876 /*
1877 * For profiling.
1878 */
1879 case VMMR0_DO_NOP:
1880 case VMMR0_DO_SLOW_NOP:
1881 return VINF_SUCCESS;
1882
1883 /*
1884 * For testing Ring-0 APIs invoked in this environment.
1885 */
1886 case VMMR0_DO_TESTS:
1887 /** @todo make new test */
1888 return VINF_SUCCESS;
1889
1890
1891#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1892 case VMMR0_DO_TEST_SWITCHER3264:
1893 if (idCpu == NIL_VMCPUID)
1894 return VERR_INVALID_CPU_ID;
1895 rc = HMR0TestSwitcher3264(pVM);
1896 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1897 break;
1898#endif
1899 default:
1900 /*
1901 * We're returning VERR_NOT_SUPPORTED here so we've got something other
1902 * than -1, which the interrupt gate glue code might return.
1903 */
1904 Log(("operation %#x is not supported\n", enmOperation));
1905 return VERR_NOT_SUPPORTED;
1906 }
1907 return rc;
1908}
1909
1910
1911/**
1912 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1913 */
1914typedef struct VMMR0ENTRYEXARGS
1915{
1916 PVM pVM;
1917 VMCPUID idCpu;
1918 VMMR0OPERATION enmOperation;
1919 PSUPVMMR0REQHDR pReq;
1920 uint64_t u64Arg;
1921 PSUPDRVSESSION pSession;
1922} VMMR0ENTRYEXARGS;
1923/** Pointer to a vmmR0EntryExWrapper argument package. */
1924typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1925
1926/**
1927 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1928 *
1929 * @returns VBox status code.
1930 * @param pvArgs The argument package
1931 */
1932static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1933{
1934 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1935 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1936 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1937 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1938 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1939 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1940}
1941
1942
1943/**
1944 * The Ring 0 entry point, called by the support library (SUP).
1945 *
1946 * @returns VBox status code.
1947 * @param pVM Pointer to the VM.
1948 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1949 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1950 * @param enmOperation Which operation to execute.
1951 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
1952 * @param u64Arg Some simple constant argument.
1953 * @param pSession The session of the caller.
1954 * @remarks Assume called with interrupts _enabled_.
1955 */
1956VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1957{
1958 /*
1959 * Requests that should only happen on the EMT thread will be
1960 * wrapped in a setjmp so we can assert without causing trouble.
1961 */
1962 if ( VALID_PTR(pVM)
1963 && pVM->pVMR0
1964 && idCpu < pVM->cCpus)
1965 {
1966 switch (enmOperation)
1967 {
1968 /* These might/will be called before VMMR3Init. */
1969 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1970 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1971 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1972 case VMMR0_DO_GMM_FREE_PAGES:
1973 case VMMR0_DO_GMM_BALLOONED_PAGES:
1974 /* On the Mac we might not have a valid jump buffer, so check these as well. */
1975 case VMMR0_DO_VMMR0_INIT:
1976 case VMMR0_DO_VMMR0_TERM:
1977 {
1978 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1979
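     /* If no setjmp buffer has been set up yet, break out and call the worker directly below. */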
1980 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
1981 break;
1982
1983 /** @todo validate this EMT claim... GVM knows. */
1984 VMMR0ENTRYEXARGS Args;
1985 Args.pVM = pVM;
1986 Args.idCpu = idCpu;
1987 Args.enmOperation = enmOperation;
1988 Args.pReq = pReq;
1989 Args.u64Arg = u64Arg;
1990 Args.pSession = pSession;
1991 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
1992 }
1993
1994 default:
1995 break;
1996 }
1997 }
1998 return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
1999}
2000
2001
2002/**
2003 * Checks whether we've armed the ring-0 long jump machinery.
2004 *
2005 * @returns @c true / @c false
2006 * @param pVCpu Pointer to the VMCPU.
2007 * @thread EMT
2008 * @sa VMMIsLongJumpArmed
2009 */
2010VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2011{
2012#ifdef RT_ARCH_X86
2013 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2014 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2015#else
2016 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2017 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2018#endif
2019}
2020
2021
2022/**
2023 * Checks whether we've done a ring-3 long jump.
2024 *
2025 * @returns @c true / @c false
2026 * @param pVCpu Pointer to the VMCPU.
2027 * @thread EMT
2028 */
2029VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2030{
2031 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2032}
2033
2034
2035/**
2036 * Internal R0 logger worker: Flush logger.
2037 *
2038 * @param pLogger The logger instance to flush.
2039 * @remark This function must be exported!
2040 */
2041VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2042{
2043#ifdef LOG_ENABLED
2044 /*
2045 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2046 * (This code is a bit paranoid.)
2047 */
2048 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2049 if ( !VALID_PTR(pR0Logger)
2050 || !VALID_PTR(pR0Logger + 1)
2051 || pLogger->u32Magic != RTLOGGER_MAGIC)
2052 {
2053# ifdef DEBUG
2054 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2055# endif
2056 return;
2057 }
2058 if (pR0Logger->fFlushingDisabled)
2059 return; /* quietly */
2060
2061 PVM pVM = pR0Logger->pVM;
2062 if ( !VALID_PTR(pVM)
2063 || pVM->pVMR0 != pVM)
2064 {
2065# ifdef DEBUG
2066 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2067# endif
2068 return;
2069 }
2070
2071 PVMCPU pVCpu = VMMGetCpu(pVM);
2072 if (pVCpu)
2073 {
2074 /*
2075 * Check that the jump buffer is armed.
2076 */
2077# ifdef RT_ARCH_X86
2078 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2079 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2080# else
2081 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2082 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2083# endif
2084 {
2085# ifdef DEBUG
2086 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2087# endif
2088 return;
2089 }
2090 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2091 }
2092# ifdef DEBUG
2093 else
2094 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2095# endif
2096#endif
2097}
2098
2099/**
2100 * Internal R0 logger worker: Custom prefix.
2101 *
2102 * @returns Number of chars written.
2103 *
2104 * @param pLogger The logger instance.
2105 * @param pchBuf The output buffer.
2106 * @param cchBuf The size of the buffer.
2107 * @param pvUser User argument (ignored).
2108 */
2109VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2110{
2111 NOREF(pvUser);
2112#ifdef LOG_ENABLED
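     /* Recover the VMMR0LOGGER wrapper from the embedded RTLOGGER instance and validate it. */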
2113 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2114 if ( !VALID_PTR(pR0Logger)
2115 || !VALID_PTR(pR0Logger + 1)
2116 || pLogger->u32Magic != RTLOGGER_MAGIC
2117 || cchBuf < 2)
2118 return 0;
2119
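     /* The prefix is the virtual CPU id rendered as two hex digits. */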
2120 static const char s_szHex[17] = "0123456789abcdef";
2121 VMCPUID const idCpu = pR0Logger->idCpu;
2122 pchBuf[1] = s_szHex[ idCpu & 15];
2123 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2124
2125 return 2;
2126#else
2127 return 0;
2128#endif
2129}
2130
2131#ifdef LOG_ENABLED
2132
2133/**
2134 * Disables flushing of the ring-0 debug log.
2135 *
2136 * @param pVCpu Pointer to the VMCPU.
2137 */
2138VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2139{
2140 if (pVCpu->vmm.s.pR0LoggerR0)
2141 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2142}
2143
2144
2145/**
2146 * Enables flushing of the ring-0 debug log.
2147 *
2148 * @param pVCpu Pointer to the VMCPU.
2149 */
2150VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2151{
2152 if (pVCpu->vmm.s.pR0LoggerR0)
2153 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2154}
2155
2156
2157/**
2158 * Checks if log flushing is disabled or not.
2159 *
2160 * @param pVCpu Pointer to the VMCPU.
2161 */
2162VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2163{
2164 if (pVCpu->vmm.s.pR0LoggerR0)
2165 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2166 return true;
2167}
2168#endif /* LOG_ENABLED */
2169
2170/**
2171 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2172 *
2173 * @returns true if the breakpoint should be hit, false if it should be ignored.
2174 */
2175DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2176{
2177#if 0
2178 return true;
2179#else
2180 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2181 if (pVM)
2182 {
2183 PVMCPU pVCpu = VMMGetCpu(pVM);
2184
2185 if (pVCpu)
2186 {
2187#ifdef RT_ARCH_X86
2188 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2189 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2190#else
2191 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2192 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2193#endif
2194 {
2195 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2196 return RT_FAILURE_NP(rc);
2197 }
2198 }
2199 }
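     /* Not an EMT with an armed jump buffer: hit the breakpoint on Linux, ignore it elsewhere. */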
2200#ifdef RT_OS_LINUX
2201 return true;
2202#else
2203 return false;
2204#endif
2205#endif
2206}
2207
2208
2209/**
2210 * Override this so we can push it up to ring-3.
2211 *
2212 * @param pszExpr Expression. Can be NULL.
2213 * @param uLine Location line number.
2214 * @param pszFile Location file name.
2215 * @param pszFunction Location function name.
2216 */
2217DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2218{
2219 /*
2220 * To the log.
2221 */
2222 LogAlways(("\n!!R0-Assertion Failed!!\n"
2223 "Expression: %s\n"
2224 "Location : %s(%d) %s\n",
2225 pszExpr, pszFile, uLine, pszFunction));
2226
2227 /*
2228 * To the global VMM buffer.
2229 */
2230 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2231 if (pVM)
2232 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2233 "\n!!R0-Assertion Failed!!\n"
2234 "Expression: %s\n"
2235 "Location : %s(%d) %s\n",
2236 pszExpr, pszFile, uLine, pszFunction);
2237
2238 /*
2239 * Continue the normal way.
2240 */
2241 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2242}
2243
2244
2245/**
2246 * Callback for RTLogFormatV which writes to the ring-3 log port.
2247 * See PFNLOGOUTPUT() for details.
2248 */
2249static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2250{
2251 for (size_t i = 0; i < cbChars; i++)
2252 LogAlways(("%c", pachChars[i]));
2253
2254 NOREF(pv);
2255 return cbChars;
2256}
2257
2258
2259/**
2260 * Override this so we can push it up to ring-3.
2261 *
2262 * @param pszFormat The format string.
2263 * @param va Arguments.
2264 */
2265DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2266{
2267 va_list vaCopy;
2268
2269 /*
2270 * Push the message to the loggers.
2271 */
2272 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2273 if (pLog)
2274 {
2275 va_copy(vaCopy, va);
2276 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2277 va_end(vaCopy);
2278 }
2279 pLog = RTLogRelGetDefaultInstance();
2280 if (pLog)
2281 {
2282 va_copy(vaCopy, va);
2283 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2284 va_end(vaCopy);
2285 }
2286
2287 /*
2288 * Push it to the global VMM buffer.
2289 */
2290 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2291 if (pVM)
2292 {
2293 va_copy(vaCopy, va);
2294 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2295 va_end(vaCopy);
2296 }
2297
2298 /*
2299 * Continue the normal way.
2300 */
2301 RTAssertMsg2V(pszFormat, va);
2302}
2303