source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp @ 57358

Last change: r57358, checked in by vboxsync on 2015-08-14 (scm cleanup run).

1/* $Id: VMMR0.cpp 57358 2015-08-14 15:16:38Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#define LOG_GROUP LOG_GROUP_VMM
23#include <VBox/vmm/vmm.h>
24#include <VBox/sup.h>
25#include <VBox/vmm/trpm.h>
26#include <VBox/vmm/cpum.h>
27#include <VBox/vmm/pdmapi.h>
28#include <VBox/vmm/pgm.h>
29#include <VBox/vmm/stam.h>
30#include <VBox/vmm/tm.h>
31#include "VMMInternal.h"
32#include <VBox/vmm/vm.h>
33#ifdef VBOX_WITH_PCI_PASSTHROUGH
34# include <VBox/vmm/pdmpci.h>
35#endif
36
37#include <VBox/vmm/gvmm.h>
38#include <VBox/vmm/gmm.h>
39#include <VBox/vmm/gim.h>
40#include <VBox/intnet.h>
41#include <VBox/vmm/hm.h>
42#include <VBox/param.h>
43#include <VBox/err.h>
44#include <VBox/version.h>
45#include <VBox/log.h>
46
47#include <iprt/asm-amd64-x86.h>
48#include <iprt/assert.h>
49#include <iprt/crc.h>
50#include <iprt/mp.h>
51#include <iprt/once.h>
52#include <iprt/stdarg.h>
53#include <iprt/string.h>
54#include <iprt/thread.h>
55#include <iprt/timer.h>
56
57#include "dtrace/VBoxVMM.h"
58
59
60#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
61# pragma intrinsic(_AddressOfReturnAddress)
62#endif
63
64
65/*********************************************************************************************************************************
66* Defined Constants And Macros *
67*********************************************************************************************************************************/
68/** @def VMM_CHECK_SMAP_SETUP
69 * SMAP check setup. */
70/** @def VMM_CHECK_SMAP_CHECK
71 * Checks that the AC flag is set if SMAP is enabled. If AC is not set,
72 * it will be logged and @a a_BadExpr is executed. */
73/** @def VMM_CHECK_SMAP_CHECK2
74 * Checks that the AC flag is set if SMAP is enabled. If AC is not set, it will
74 * be logged, written to the VM's assertion text buffer, and @a a_BadExpr is
76 * executed. */
77#if defined(VBOX_STRICT) || 1
78# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = SUPR0GetKernelFeatures()
79# define VMM_CHECK_SMAP_CHECK(a_BadExpr) \
80 do { \
81 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
82 { \
83 RTCCUINTREG fEflCheck = ASMGetFlags(); \
84 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
85 { /* likely */ } \
86 else \
87 { \
88 SUPR0Printf("%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
89 a_BadExpr; \
90 } \
91 } \
92 } while (0)
93# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) \
94 do { \
95 if (fKernelFeatures & SUPKERNELFEATURES_SMAP) \
96 { \
97 RTCCUINTREG fEflCheck = ASMGetFlags(); \
98 if (RT_LIKELY(fEflCheck & X86_EFL_AC)) \
99 { /* likely */ } \
100 else \
101 { \
102 SUPR0BadContext((a_pVM) ? (a_pVM)->pSession : NULL, __FILE__, __LINE__, "EFLAGS.AC is zero!"); \
103 RTStrPrintf((a_pVM)->vmm.s.szRing0AssertMsg1, sizeof((a_pVM)->vmm.s.szRing0AssertMsg1), \
104 "%s, line %d: EFLAGS.AC is clear! (%#x)\n", __FUNCTION__, __LINE__, (uint32_t)fEflCheck); \
105 a_BadExpr; \
106 } \
107 } \
108 } while (0)
109#else
110# define VMM_CHECK_SMAP_SETUP() uint32_t const fKernelFeatures = 0
111# define VMM_CHECK_SMAP_CHECK(a_BadExpr) NOREF(fKernelFeatures)
112# define VMM_CHECK_SMAP_CHECK2(a_pVM, a_BadExpr) NOREF(fKernelFeatures)
113#endif
114
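/*
 * Illustrative sketch only: the intended usage pattern for the SMAP macros
 * above, modelled on the real entry points later in this file. The function
 * vmmR0DemoEntry is hypothetical. VMM_CHECK_SMAP_SETUP() reads the kernel
 * feature mask once; each subsequent check verifies that EFLAGS.AC is still
 * set and runs the supplied bad-path expression if it is not.
 */
#if 0
static int vmmR0DemoEntry(PVM pVM, VMCPUID idCpu)
{
    VMM_CHECK_SMAP_SETUP();
    VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);       /* logs and bails */

    int rc = GVMMR0SchedPoll(pVM, idCpu, false /*fYield*/);
    VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_SMAP_BUT_AC_CLEAR);   /* also records the assertion in the VM */
    return rc;
}
#endif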
115
116/*********************************************************************************************************************************
117* Internal Functions *
118*********************************************************************************************************************************/
119RT_C_DECLS_BEGIN
120#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
121extern uint64_t __udivdi3(uint64_t, uint64_t);
122extern uint64_t __umoddi3(uint64_t, uint64_t);
123#endif
124RT_C_DECLS_END
125
126
127/*********************************************************************************************************************************
128* Global Variables *
129*********************************************************************************************************************************/
130/** Drag in necessary library bits.
131 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
132PFNRT g_VMMR0Deps[] =
133{
134 (PFNRT)RTCrc32,
135 (PFNRT)RTOnce,
136#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
137 (PFNRT)__udivdi3,
138 (PFNRT)__umoddi3,
139#endif
140 NULL
141};
142
143#ifdef RT_OS_SOLARIS
144/* Dependency information for the native Solaris loader. */
145extern "C" { char _depends_on[] = "vboxdrv"; }
146#endif
147
148
149
150/**
151 * Initialize the module.
152 * This is called when we're first loaded.
153 *
154 * @returns 0 on success.
155 * @returns VBox status code on failure.
156 * @param hMod Image handle for use in APIs.
157 */
158DECLEXPORT(int) ModuleInit(void *hMod)
159{
160 VMM_CHECK_SMAP_SETUP();
161 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
162
163#ifdef VBOX_WITH_DTRACE_R0
164 /*
165 * The first thing to do is register the static tracepoints.
166 * (Deregistration is automatic.)
167 */
168 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
169 if (RT_FAILURE(rc2))
170 return rc2;
171#endif
172 LogFlow(("ModuleInit:\n"));
173
174#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
175 /*
176 * Display the CMOS debug code.
177 */
178 ASMOutU8(0x72, 0x03);
179 uint8_t bDebugCode = ASMInU8(0x73);
180 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
181 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
182#endif
183
184 /*
185 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
186 */
187 int rc = vmmInitFormatTypes();
188 if (RT_SUCCESS(rc))
189 {
190 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
191 rc = GVMMR0Init();
192 if (RT_SUCCESS(rc))
193 {
194 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
195 rc = GMMR0Init();
196 if (RT_SUCCESS(rc))
197 {
198 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
199 rc = HMR0Init();
200 if (RT_SUCCESS(rc))
201 {
202 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
203 rc = PGMRegisterStringFormatTypes();
204 if (RT_SUCCESS(rc))
205 {
206 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
207#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
208 rc = PGMR0DynMapInit();
209#endif
210 if (RT_SUCCESS(rc))
211 {
212 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
213 rc = IntNetR0Init();
214 if (RT_SUCCESS(rc))
215 {
216#ifdef VBOX_WITH_PCI_PASSTHROUGH
217 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
218 rc = PciRawR0Init();
219#endif
220 if (RT_SUCCESS(rc))
221 {
222 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
223 rc = CPUMR0ModuleInit();
224 if (RT_SUCCESS(rc))
225 {
226#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
227 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
228 rc = vmmR0TripleFaultHackInit();
229 if (RT_SUCCESS(rc))
230#endif
231 {
232 VMM_CHECK_SMAP_CHECK(rc = VERR_VMM_SMAP_BUT_AC_CLEAR);
233 if (RT_SUCCESS(rc))
234 {
235 LogFlow(("ModuleInit: returns success.\n"));
236 return VINF_SUCCESS;
237 }
238 }
239
240 /*
241 * Bail out.
242 */
243#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
244 vmmR0TripleFaultHackTerm();
245#endif
246 }
247 else
248 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
249#ifdef VBOX_WITH_PCI_PASSTHROUGH
250 PciRawR0Term();
251#endif
252 }
253 else
254 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
255 IntNetR0Term();
256 }
257 else
258 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
259#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
260 PGMR0DynMapTerm();
261#endif
262 }
263 else
264 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
265 PGMDeregisterStringFormatTypes();
266 }
267 else
268 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
269 HMR0Term();
270 }
271 else
272 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
273 GMMR0Term();
274 }
275 else
276 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
277 GVMMR0Term();
278 }
279 else
280 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
281 vmmTermFormatTypes();
282 }
283 else
284 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
285
286 LogFlow(("ModuleInit: failed %Rrc\n", rc));
287 return rc;
288}
289
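/*
 * Illustrative sketch only: ModuleInit above follows the "initialize in
 * order, unwind in reverse on failure" idiom. A hypothetical two-step
 * condensation of the same pattern:
 */
#if 0
static int demoNestedInit(void)
{
    int rc = GVMMR0Init();
    if (RT_SUCCESS(rc))
    {
        rc = GMMR0Init();
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;    /* Both subsystems are up. */
        GVMMR0Term();               /* Undo step 1 when step 2 fails. */
    }
    return rc;
}
#endif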
290
291/**
292 * Terminate the module.
293 * This is called when we're finally unloaded.
294 *
295 * @param hMod Image handle for use in APIs.
296 */
297DECLEXPORT(void) ModuleTerm(void *hMod)
298{
299 NOREF(hMod);
300 LogFlow(("ModuleTerm:\n"));
301
302 /*
303 * Terminate the CPUM module (Local APIC cleanup).
304 */
305 CPUMR0ModuleTerm();
306
307 /*
308 * Terminate the internal network service.
309 */
310 IntNetR0Term();
311
312 /*
313 * PGM (Darwin), HM and PciRaw global cleanup.
314 */
315#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
316 PGMR0DynMapTerm();
317#endif
318#ifdef VBOX_WITH_PCI_PASSTHROUGH
319 PciRawR0Term();
320#endif
321 PGMDeregisterStringFormatTypes();
322 HMR0Term();
323#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
324 vmmR0TripleFaultHackTerm();
325#endif
326
327 /*
328 * Destroy the GMM and GVMM instances.
329 */
330 GMMR0Term();
331 GVMMR0Term();
332
333 vmmTermFormatTypes();
334
335 LogFlow(("ModuleTerm: returns\n"));
336}
337
338
339/**
340 * Initiates the R0 driver for a particular VM instance.
341 *
342 * @returns VBox status code.
343 *
344 * @param pVM Pointer to the VM.
345 * @param uSvnRev The SVN revision of the ring-3 part.
346 * @param uBuildType Build type indicator.
347 * @thread EMT.
348 */
349static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
350{
351 VMM_CHECK_SMAP_SETUP();
352 VMM_CHECK_SMAP_CHECK(return VERR_VMM_SMAP_BUT_AC_CLEAR);
353
354 /*
355 * Match the SVN revisions and build type.
356 */
357 if (uSvnRev != VMMGetSvnRev())
358 {
359 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
360 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
361 return VERR_VMM_R0_VERSION_MISMATCH;
362 }
363 if (uBuildType != vmmGetBuildType())
364 {
365 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
366 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
367 return VERR_VMM_R0_VERSION_MISMATCH;
368 }
369 if ( !VALID_PTR(pVM)
370 || pVM->pVMR0 != pVM)
371 return VERR_INVALID_PARAMETER;
372
373
374#ifdef LOG_ENABLED
375 /*
376 * Register the EMT R0 logger instance for VCPU 0.
377 */
378 PVMCPU pVCpu = &pVM->aCpus[0];
379
380 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
381 if (pR0Logger)
382 {
383# if 0 /* testing of the logger. */
384 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
385 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
386 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
387 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
388
389 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
390 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
391 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
392 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
393
394 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
395 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
396 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
397 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
398
399 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
400 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
401 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
402 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
403 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
404 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
405
406 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
407 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
408
409 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
410 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
411 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
412# endif
413 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
414 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
415 pR0Logger->fRegistered = true;
416 }
417#endif /* LOG_ENABLED */
418
419 /*
420 * Check if the host supports high resolution timers or not.
421 */
422 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
423 && !RTTimerCanDoHighResolution())
424 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
425
426 /*
427 * Initialize the per VM data for GVMM and GMM.
428 */
429 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
430 int rc = GVMMR0InitVM(pVM);
431// if (RT_SUCCESS(rc))
432// rc = GMMR0InitPerVMData(pVM);
433 if (RT_SUCCESS(rc))
434 {
435 /*
436 * Init HM, CPUM and PGM (Darwin only).
437 */
438 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
439 rc = HMR0InitVM(pVM);
440 if (RT_SUCCESS(rc))
441 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION); /* CPUMR0InitVM will otherwise panic the host */
442 if (RT_SUCCESS(rc))
443 {
444 rc = CPUMR0InitVM(pVM);
445 if (RT_SUCCESS(rc))
446 {
447 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
448#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
449 rc = PGMR0DynMapInitVM(pVM);
450#endif
451 if (RT_SUCCESS(rc))
452 {
453 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
454#ifdef VBOX_WITH_PCI_PASSTHROUGH
455 rc = PciRawR0InitVM(pVM);
456#endif
457 if (RT_SUCCESS(rc))
458 {
459 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
460 rc = GIMR0InitVM(pVM);
461 if (RT_SUCCESS(rc))
462 {
463 VMM_CHECK_SMAP_CHECK2(pVM, rc = VERR_VMM_RING0_ASSERTION);
464 if (RT_SUCCESS(rc))
465 {
466 GVMMR0DoneInitVM(pVM);
467 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
468 return rc;
469 }
470
471 /* Bail out. */
472 GIMR0TermVM(pVM);
473 }
474#ifdef VBOX_WITH_PCI_PASSTHROUGH
475 PciRawR0TermVM(pVM);
476#endif
477 }
478 }
479 }
480 HMR0TermVM(pVM);
481 }
482 }
483
484 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
485 return rc;
486}
487
488
489/**
490 * Terminates the R0 bits for a particular VM instance.
491 *
492 * This is normally called by ring-3 as part of the VM termination process, but
493 * may alternatively be called during the support driver session cleanup when
494 * the VM object is destroyed (see GVMM).
495 *
496 * @returns VBox status code.
497 *
498 * @param pVM Pointer to the VM.
499 * @param pGVM Pointer to the global VM structure. Optional.
500 * @thread EMT or session clean up thread.
501 */
502VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
503{
504#ifdef VBOX_WITH_PCI_PASSTHROUGH
505 PciRawR0TermVM(pVM);
506#endif
507
508 /*
509 * Tell GVMM what we're up to and check that we only do this once.
510 */
511 if (GVMMR0DoingTermVM(pVM, pGVM))
512 {
513 GIMR0TermVM(pVM);
514
515 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
516 * here to make sure we don't leak any shared pages if we crash... */
517#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
518 PGMR0DynMapTermVM(pVM);
519#endif
520 HMR0TermVM(pVM);
521 }
522
523 /*
524 * Deregister the logger.
525 */
526 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
527 return VINF_SUCCESS;
528}
529
530
531/**
532 * VMM ring-0 thread-context callback.
533 *
534 * This does common HM state updating and calls the HM-specific thread-context
535 * callback.
536 *
537 * @param enmEvent The thread-context event.
538 * @param pvUser Opaque pointer to the VMCPU.
539 *
540 * @thread EMT(pvUser)
541 */
542static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
543{
544 PVMCPU pVCpu = (PVMCPU)pvUser;
545
546 switch (enmEvent)
547 {
548 case RTTHREADCTXEVENT_IN:
549 {
550 /*
551 * Linux may call us with preemption enabled (really!) but technically we
552 * cannot get preempted here, otherwise we end up in an infinite recursion
553 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
554 * ad infinitum). Let's just disable preemption for now...
555 */
556 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
557 * preemption after doing the callout (one or two functions up the
558 * call chain). */
559 /** @todo r=ramshankar: See @bugref{5313#c30}. */
560 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
561 RTThreadPreemptDisable(&ParanoidPreemptState);
562
563 /* We need to update the VCPU <-> host CPU mapping. */
564 RTCPUID idHostCpu;
565 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
566 pVCpu->iHostCpuSet = iHostCpuSet;
567 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
568
569 /* In the very unlikely event that the GIP delta for the CPU we're
570 rescheduled on needs calculating, try to force a return to ring-3.
571 We unfortunately cannot do the measurements right here. */
572 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
573 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
574
575 /* Invoke the HM-specific thread-context callback. */
576 HMR0ThreadCtxCallback(enmEvent, pvUser);
577
578 /* Restore preemption. */
579 RTThreadPreemptRestore(&ParanoidPreemptState);
580 break;
581 }
582
583 case RTTHREADCTXEVENT_OUT:
584 {
585 /* Invoke the HM-specific thread-context callback. */
586 HMR0ThreadCtxCallback(enmEvent, pvUser);
587
588 /*
589 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
590 * have the same host CPU associated with them.
591 */
592 pVCpu->iHostCpuSet = UINT32_MAX;
593 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
594 break;
595 }
596
597 default:
598 /* Invoke the HM-specific thread-context callback. */
599 HMR0ThreadCtxCallback(enmEvent, pvUser);
600 break;
601 }
602}
603
604
605/**
606 * Creates thread switching hook for the current EMT thread.
607 *
608 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
609 * platform does not implement switcher hooks, no hooks will be created and
610 * the member is set to NIL_RTTHREADCTXHOOK.
611 *
612 * @returns VBox status code.
613 * @param pVCpu Pointer to the cross context CPU structure.
614 * @thread EMT(pVCpu)
615 */
616VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
617{
618 VMCPU_ASSERT_EMT(pVCpu);
619 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
620
621 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
622 if (RT_SUCCESS(rc))
623 return rc;
624
625 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
626 if (rc == VERR_NOT_SUPPORTED)
627 return VINF_SUCCESS;
628
629 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
630 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
631}
632
633
634/**
635 * Destroys the thread switching hook for the specified VCPU.
636 *
637 * @param pVCpu Pointer to the cross context CPU structure.
638 * @remarks Can be called from any thread.
639 */
640VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
641{
642 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
643 AssertRC(rc);
644}
645
646
647/**
648 * Disables the thread switching hook for this VCPU (if we got one).
649 *
650 * @param pVCpu Pointer to the cross context CPU structure.
651 * @thread EMT(pVCpu)
652 *
653 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
654 * this call. This means you have to be careful with what you do!
655 */
656VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
657{
658 /*
659 * Clear the VCPU <-> host CPU mapping as we've left HM context.
660 * @bugref{7726#c19} explains the need for this trick:
661 *
662 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
663 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disable context hooks during
664 * longjmp & normal return to ring-3, which opens a window where we may be
665 * rescheduled without changing VMCPU::idHostCpu and cause confusion if
666 * the CPU starts executing a different EMT. Both functions first disable
667 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
668 * an opening for getting preempted.
669 */
670 /** @todo Make HM not need this API! Then we could leave the hooks enabled
671 * all the time. */
672 /** @todo move this into the context hook disabling if(). */
673 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
674
675 /*
676 * Disable the context hook, if we got one.
677 */
678 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
679 {
680 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
681 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
682 AssertRC(rc);
683 }
684}
685
686
687/**
688 * Internal version of VMMR0ThreadCtxHooksAreRegistered.
689 *
690 * @returns true if registered, false otherwise.
691 * @param pVCpu Pointer to the VMCPU.
692 */
693DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
694{
695 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
696}
697
698
699/**
700 * Whether thread-context hooks are registered for this VCPU.
701 *
702 * @returns true if registered, false otherwise.
703 * @param pVCpu Pointer to the VMCPU.
704 */
705VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
706{
707 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
708}
709
710
711#ifdef VBOX_WITH_STATISTICS
712/**
713 * Record return code statistics
714 * @param pVM Pointer to the VM.
715 * @param pVCpu Pointer to the VMCPU.
716 * @param rc The status code.
717 */
718static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
719{
720 /*
721 * Collect statistics.
722 */
723 switch (rc)
724 {
725 case VINF_SUCCESS:
726 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
727 break;
728 case VINF_EM_RAW_INTERRUPT:
729 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
730 break;
731 case VINF_EM_RAW_INTERRUPT_HYPER:
732 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
733 break;
734 case VINF_EM_RAW_GUEST_TRAP:
735 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
736 break;
737 case VINF_EM_RAW_RING_SWITCH:
738 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
739 break;
740 case VINF_EM_RAW_RING_SWITCH_INT:
741 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
742 break;
743 case VINF_EM_RAW_STALE_SELECTOR:
744 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
745 break;
746 case VINF_EM_RAW_IRET_TRAP:
747 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
748 break;
749 case VINF_IOM_R3_IOPORT_READ:
750 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
751 break;
752 case VINF_IOM_R3_IOPORT_WRITE:
753 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
754 break;
755 case VINF_IOM_R3_MMIO_READ:
756 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
757 break;
758 case VINF_IOM_R3_MMIO_WRITE:
759 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
760 break;
761 case VINF_IOM_R3_MMIO_READ_WRITE:
762 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
763 break;
764 case VINF_PATM_HC_MMIO_PATCH_READ:
765 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
766 break;
767 case VINF_PATM_HC_MMIO_PATCH_WRITE:
768 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
769 break;
770 case VINF_CPUM_R3_MSR_READ:
771 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
772 break;
773 case VINF_CPUM_R3_MSR_WRITE:
774 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
775 break;
776 case VINF_EM_RAW_EMULATE_INSTR:
777 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
778 break;
779 case VINF_EM_RAW_EMULATE_IO_BLOCK:
780 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
781 break;
782 case VINF_PATCH_EMULATE_INSTR:
783 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
784 break;
785 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
786 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
787 break;
788 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
789 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
790 break;
791 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
792 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
793 break;
794 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
795 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
796 break;
797 case VINF_CSAM_PENDING_ACTION:
798 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
799 break;
800 case VINF_PGM_SYNC_CR3:
801 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
802 break;
803 case VINF_PATM_PATCH_INT3:
804 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
805 break;
806 case VINF_PATM_PATCH_TRAP_PF:
807 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
808 break;
809 case VINF_PATM_PATCH_TRAP_GP:
810 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
811 break;
812 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
813 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
814 break;
815 case VINF_EM_RESCHEDULE_REM:
816 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
817 break;
818 case VINF_EM_RAW_TO_R3:
819 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
820 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
821 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
822 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
823 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
824 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
825 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
826 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
827 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
828 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
829 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
830 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
831 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
832 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
833 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
834 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
835 else
836 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
837 break;
838
839 case VINF_EM_RAW_TIMER_PENDING:
840 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
841 break;
842 case VINF_EM_RAW_INTERRUPT_PENDING:
843 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
844 break;
845 case VINF_VMM_CALL_HOST:
846 switch (pVCpu->vmm.s.enmCallRing3Operation)
847 {
848 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
849 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
850 break;
851 case VMMCALLRING3_PDM_LOCK:
852 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
853 break;
854 case VMMCALLRING3_PGM_POOL_GROW:
855 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
856 break;
857 case VMMCALLRING3_PGM_LOCK:
858 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
859 break;
860 case VMMCALLRING3_PGM_MAP_CHUNK:
861 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
862 break;
863 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
864 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
865 break;
866 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
867 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
868 break;
869 case VMMCALLRING3_VMM_LOGGER_FLUSH:
870 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
871 break;
872 case VMMCALLRING3_VM_SET_ERROR:
873 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
874 break;
875 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
876 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
877 break;
878 case VMMCALLRING3_VM_R0_ASSERTION:
879 default:
880 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
881 break;
882 }
883 break;
884 case VINF_PATM_DUPLICATE_FUNCTION:
885 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
886 break;
887 case VINF_PGM_CHANGE_MODE:
888 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
889 break;
890 case VINF_PGM_POOL_FLUSH_PENDING:
891 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
892 break;
893 case VINF_EM_PENDING_REQUEST:
894 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
895 break;
896 case VINF_EM_HM_PATCH_TPR_INSTR:
897 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
898 break;
899 default:
900 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
901 break;
902 }
903}
904#endif /* VBOX_WITH_STATISTICS */
905
906
907/**
908 * Unused ring-0 entry point that used to be called from the interrupt gate.
909 *
910 * Will be removed one of the next times we do a major SUPDrv version bump.
911 *
912 * @returns VBox status code.
913 * @param pVM Pointer to the VM.
914 * @param enmOperation Which operation to execute.
915 * @param pvArg Argument to the operation.
916 * @remarks Assume called with interrupts disabled.
917 */
918VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
919{
920 /*
921 * We're returning VERR_NOT_SUPPORTED here so we've got something other
922 * than -1, which the interrupt gate glue code might return.
923 */
924 Log(("operation %#x is not supported\n", enmOperation));
925 NOREF(enmOperation); NOREF(pvArg); NOREF(pVM);
926 return VERR_NOT_SUPPORTED;
927}
928
929
930/**
931 * The Ring 0 entry point, called by the fast-ioctl path.
932 *
933 * @param pVM Pointer to the VM.
934 * The return code is stored in pVCpu->vmm.s.iLastGZRc.
935 * @param idCpu The Virtual CPU ID of the calling EMT.
936 * @param enmOperation Which operation to execute.
937 * @remarks Assume called with interrupts _enabled_.
938 */
939VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
940{
941 /*
942 * Validation.
943 */
944 if (RT_UNLIKELY(idCpu >= pVM->cCpus))
945 return;
946 PVMCPU pVCpu = &pVM->aCpus[idCpu];
947 if (RT_UNLIKELY(pVCpu->hNativeThreadR0 != RTThreadNativeSelf()))
948 return;
949 VMM_CHECK_SMAP_SETUP();
950 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
951
952 /*
953 * Perform requested operation.
954 */
955 switch (enmOperation)
956 {
957 /*
958 * Switch to GC and run guest raw mode code.
959 * Disable interrupts before doing the world switch.
960 */
961 case VMMR0_DO_RAW_RUN:
962 {
963#ifdef VBOX_WITH_RAW_MODE
964# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
965 /* Some safety precautions first. */
966 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
967 {
968 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
969 break;
970 }
971# endif
972
973 /*
974 * Disable preemption.
975 */
976 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
977 RTThreadPreemptDisable(&PreemptState);
978
979 /*
980 * Get the host CPU identifiers, make sure they are valid and that
981 * we've got a TSC delta for the CPU.
982 */
983 RTCPUID idHostCpu;
984 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
985 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
986 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
987 {
988 /*
989 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
990 */
991# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
992 CPUMR0SetLApic(pVCpu, iHostCpuSet);
993# endif
994 pVCpu->iHostCpuSet = iHostCpuSet;
995 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
996
997 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
998 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
999
1000 /*
1001 * We might need to disable VT-x if the active switcher turns off paging.
1002 */
1003 bool fVTxDisabled;
1004 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1005 if (RT_SUCCESS(rc))
1006 {
1007 /*
1008 * Disable interrupts and run raw-mode code. The loop is for efficiently
1009 * dispatching tracepoints that fired in raw-mode context.
1010 */
1011 RTCCUINTREG uFlags = ASMIntDisableFlags();
1012
1013 for (;;)
1014 {
1015 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
1016 TMNotifyStartOfExecution(pVCpu);
1017
1018 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1019 pVCpu->vmm.s.iLastGZRc = rc;
1020
1021 TMNotifyEndOfExecution(pVCpu);
1022 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1023
1024 if (rc != VINF_VMM_CALL_TRACER)
1025 break;
1026 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
1027 }
1028
1029 /*
1030 * Re-enable VT-x before we dispatch any pending host interrupts and
1031 * re-enable interrupts.
1032 */
1033 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1034
1035 if ( rc == VINF_EM_RAW_INTERRUPT
1036 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1037 TRPMR0DispatchHostInterrupt(pVM);
1038
1039 ASMSetFlags(uFlags);
1040
1041 /* Fire dtrace probe and collect statistics. */
1042 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1043# ifdef VBOX_WITH_STATISTICS
1044 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1045 vmmR0RecordRC(pVM, pVCpu, rc);
1046# endif
1047 }
1048 else
1049 pVCpu->vmm.s.iLastGZRc = rc;
1050
1051 /*
1052 * Invalidate the host CPU identifiers as we restore preemption.
1053 */
1054 pVCpu->iHostCpuSet = UINT32_MAX;
1055 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1056
1057 RTThreadPreemptRestore(&PreemptState);
1058 }
1059 /*
1060 * Invalid CPU set index or TSC delta in need of measuring.
1061 */
1062 else
1063 {
1064 RTThreadPreemptRestore(&PreemptState);
1065 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1066 {
1067 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1068 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1069 0 /*default cTries*/);
1070 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1071 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1072 else
1073 pVCpu->vmm.s.iLastGZRc = rc;
1074 }
1075 else
1076 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1077 }
1078
1079#else /* !VBOX_WITH_RAW_MODE */
1080 pVCpu->vmm.s.iLastGZRc = VERR_RAW_MODE_NOT_SUPPORTED;
1081#endif
1082 break;
1083 }
1084
1085 /*
1086 * Run guest code using the available hardware acceleration technology.
1087 */
1088 case VMMR0_DO_HM_RUN:
1089 {
1090 /*
1091 * Disable preemption.
1092 */
1093 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1094 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1095 RTThreadPreemptDisable(&PreemptState);
1096
1097 /*
1098 * Get the host CPU identifiers, make sure they are valid and that
1099 * we've got a TSC delta for the CPU.
1100 */
1101 RTCPUID idHostCpu;
1102 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1103 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1104 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1105 {
1106 pVCpu->iHostCpuSet = iHostCpuSet;
1107 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1108
1109 /*
1110 * Update the periodic preemption timer if it's active.
1111 */
1112 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1113 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1114 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1115
1116#ifdef LOG_ENABLED
1117 /*
1118 * Ugly: Lazy registration of ring 0 loggers.
1119 */
1120 if (pVCpu->idCpu > 0)
1121 {
1122 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1123 if ( pR0Logger
1124 && RT_UNLIKELY(!pR0Logger->fRegistered))
1125 {
1126 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1127 pR0Logger->fRegistered = true;
1128 }
1129 }
1130#endif
1131
1132 int rc;
1133 bool fPreemptRestored = false;
1134 if (!HMR0SuspendPending())
1135 {
1136 /*
1137 * Enable the context switching hook.
1138 */
1139 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1140 {
1141 Assert(!RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook));
1142 int rc2 = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook); AssertRC(rc2);
1143 }
1144
1145 /*
1146 * Enter HM context.
1147 */
1148 rc = HMR0Enter(pVM, pVCpu);
1149 if (RT_SUCCESS(rc))
1150 {
1151 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1152
1153 /*
1154 * When preemption hooks are in place, enable preemption now that
1155 * we're in HM context.
1156 */
1157 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1158 {
1159 fPreemptRestored = true;
1160 RTThreadPreemptRestore(&PreemptState);
1161 }
1162
1163 /*
1164 * Set up the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1165 */
1166 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1167 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1168 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1169
1170 /*
1171 * Assert sanity on the way out. Using manual assertion code here as normal
1172 * assertions would panic the host since we're outside the setjmp/longjmp zone.
1173 */
1174 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1175 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1176 {
1177 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1178 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1179 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1180 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1181 }
1182 /** @todo Get rid of this. HM shouldn't disable the context hook. */
1183 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1184 {
1185 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1186 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1187 "Thread-context hooks still enabled! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1188 rc = VERR_INVALID_STATE;
1189 }
1190
1191 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1192 }
1193 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1194
1195 /*
1196 * Invalidate the host CPU identifiers before we disable the context
1197 * hook / restore preemption.
1198 */
1199 pVCpu->iHostCpuSet = UINT32_MAX;
1200 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1201
1202 /*
1203 * Disable context hooks. Due to unresolved cleanup issues, we
1204 * cannot leave the hooks enabled when we return to ring-3.
1205 *
1206 * Note! At the moment HM may also have disabled the hook
1207 * when we get here, but the IPRT API handles that.
1208 */
1209 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
1210 {
1211 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1212 RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
1213 }
1214 }
1215 /*
1216 * The system is about to go into suspend mode; go back to ring 3.
1217 */
1218 else
1219 {
1220 rc = VINF_EM_RAW_INTERRUPT;
1221 pVCpu->iHostCpuSet = UINT32_MAX;
1222 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1223 }
1224
1225 /** @todo When HM stops messing with the context hook state, we'll disable
1226 * preemption again before the RTThreadCtxHookDisable call. */
1227 if (!fPreemptRestored)
1228 RTThreadPreemptRestore(&PreemptState);
1229
1230 pVCpu->vmm.s.iLastGZRc = rc;
1231
1232 /* Fire dtrace probe and collect statistics. */
1233 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1234#ifdef VBOX_WITH_STATISTICS
1235 vmmR0RecordRC(pVM, pVCpu, rc);
1236#endif
1237 }
1238 /*
1239 * Invalid CPU set index or TSC delta in need of measuring.
1240 */
1241 else
1242 {
1243 pVCpu->iHostCpuSet = UINT32_MAX;
1244 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1245 RTThreadPreemptRestore(&PreemptState);
1246 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1247 {
1248 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1249 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1250 0 /*default cTries*/);
1251 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1252 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1253 else
1254 pVCpu->vmm.s.iLastGZRc = rc;
1255 }
1256 else
1257 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1258 }
1259 break;
1260 }
1261
1262 /*
1263 * For profiling.
1264 */
1265 case VMMR0_DO_NOP:
1266 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1267 break;
1268
1269 /*
1270 * Impossible.
1271 */
1272 default:
1273 AssertMsgFailed(("%#x\n", enmOperation));
1274 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1275 break;
1276 }
1277 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1278}
1279
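/*
 * Illustrative sketch only (not part of this file): roughly how ring-3 reaches
 * VMMR0EntryFast, assuming the SUPR3CallVMMR0Fast helper declared in VBox/sup.h.
 * The iLastGZRc handshake mirrors what VMMR0EntryFast does above; the wrapper
 * function below is hypothetical.
 */
#if 0
static int demoRunGuestOnce(PVM pVM, PVMCPU pVCpu)
{
    /* Fast ioctl into ring-0; the guest-run status is passed back via iLastGZRc. */
    int rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, pVCpu->idCpu);
    if (RT_LIKELY(rc == VINF_SUCCESS))
        rc = pVCpu->vmm.s.iLastGZRc;
    return rc;
}
#endif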
1280
1281/**
1282 * Validates a session or VM session argument.
1283 *
1284 * @returns true / false accordingly.
1285 * @param pVM Pointer to the VM.
1286 * @param pSession The session argument.
1287 */
1288DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1289{
1290 /* This must be set! */
1291 if (!pSession)
1292 return false;
1293
1294 /* Only one out of the two. */
1295 if (pVM && pClaimedSession)
1296 return false;
1297 if (pVM)
1298 pClaimedSession = pVM->pSession;
1299 return pClaimedSession == pSession;
1300}
1301
1302
1303/**
1304 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1305 * called through a longjmp so we can exit safely on failure.
1306 *
1307 * @returns VBox status code.
1308 * @param pVM Pointer to the VM.
1309 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1310 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1311 * @param enmOperation Which operation to execute.
1312 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1313 * The support driver validates this if it's present.
1314 * @param u64Arg Some simple constant argument.
1315 * @param pSession The session of the caller.
1316 * @remarks Assume called with interrupts _enabled_.
1317 */
1318static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1319{
1320 /*
1321 * Common VM pointer validation.
1322 */
1323 if (pVM)
1324 {
1325 if (RT_UNLIKELY( !VALID_PTR(pVM)
1326 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1327 {
1328 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
1329 return VERR_INVALID_POINTER;
1330 }
1331 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1332 || pVM->enmVMState > VMSTATE_TERMINATED
1333 || pVM->pVMR0 != pVM))
1334 {
1335 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
1336 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
1337 return VERR_INVALID_POINTER;
1338 }
1339
1340 if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
1341 {
1342 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
1343 return VERR_INVALID_PARAMETER;
1344 }
1345 }
1346 else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
1347 {
1348 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1349 return VERR_INVALID_PARAMETER;
1350 }
1351 VMM_CHECK_SMAP_SETUP();
1352 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1353 int rc;
1354
1355 switch (enmOperation)
1356 {
1357 /*
1358 * GVM requests
1359 */
1360 case VMMR0_DO_GVMM_CREATE_VM:
1361 if (pVM || u64Arg || idCpu != NIL_VMCPUID)
1362 return VERR_INVALID_PARAMETER;
1363 rc = GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
1364 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1365 break;
1366
1367 case VMMR0_DO_GVMM_DESTROY_VM:
1368 if (pReqHdr || u64Arg)
1369 return VERR_INVALID_PARAMETER;
1370 rc = GVMMR0DestroyVM(pVM);
1371 VMM_CHECK_SMAP_CHECK(RT_NOTHING);
1372 break;
1373
1374 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1375 {
1376 if (!pVM)
1377 return VERR_INVALID_PARAMETER;
1378 rc = GVMMR0RegisterVCpu(pVM, idCpu);
1379 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1380 break;
1381 }
1382
1383 case VMMR0_DO_GVMM_SCHED_HALT:
1384 if (pReqHdr)
1385 return VERR_INVALID_PARAMETER;
1386 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1387 rc = GVMMR0SchedHalt(pVM, idCpu, u64Arg);
1388 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1389 break;
1390
1391 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1392 if (pReqHdr || u64Arg)
1393 return VERR_INVALID_PARAMETER;
1394 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1395 rc = GVMMR0SchedWakeUp(pVM, idCpu);
1396 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1397 break;
1398
1399 case VMMR0_DO_GVMM_SCHED_POKE:
1400 if (pReqHdr || u64Arg)
1401 return VERR_INVALID_PARAMETER;
1402 rc = GVMMR0SchedPoke(pVM, idCpu);
1403 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1404 break;
1405
1406 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1407 if (u64Arg)
1408 return VERR_INVALID_PARAMETER;
1409 rc = GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1410 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1411 break;
1412
1413 case VMMR0_DO_GVMM_SCHED_POLL:
1414 if (pReqHdr || u64Arg > 1)
1415 return VERR_INVALID_PARAMETER;
1416 rc = GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
1417 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1418 break;
1419
1420 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1421 if (u64Arg)
1422 return VERR_INVALID_PARAMETER;
1423 rc = GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
1424 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1425 break;
1426
1427 case VMMR0_DO_GVMM_RESET_STATISTICS:
1428 if (u64Arg)
1429 return VERR_INVALID_PARAMETER;
1430 rc = GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
1431 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1432 break;
1433
1434 /*
1435 * Initialize the R0 part of a VM instance.
1436 */
1437 case VMMR0_DO_VMMR0_INIT:
1438 rc = vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1439 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1440 break;
1441
1442 /*
1443 * Terminate the R0 part of a VM instance.
1444 */
1445 case VMMR0_DO_VMMR0_TERM:
1446 rc = VMMR0TermVM(pVM, NULL);
1447 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1448 break;
1449
1450 /*
1451 * Attempt to enable HM mode and check the current setting.
1452 */
1453 case VMMR0_DO_HM_ENABLE:
1454 rc = HMR0EnableAllCpus(pVM);
1455 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1456 break;
1457
1458 /*
1459 * Set up the hardware-accelerated session.
1460 */
1461 case VMMR0_DO_HM_SETUP_VM:
1462 rc = HMR0SetupVM(pVM);
1463 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1464 break;
1465
1466 /*
1467 * Switch to RC to execute Hypervisor function.
1468 */
1469 case VMMR0_DO_CALL_HYPERVISOR:
1470 {
1471#ifdef VBOX_WITH_RAW_MODE
1472 /*
1473 * Validate input / context.
1474 */
1475 if (RT_UNLIKELY(idCpu != 0))
1476 return VERR_INVALID_CPU_ID;
1477 if (RT_UNLIKELY(pVM->cCpus != 1))
1478 return VERR_INVALID_PARAMETER;
1479 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1480# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1481 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1482 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1483# endif
1484
1485 /*
1486 * Disable interrupts.
1487 */
1488 RTCCUINTREG fFlags = ASMIntDisableFlags();
1489
1490 /*
1491 * Get the host CPU identifiers, make sure they are valid and that
1492 * we've got a TSC delta for the CPU.
1493 */
1494 RTCPUID idHostCpu;
1495 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1496 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1497 {
1498 ASMSetFlags(fFlags);
1499 return VERR_INVALID_CPU_INDEX;
1500 }
1501 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1502 {
1503 ASMSetFlags(fFlags);
1504 rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1505 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1506 0 /*default cTries*/);
1507 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1508 {
1509 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1510 return rc;
1511 }
1512 }
1513
1514 /*
1515 * Commit the CPU identifiers.
1516 */
1517# ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1518 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1519# endif
1520 pVCpu->iHostCpuSet = iHostCpuSet;
1521 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1522
1523 /*
1524 * We might need to disable VT-x if the active switcher turns off paging.
1525 */
1526 bool fVTxDisabled;
1527 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1528 if (RT_SUCCESS(rc))
1529 {
1530 /*
1531 * Go through the wormhole...
1532 */
1533 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1534
1535 /*
1536 * Re-enable VT-x before we dispatch any pending host interrupts.
1537 */
1538 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1539
1540 if ( rc == VINF_EM_RAW_INTERRUPT
1541 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1542 TRPMR0DispatchHostInterrupt(pVM);
1543 }
1544
1545 /*
1546 * Invalidate the host CPU identifiers as we restore interrupts.
1547 */
1548 pVCpu->iHostCpuSet = UINT32_MAX;
1549 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1550 ASMSetFlags(fFlags);
1551
1552#else /* !VBOX_WITH_RAW_MODE */
1553 rc = VERR_RAW_MODE_NOT_SUPPORTED;
1554#endif
1555 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1556 break;
1557 }
1558
1559 /*
1560 * PGM wrappers.
1561 */
1562 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1563 if (idCpu == NIL_VMCPUID)
1564 return VERR_INVALID_CPU_ID;
1565 rc = PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
1566 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1567 break;
1568
1569 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1570 if (idCpu == NIL_VMCPUID)
1571 return VERR_INVALID_CPU_ID;
1572 rc = PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);
1573 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1574 break;
1575
1576 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1577 if (idCpu == NIL_VMCPUID)
1578 return VERR_INVALID_CPU_ID;
1579 rc = PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
1580 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1581 break;
1582
1583 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1584 if (idCpu != 0)
1585 return VERR_INVALID_CPU_ID;
1586 rc = PGMR0PhysSetupIommu(pVM);
1587 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1588 break;
1589
1590 /*
1591 * GMM wrappers.
1592 */
1593 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1594 if (u64Arg)
1595 return VERR_INVALID_PARAMETER;
1596 rc = GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1597 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1598 break;
1599
1600 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1601 if (u64Arg)
1602 return VERR_INVALID_PARAMETER;
1603 rc = GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1604 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1605 break;
1606
1607 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1608 if (u64Arg)
1609 return VERR_INVALID_PARAMETER;
1610 rc = GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1611 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1612 break;
1613
1614 case VMMR0_DO_GMM_FREE_PAGES:
1615 if (u64Arg)
1616 return VERR_INVALID_PARAMETER;
1617 rc = GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1618 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1619 break;
1620
1621 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1622 if (u64Arg)
1623 return VERR_INVALID_PARAMETER;
1624 rc = GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1625 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1626 break;
1627
1628 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1629 if (u64Arg)
1630 return VERR_INVALID_PARAMETER;
1631 rc = GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
1632 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1633 break;
1634
1635 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1636 if (idCpu == NIL_VMCPUID)
1637 return VERR_INVALID_CPU_ID;
1638 if (u64Arg)
1639 return VERR_INVALID_PARAMETER;
1640 rc = GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1641 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1642 break;
1643
1644 case VMMR0_DO_GMM_BALLOONED_PAGES:
1645 if (u64Arg)
1646 return VERR_INVALID_PARAMETER;
1647 rc = GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1648 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1649 break;
1650
1651 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1652 if (u64Arg)
1653 return VERR_INVALID_PARAMETER;
1654 rc = GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1655 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1656 break;
1657
1658 case VMMR0_DO_GMM_SEED_CHUNK:
1659 if (pReqHdr)
1660 return VERR_INVALID_PARAMETER;
1661 rc = GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
1662 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1663 break;
1664
1665 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1666 if (idCpu == NIL_VMCPUID)
1667 return VERR_INVALID_CPU_ID;
1668 if (u64Arg)
1669 return VERR_INVALID_PARAMETER;
1670 rc = GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1671 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1672 break;
1673
1674 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1675 if (idCpu == NIL_VMCPUID)
1676 return VERR_INVALID_CPU_ID;
1677 if (u64Arg)
1678 return VERR_INVALID_PARAMETER;
1679 rc = GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1680 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1681 break;
1682
1683 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1684 if (idCpu == NIL_VMCPUID)
1685 return VERR_INVALID_CPU_ID;
1686 if ( u64Arg
1687 || pReqHdr)
1688 return VERR_INVALID_PARAMETER;
1689 rc = GMMR0ResetSharedModules(pVM, idCpu);
1690 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1691 break;
1692
1693#ifdef VBOX_WITH_PAGE_SHARING
1694 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1695 {
1696 if (idCpu == NIL_VMCPUID)
1697 return VERR_INVALID_CPU_ID;
1698 if ( u64Arg
1699 || pReqHdr)
1700 return VERR_INVALID_PARAMETER;
1701
1702 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1703 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1704
1705# ifdef DEBUG_sandervl
1706 /* Make sure that log flushes can jump back to ring-3; it's annoying to get an incomplete log (this is risky though, as the code doesn't take this into account). */
1707 /** @todo This can have bad side effects for unexpected jumps back to ring-3. */
1708 rc = GMMR0CheckSharedModulesStart(pVM);
1709 if (rc == VINF_SUCCESS)
1710 {
1711 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1712 Assert( rc == VINF_SUCCESS
1713 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1714 GMMR0CheckSharedModulesEnd(pVM);
1715 }
1716# else
1717 rc = GMMR0CheckSharedModules(pVM, pVCpu);
1718# endif
1719 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1720 break;
1721 }
1722#endif
1723
1724#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1725 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1726 if (u64Arg)
1727 return VERR_INVALID_PARAMETER;
1728 rc = GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1729 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1730 break;
1731#endif
1732
1733 case VMMR0_DO_GMM_QUERY_STATISTICS:
1734 if (u64Arg)
1735 return VERR_INVALID_PARAMETER;
1736 rc = GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1737 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1738 break;
1739
1740 case VMMR0_DO_GMM_RESET_STATISTICS:
1741 if (u64Arg)
1742 return VERR_INVALID_PARAMETER;
1743 rc = GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1744 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1745 break;
1746
1747 /*
1748 * A quick GCFGM mock-up.
1749 */
1750 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1751 case VMMR0_DO_GCFGM_SET_VALUE:
1752 case VMMR0_DO_GCFGM_QUERY_VALUE:
1753 {
1754 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1755 return VERR_INVALID_PARAMETER;
1756 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1757 if (pReq->Hdr.cbReq != sizeof(*pReq))
1758 return VERR_INVALID_PARAMETER;
1759 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1760 {
1761 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1762 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1763 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1764 }
1765 else
1766 {
1767 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1768 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1769 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1770 }
1771 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1772 break;
1773 }
1774
1775 /*
1776 * PDM Wrappers.
1777 */
1778 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1779 {
1780 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1781 return VERR_INVALID_PARAMETER;
1782 rc = PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1783 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1784 break;
1785 }
1786
1787 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1788 {
1789 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1790 return VERR_INVALID_PARAMETER;
1791 rc = PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1792 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1793 break;
1794 }
1795
1796 /*
1797 * Requests to the internal networking service.
1798 */
1799 case VMMR0_DO_INTNET_OPEN:
1800 {
1801 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1802 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1803 return VERR_INVALID_PARAMETER;
1804 rc = IntNetR0OpenReq(pSession, pReq);
1805 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1806 break;
1807 }
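/*
 * Informal sketch of how such an open request is built in ring-3. The
 * INTNETOPENREQ field names below are assumptions based on VBox/intnet.h;
 * on success, hIf receives the interface handle consumed by the
 * VMMR0_DO_INTNET_IF_* operations that follow.
 *
 * @code
 *     INTNETOPENREQ OpenReq;
 *     RT_ZERO(OpenReq);
 *     OpenReq.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
 *     OpenReq.Hdr.cbReq    = sizeof(OpenReq);
 *     OpenReq.pSession     = pSession;
 *     RTStrCopy(OpenReq.szNetwork, sizeof(OpenReq.szNetwork), "example-intnet");
 *     OpenReq.enmTrunkType = kIntNetTrunkType_None;
 *     OpenReq.cbSend       = _128K;  // illustrative buffer sizes
 *     OpenReq.cbRecv       = _256K;
 *     OpenReq.hIf          = INTNET_HANDLE_INVALID;
 *     int rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID,
 *                               VMMR0_DO_INTNET_OPEN, 0, &OpenReq.Hdr);
 * @endcode
 */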
1808
1809 case VMMR0_DO_INTNET_IF_CLOSE:
1810 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1811 return VERR_INVALID_PARAMETER;
1812 rc = IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1813 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1814 break;
1815
1816
1817 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1818 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1819 return VERR_INVALID_PARAMETER;
1820 rc = IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1821 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1822 break;
1823
1824 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1825 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1826 return VERR_INVALID_PARAMETER;
1827 rc = IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1828 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1829 break;
1830
1831 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1832 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1833 return VERR_INVALID_PARAMETER;
1834 rc = IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1835 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1836 break;
1837
1838 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1839 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1840 return VERR_INVALID_PARAMETER;
1841 rc = IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1842 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1843 break;
1844
1845 case VMMR0_DO_INTNET_IF_SEND:
1846 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1847 return VERR_INVALID_PARAMETER;
1848 rc = IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1849 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1850 break;
1851
1852 case VMMR0_DO_INTNET_IF_WAIT:
1853 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1854 return VERR_INVALID_PARAMETER;
1855 rc = IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1856 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1857 break;
1858
1859 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1860 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1861 return VERR_INVALID_PARAMETER;
1862 rc = IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1863 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1864 break;
1865
1866#ifdef VBOX_WITH_PCI_PASSTHROUGH
1867 /*
1868 * Requests to host PCI driver service.
1869 */
1870 case VMMR0_DO_PCIRAW_REQ:
1871 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1872 return VERR_INVALID_PARAMETER;
1873 rc = PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1874 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1875 break;
1876#endif
1877 /*
1878 * For profiling.
1879 */
1880 case VMMR0_DO_NOP:
1881 case VMMR0_DO_SLOW_NOP:
1882 return VINF_SUCCESS;
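/*
 * A hypothetical ring-3 micro-benchmark built on the NOP operation, timing
 * the bare ring-0 round trip; everything except the VMMR0_DO_NOP operation
 * itself is illustrative.
 *
 * @code
 *     uint64_t const cNsStart = RTTimeNanoTS();
 *     for (uint32_t i = 0; i < _64K; i++)
 *         SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, VMMR0_DO_NOP, 0, NULL);
 *     RTPrintf("avg round trip: %RU64 ns\n", (RTTimeNanoTS() - cNsStart) / _64K);
 * @endcode
 */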
1883
1884 /*
1885 * For testing Ring-0 APIs invoked in this environment.
1886 */
1887 case VMMR0_DO_TESTS:
1888 /** @todo make new test */
1889 return VINF_SUCCESS;
1890
1891
1892#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1893 case VMMR0_DO_TEST_SWITCHER3264:
1894 if (idCpu == NIL_VMCPUID)
1895 return VERR_INVALID_CPU_ID;
1896 rc = HMR0TestSwitcher3264(pVM);
1897 VMM_CHECK_SMAP_CHECK2(pVM, RT_NOTHING);
1898 break;
1899#endif
1900 default:
1901 /*
1902 * We're returning VERR_NOT_SUPPORTED here so that we've got something
1903 * other than -1 which the interrupt gate glue code might return.
1904 */
1905 Log(("operation %#x is not supported\n", enmOperation));
1906 return VERR_NOT_SUPPORTED;
1907 }
1908 return rc;
1909}
1910
1911
1912/**
1913 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1914 */
1915typedef struct VMMR0ENTRYEXARGS
1916{
1917 PVM pVM;
1918 VMCPUID idCpu;
1919 VMMR0OPERATION enmOperation;
1920 PSUPVMMR0REQHDR pReq;
1921 uint64_t u64Arg;
1922 PSUPDRVSESSION pSession;
1923} VMMR0ENTRYEXARGS;
1924/** Pointer to a vmmR0EntryExWrapper argument package. */
1925typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1926
1927/**
1928 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1929 *
1930 * @returns VBox status code.
1931 * @param pvArgs The argument package.
1932 */
1933static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1934{
1935 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1936 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1937 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1938 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1939 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1940 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1941}
1942
1943
1944/**
1945 * The Ring 0 entry point, called by the support library (SUP).
1946 *
1947 * @returns VBox status code.
1948 * @param pVM Pointer to the VM.
1949 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1950 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1951 * @param enmOperation Which operation to execute.
1952 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
1953 * @param u64Arg Some simple constant argument.
1954 * @param pSession The session of the caller.
1955 * @remarks Assumed to be called with interrupts _enabled_.
1956 */
1957VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1958{
1959 /*
1960 * Requests that should only happen on the EMT thread will be
1961 * wrapped in a setjmp so we can assert without causing trouble.
1962 */
1963 if ( VALID_PTR(pVM)
1964 && pVM->pVMR0
1965 && idCpu < pVM->cCpus)
1966 {
1967 switch (enmOperation)
1968 {
1969 /* These might/will be called before VMMR3Init. */
1970 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1971 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1972 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1973 case VMMR0_DO_GMM_FREE_PAGES:
1974 case VMMR0_DO_GMM_BALLOONED_PAGES:
1975 /* On the Mac we might not have a valid jmp buf, so check these as well. */
1976 case VMMR0_DO_VMMR0_INIT:
1977 case VMMR0_DO_VMMR0_TERM:
1978 {
1979 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1980
1981 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
1982 break;
1983
1984 /** @todo validate this EMT claim... GVM knows. */
1985 VMMR0ENTRYEXARGS Args;
1986 Args.pVM = pVM;
1987 Args.idCpu = idCpu;
1988 Args.enmOperation = enmOperation;
1989 Args.pReq = pReq;
1990 Args.u64Arg = u64Arg;
1991 Args.pSession = pSession;
1992 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
1993 }
1994
1995 default:
1996 break;
1997 }
1998 }
1999 return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
2000}
2001
2002
2003/**
2004 * Checks whether we've armed the ring-0 long jump machinery.
2005 *
2006 * @returns @c true / @c false
2007 * @param pVCpu Pointer to the VMCPU.
2008 * @thread EMT
2009 * @sa VMMIsLongJumpArmed
2010 */
2011VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
2012{
2013#ifdef RT_ARCH_X86
2014 return pVCpu->vmm.s.CallRing3JmpBufR0.eip
2015 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2016#else
2017 return pVCpu->vmm.s.CallRing3JmpBufR0.rip
2018 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2019#endif
2020}
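/*
 * Usage sketch (an assumed typical call-site pattern, not code from this
 * file): guard a ring-3 round trip on the jump buffer being armed, since
 * VMMRZCallRing3 cannot deliver the request without it.
 *
 * @code
 *     if (VMMR0IsLongJumpArmed(pVCpu))
 *         rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
 *     else
 *         rc = VERR_VMM_LONG_JMP_ERROR;  // illustrative fallback status
 * @endcode
 */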
2021
2022
2023/**
2024 * Checks whether we are currently in a ring-3 long jump.
2025 *
2026 * @returns @c true / @c false
2027 * @param pVCpu Pointer to the VMCPU.
2028 * @thread EMT
2029 */
2030VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
2031{
2032 return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
2033}
2034
2035
2036/**
2037 * Internal R0 logger worker: Flush logger.
2038 *
2039 * @param pLogger The logger instance to flush.
2040 * @remark This function must be exported!
2041 */
2042VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
2043{
2044#ifdef LOG_ENABLED
2045 /*
2046 * Convert the pLogger into a VM handle and 'call' back to Ring-3.
2047 * (This code is a bit paranoid.)
2048 */
2049 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2050 if ( !VALID_PTR(pR0Logger)
2051 || !VALID_PTR(pR0Logger + 1)
2052 || pLogger->u32Magic != RTLOGGER_MAGIC)
2053 {
2054# ifdef DEBUG
2055 SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
2056# endif
2057 return;
2058 }
2059 if (pR0Logger->fFlushingDisabled)
2060 return; /* quietly */
2061
2062 PVM pVM = pR0Logger->pVM;
2063 if ( !VALID_PTR(pVM)
2064 || pVM->pVMR0 != pVM)
2065 {
2066# ifdef DEBUG
2067 SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
2068# endif
2069 return;
2070 }
2071
2072 PVMCPU pVCpu = VMMGetCpu(pVM);
2073 if (pVCpu)
2074 {
2075 /*
2076 * Check that the jump buffer is armed.
2077 */
2078# ifdef RT_ARCH_X86
2079 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.eip
2080 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2081# else
2082 if ( !pVCpu->vmm.s.CallRing3JmpBufR0.rip
2083 || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2084# endif
2085 {
2086# ifdef DEBUG
2087 SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
2088# endif
2089 return;
2090 }
2091 VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
2092 }
2093# ifdef DEBUG
2094 else
2095 SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
2096# endif
2097#endif
2098}
2099
2100/**
2101 * Internal R0 logger worker: Custom prefix.
2102 *
2103 * @returns Number of chars written.
2104 *
2105 * @param pLogger The logger instance.
2106 * @param pchBuf The output buffer.
2107 * @param cchBuf The size of the buffer.
2108 * @param pvUser User argument (ignored).
2109 */
2110VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
2111{
2112 NOREF(pvUser);
2113#ifdef LOG_ENABLED
2114 PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
2115 if ( !VALID_PTR(pR0Logger)
2116 || !VALID_PTR(pR0Logger + 1)
2117 || pLogger->u32Magic != RTLOGGER_MAGIC
2118 || cchBuf < 2)
2119 return 0;
2120
2121 static const char s_szHex[17] = "0123456789abcdef";
2122 VMCPUID const idCpu = pR0Logger->idCpu;
2123 pchBuf[1] = s_szHex[ idCpu & 15];
2124 pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
2125
2126 return 2;
2127#else
2128 return 0;
2129#endif
2130}
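/*
 * Wiring sketch (assumed; the actual registration lives in the R0 logger
 * setup code elsewhere): installing this callback makes every log line
 * start with the two hex digits of the VCPU id, e.g. VCPU 0x1a => "1a".
 *
 * @code
 *     rc = RTLogSetCustomPrefixCallback(&pR0Logger->Logger, vmmR0LoggerPrefix, NULL);
 * @endcode
 */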
2131
2132#ifdef LOG_ENABLED
2133
2134/**
2135 * Disables flushing of the ring-0 debug log.
2136 *
2137 * @param pVCpu Pointer to the VMCPU.
2138 */
2139VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
2140{
2141 if (pVCpu->vmm.s.pR0LoggerR0)
2142 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
2143}
2144
2145
2146/**
2147 * Enables flushing of the ring-0 debug log.
2148 *
2149 * @param pVCpu Pointer to the VMCPU.
2150 */
2151VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
2152{
2153 if (pVCpu->vmm.s.pR0LoggerR0)
2154 pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
2155}
2156
2157
2158/**
2159 * Checks whether log flushing is disabled. Returns @c true if it is
2160 * (or if no ring-0 logger is present), @c false otherwise.
2161 * @param pVCpu Pointer to the VMCPU.
2162 */
2163VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
2164{
2165 if (pVCpu->vmm.s.pR0LoggerR0)
2166 return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
2167 return true;
2168}
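/*
 * Pairing sketch (assumed usage pattern): flushing is disabled around code
 * that must not risk the long jump to ring-3 which a log flush can trigger,
 * and re-enabled afterwards.
 *
 * @code
 *     VMMR0LogFlushDisable(pVCpu);
 *     Assert(VMMR0IsLogFlushDisabled(pVCpu));
 *     // ... code that logs but must not leave ring-0 ...
 *     VMMR0LogFlushEnable(pVCpu);
 * @endcode
 */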
2169#endif /* LOG_ENABLED */
2170
2171/**
2172 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
2173 *
2174 * @returns true if the breakpoint should be hit, false if it should be ignored.
2175 */
2176DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
2177{
2178#if 0
2179 return true;
2180#else
2181 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2182 if (pVM)
2183 {
2184 PVMCPU pVCpu = VMMGetCpu(pVM);
2185
2186 if (pVCpu)
2187 {
2188#ifdef RT_ARCH_X86
2189 if ( pVCpu->vmm.s.CallRing3JmpBufR0.eip
2190 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2191#else
2192 if ( pVCpu->vmm.s.CallRing3JmpBufR0.rip
2193 && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
2194#endif
2195 {
2196 int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
2197 return RT_FAILURE_NP(rc);
2198 }
2199 }
2200 }
2201#ifdef RT_OS_LINUX
2202 return true;
2203#else
2204 return false;
2205#endif
2206#endif
2207}
2208
2209
2210/**
2211 * Override this so we can push it up to ring-3.
2212 *
2213 * @param pszExpr Expression. Can be NULL.
2214 * @param uLine Location line number.
2215 * @param pszFile Location file name.
2216 * @param pszFunction Location function name.
2217 */
2218DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
2219{
2220 /*
2221 * To the log.
2222 */
2223 LogAlways(("\n!!R0-Assertion Failed!!\n"
2224 "Expression: %s\n"
2225 "Location : %s(%d) %s\n",
2226 pszExpr, pszFile, uLine, pszFunction));
2227
2228 /*
2229 * To the global VMM buffer.
2230 */
2231 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2232 if (pVM)
2233 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
2234 "\n!!R0-Assertion Failed!!\n"
2235 "Expression: %s\n"
2236 "Location : %s(%d) %s\n",
2237 pszExpr, pszFile, uLine, pszFunction);
2238
2239 /*
2240 * Continue the normal way.
2241 */
2242 RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
2243}
2244
2245
2246/**
2247 * Callback for RTLogFormatV which writes to the ring-3 log port.
2248 * See PFNLOGOUTPUT() for details.
2249 */
2250static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
2251{
2252 for (size_t i = 0; i < cbChars; i++)
2253 LogAlways(("%c", pachChars[i]));
2254
2255 NOREF(pv);
2256 return cbChars;
2257}
2258
2259
2260/**
2261 * Override this so we can push it up to ring-3.
2262 *
2263 * @param pszFormat The format string.
2264 * @param va Arguments.
2265 */
2266DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
2267{
2268 va_list vaCopy;
2269
2270 /*
2271 * Push the message to the loggers.
2272 */
2273 PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
2274 if (pLog)
2275 {
2276 va_copy(vaCopy, va);
2277 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2278 va_end(vaCopy);
2279 }
2280 pLog = RTLogRelGetDefaultInstance();
2281 if (pLog)
2282 {
2283 va_copy(vaCopy, va);
2284 RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
2285 va_end(vaCopy);
2286 }
2287
2288 /*
2289 * Push it to the global VMM buffer.
2290 */
2291 PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
2292 if (pVM)
2293 {
2294 va_copy(vaCopy, va);
2295 RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
2296 va_end(vaCopy);
2297 }
2298
2299 /*
2300 * Continue the normal way.
2301 */
2302 RTAssertMsg2V(pszFormat, va);
2303}
2304