VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/VMMR0.cpp@55863

Last change on this file since 55863 was 55863, checked in by vboxsync, 10 years ago

IPRT,SUPDrv,VMM: Revised the context switching hook interface. Do less work when enabling the hook (formerly 'registration'). Drop the reference counting (kept internally for solaris) as it complicates restrictions wrt destroying enabled hooks. Bumped support driver version.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 71.7 KB
1/* $Id: VMMR0.cpp 55863 2015-05-14 18:29:34Z vboxsync $ */
2/** @file
3 * VMM - Host Context Ring 0.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_VMM
22#include <VBox/vmm/vmm.h>
23#include <VBox/sup.h>
24#include <VBox/vmm/trpm.h>
25#include <VBox/vmm/cpum.h>
26#include <VBox/vmm/pdmapi.h>
27#include <VBox/vmm/pgm.h>
28#include <VBox/vmm/stam.h>
29#include <VBox/vmm/tm.h>
30#include "VMMInternal.h"
31#include <VBox/vmm/vm.h>
32#ifdef VBOX_WITH_PCI_PASSTHROUGH
33# include <VBox/vmm/pdmpci.h>
34#endif
35
36#include <VBox/vmm/gvmm.h>
37#include <VBox/vmm/gmm.h>
38#include <VBox/vmm/gim.h>
39#include <VBox/intnet.h>
40#include <VBox/vmm/hm.h>
41#include <VBox/param.h>
42#include <VBox/err.h>
43#include <VBox/version.h>
44#include <VBox/log.h>
45
46#include <iprt/asm-amd64-x86.h>
47#include <iprt/assert.h>
48#include <iprt/crc.h>
49#include <iprt/mp.h>
50#include <iprt/once.h>
51#include <iprt/stdarg.h>
52#include <iprt/string.h>
53#include <iprt/thread.h>
54#include <iprt/timer.h>
55
56#include "dtrace/VBoxVMM.h"
57
58
59#if defined(_MSC_VER) && defined(RT_ARCH_AMD64) /** @todo check this with VC7! */
60# pragma intrinsic(_AddressOfReturnAddress)
61#endif
62
63
64/*******************************************************************************
65* Internal Functions *
66*******************************************************************************/
67RT_C_DECLS_BEGIN
68#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
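/* 64-bit unsigned division/modulo helpers normally supplied by the compiler
   runtime (libgcc) on 32-bit targets; declared here so that g_VMMGCDeps
   below can take their addresses. */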
69extern uint64_t __udivdi3(uint64_t, uint64_t);
70extern uint64_t __umoddi3(uint64_t, uint64_t);
71#endif
72RT_C_DECLS_END
73
74
75/*******************************************************************************
76* Global Variables *
77*******************************************************************************/
78/** Drag in necessary library bits.
79 * The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
80PFNRT g_VMMGCDeps[] =
81{
82 (PFNRT)RTCrc32,
83 (PFNRT)RTOnce,
84#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
85 (PFNRT)__udivdi3,
86 (PFNRT)__umoddi3,
87#endif
88 NULL
89};
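/* (The array itself is never used at runtime; merely taking the addresses of
   the functions above forces the linker to drag the corresponding runtime
   code into VMMR0.r0.) */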
90
91#ifdef RT_OS_SOLARIS
92/* Dependency information for the native solaris loader. */
93extern "C" { char _depends_on[] = "vboxdrv"; }
94#endif
95
96
97
98/**
99 * Initialize the module.
100 * This is called when we're first loaded.
101 *
102 * @returns 0 on success.
103 * @returns VBox status on failure.
104 * @param hMod Image handle for use in APIs.
105 */
106DECLEXPORT(int) ModuleInit(void *hMod)
107{
108#ifdef VBOX_WITH_DTRACE_R0
109 /*
110 * The first thing to do is register the static tracepoints.
111 * (Deregistration is automatic.)
112 */
113 int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
114 if (RT_FAILURE(rc2))
115 return rc2;
116#endif
117 LogFlow(("ModuleInit:\n"));
118
119#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
120 /*
121 * Display the CMOS debug code.
122 */
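/* (Ports 0x72/0x73 are the secondary CMOS/RTC index/data register pair; the
   byte at index 0x03 is assumed here to hold the 64-on-32 debug code.) */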
123 ASMOutU8(0x72, 0x03);
124 uint8_t bDebugCode = ASMInU8(0x73);
125 LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
126 RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
127#endif
128
129 /*
130 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
131 */
132 int rc = vmmInitFormatTypes();
133 if (RT_SUCCESS(rc))
134 {
135 rc = GVMMR0Init();
136 if (RT_SUCCESS(rc))
137 {
138 rc = GMMR0Init();
139 if (RT_SUCCESS(rc))
140 {
141 rc = HMR0Init();
142 if (RT_SUCCESS(rc))
143 {
144 rc = PGMRegisterStringFormatTypes();
145 if (RT_SUCCESS(rc))
146 {
147#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
148 rc = PGMR0DynMapInit();
149#endif
150 if (RT_SUCCESS(rc))
151 {
152 rc = IntNetR0Init();
153 if (RT_SUCCESS(rc))
154 {
155#ifdef VBOX_WITH_PCI_PASSTHROUGH
156 rc = PciRawR0Init();
157#endif
158 if (RT_SUCCESS(rc))
159 {
160 rc = CPUMR0ModuleInit();
161 if (RT_SUCCESS(rc))
162 {
163#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
164 rc = vmmR0TripleFaultHackInit();
165 if (RT_SUCCESS(rc))
166#endif
167 {
168 LogFlow(("ModuleInit: returns success.\n"));
169 return VINF_SUCCESS;
170 }
171
172 /*
173 * Bail out.
174 */
175#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
176 vmmR0TripleFaultHackTerm();
177#endif
178 }
179 else
180 LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
181#ifdef VBOX_WITH_PCI_PASSTHROUGH
182 PciRawR0Term();
183#endif
184 }
185 else
186 LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
187 IntNetR0Term();
188 }
189 else
190 LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
191#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
192 PGMR0DynMapTerm();
193#endif
194 }
195 else
196 LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
197 PGMDeregisterStringFormatTypes();
198 }
199 else
200 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
201 HMR0Term();
202 }
203 else
204 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
205 GMMR0Term();
206 }
207 else
208 LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
209 GVMMR0Term();
210 }
211 else
212 LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
213 vmmTermFormatTypes();
214 }
215 else
216 LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));
217
218 LogFlow(("ModuleInit: failed %Rrc\n", rc));
219 return rc;
220}
221
222
223/**
224 * Terminate the module.
225 * This is called when we're finally unloaded.
226 *
227 * @param hMod Image handle for use in APIs.
228 */
229DECLEXPORT(void) ModuleTerm(void *hMod)
230{
231 NOREF(hMod);
232 LogFlow(("ModuleTerm:\n"));
233
234 /*
235 * Terminate the CPUM module (Local APIC cleanup).
236 */
237 CPUMR0ModuleTerm();
238
239 /*
240 * Terminate the internal network service.
241 */
242 IntNetR0Term();
243
244 /*
245 * PGM (Darwin), HM and PciRaw global cleanup.
246 */
247#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
248 PGMR0DynMapTerm();
249#endif
250#ifdef VBOX_WITH_PCI_PASSTHROUGH
251 PciRawR0Term();
252#endif
253 PGMDeregisterStringFormatTypes();
254 HMR0Term();
255#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
256 vmmR0TripleFaultHackTerm();
257#endif
258
259 /*
260 * Destroy the GMM and GVMM instances.
261 */
262 GMMR0Term();
263 GVMMR0Term();
264
265 vmmTermFormatTypes();
266
267 LogFlow(("ModuleTerm: returns\n"));
268}
269
270
271/**
272 * Initializes the R0 driver for a particular VM instance.
273 *
274 * @returns VBox status code.
275 *
276 * @param pVM Pointer to the VM.
277 * @param uSvnRev The SVN revision of the ring-3 part.
278 * @param uBuildType Build type indicator.
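 * @remarks Both values arrive packed into the u64Arg of VMMR0_DO_VMMR0_INIT
 *          and are unpacked by vmmR0EntryExWorker with RT_LODWORD/RT_HIDWORD;
 *          ring-3 presumably builds that argument with something like
 *          RT_MAKE_U64(uSvnRev, uBuildType) (an assumption, not verified here).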
279 * @thread EMT.
280 */
281static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
282{
283 /*
284 * Match the SVN revisions and build type.
285 */
286 if (uSvnRev != VMMGetSvnRev())
287 {
288 LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
289 SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
290 return VERR_VMM_R0_VERSION_MISMATCH;
291 }
292 if (uBuildType != vmmGetBuildType())
293 {
294 LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
295 SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
296 return VERR_VMM_R0_VERSION_MISMATCH;
297 }
298 if ( !VALID_PTR(pVM)
299 || pVM->pVMR0 != pVM)
300 return VERR_INVALID_PARAMETER;
301
302
303#ifdef LOG_ENABLED
304 /*
305 * Register the EMT R0 logger instance for VCPU 0.
306 */
307 PVMCPU pVCpu = &pVM->aCpus[0];
308
309 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
310 if (pR0Logger)
311 {
312# if 0 /* testing of the logger. */
313 LogCom(("vmmR0InitVM: before %p\n", RTLogDefaultInstance()));
314 LogCom(("vmmR0InitVM: pfnFlush=%p actual=%p\n", pR0Logger->Logger.pfnFlush, vmmR0LoggerFlush));
315 LogCom(("vmmR0InitVM: pfnLogger=%p actual=%p\n", pR0Logger->Logger.pfnLogger, vmmR0LoggerWrapper));
316 LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
317
318 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
319 LogCom(("vmmR0InitVM: after %p reg\n", RTLogDefaultInstance()));
320 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
321 LogCom(("vmmR0InitVM: after %p dereg\n", RTLogDefaultInstance()));
322
323 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
324 LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
325 pR0Logger->Logger.pfnFlush(&pR0Logger->Logger);
326 LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
327
328 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
329 LogCom(("vmmR0InitVM: after %p reg2\n", RTLogDefaultInstance()));
330 pR0Logger->Logger.pfnLogger("hello ring-0 logger\n");
331 LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
332 RTLogSetDefaultInstanceThread(NULL, pVM->pSession);
333 LogCom(("vmmR0InitVM: after %p dereg2\n", RTLogDefaultInstance()));
334
335 RTLogLoggerEx(&pR0Logger->Logger, 0, ~0U, "hello ring-0 logger (RTLogLoggerEx)\n");
336 LogCom(("vmmR0InitVM: RTLogLoggerEx returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
337
338 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
339 RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
340 LogCom(("vmmR0InitVM: RTLogPrintf returned fine offScratch=%d\n", pR0Logger->Logger.offScratch));
341# endif
342 Log(("Switching to per-thread logging instance %p (key=%p)\n", &pR0Logger->Logger, pVM->pSession));
343 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
344 pR0Logger->fRegistered = true;
345 }
346#endif /* LOG_ENABLED */
347
348 /*
349 * Check if the host supports high resolution timers or not.
350 */
351 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers
352 && !RTTimerCanDoHighResolution())
353 pVM->vmm.s.fUsePeriodicPreemptionTimers = false;
354
355 /*
356 * Initialize the per VM data for GVMM and GMM.
357 */
358 int rc = GVMMR0InitVM(pVM);
359// if (RT_SUCCESS(rc))
360// rc = GMMR0InitPerVMData(pVM);
361 if (RT_SUCCESS(rc))
362 {
363 /*
364 * Init HM, CPUM and PGM (Darwin only).
365 */
366 rc = HMR0InitVM(pVM);
367 if (RT_SUCCESS(rc))
368 {
369 rc = CPUMR0InitVM(pVM);
370 if (RT_SUCCESS(rc))
371 {
372#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
373 rc = PGMR0DynMapInitVM(pVM);
374#endif
375 if (RT_SUCCESS(rc))
376 {
377#ifdef VBOX_WITH_PCI_PASSTHROUGH
378 rc = PciRawR0InitVM(pVM);
379#endif
380 if (RT_SUCCESS(rc))
381 {
382 rc = GIMR0InitVM(pVM);
383 if (RT_SUCCESS(rc))
384 {
385 GVMMR0DoneInitVM(pVM);
386 return rc;
387 }
388
389 /* Bail out. */
390#ifdef VBOX_WITH_PCI_PASSTHROUGH
391 PciRawR0TermVM(pVM);
392#endif
393 }
394 }
395 }
396 HMR0TermVM(pVM);
397 }
398 }
399
400
401 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
402 return rc;
403}
404
405
406/**
407 * Terminates the R0 bits for a particular VM instance.
408 *
409 * This is normally called by ring-3 as part of the VM termination process, but
410 * may alternatively be called during the support driver session cleanup when
411 * the VM object is destroyed (see GVMM).
412 *
413 * @returns VBox status code.
414 *
415 * @param pVM Pointer to the VM.
416 * @param pGVM Pointer to the global VM structure. Optional.
417 * @thread EMT or session clean up thread.
418 */
419VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
420{
421#ifdef VBOX_WITH_PCI_PASSTHROUGH
422 PciRawR0TermVM(pVM);
423#endif
424
425 /*
426 * Tell GVMM what we're up to and check that we only do this once.
427 */
428 if (GVMMR0DoingTermVM(pVM, pGVM))
429 {
430 GIMR0TermVM(pVM);
431
432 /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
433 * here to make sure we don't leak any shared pages if we crash... */
434#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
435 PGMR0DynMapTermVM(pVM);
436#endif
437 HMR0TermVM(pVM);
438 }
439
440 /*
441 * Deregister the logger.
442 */
443 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
444 return VINF_SUCCESS;
445}
446
447
448/**
449 * VMM ring-0 thread-context callback.
450 *
451 * This does common HM state updating and calls the HM-specific thread-context
452 * callback.
453 *
454 * @param enmEvent The thread-context event.
455 * @param pvUser Opaque pointer to the VMCPU.
456 *
457 * @thread EMT(pvUser)
458 */
459static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
460{
461 PVMCPU pVCpu = (PVMCPU)pvUser;
462
463 switch (enmEvent)
464 {
465 case RTTHREADCTXEVENT_IN:
466 {
467 /*
468 * Linux may call us with preemption enabled (really!) but technically we
469 * cannot get preempted here, otherwise we end up in an infinite recursion
470 * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
471 * ad infinitum). Let's just disable preemption for now...
472 */
473 /** @todo r=bird: I don't believe the above. The linux code is clearly enabling
474 * preemption after doing the callout (one or two functions up the
475 * call chain). */
476 RTTHREADPREEMPTSTATE ParanoidPreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
477 RTThreadPreemptDisable(&ParanoidPreemptState);
478
479 /* We need to update the VCPU <-> host CPU mapping. */
480 RTCPUID idHostCpu;
481 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
482 pVCpu->iHostCpuSet = iHostCpuSet;
483 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
484
485 /* In the very unlikely event that the GIP delta for the CPU we've been
486 rescheduled onto needs calculating, try to force a return to ring-3.
487 We unfortunately cannot do the measurements right here. */
488 if (RT_UNLIKELY(SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
489 VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
490
491 /* Invoke the HM-specific thread-context callback. */
492 HMR0ThreadCtxCallback(enmEvent, pvUser);
493
494 /* Restore preemption. */
495 RTThreadPreemptRestore(&ParanoidPreemptState);
496 break;
497 }
498
499 case RTTHREADCTXEVENT_OUT:
500 {
501 /* Invoke the HM-specific thread-context callback. */
502 HMR0ThreadCtxCallback(enmEvent, pvUser);
503
504 /*
505 * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
506 * have the same host CPU associated with them.
507 */
508 pVCpu->iHostCpuSet = UINT32_MAX;
509 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
510 break;
511 }
512
513 default:
514 /* Invoke the HM-specific thread-context callback. */
515 HMR0ThreadCtxCallback(enmEvent, pvUser);
516 break;
517 }
518}
519
520
521/**
522 * Creates thread switching hook for the current EMT thread.
523 *
524 * This is called by GVMMR0CreateVM and GVMMR0RegisterVCpu. If the host
525 * platform does not implement switcher hooks, no hooks will be created and the
526 * member set to NIL_RTTHREADCTXHOOK.
527 *
528 * @returns VBox status code.
529 * @param pVCpu Pointer to the cross context CPU structure.
530 * @thread EMT(pVCpu)
531 */
532VMMR0_INT_DECL(int) VMMR0ThreadCtxHookCreateForEmt(PVMCPU pVCpu)
533{
534 VMCPU_ASSERT_EMT(pVCpu);
535 Assert(pVCpu->vmm.s.hCtxHook == NIL_RTTHREADCTXHOOK);
536
537 int rc = RTThreadCtxHookCreate(&pVCpu->vmm.s.hCtxHook, 0, vmmR0ThreadCtxCallback, pVCpu);
538 if (RT_SUCCESS(rc))
539 return rc;
540
541 pVCpu->vmm.s.hCtxHook = NIL_RTTHREADCTXHOOK;
542 if (rc == VERR_NOT_SUPPORTED)
543 return VINF_SUCCESS;
544
545 LogRelMax(32, ("RTThreadCtxHookCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
546 return VINF_SUCCESS; /* Just ignore it, we can live without context hooks. */
547}
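/* Illustrative lifecycle sketch (an assumption drawn from the functions in
   this file, not code copied from the tree): GVMM creates the hook once per
   EMT, the VMMR0_DO_HM_RUN path enables it before entering HM context, and
   the disable/destroy pair tears it down again:

       VMMR0ThreadCtxHookCreateForEmt(pVCpu);         // EMT, once
       RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook);  // before running guest code
       ...                                            // world switches fire vmmR0ThreadCtxCallback
       VMMR0ThreadCtxHookDisable(pVCpu);              // when leaving HM context
       VMMR0ThreadCtxHookDestroyForEmt(pVCpu);        // at EMT termination
 */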
548
549
550/**
551 * Destroys the thread switching hook for the specified VCPU.
552 *
553 * @param pVCpu Pointer to the cross context CPU structure.
554 * @remarks Can be called from any thread.
555 */
556VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDestroyForEmt(PVMCPU pVCpu)
557{
558 int rc = RTThreadCtxHookDestroy(pVCpu->vmm.s.hCtxHook);
559 AssertRC(rc);
560}
561
562
563/**
564 * Disables the thread switching hook for this VCPU (if we got one).
565 *
566 * @param pVCpu Pointer to the cross context CPU structure.
567 * @thread EMT(pVCpu)
568 *
569 * @remarks This also clears VMCPU::idHostCpu, so the mapping is invalid after
570 * this call. This means you have to be careful with what you do!
571 */
572VMMR0_INT_DECL(void) VMMR0ThreadCtxHookDisable(PVMCPU pVCpu)
573{
574 /*
575 * Clear the VCPU <-> host CPU mapping as we've left HM context.
576 * @bugref{7726} comment #19 explains the need for this trick:
577 *
578 * hmR0VmxCallRing3Callback/hmR0SvmCallRing3Callback &
579 * hmR0VmxLeaveSession/hmR0SvmLeaveSession disables context hooks during
580 * longjmp & normal return to ring-3, which opens a window where we may be
581 * rescheduled without changing VMCPU::idHostCpu and cause confusion if
582 * the CPU starts executing a different EMT. Both functions first disable
583 * preemption and then call HMR0LeaveCpu, which invalidates idHostCpu, leaving
584 * an opening for getting preempted.
585 */
586 /** @todo Make HM not need this API! Then we could leave the hooks enabled
587 * all the time. */
588 /** @todo move this into the context hook disabling if(). */
589 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
590
591 /*
592 * Disable the context hook, if we got one.
593 */
594 if (pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK)
595 {
596 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
597 int rc = RTThreadCtxHookDisable(pVCpu->vmm.s.hCtxHook);
598 AssertRC(rc);
599 }
600}
601
602
603/**
604 * Internal version of VMMR0ThreadCtxHookIsEnabled.
605 *
606 * @returns true if enabled, false otherwise.
607 * @param pVCpu Pointer to the VMCPU.
608 */
609DECLINLINE(bool) vmmR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
610{
611 return RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook);
612}
613
614
615/**
616 * Whether the thread-context hook is enabled for this VCPU.
617 *
618 * @returns true if enabled, false otherwise.
619 * @param pVCpu Pointer to the VMCPU.
620 */
621VMMR0_INT_DECL(bool) VMMR0ThreadCtxHookIsEnabled(PVMCPU pVCpu)
622{
623 return vmmR0ThreadCtxHookIsEnabled(pVCpu);
624}
625
626
627#ifdef VBOX_WITH_STATISTICS
628/**
629 * Record return code statistics.
630 * @param pVM Pointer to the VM.
631 * @param pVCpu Pointer to the VMCPU.
632 * @param rc The status code.
633 */
634static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
635{
636 /*
637 * Collect statistics.
638 */
639 switch (rc)
640 {
641 case VINF_SUCCESS:
642 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
643 break;
644 case VINF_EM_RAW_INTERRUPT:
645 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
646 break;
647 case VINF_EM_RAW_INTERRUPT_HYPER:
648 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
649 break;
650 case VINF_EM_RAW_GUEST_TRAP:
651 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
652 break;
653 case VINF_EM_RAW_RING_SWITCH:
654 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
655 break;
656 case VINF_EM_RAW_RING_SWITCH_INT:
657 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
658 break;
659 case VINF_EM_RAW_STALE_SELECTOR:
660 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
661 break;
662 case VINF_EM_RAW_IRET_TRAP:
663 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
664 break;
665 case VINF_IOM_R3_IOPORT_READ:
666 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
667 break;
668 case VINF_IOM_R3_IOPORT_WRITE:
669 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
670 break;
671 case VINF_IOM_R3_MMIO_READ:
672 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
673 break;
674 case VINF_IOM_R3_MMIO_WRITE:
675 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
676 break;
677 case VINF_IOM_R3_MMIO_READ_WRITE:
678 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
679 break;
680 case VINF_PATM_HC_MMIO_PATCH_READ:
681 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
682 break;
683 case VINF_PATM_HC_MMIO_PATCH_WRITE:
684 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
685 break;
686 case VINF_CPUM_R3_MSR_READ:
687 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
688 break;
689 case VINF_CPUM_R3_MSR_WRITE:
690 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
691 break;
692 case VINF_EM_RAW_EMULATE_INSTR:
693 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
694 break;
695 case VINF_EM_RAW_EMULATE_IO_BLOCK:
696 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
697 break;
698 case VINF_PATCH_EMULATE_INSTR:
699 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
700 break;
701 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
702 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLDTFault);
703 break;
704 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
705 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGDTFault);
706 break;
707 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
708 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIDTFault);
709 break;
710 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
711 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTSSFault);
712 break;
713 case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
714 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPDFault);
715 break;
716 case VINF_CSAM_PENDING_ACTION:
717 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
718 break;
719 case VINF_PGM_SYNC_CR3:
720 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
721 break;
722 case VINF_PATM_PATCH_INT3:
723 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
724 break;
725 case VINF_PATM_PATCH_TRAP_PF:
726 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
727 break;
728 case VINF_PATM_PATCH_TRAP_GP:
729 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
730 break;
731 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
732 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
733 break;
734 case VINF_EM_RESCHEDULE_REM:
735 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
736 break;
737 case VINF_EM_RAW_TO_R3:
738 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
739 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
740 else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
741 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
742 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
743 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
744 else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
745 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
746 else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
747 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
748 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
749 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
750 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
751 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
752 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TO_R3))
753 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3);
754 else
755 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
756 break;
757
758 case VINF_EM_RAW_TIMER_PENDING:
759 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
760 break;
761 case VINF_EM_RAW_INTERRUPT_PENDING:
762 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
763 break;
764 case VINF_VMM_CALL_HOST:
765 switch (pVCpu->vmm.s.enmCallRing3Operation)
766 {
767 case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
768 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
769 break;
770 case VMMCALLRING3_PDM_LOCK:
771 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
772 break;
773 case VMMCALLRING3_PGM_POOL_GROW:
774 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
775 break;
776 case VMMCALLRING3_PGM_LOCK:
777 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
778 break;
779 case VMMCALLRING3_PGM_MAP_CHUNK:
780 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
781 break;
782 case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
783 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
784 break;
785 case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
786 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
787 break;
788 case VMMCALLRING3_VMM_LOGGER_FLUSH:
789 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
790 break;
791 case VMMCALLRING3_VM_SET_ERROR:
792 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
793 break;
794 case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
795 STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
796 break;
797 case VMMCALLRING3_VM_R0_ASSERTION:
798 default:
799 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCallRing3);
800 break;
801 }
802 break;
803 case VINF_PATM_DUPLICATE_FUNCTION:
804 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
805 break;
806 case VINF_PGM_CHANGE_MODE:
807 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
808 break;
809 case VINF_PGM_POOL_FLUSH_PENDING:
810 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
811 break;
812 case VINF_EM_PENDING_REQUEST:
813 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
814 break;
815 case VINF_EM_HM_PATCH_TPR_INSTR:
816 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
817 break;
818 default:
819 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
820 break;
821 }
822}
823#endif /* VBOX_WITH_STATISTICS */
824
825
826/**
827 * Unused ring-0 entry point that used to be called from the interrupt gate.
828 *
829 * Will be removed one of the next times we do a major SUPDrv version bump.
830 *
831 * @returns VBox status code.
832 * @param pVM Pointer to the VM.
833 * @param enmOperation Which operation to execute.
834 * @param pvArg Argument to the operation.
835 * @remarks Assume called with interrupts disabled.
836 */
837VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
838{
839 /*
840 * We're returning VERR_NOT_SUPPORTED here so we've got something other
841 * than -1, which the interrupt gate glue code might return.
842 */
843 Log(("operation %#x is not supported\n", enmOperation));
844 NOREF(enmOperation); NOREF(pvArg); NOREF(pVM);
845 return VERR_NOT_SUPPORTED;
846}
847
848
849/**
850 * The Ring 0 entry point, called by the fast-ioctl path.
851 *
852 * @param pVM Pointer to the VM.
853 * The return code is stored in pVM->vmm.s.iLastGZRc.
854 * @param idCpu The Virtual CPU ID of the calling EMT.
855 * @param enmOperation Which operation to execute.
856 * @remarks Assume called with interrupts _enabled_.
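 *
 * @remarks A minimal ring-3 usage sketch (illustrative; assumes the standard
 *          SUP library fast-ioctl API rather than code taken from this file):
 * @code
 *     int rc = SUPR3CallVMMR0Fast(pVM->pVMR0, VMMR0_DO_HM_RUN, pVCpu->idCpu);
 *     if (RT_SUCCESS(rc))
 *         rc = pVCpu->vmm.s.iLastGZRc; // the real status from ring-0
 * @endcode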
857 */
858VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
859{
860 /*
861 * Validation.
862 */
863 if (RT_UNLIKELY(idCpu >= pVM->cCpus))
864 return;
865 PVMCPU pVCpu = &pVM->aCpus[idCpu];
866 if (RT_UNLIKELY(pVCpu->hNativeThreadR0 != RTThreadNativeSelf()))
867 return;
868
869 /*
870 * Perform requested operation.
871 */
872 switch (enmOperation)
873 {
874 /*
875 * Switch to GC and run guest raw mode code.
876 * Disable interrupts before doing the world switch.
877 */
878 case VMMR0_DO_RAW_RUN:
879 {
880#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
881 /* Some safety precautions first. */
882 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
883 {
884 pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
885 break;
886 }
887#endif
888
889 /*
890 * Disable preemption.
891 */
892 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
893 RTThreadPreemptDisable(&PreemptState);
894
895 /*
896 * Get the host CPU identifiers, make sure they are valid and that
897 * we've got a TSC delta for the CPU.
898 */
899 RTCPUID idHostCpu;
900 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
901 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
902 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
903 {
904 /*
905 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
906 */
907#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
908 CPUMR0SetLApic(pVCpu, iHostCpuSet);
909#endif
910 pVCpu->iHostCpuSet = iHostCpuSet;
911 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
912
913 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
914 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
915
916 /*
917 * We might need to disable VT-x if the active switcher turns off paging.
918 */
919 bool fVTxDisabled;
920 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
921 if (RT_SUCCESS(rc))
922 {
923 /*
924 * Disable interrupts and run raw-mode code. The loop is for efficiently
925 * dispatching tracepoints that fired in raw-mode context.
926 */
927 RTCCUINTREG uFlags = ASMIntDisableFlags();
928
929 for (;;)
930 {
931 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
932 TMNotifyStartOfExecution(pVCpu);
933
934 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
935 pVCpu->vmm.s.iLastGZRc = rc;
936
937 TMNotifyEndOfExecution(pVCpu);
938 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
939
940 if (rc != VINF_VMM_CALL_TRACER)
941 break;
942 SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
943 }
944
945 /*
946 * Re-enable VT-x before we dispatch any pending host interrupts and
947 * re-enable interrupts.
948 */
949 HMR0LeaveSwitcher(pVM, fVTxDisabled);
950
951 if ( rc == VINF_EM_RAW_INTERRUPT
952 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
953 TRPMR0DispatchHostInterrupt(pVM);
954
955 ASMSetFlags(uFlags);
956
957 /* Fire dtrace probe and collect statistics. */
958 VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
959#ifdef VBOX_WITH_STATISTICS
960 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
961 vmmR0RecordRC(pVM, pVCpu, rc);
962#endif
963 }
964 else
965 pVCpu->vmm.s.iLastGZRc = rc;
966
967 /*
968 * Invalidate the host CPU identifiers as we restore preemption.
969 */
970 pVCpu->iHostCpuSet = UINT32_MAX;
971 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
972
973 RTThreadPreemptRestore(&PreemptState);
974 }
975 /*
976 * Invalid CPU set index or TSC delta in need of measuring.
977 */
978 else
979 {
980 RTThreadPreemptRestore(&PreemptState);
981 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
982 {
983 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
984 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
985 0 /*default cTries*/);
986 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
987 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
988 else
989 pVCpu->vmm.s.iLastGZRc = rc;
990 }
991 else
992 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
993 }
994 break;
995 }
996
997 /*
998 * Run guest code using the available hardware acceleration technology.
999 */
1000 case VMMR0_DO_HM_RUN:
1001 {
1002 /*
1003 * Disable preemption.
1004 */
1005 Assert(!vmmR0ThreadCtxHookIsEnabled(pVCpu));
1006 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
1007 RTThreadPreemptDisable(&PreemptState);
1008
1009 /*
1010 * Get the host CPU identifiers, make sure they are valid and that
1011 * we've got a TSC delta for the CPU.
1012 */
1013 RTCPUID idHostCpu;
1014 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1015 if (RT_LIKELY( iHostCpuSet < RTCPUSET_MAX_CPUS
1016 && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1017 {
1018 pVCpu->iHostCpuSet = iHostCpuSet;
1019 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1020
1021 /*
1022 * Update the periodic preemption timer if it's active.
1023 */
1024 if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
1025 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));
1026
1027#ifdef LOG_ENABLED
1028 /*
1029 * Ugly: Lazy registration of ring 0 loggers.
1030 */
1031 if (pVCpu->idCpu > 0)
1032 {
1033 PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
1034 if ( pR0Logger
1035 && RT_UNLIKELY(!pR0Logger->fRegistered))
1036 {
1037 RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
1038 pR0Logger->fRegistered = true;
1039 }
1040 }
1041#endif
1042
1043 int rc;
1044 bool fPreemptRestored = false;
1045 if (!HMR0SuspendPending())
1046 {
1047 /*
1048 * Register thread-context hooks if required.
1049 */
1050 if ( pVCpu->vmm.s.hCtxHook != NIL_RTTHREADCTXHOOK
1051 && !RTThreadCtxHookIsEnabled(pVCpu->vmm.s.hCtxHook))
1052 {
1053 rc = RTThreadCtxHookEnable(pVCpu->vmm.s.hCtxHook);
1054 AssertRC(rc);
1055 }
1056
1057 /*
1058 * Enter HM context.
1059 */
1060 rc = HMR0Enter(pVM, pVCpu);
1061 if (RT_SUCCESS(rc))
1062 {
1063 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
1064
1065 /*
1066 * When preemption hooks are in place, enable preemption now that
1067 * we're in HM context.
1068 */
1069 if (vmmR0ThreadCtxHookIsEnabled(pVCpu))
1070 {
1071 fPreemptRestored = true;
1072 RTThreadPreemptRestore(&PreemptState);
1073 }
1074
1075 /*
1076 * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
1077 */
1078 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
1079
1080 /*
1081 * Assert sanity on the way out. Using manual assertions code here as normal
1082 * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
1083 */
1084 if (RT_UNLIKELY( VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
1085 && RT_SUCCESS_NP(rc) && rc != VINF_VMM_CALL_HOST ))
1086 {
1087 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1088 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1089 "Got VMCPU state %d expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
1090 rc = VERR_VMM_WRONG_HM_VMCPU_STATE;
1091 }
1092 else if (RT_UNLIKELY(vmmR0ThreadCtxHookIsEnabled(pVCpu)))
1093 {
1094 pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
1095 RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
1096 "Thread-context hooks still registered! VCPU=%p Id=%u rc=%d.\n", pVCpu, pVCpu->idCpu, rc);
1097 rc = VERR_INVALID_STATE;
1098 }
1099
1100 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
1101 }
1102 STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
1103
1104 /** @todo shouldn't we disable the ctx hook here??? */
1105 }
1106 /*
1107 * The system is about to go into suspend mode; go back to ring 3.
1108 */
1109 else
1110 rc = VINF_EM_RAW_INTERRUPT;
1111
1112 /*
1113 * Invalidate the host CPU identifiers as we restore preemption.
1114 */
1115 pVCpu->iHostCpuSet = UINT32_MAX;
1116 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1117
1118 if (!fPreemptRestored)
1119 RTThreadPreemptRestore(&PreemptState);
1120
1121 pVCpu->vmm.s.iLastGZRc = rc;
1122
1123 /* Fire dtrace probe and collect statistics. */
1124 VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
1125#ifdef VBOX_WITH_STATISTICS
1126 vmmR0RecordRC(pVM, pVCpu, rc);
1127#endif
1128 }
1129 /*
1130 * Invalid CPU set index or TSC delta in need of measuring.
1131 */
1132 else
1133 {
1134 pVCpu->iHostCpuSet = UINT32_MAX;
1135 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1136 RTThreadPreemptRestore(&PreemptState);
1137 if (iHostCpuSet < RTCPUSET_MAX_CPUS)
1138 {
1139 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1140 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1141 0 /*default cTries*/);
1142 if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
1143 pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
1144 else
1145 pVCpu->vmm.s.iLastGZRc = rc;
1146 }
1147 else
1148 pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
1149 }
1150 break;
1151 }
1152
1153 /*
1154 * For profiling.
1155 */
1156 case VMMR0_DO_NOP:
1157 pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
1158 break;
1159
1160 /*
1161 * Impossible.
1162 */
1163 default:
1164 AssertMsgFailed(("%#x\n", enmOperation));
1165 pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
1166 break;
1167 }
1168}
1169
1170
1171/**
1172 * Validates a session or VM session argument.
1173 *
1174 * @returns true / false accordingly.
1175 * @param pVM Pointer to the VM. Optional; mutually exclusive with pClaimedSession.
1176 * @param pSession The session of the caller, checked against the claimed session.
1177 */
1178DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
1179{
1180 /* This must be set! */
1181 if (!pSession)
1182 return false;
1183
1184 /* Only one out of the two. */
1185 if (pVM && pClaimedSession)
1186 return false;
1187 if (pVM)
1188 pClaimedSession = pVM->pSession;
1189 return pClaimedSession == pSession;
1190}
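/* Summary of the accepted combinations (illustrative note, not from the
   original source); pSession must always be valid:
       pVM set,  pClaimedSession NULL  -> pVM->pSession must equal pSession.
       pVM NULL, pClaimedSession set   -> pClaimedSession must equal pSession.
       pVM set,  pClaimedSession set   -> rejected (only one may be given).
       pVM NULL, pClaimedSession NULL  -> rejected (NULL never equals pSession). */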
1191
1192
1193/**
1194 * VMMR0EntryEx worker function, either called directly or, whenever possible,
1195 * called through a longjmp so we can exit safely on failure.
1196 *
1197 * @returns VBox status code.
1198 * @param pVM Pointer to the VM.
1199 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1200 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1201 * @param enmOperation Which operation to execute.
1202 * @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
1203 * The support driver validates this if it's present.
1204 * @param u64Arg Some simple constant argument.
1205 * @param pSession The session of the caller.
1206 * @remarks Assume called with interrupts _enabled_.
1207 */
1208static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
1209{
1210 /*
1211 * Common VM pointer validation.
1212 */
1213 if (pVM)
1214 {
1215 if (RT_UNLIKELY( !VALID_PTR(pVM)
1216 || ((uintptr_t)pVM & PAGE_OFFSET_MASK)))
1217 {
1218 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
1219 return VERR_INVALID_POINTER;
1220 }
1221 if (RT_UNLIKELY( pVM->enmVMState < VMSTATE_CREATING
1222 || pVM->enmVMState > VMSTATE_TERMINATED
1223 || pVM->pVMR0 != pVM))
1224 {
1225 SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
1226 pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
1227 return VERR_INVALID_POINTER;
1228 }
1229
1230 if (RT_UNLIKELY(idCpu >= pVM->cCpus && idCpu != NIL_VMCPUID))
1231 {
1232 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu (%u vs cCpus=%u)\n", idCpu, pVM->cCpus);
1233 return VERR_INVALID_PARAMETER;
1234 }
1235 }
1236 else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
1237 {
1238 SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
1239 return VERR_INVALID_PARAMETER;
1240 }
1241
1242
1243 switch (enmOperation)
1244 {
1245 /*
1246 * GVM requests
1247 */
1248 case VMMR0_DO_GVMM_CREATE_VM:
1249 if (pVM || u64Arg || idCpu != NIL_VMCPUID)
1250 return VERR_INVALID_PARAMETER;
1251 return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);
1252
1253 case VMMR0_DO_GVMM_DESTROY_VM:
1254 if (pReqHdr || u64Arg)
1255 return VERR_INVALID_PARAMETER;
1256 return GVMMR0DestroyVM(pVM);
1257
1258 case VMMR0_DO_GVMM_REGISTER_VMCPU:
1259 {
1260 if (!pVM)
1261 return VERR_INVALID_PARAMETER;
1262 return GVMMR0RegisterVCpu(pVM, idCpu);
1263 }
1264
1265 case VMMR0_DO_GVMM_SCHED_HALT:
1266 if (pReqHdr)
1267 return VERR_INVALID_PARAMETER;
1268 return GVMMR0SchedHalt(pVM, idCpu, u64Arg);
1269
1270 case VMMR0_DO_GVMM_SCHED_WAKE_UP:
1271 if (pReqHdr || u64Arg)
1272 return VERR_INVALID_PARAMETER;
1273 return GVMMR0SchedWakeUp(pVM, idCpu);
1274
1275 case VMMR0_DO_GVMM_SCHED_POKE:
1276 if (pReqHdr || u64Arg)
1277 return VERR_INVALID_PARAMETER;
1278 return GVMMR0SchedPoke(pVM, idCpu);
1279
1280 case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
1281 if (u64Arg)
1282 return VERR_INVALID_PARAMETER;
1283 return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);
1284
1285 case VMMR0_DO_GVMM_SCHED_POLL:
1286 if (pReqHdr || u64Arg > 1)
1287 return VERR_INVALID_PARAMETER;
1288 return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);
1289
1290 case VMMR0_DO_GVMM_QUERY_STATISTICS:
1291 if (u64Arg)
1292 return VERR_INVALID_PARAMETER;
1293 return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);
1294
1295 case VMMR0_DO_GVMM_RESET_STATISTICS:
1296 if (u64Arg)
1297 return VERR_INVALID_PARAMETER;
1298 return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
1299
1300 /*
1301 * Initialize the R0 part of a VM instance.
1302 */
1303 case VMMR0_DO_VMMR0_INIT:
1304 return vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
1305
1306 /*
1307 * Terminate the R0 part of a VM instance.
1308 */
1309 case VMMR0_DO_VMMR0_TERM:
1310 return VMMR0TermVM(pVM, NULL);
1311
1312 /*
1313 * Attempt to enable hm mode and check the current setting.
1314 */
1315 case VMMR0_DO_HM_ENABLE:
1316 return HMR0EnableAllCpus(pVM);
1317
1318 /*
1319 * Setup the hardware accelerated session.
1320 */
1321 case VMMR0_DO_HM_SETUP_VM:
1322 return HMR0SetupVM(pVM);
1323
1324 /*
1325 * Switch to RC to execute Hypervisor function.
1326 */
1327 case VMMR0_DO_CALL_HYPERVISOR:
1328 {
1329 /*
1330 * Validate input / context.
1331 */
1332 if (RT_UNLIKELY(idCpu != 0))
1333 return VERR_INVALID_CPU_ID;
1334 if (RT_UNLIKELY(pVM->cCpus != 1))
1335 return VERR_INVALID_PARAMETER;
1336 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1337#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
1338 if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
1339 return VERR_PGM_NO_CR3_SHADOW_ROOT;
1340#endif
1341
1342 /*
1343 * Disable interrupts.
1344 */
1345 RTCCUINTREG fFlags = ASMIntDisableFlags();
1346
1347 /*
1348 * Get the host CPU identifiers, make sure they are valid and that
1349 * we've got a TSC delta for the CPU.
1350 */
1351 RTCPUID idHostCpu;
1352 uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
1353 if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
1354 {
1355 ASMSetFlags(fFlags);
1356 return VERR_INVALID_CPU_INDEX;
1357 }
1358 if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
1359 {
1360 ASMSetFlags(fFlags);
1361 int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
1362 2 /*cMsWaitRetry*/, 5*RT_MS_1SEC /*cMsWaitThread*/,
1363 0 /*default cTries*/);
1364 if (RT_FAILURE(rc) && rc != VERR_CPU_OFFLINE)
1365 return rc;
1366 }
1367
1368 /*
1369 * Commit the CPU identifiers.
1370 */
1371#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
1372 CPUMR0SetLApic(pVCpu, iHostCpuSet);
1373#endif
1374 pVCpu->iHostCpuSet = iHostCpuSet;
1375 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
1376
1377 /*
1378 * We might need to disable VT-x if the active switcher turns off paging.
1379 */
1380 bool fVTxDisabled;
1381 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
1382 if (RT_SUCCESS(rc))
1383 {
1384 /*
1385 * Go through the wormhole...
1386 */
1387 rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
1388
1389 /*
1390 * Re-enable VT-x before we dispatch any pending host interrupts.
1391 */
1392 HMR0LeaveSwitcher(pVM, fVTxDisabled);
1393
1394 if ( rc == VINF_EM_RAW_INTERRUPT
1395 || rc == VINF_EM_RAW_INTERRUPT_HYPER)
1396 TRPMR0DispatchHostInterrupt(pVM);
1397 }
1398
1399 /*
1400 * Invalidate the host CPU identifiers as we restore interrupts.
1401 */
1402 pVCpu->iHostCpuSet = UINT32_MAX;
1403 ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
1404 ASMSetFlags(fFlags);
1405 return rc;
1406 }
1407
1408 /*
1409 * PGM wrappers.
1410 */
1411 case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
1412 if (idCpu == NIL_VMCPUID)
1413 return VERR_INVALID_CPU_ID;
1414 return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);
1415
1416 case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
1417 if (idCpu == NIL_VMCPUID)
1418 return VERR_INVALID_CPU_ID;
1419 return PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);
1420
1421 case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
1422 if (idCpu == NIL_VMCPUID)
1423 return VERR_INVALID_CPU_ID;
1424 return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);
1425
1426 case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
1427 if (idCpu != 0)
1428 return VERR_INVALID_CPU_ID;
1429 return PGMR0PhysSetupIommu(pVM);
1430
1431 /*
1432 * GMM wrappers.
1433 */
1434 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1435 if (u64Arg)
1436 return VERR_INVALID_PARAMETER;
1437 return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);
1438
1439 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1440 if (u64Arg)
1441 return VERR_INVALID_PARAMETER;
1442 return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);
1443
1444 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1445 if (u64Arg)
1446 return VERR_INVALID_PARAMETER;
1447 return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);
1448
1449 case VMMR0_DO_GMM_FREE_PAGES:
1450 if (u64Arg)
1451 return VERR_INVALID_PARAMETER;
1452 return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);
1453
1454 case VMMR0_DO_GMM_FREE_LARGE_PAGE:
1455 if (u64Arg)
1456 return VERR_INVALID_PARAMETER;
1457 return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);
1458
1459 case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
1460 if (u64Arg)
1461 return VERR_INVALID_PARAMETER;
1462 return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);
1463
1464 case VMMR0_DO_GMM_QUERY_MEM_STATS:
1465 if (idCpu == NIL_VMCPUID)
1466 return VERR_INVALID_CPU_ID;
1467 if (u64Arg)
1468 return VERR_INVALID_PARAMETER;
1469 return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);
1470
1471 case VMMR0_DO_GMM_BALLOONED_PAGES:
1472 if (u64Arg)
1473 return VERR_INVALID_PARAMETER;
1474 return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);
1475
1476 case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
1477 if (u64Arg)
1478 return VERR_INVALID_PARAMETER;
1479 return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);
1480
1481 case VMMR0_DO_GMM_SEED_CHUNK:
1482 if (pReqHdr)
1483 return VERR_INVALID_PARAMETER;
1484 return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);
1485
1486 case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
1487 if (idCpu == NIL_VMCPUID)
1488 return VERR_INVALID_CPU_ID;
1489 if (u64Arg)
1490 return VERR_INVALID_PARAMETER;
1491 return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);
1492
1493 case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
1494 if (idCpu == NIL_VMCPUID)
1495 return VERR_INVALID_CPU_ID;
1496 if (u64Arg)
1497 return VERR_INVALID_PARAMETER;
1498 return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);
1499
1500 case VMMR0_DO_GMM_RESET_SHARED_MODULES:
1501 if (idCpu == NIL_VMCPUID)
1502 return VERR_INVALID_CPU_ID;
1503 if ( u64Arg
1504 || pReqHdr)
1505 return VERR_INVALID_PARAMETER;
1506 return GMMR0ResetSharedModules(pVM, idCpu);
1507
1508#ifdef VBOX_WITH_PAGE_SHARING
1509 case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
1510 {
1511 if (idCpu == NIL_VMCPUID)
1512 return VERR_INVALID_CPU_ID;
1513 if ( u64Arg
1514 || pReqHdr)
1515 return VERR_INVALID_PARAMETER;
1516
1517 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1518 Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());
1519
1520# ifdef DEBUG_sandervl
1521 /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
1522 /** @todo This can have bad side effects for unexpected jumps back to ring-3. */
1523 int rc = GMMR0CheckSharedModulesStart(pVM);
1524 if (rc == VINF_SUCCESS)
1525 {
1526 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
1527 Assert( rc == VINF_SUCCESS
1528 || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
1529 GMMR0CheckSharedModulesEnd(pVM);
1530 }
1531# else
1532 int rc = GMMR0CheckSharedModules(pVM, pVCpu);
1533# endif
1534 return rc;
1535 }
1536#endif
1537
1538#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
1539 case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
1540 if (u64Arg)
1541 return VERR_INVALID_PARAMETER;
1542 return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
1543#endif
1544
1545 case VMMR0_DO_GMM_QUERY_STATISTICS:
1546 if (u64Arg)
1547 return VERR_INVALID_PARAMETER;
1548 return GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);
1549
1550 case VMMR0_DO_GMM_RESET_STATISTICS:
1551 if (u64Arg)
1552 return VERR_INVALID_PARAMETER;
1553 return GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
1554
1555 /*
1556 * A quick GCFGM mock-up.
1557 */
1558 /** @todo GCFGM with proper access control, ring-3 management interface and all that. */
1559 case VMMR0_DO_GCFGM_SET_VALUE:
1560 case VMMR0_DO_GCFGM_QUERY_VALUE:
1561 {
1562 if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1563 return VERR_INVALID_PARAMETER;
1564 PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
1565 if (pReq->Hdr.cbReq != sizeof(*pReq))
1566 return VERR_INVALID_PARAMETER;
1567 int rc;
1568 if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
1569 {
1570 rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1571 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1572 // rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
1573 }
1574 else
1575 {
1576 rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1577 //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
1578 // rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
1579 }
1580 return rc;
1581 }
1582
1583 /*
1584 * PDM Wrappers.
1585 */
1586 case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
1587 {
1588 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1589 return VERR_INVALID_PARAMETER;
1590 return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
1591 }
1592
1593 case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
1594 {
1595 if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
1596 return VERR_INVALID_PARAMETER;
1597 return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
1598 }
1599
1600 /*
1601 * Requests to the internal networking service.
1602 */
1603 case VMMR0_DO_INTNET_OPEN:
1604 {
1605 PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
1606 if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
1607 return VERR_INVALID_PARAMETER;
1608 return IntNetR0OpenReq(pSession, pReq);
1609 }
1610
1611 case VMMR0_DO_INTNET_IF_CLOSE:
1612 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1613 return VERR_INVALID_PARAMETER;
1614 return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);
1615
1616 case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
1617 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1618 return VERR_INVALID_PARAMETER;
1619 return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);
1620
1621 case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
1622 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1623 return VERR_INVALID_PARAMETER;
1624 return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);
1625
1626 case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
1627 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1628 return VERR_INVALID_PARAMETER;
1629 return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);
1630
1631 case VMMR0_DO_INTNET_IF_SET_ACTIVE:
1632 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1633 return VERR_INVALID_PARAMETER;
1634 return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);
1635
1636 case VMMR0_DO_INTNET_IF_SEND:
1637 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1638 return VERR_INVALID_PARAMETER;
1639 return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);
1640
1641 case VMMR0_DO_INTNET_IF_WAIT:
1642 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1643 return VERR_INVALID_PARAMETER;
1644 return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);
1645
1646 case VMMR0_DO_INTNET_IF_ABORT_WAIT:
1647 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1648 return VERR_INVALID_PARAMETER;
1649 return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
1650
1651#ifdef VBOX_WITH_PCI_PASSTHROUGH
1652 /*
1653 * Requests to the host PCI driver service.
1654 */
1655 case VMMR0_DO_PCIRAW_REQ:
1656 if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
1657 return VERR_INVALID_PARAMETER;
1658 return PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
1659#endif
1660 /*
1661 * For profiling.
1662 */
1663 case VMMR0_DO_NOP:
1664 case VMMR0_DO_SLOW_NOP:
1665 return VINF_SUCCESS;
1666
1667 /*
1668 * For testing Ring-0 APIs invoked in this environment.
1669 */
1670 case VMMR0_DO_TESTS:
1671 /** @todo make new test */
1672 return VINF_SUCCESS;
1673
1674
1675#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1676 case VMMR0_DO_TEST_SWITCHER3264:
1677 if (idCpu == NIL_VMCPUID)
1678 return VERR_INVALID_CPU_ID;
1679 return HMR0TestSwitcher3264(pVM);
1680#endif
1681 default:
1682 /*
1683 * We're returning VERR_NOT_SUPPORTED here so we've got something other
1684 * than -1, which the interrupt gate glue code might return.
1685 */
1686 Log(("operation %#x is not supported\n", enmOperation));
1687 return VERR_NOT_SUPPORTED;
1688 }
1689}
1690
1691
1692/**
1693 * Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
1694 */
1695typedef struct VMMR0ENTRYEXARGS
1696{
1697 PVM pVM;
1698 VMCPUID idCpu;
1699 VMMR0OPERATION enmOperation;
1700 PSUPVMMR0REQHDR pReq;
1701 uint64_t u64Arg;
1702 PSUPDRVSESSION pSession;
1703} VMMR0ENTRYEXARGS;
1704/** Pointer to a vmmR0EntryExWrapper argument package. */
1705typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;
1706
1707/**
1708 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
1709 *
1710 * @returns VBox status code.
1711 * @param pvArgs The argument package
1712 */
1713static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
1714{
1715 return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
1716 ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
1717 ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
1718 ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
1719 ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
1720 ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
1721}
1722
1723
1724/**
1725 * The Ring 0 entry point, called by the support library (SUP).
1726 *
1727 * @returns VBox status code.
1728 * @param pVM Pointer to the VM.
1729 * @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
1730 * is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't.
1731 * @param enmOperation Which operation to execute.
1732 * @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
1733 * @param u64Arg Some simple constant argument.
1734 * @param pSession The session of the caller.
1735 * @remarks Assume called with interrupts _enabled_.
1736 */
1737VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
1738{
1739 /*
1740 * Requests that should only happen on the EMT thread will be
1741 * wrapped in a setjmp so we can assert without causing trouble.
1742 */
1743 if ( VALID_PTR(pVM)
1744 && pVM->pVMR0
1745 && idCpu < pVM->cCpus)
1746 {
1747 switch (enmOperation)
1748 {
1749 /* These might/will be called before VMMR3Init. */
1750 case VMMR0_DO_GMM_INITIAL_RESERVATION:
1751 case VMMR0_DO_GMM_UPDATE_RESERVATION:
1752 case VMMR0_DO_GMM_ALLOCATE_PAGES:
1753 case VMMR0_DO_GMM_FREE_PAGES:
1754 case VMMR0_DO_GMM_BALLOONED_PAGES:
1755 /* On the mac we might not have a valid jmp buf, so check these as well. */
1756 case VMMR0_DO_VMMR0_INIT:
1757 case VMMR0_DO_VMMR0_TERM:
1758 {
1759 PVMCPU pVCpu = &pVM->aCpus[idCpu];
1760
1761 if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
1762 break;
1763
1764 /** @todo validate this EMT claim... GVM knows. */
1765 VMMR0ENTRYEXARGS Args;
1766 Args.pVM = pVM;
1767 Args.idCpu = idCpu;
1768 Args.enmOperation = enmOperation;
1769 Args.pReq = pReq;
1770 Args.u64Arg = u64Arg;
1771 Args.pSession = pSession;
1772 return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
1773 }
1774
1775 default:
1776 break;
1777 }
1778 }
1779 return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
1780}


/**
 * Checks whether we've armed the ring-0 long jump machinery.
 *
 * @returns @c true / @c false.
 * @param   pVCpu       Pointer to the VMCPU.
 * @thread  EMT
 * @sa      VMMIsLongJumpArmed
 */
VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
{
#ifdef RT_ARCH_X86
    return pVCpu->vmm.s.CallRing3JmpBufR0.eip
        && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
#else
    return pVCpu->vmm.s.CallRing3JmpBufR0.rip
        && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
#endif
}


/**
 * Checks whether we've done a ring-3 long jump.
 *
 * @returns @c true / @c false.
 * @param   pVCpu       Pointer to the VMCPU.
 * @thread  EMT
 */
VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
{
    return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
}
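
/*
 * A minimal sketch (illustrative only, excluded from the build) of the
 * pattern these queries support: only attempt a ring-3 round trip when the
 * longjmp machinery is armed, since otherwise there is no saved stack to
 * return to.  The helper name is hypothetical; the real handlers below
 * (logger flush, assertions) open-code the same check on the jump buffer.
 */
#if 0
static void vmmR0ExampleFlushToRing3(PVM pVM, PVMCPU pVCpu)
{
    if (VMMR0IsLongJumpArmed(pVCpu))    /* rip/eip saved and no ring-3 call in flight */
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
}
#endif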


/**
 * Internal R0 logger worker: Flush logger.
 *
 * @param   pLogger     The logger instance to flush.
 * @remark  This function must be exported!
 */
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This code is a bit paranoid.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (    !VALID_PTR(pVM)
        ||  pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
# ifdef RT_ARCH_X86
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (    !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            ||  pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#endif /* LOG_ENABLED */
}
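
/*
 * The pointer arithmetic at the top of the flush worker is the classic
 * container-of pattern: the RTLOGGER instance is embedded in a VMMR0LOGGER
 * wrapper, so subtracting the member offset recovers the wrapper.  A generic
 * sketch of the same idea (hypothetical macro name, excluded from the build):
 */
#if 0
# define VMM_EXAMPLE_CONTAINER_OF(pMember, Type, Member) \
    ( (Type *)((uintptr_t)(pMember) - RT_OFFSETOF(Type, Member)) )
/* PVMMR0LOGGER pR0Logger = VMM_EXAMPLE_CONTAINER_OF(pLogger, VMMR0LOGGER, Logger); */
#endif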

/**
 * Internal R0 logger worker: Custom prefix.
 *
 * @returns Number of chars written.
 *
 * @param   pLogger     The logger instance.
 * @param   pchBuf      The output buffer.
 * @param   cchBuf      The size of the buffer.
 * @param   pvUser      User argument (ignored).
 */
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    NOREF(pvUser);
#ifdef LOG_ENABLED
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (    !VALID_PTR(pR0Logger)
        ||  !VALID_PTR(pR0Logger + 1)
        ||  pLogger->u32Magic != RTLOGGER_MAGIC
        ||  cchBuf < 2)
        return 0;

    static const char s_szHex[17] = "0123456789abcdef";
    VMCPUID const     idCpu       = pR0Logger->idCpu;
    pchBuf[1] = s_szHex[ idCpu       & 15];
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];

    return 2;
#else
    return 0;
#endif
}
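
/*
 * A worked example of the prefix above (illustrative): for idCpu 0x1a the
 * callback stores '1' in pchBuf[0] and 'a' in pchBuf[1], so each log line
 * from that virtual CPU starts with "1a".  Only the low eight bits of the
 * CPU ID are encoded; two hex digits suffice for the supported CPU counts.
 */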

#ifdef LOG_ENABLED

/**
 * Disables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR0_INT_DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
}


/**
 * Enables flushing of the ring-0 debug log.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR0_INT_DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
}


/**
 * Checks if log flushing is disabled or not.
 *
 * @returns true if flushing is disabled; also true when no ring-0 logger
 *          is installed.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR0_INT_DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
    return true;
}
#endif /* LOG_ENABLED */
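
/*
 * A minimal usage sketch (illustrative only, excluded from the build):
 * flushing is suppressed across code that cannot tolerate the ring-3 round
 * trip done by vmmR0LoggerFlush, then re-enabled afterwards.  The helper
 * name and the elided critical section are hypothetical.
 */
#if 0
static void vmmR0ExampleNoFlushSection(PVMCPU pVCpu)
{
    VMMR0LogFlushDisable(pVCpu);                /* vmmR0LoggerFlush now returns quietly */
    Assert(VMMR0IsLogFlushDisabled(pVCpu));
    /* ... code that must not long jump to ring-3 for log flushing ... */
    VMMR0LogFlushEnable(pVCpu);
}
#endif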

/**
 * Jump back to ring-3 if we're the EMT and the longjmp is armed.
 *
 * @returns true if the breakpoint should be hit, false if it should be ignored.
 */
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);

        if (pVCpu)
        {
# ifdef RT_ARCH_X86
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.eip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
            if (    pVCpu->vmm.s.CallRing3JmpBufR0.rip
                &&  !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
            {
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
# ifdef RT_OS_LINUX
    return true;
# else
    return false;
# endif
#endif
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszExpr     Expression.  Can be NULL.
 * @param   uLine       Location line number.
 * @param   pszFile     Location file name.
 * @param   pszFunction Location function name.
 */
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location  : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location  : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}
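
/*
 * A minimal sketch (illustrative only, excluded from the build) of how the
 * buffered assertion text could be consumed from ring-3 once control gets
 * back there.  The helper name is hypothetical; presumably the ring-3 VMM
 * dumps these buffers as part of its fatal-error handling.
 */
#if 0
static void vmmR3ExampleDumpR0Assertion(PVM pVM)
{
    if (pVM->vmm.s.szRing0AssertMsg1[0])
        LogRel(("%s%s\n", pVM->vmm.s.szRing0AssertMsg1, pVM->vmm.s.szRing0AssertMsg2));
}
#endif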


/**
 * Callback for RTLogFormatV which writes to the ring-3 log port.
 * See PFNLOGOUTPUT() for details.
 */
static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
{
    for (size_t i = 0; i < cbChars; i++)
        LogAlways(("%c", pachChars[i]));

    NOREF(pv);
    return cbChars;
}


/**
 * Override this so we can push it up to ring-3.
 *
 * @param   pszFormat   The format string.
 * @param   va          Arguments.
 */
DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
{
    va_list vaCopy;

    /*
     * Push the message to the loggers.
     */
    PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }
    pLog = RTLogRelDefaultInstance();
    if (pLog)
    {
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Push it to the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        va_copy(vaCopy, va);
        RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Continue the normal way.
     */
    RTAssertMsg2V(pszFormat, va);
}