VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 107194

Last change on this file since 107194 was 107194, checked in by vboxsync, 6 months ago

VMM: More adjustments for VBOX_WITH_ONLY_PGM_NEM_MODE, VBOX_WITH_MINIMAL_R0, VBOX_WITH_HWVIRT and such. jiraref:VBP-1466

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 115.4 KB
1/* $Id: EM.cpp 107194 2024-11-29 14:47:06Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_em EM - The Execution Monitor / Manager
29 *
30 * The Execution Monitor/Manager is responsible for running the VM, scheduling
31 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
32 * Interpreted), and keeping the CPU states in sync. The function
33 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
34 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
35 * emR3RemExecute).
36 *
37 * The interpreted execution is only used to avoid switching between
38 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
39 * The interpretation is thus implemented as part of EM.
40 *
41 * @see grp_em
42 */
43
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#define LOG_GROUP LOG_GROUP_EM
49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
50#include <VBox/vmm/em.h>
51#include <VBox/vmm/vmm.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/trpm.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/nem.h>
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/pdmapic.h>
60#include <VBox/vmm/tm.h>
61#include <VBox/vmm/mm.h>
62#include <VBox/vmm/ssm.h>
63#include <VBox/vmm/pdmapi.h>
64#include <VBox/vmm/pdmcritsect.h>
65#include <VBox/vmm/pdmqueue.h>
66#include <VBox/vmm/hm.h>
67#include "EMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70#include <VBox/vmm/cpumdis.h>
71#include <VBox/dis.h>
72#include <VBox/err.h>
73#include "VMMTracing.h"
74
75#include <iprt/asm.h>
76#include <iprt/string.h>
77#include <iprt/stream.h>
78#include <iprt/thread.h>
79
80#include "EMInline.h"
81
82
83/*********************************************************************************************************************************
84* Internal Functions *
85*********************************************************************************************************************************/
86static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
87static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
88#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
89static const char *emR3GetStateName(EMSTATE enmState);
90#endif
91static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
92
93
94/**
95 * Initializes the EM.
96 *
97 * @returns VBox status code.
98 * @param pVM The cross context VM structure.
99 */
100VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
101{
102 LogFlow(("EMR3Init\n"));
103 /*
104 * Assert alignment and sizes.
105 */
106 AssertCompileMemberAlignment(VM, em.s, 32);
107 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
108 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
109 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
110
111 /*
112 * Init the structure.
113 */
114 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
115 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
116
117 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
118#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN) && !defined(VBOX_VMM_TARGET_ARMV8)
119 true
120#else
121 false
122#endif
123 );
124 AssertLogRelRCReturn(rc, rc);
125
126 bool fEnabled;
127 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
128 AssertLogRelRCReturn(rc, rc);
129 pVM->em.s.fGuruOnTripleFault = !fEnabled;
130 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
131 {
132 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
133 pVM->em.s.fGuruOnTripleFault = true;
134 }
135
136 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
137
138 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
139 * Whether to try to correlate exit history in any context, detect hot spots and
140 * try to optimize these using IEM if there are other exits close by. This
141 * overrides the context specific settings. */
142 bool fExitOptimizationEnabled = true;
143 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
144 AssertLogRelRCReturn(rc, rc);
145
146 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
147 * Whether to optimize exits in ring-0. Setting this to false will also disable
148 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
149 * capabilities of the host kernel, this optimization may be unavailable. */
150 bool fExitOptimizationEnabledR0 = true;
151 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
152 AssertLogRelRCReturn(rc, rc);
153 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
154
155 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
156 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
157 * hooks are in effect). */
158 /** @todo change the default to true here */
159 bool fExitOptimizationEnabledR0PreemptDisabled = true;
160 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
161 AssertLogRelRCReturn(rc, rc);
162 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
163
164 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
165 * Maximum number of instructions to let EMHistoryExec execute in one go. */
166 uint16_t cHistoryExecMaxInstructions = 8192;
167 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
168 AssertLogRelRCReturn(rc, rc);
169 if (cHistoryExecMaxInstructions < 16)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
171
172 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
173 * Maximum number of instructions between exits during probing. */
174 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
175#ifdef RT_OS_WINDOWS
176 if (VM_IS_NEM_ENABLED(pVM))
177 cHistoryProbeMaxInstructionsWithoutExit = 32;
178#endif
179 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
180 cHistoryProbeMaxInstructionsWithoutExit);
181 AssertLogRelRCReturn(rc, rc);
182 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
183 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
184 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 2");
185
186 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
187 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
188 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
189 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
190 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
191 cHistoryProbeMinInstructions);
192 AssertLogRelRCReturn(rc, rc);
193
194 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
195 {
196 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
197 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
198 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
199 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
200 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
201 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
202 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
203 }
204
205#ifdef VBOX_WITH_IEM_RECOMPILER
206 /** @cfgm{/EM/IemRecompiled, bool, true}
207 * Whether IEM bulk execution is recompiled or interpreted. */
208 rc = CFGMR3QueryBoolDef(pCfgEM, "IemRecompiled", &pVM->em.s.fIemRecompiled, true);
209 AssertLogRelRCReturn(rc, rc);
210#endif
211
212 /*
213 * Saved state.
214 */
215 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
216 NULL, NULL, NULL,
217 NULL, emR3Save, NULL,
218 NULL, emR3Load, NULL);
219 if (RT_FAILURE(rc))
220 return rc;
221
222 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
223 {
224 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
225
226 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
227 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
228 pVCpu->em.s.msTimeSliceStart = 0; /* paranoia */
229 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
230
231# define EM_REG_COUNTER(a, b, c) \
232 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
233 AssertRC(rc);
234
235# define EM_REG_COUNTER_USED(a, b, c) \
236 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
237 AssertRC(rc);
238
239# define EM_REG_PROFILE(a, b, c) \
240 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
241 AssertRC(rc);
242
243# define EM_REG_PROFILE_ADV(a, b, c) \
244 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
245 AssertRC(rc);
246
247 /*
248 * Statistics.
249 */
250#ifdef VBOX_WITH_STATISTICS
251 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
252 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
253
254 /* these should be considered for release statistics. */
255 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
256 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
257 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
258#endif
259 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
260 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
261#ifdef VBOX_WITH_STATISTICS
262 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
263 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
264 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
265#endif
266 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
267 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
268#ifdef VBOX_WITH_STATISTICS
269 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
270#endif
271
272 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
273 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
274 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
275 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RecompilerExecute (excluding FFs).");
276
277 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
278
279 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
280 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
281 AssertRC(rc);
282
283 /* History record statistics */
284 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
285 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
286 AssertRC(rc);
287
288 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
289 {
290 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
291 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
292 AssertRC(rc);
293 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
294 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
295 AssertRC(rc);
296 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
297 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
298 AssertRC(rc);
299 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
300 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
301 AssertRC(rc);
302 }
303
304 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
305 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
306 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
307 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
308 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
309 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
310 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
311 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
312 }
313
314 emR3InitDbg(pVM);
315 return VINF_SUCCESS;
316}
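/*
 * Minimal illustrative sketch (not part of the original file) of how a VM
 * configuration constructor could pre-seed the /EM keys that EMR3Init() above
 * reads via CFGMR3Query*Def. The helper name is invented for the example; the
 * CFGM calls are the regular ring-3 configuration API.
 */
#if 0 /* example only */
static int emExampleSeedEmConfig(PVM pVM)
{
    PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pEM   = CFGMR3GetChild(pRoot, "EM");
    int rc = VINF_SUCCESS;
    if (!pEM)
        rc = CFGMR3InsertNode(pRoot, "EM", &pEM);
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pEM, "IemExecutesAll", 1);          /* run everything through IEM */
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pEM, "TripleFaultReset", 0);        /* keep guru meditation on triple faults */
    if (RT_SUCCESS(rc))
        rc = CFGMR3InsertInteger(pEM, "HistoryExecMaxInstructions", 8192);
    return rc;
}
#endif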
317
318
319/**
320 * Called when a VM initialization stage is completed.
321 *
322 * @returns VBox status code.
323 * @param pVM The cross context VM structure.
324 * @param enmWhat The initialization state that was completed.
325 */
326VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
327{
328 if (enmWhat == VMINITCOMPLETED_RING0)
329 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
330 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
331 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
332 return VINF_SUCCESS;
333}
334
335
336/**
337 * Applies relocations to data and code managed by this
338 * component. This function will be called at init and
339 * whenever the VMM needs to relocate itself inside the GC.
340 *
341 * @param pVM The cross context VM structure.
342 */
343VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
344{
345 LogFlow(("EMR3Relocate\n"));
346 RT_NOREF(pVM);
347}
348
349
350/**
351 * Reset the EM state for a CPU.
352 *
353 * Called by EMR3Reset and hot plugging.
354 *
355 * @param pVCpu The cross context virtual CPU structure.
356 */
357VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
358{
359 /* Reset scheduling state. */
360 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
361
362 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
363 out of the HALTED state here so that enmPrevState doesn't end up as
364 HALTED when EMR3Execute returns. */
365 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
366 {
367 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
368 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
369 }
370}
371
372
373/**
374 * Reset notification.
375 *
376 * @param pVM The cross context VM structure.
377 */
378VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
379{
380 Log(("EMR3Reset: \n"));
381 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
382 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
383}
384
385
386/**
387 * Terminates the EM.
388 *
389 * Termination means cleaning up and freeing all resources,
390 * the VM itself is at this point powered off or suspended.
391 *
392 * @returns VBox status code.
393 * @param pVM The cross context VM structure.
394 */
395VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
396{
397 RT_NOREF(pVM);
398 return VINF_SUCCESS;
399}
400
401
402/**
403 * Execute state save operation.
404 *
405 * @returns VBox status code.
406 * @param pVM The cross context VM structure.
407 * @param pSSM SSM operation handle.
408 */
409static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
410{
411 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
412 {
413 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
414
415 SSMR3PutBool(pSSM, false /*fForceRAW*/);
416
417 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
418 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
419 SSMR3PutU32(pSSM,
420 pVCpu->em.s.enmPrevState == EMSTATE_NONE
421 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED
422 || pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
423 ? pVCpu->em.s.enmPrevState : EMSTATE_NONE);
424
425 /* Save mwait state. */
426 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
427 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
428 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
429 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
430 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
431 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
432 AssertRCReturn(rc, rc);
433 }
434 return VINF_SUCCESS;
435}
436
437
438/**
439 * Execute state load operation.
440 *
441 * @returns VBox status code.
442 * @param pVM The cross context VM structure.
443 * @param pSSM SSM operation handle.
444 * @param uVersion Data layout version.
445 * @param uPass The data pass.
446 */
447static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
448{
449 /*
450 * Validate version.
451 */
452 if ( uVersion > EM_SAVED_STATE_VERSION
453 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
454 {
455 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
456 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
457 }
458 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
459
460 /*
461 * Load the saved state.
462 */
463 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
464 {
465 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
466
467 bool fForceRAWIgnored;
468 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
469 AssertRCReturn(rc, rc);
470
471 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
472 {
473 /* We are only interested in two enmPrevState values for use when
474 EMR3ExecuteVM is called.
475 Since ~r157540, only these two and EMSTATE_NONE are saved. */
476 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
477 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
478 if ( pVCpu->em.s.enmPrevState != EMSTATE_WAIT_SIPI
479 && pVCpu->em.s.enmPrevState != EMSTATE_HALTED)
480 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
481
482 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
483 }
484 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
485 {
486 /* Load mwait state. */
487 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
488 AssertRCReturn(rc, rc);
489 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
490 AssertRCReturn(rc, rc);
491 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
492 AssertRCReturn(rc, rc);
493 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
494 AssertRCReturn(rc, rc);
495 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
496 AssertRCReturn(rc, rc);
497 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
498 AssertRCReturn(rc, rc);
499 }
500 }
501 return VINF_SUCCESS;
502}
503
504
505/**
506 * Argument packet for emR3SetExecutionPolicy.
507 */
508struct EMR3SETEXECPOLICYARGS
509{
510 EMEXECPOLICY enmPolicy;
511 bool fEnforce;
512};
513
514
515/**
516 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
517 */
518static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
519{
520 /*
521 * Only the first CPU changes the variables.
522 */
523 if (pVCpu->idCpu == 0)
524 {
525 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
526 switch (pArgs->enmPolicy)
527 {
528 case EMEXECPOLICY_IEM_ALL:
529 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
530
531 /* For making '.alliem 1' useful during debugging, transition the
532 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
533 for (VMCPUID i = 0; i < pVM->cCpus; i++)
534 {
535 PVMCPU pVCpuX = pVM->apCpusR3[i];
536 switch (pVCpuX->em.s.enmState)
537 {
538 case EMSTATE_DEBUG_GUEST_RECOMPILER:
539 if (pVM->em.s.fIemRecompiled)
540 break;
541 RT_FALL_THROUGH();
542 case EMSTATE_DEBUG_GUEST_RAW:
543 case EMSTATE_DEBUG_GUEST_HM:
544 case EMSTATE_DEBUG_GUEST_NEM:
545 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
546 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
547 break;
548 case EMSTATE_DEBUG_GUEST_IEM:
549 default:
550 break;
551 }
552 }
553 break;
554
555 case EMEXECPOLICY_IEM_RECOMPILED:
556 pVM->em.s.fIemRecompiled = pArgs->fEnforce;
557 break;
558
559 default:
560 AssertFailedReturn(VERR_INVALID_PARAMETER);
561 }
562 Log(("EM: Set execution policy: fIemExecutesAll=%RTbool fIemRecompiled=%RTbool\n",
563 pVM->em.s.fIemExecutesAll, pVM->em.s.fIemRecompiled));
564 }
565
566 /*
567 * Force rescheduling if in HM, NEM, IEM/interpreter or IEM/recompiler.
568 */
569 Assert(pVCpu->em.s.enmState != EMSTATE_RAW_OBSOLETE);
570 return pVCpu->em.s.enmState == EMSTATE_HM
571 || pVCpu->em.s.enmState == EMSTATE_NEM
572 || pVCpu->em.s.enmState == EMSTATE_IEM
573 || pVCpu->em.s.enmState == EMSTATE_RECOMPILER
574 ? VINF_EM_RESCHEDULE
575 : VINF_SUCCESS;
576}
577
578
579/**
580 * Changes an execution scheduling policy parameter.
581 *
582 * This is used to enable or disable raw-mode / hardware-virtualization
583 * execution of user and supervisor code.
584 *
585 * @returns VINF_SUCCESS on success.
586 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
587 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
588 *
589 * @param pUVM The user mode VM handle.
590 * @param enmPolicy The scheduling policy to change.
591 * @param fEnforce Whether to enforce the policy or not.
592 */
593VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
594{
595 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
596 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
597 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
598
599 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
600 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
601}
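/*
 * Illustrative sketch (not part of the original file) of a ring-3 caller
 * toggling the policy set above, roughly what a debugger command like
 * '.alliem 1' boils down to. The helper name is invented for the example.
 */
#if 0 /* example only */
static int emExampleForceAllIem(PUVM pUVM, bool fEnable)
{
    int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, fEnable);
    if (RT_SUCCESS(rc))
    {
        bool fEnforced = false;
        rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fEnforced);
        Assert(RT_FAILURE(rc) || fEnforced == fEnable);
    }
    return rc;
}
#endif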
602
603
604/**
605 * Queries an execution scheduling policy parameter.
606 *
607 * @returns VBox status code
608 * @param pUVM The user mode VM handle.
609 * @param enmPolicy The scheduling policy to query.
610 * @param pfEnforced Where to return the current value.
611 */
612VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
613{
614 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
615 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
616 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
617 PVM pVM = pUVM->pVM;
618 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
619
620 /* No need to bother EMTs with a query. */
621 switch (enmPolicy)
622 {
623 case EMEXECPOLICY_IEM_ALL:
624 *pfEnforced = pVM->em.s.fIemExecutesAll;
625 break;
626 case EMEXECPOLICY_IEM_RECOMPILED:
627 *pfEnforced = pVM->em.s.fIemRecompiled;
628 break;
629 default:
630 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
631 }
632
633 return VINF_SUCCESS;
634}
635
636
637/**
638 * Queries the main execution engine of the VM.
639 *
640 * @returns VBox status code
641 * @param pUVM The user mode VM handle.
642 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
643 */
644VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
645{
646 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
647 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
648
649 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
650 PVM pVM = pUVM->pVM;
651 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
652
653 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
654 return VINF_SUCCESS;
655}
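/*
 * Illustrative sketch (not part of the original file) of a frontend querying
 * which execution engine was selected; the helper name is invented.
 */
#if 0 /* example only */
static void emExampleLogMainEngine(PUVM pUVM)
{
    uint8_t bEngine = VM_EXEC_ENGINE_NOT_SET;
    if (RT_SUCCESS(EMR3QueryMainExecutionEngine(pUVM, &bEngine)))
        LogRel(("Example: main execution engine is %#x (VM_EXEC_ENGINE_XXX)\n", bEngine));
}
#endif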
656
657
658/**
659 * Raise a fatal error.
660 *
661 * Safely terminate the VM with full state report and stuff. This function
662 * will naturally never return.
663 *
664 * @param pVCpu The cross context virtual CPU structure.
665 * @param rc VBox status code.
666 */
667VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
668{
669 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
670 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
671}
672
673
674#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
675/**
676 * Gets the EM state name.
677 *
678 * @returns pointer to read only state name,
679 * @param enmState The state.
680 */
681static const char *emR3GetStateName(EMSTATE enmState)
682{
683 switch (enmState)
684 {
685 case EMSTATE_NONE: return "EMSTATE_NONE";
686 case EMSTATE_RAW_OBSOLETE: return "EMSTATE_RAW_OBSOLETE";
687 case EMSTATE_HM: return "EMSTATE_HM";
688 case EMSTATE_IEM: return "EMSTATE_IEM";
689 case EMSTATE_RECOMPILER: return "EMSTATE_RECOMPILER";
690 case EMSTATE_HALTED: return "EMSTATE_HALTED";
691 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
692 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
693 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
694 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
695 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
696 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
697 case EMSTATE_DEBUG_GUEST_RECOMPILER: return "EMSTATE_DEBUG_GUEST_RECOMPILER";
698 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
699 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
700 case EMSTATE_IEM_THEN_REM_OBSOLETE: return "EMSTATE_IEM_THEN_REM_OBSOLETE";
701 case EMSTATE_NEM: return "EMSTATE_NEM";
702 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
703 default: return "Unknown!";
704 }
705}
706#endif /* LOG_ENABLED || VBOX_STRICT */
707
708#if !defined(VBOX_VMM_TARGET_ARMV8)
709
710/**
711 * Handle pending ring-3 I/O port write.
712 *
713 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
714 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
715 *
716 * @returns Strict VBox status code.
717 * @param pVM The cross context VM structure.
718 * @param pVCpu The cross context virtual CPU structure.
719 */
720VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
721{
722 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
723
724 /* Get and clear the pending data. */
725 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
726 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
727 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
728 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
729 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
730
731 /* Assert sanity. */
732 switch (cbValue)
733 {
734 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
735 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
736 case 4: break;
737 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
738 }
739 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
740
741 /* Do the work.*/
742 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
743 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
744 if (IOM_SUCCESS(rcStrict))
745 {
746 pVCpu->cpum.GstCtx.rip += cbInstr;
747 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
748 }
749 return rcStrict;
750}
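/*
 * Illustrative sketch (not part of the original file) of the ring-0 producer
 * side of the pending write handled above. The exact parameter order of
 * EMRZSetPendingIoPortWrite() is an assumption based on the fields consumed
 * by emR3ExecutePendingIoPortWrite(); check em.h for the real prototype.
 */
#if 0 /* example only */
static VBOXSTRICTRC emExampleDeferOutToRing3(PVMCPU pVCpu, RTIOPORT uPort, uint32_t uValue, uint8_t cbValue, uint8_t cbInstr)
{
    /* Record the access; the returned VINF_EM_PENDING_R3_IOPORT_WRITE status travels
       back to ring-3 where the EM loop replays it via emR3ExecutePendingIoPortWrite(). */
    return EMRZSetPendingIoPortWrite(pVCpu, uPort, cbInstr, cbValue, uValue);
}
#endif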
751
752
753/**
754 * Handle pending ring-3 I/O port read.
755 *
756 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
757 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
758 *
759 * @returns Strict VBox status code.
760 * @param pVM The cross context VM structure.
761 * @param pVCpu The cross context virtual CPU structure.
762 */
763VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
764{
765 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
766
767 /* Get and clear the pending data. */
768 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
769 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
770 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
771 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
772
773 /* Assert sanity. */
774 switch (cbValue)
775 {
776 case 1: break;
777 case 2: break;
778 case 4: break;
779 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
780 }
781 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
782 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
783
784 /* Do the work.*/
785 uint32_t uValue = 0;
786 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
787 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
788 if (IOM_SUCCESS(rcStrict))
789 {
790 if (cbValue == 4)
791 pVCpu->cpum.GstCtx.rax = uValue;
792 else if (cbValue == 2)
793 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
794 else
795 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
796 pVCpu->cpum.GstCtx.rip += cbInstr;
797 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
798 }
799 return rcStrict;
800}
801
802
803/**
804 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
805 * Worker for emR3ExecuteSplitLockInstruction}
806 */
807static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
808{
809 /* Only execute on the specified EMT. */
810 if (pVCpu == (PVMCPU)pvUser)
811 {
812 LogFunc(("\n"));
813 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
814 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
815 if (rcStrict == VINF_IEM_RAISED_XCPT)
816 rcStrict = VINF_SUCCESS;
817 return rcStrict;
818 }
819 RT_NOREF(pVM);
820 return VINF_SUCCESS;
821}
822
823
824/**
825 * Handle an instruction causing a split cacheline lock access in SMP VMs.
826 *
827 * Generally we only get here if the host has split-lock detection enabled and
828 * this caused an \#AC because of something the guest did. If we interpret the
829 * instruction as-is, we'll likely just repeat the split-lock access and
830 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
831 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
832 * rare to non-existing, we'll do a rendezvous of all EMTs and tell IEM to
833 * disregard the lock prefix when emulating the instruction.
834 *
835 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
836 * feature when entering guest context, but the support for the feature isn't a
837 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
838 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
839 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
840 * proper detection to SUPDrv later if we find it necessary.
841 *
842 * @see @bugref{10052}
843 *
844 * @returns Strict VBox status code.
845 * @param pVM The cross context VM structure.
846 * @param pVCpu The cross context virtual CPU structure.
847 */
848VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
849{
850 LogFunc(("\n"));
851 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
852}
853
854#endif /* !VBOX_VMM_TARGET_ARMV8 */
855
856/**
857 * Debug loop.
858 *
859 * @returns VBox status code for EM.
860 * @param pVM The cross context VM structure.
861 * @param pVCpu The cross context virtual CPU structure.
862 * @param rc Current EM VBox status code.
863 */
864static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
865{
866 for (;;)
867 {
868 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
869 const VBOXSTRICTRC rcLast = rc;
870
871 /*
872 * Debug related RC.
873 */
874 switch (VBOXSTRICTRC_VAL(rc))
875 {
876 /*
877 * Single step an instruction.
878 */
879 case VINF_EM_DBG_STEP:
880 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
881 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
882 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
883#ifdef VBOX_WITH_HWVIRT
884 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
885 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
886#endif
887#if !defined(VBOX_VMM_TARGET_ARMV8)
888 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
889 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
890 else
891 {
892 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
893 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
894 rc = VINF_EM_DBG_STEPPED;
895 }
896
897 if (rc != VINF_EM_EMULATE_SPLIT_LOCK)
898 { /* likely */ }
899 else
900 {
901 rc = emR3ExecuteSplitLockInstruction(pVM, pVCpu);
902 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
903 rc = VINF_EM_DBG_STEPPED;
904 }
905#else
906 AssertMsg(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM,
907 ("%u\n", pVCpu->em.s.enmState));
908 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
909#endif
910 break;
911
912 /*
913 * Simple events: stepped, breakpoint, stop/assertion.
914 */
915 case VINF_EM_DBG_STEPPED:
916 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
917 break;
918
919 case VINF_EM_DBG_BREAKPOINT:
920 rc = DBGFR3BpHit(pVM, pVCpu);
921 break;
922
923 case VINF_EM_DBG_STOP:
924 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
925 break;
926
927 case VINF_EM_DBG_EVENT:
928 rc = DBGFR3EventHandlePending(pVM, pVCpu);
929 break;
930
931 case VINF_EM_DBG_HYPER_STEPPED:
932 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
933 break;
934
935 case VINF_EM_DBG_HYPER_BREAKPOINT:
936 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
937 break;
938
939 case VINF_EM_DBG_HYPER_ASSERTION:
940 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
941 RTLogFlush(NULL);
942 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
943 break;
944
945 /*
946 * Guru meditation.
947 */
948 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
949 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
950 break;
951 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
952 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
953 break;
954
955 default: /** @todo don't use default for guru, but make special errors code! */
956 {
957 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
958 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
959 break;
960 }
961 }
962
963 /*
964 * Process the result.
965 */
966 switch (VBOXSTRICTRC_VAL(rc))
967 {
968 /*
969 * Continue the debugging loop.
970 */
971 case VINF_EM_DBG_STEP:
972 case VINF_EM_DBG_STOP:
973 case VINF_EM_DBG_EVENT:
974 case VINF_EM_DBG_STEPPED:
975 case VINF_EM_DBG_BREAKPOINT:
976 case VINF_EM_DBG_HYPER_STEPPED:
977 case VINF_EM_DBG_HYPER_BREAKPOINT:
978 case VINF_EM_DBG_HYPER_ASSERTION:
979 break;
980
981 /*
982 * Resuming execution (in some form) has to be done here if we got
983 * a hypervisor debug event.
984 */
985 case VINF_SUCCESS:
986 case VINF_EM_RESUME:
987 case VINF_EM_SUSPEND:
988 case VINF_EM_RESCHEDULE:
989 case VINF_EM_RESCHEDULE_REM:
990 case VINF_EM_HALT:
991 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
992 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
993 if (rc == VINF_SUCCESS)
994 rc = VINF_EM_RESCHEDULE;
995 return rc;
996
997 /*
998 * The debugger isn't attached.
999 * We'll simply turn the thing off since that's the easiest thing to do.
1000 */
1001 case VERR_DBGF_NOT_ATTACHED:
1002 switch (VBOXSTRICTRC_VAL(rcLast))
1003 {
1004 case VINF_EM_DBG_HYPER_STEPPED:
1005 case VINF_EM_DBG_HYPER_BREAKPOINT:
1006 case VINF_EM_DBG_HYPER_ASSERTION:
1007 case VERR_TRPM_PANIC:
1008 case VERR_TRPM_DONT_PANIC:
1009 case VERR_VMM_RING0_ASSERTION:
1010 case VERR_VMM_HYPER_CR3_MISMATCH:
1011 case VERR_VMM_RING3_CALL_DISABLED:
1012 return rcLast;
1013 }
1014 return VINF_EM_OFF;
1015
1016 /*
1017 * Status codes terminating the VM in one or another sense.
1018 */
1019 case VINF_EM_TERMINATE:
1020 case VINF_EM_OFF:
1021 case VINF_EM_RESET:
1022 case VINF_EM_NO_MEMORY:
1023 case VINF_EM_RAW_STALE_SELECTOR:
1024 case VINF_EM_RAW_IRET_TRAP:
1025 case VERR_TRPM_PANIC:
1026 case VERR_TRPM_DONT_PANIC:
1027 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1028 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1029 case VERR_VMM_RING0_ASSERTION:
1030 case VERR_VMM_HYPER_CR3_MISMATCH:
1031 case VERR_VMM_RING3_CALL_DISABLED:
1032 case VERR_INTERNAL_ERROR:
1033 case VERR_INTERNAL_ERROR_2:
1034 case VERR_INTERNAL_ERROR_3:
1035 case VERR_INTERNAL_ERROR_4:
1036 case VERR_INTERNAL_ERROR_5:
1037 case VERR_IPE_UNEXPECTED_STATUS:
1038 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1039 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1040 return rc;
1041
1042 /*
1043 * The rest is unexpected, and will keep us here.
1044 */
1045 default:
1046 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1047 break;
1048 }
1049 } /* debug for ever */
1050}
1051
1052
1053/**
1054 * Executes recompiled code.
1055 *
1056 * This function contains the recompiler version of the inner
1057 * execution loop (the outer loop being in EMR3ExecuteVM()).
1058 *
1059 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1060 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1061 *
1062 * @param pVM The cross context VM structure.
1063 * @param pVCpu The cross context virtual CPU structure.
1064 * @param fWasHalted Set if we're coming out of a CPU HALT state.
1065 * @param pfFFDone Where to store an indicator telling whether or not
1066 * FFs were done before returning.
1067 *
1068 */
1069static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool fWasHalted, bool *pfFFDone)
1070{
1071 STAM_REL_PROFILE_START(&pVCpu->em.s.StatREMTotal, a);
1072#ifdef VBOX_VMM_TARGET_ARMV8
1073 LogFlow(("emR3RecompilerExecute/%u: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64));
1074#else
1075 LogFlow(("emR3RecompilerExecute/%u: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
1076#endif
1077
1078 /*
1079 * Loop till we get a forced action which returns anything but VINF_SUCCESS.
1080 */
1081 *pfFFDone = false;
1082 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1083 for (;;)
1084 {
1085#ifdef LOG_ENABLED
1086# if defined(VBOX_VMM_TARGET_ARMV8)
1087 Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
1088# else
1089 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1090 Log(("EMR%d: %04X:%08RX64 RSP=%08RX64 IF=%d CR0=%x eflags=%x\n", CPUMGetGuestCPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel,
1091 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF,
1092 (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1093 else
1094 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1095# endif
1096#endif
1097
1098 /*
1099 * Execute.
1100 */
1101 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1102 {
1103 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1104#ifdef VBOX_WITH_IEM_RECOMPILER
1105 if (pVM->em.s.fIemRecompiled)
1106 rcStrict = IEMExecRecompiler(pVM, pVCpu, fWasHalted);
1107 else
1108#endif
1109 rcStrict = IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/);
1110 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1111 }
1112 else
1113 {
1114 /* Give up this time slice; virtual time continues */
1115 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1116 RTThreadSleep(5);
1117 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1118 rcStrict = VINF_SUCCESS;
1119 }
1120
1121 /*
1122 * Deal with high priority post execution FFs before doing anything
1123 * else. Sync back the state and leave the lock to be on the safe side.
1124 */
1125 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1126 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1127 rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
1128
1129 /*
1130 * Process the returned status code.
1131 */
1132 if (rcStrict != VINF_SUCCESS)
1133 {
1134#ifndef VBOX_VMM_TARGET_ARMV8
1135 if (rcStrict == VINF_EM_EMULATE_SPLIT_LOCK)
1136 rcStrict = emR3ExecuteSplitLockInstruction(pVM, pVCpu);
1137#endif
1138 if (rcStrict != VINF_SUCCESS)
1139 {
1140#if 0
1141 if (RT_LIKELY(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST))
1142 break;
1143 /* Fatal error: */
1144#endif
1145 break;
1146 }
1147 }
1148
1149
1150 /*
1151 * Check and execute forced actions.
1152 *
1153 * Sync back the VM state and leave the lock before calling any of
1154 * these, you never know what's going to happen here.
1155 */
1156#ifdef VBOX_HIGH_RES_TIMERS_HACK
1157 TMTimerPollVoid(pVM, pVCpu);
1158#endif
1159 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1160 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1161 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1162 {
1163 rcStrict = emR3ForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
1164 VBOXVMM_EM_FF_ALL_RET(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1165 if ( rcStrict != VINF_SUCCESS
1166 && rcStrict != VINF_EM_RESCHEDULE_REM)
1167 {
1168 *pfFFDone = true;
1169 break;
1170 }
1171 }
1172
1173 /*
1174 * Check if we can switch back to the main execution engine now.
1175 */
1176#if !defined(VBOX_VMM_TARGET_ARMV8)
1177 if (VM_IS_HM_ENABLED(pVM))
1178 {
1179 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1180 {
1181 *pfFFDone = true;
1182 rcStrict = VINF_EM_RESCHEDULE_EXEC_ENGINE;
1183 break;
1184 }
1185 }
1186 else
1187#endif
1188 if (VM_IS_NEM_ENABLED(pVM))
1189 {
1190 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1191 {
1192 *pfFFDone = true;
1193 rcStrict = VINF_EM_RESCHEDULE_EXEC_ENGINE;
1194 break;
1195 }
1196 }
1197
1198#ifdef VBOX_WITH_IEM_RECOMPILER
1199 fWasHalted = false;
1200#else
1201 RT_NOREF(fWasHalted);
1202#endif
1203 } /* The Inner Loop, recompiled execution mode version. */
1204
1205 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatREMTotal, a);
1206 return rcStrict;
1207}
1208
1209
1210/**
1211 * Decides whether to execute HM, NEM, IEM/interpreter or IEM/recompiler.
1212 *
1213 * @returns new EM state
1214 * @param pVM The cross context VM structure.
1215 * @param pVCpu The cross context virtual CPU structure.
1216 */
1217EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1218{
1219 /*
1220 * We stay in the wait for SIPI state unless explicitly told otherwise.
1221 */
1222 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1223 return EMSTATE_WAIT_SIPI;
1224
1225 /*
1226 * Can we use the default engine? IEM is the fallback.
1227 */
1228 if (!pVM->em.s.fIemExecutesAll)
1229 {
1230 switch (pVM->bMainExecutionEngine)
1231 {
1232#ifdef VBOX_WITH_HWVIRT
1233 case VM_EXEC_ENGINE_HW_VIRT:
1234 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1235 return EMSTATE_HM;
1236 break;
1237#endif
1238#ifdef VBOX_WITH_NATIVE_NEM
1239 case VM_EXEC_ENGINE_NATIVE_API:
1240 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1241 return EMSTATE_NEM;
1242 break;
1243#endif
1244 case VM_EXEC_ENGINE_IEM:
1245 break;
1246 default:
1247 AssertMsgFailed(("bMainExecutionEngine=%d\n", pVM->bMainExecutionEngine));
1248 break;
1249 }
1250 }
1251#ifdef VBOX_WITH_IEM_RECOMPILER
1252 return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
1253#else
1254 return EMSTATE_IEM;
1255#endif
1256}
1257
1258
1259/**
1260 * Executes all high priority post execution force actions.
1261 *
1262 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1263 * fatal error status code.
1264 *
1265 * @param pVM The cross context VM structure.
1266 * @param pVCpu The cross context virtual CPU structure.
1267 * @param rc The current strict VBox status code rc.
1268 */
1269VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1270{
1271 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1272
1273 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1274 PDMCritSectBothFF(pVM, pVCpu);
1275
1276#if !defined(VBOX_VMM_TARGET_ARMV8)
1277 /* Update CR3 (Nested Paging case for HM). */
1278 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1279 {
1280 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1281 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1282 if (RT_FAILURE(rc2))
1283 return rc2;
1284 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1285 }
1286#endif
1287
1288 /* IEM has pending work (typically memory write after INS instruction). */
1289 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1290 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1291
1292 /* IOM has pending work (committing an I/O or MMIO write). */
1293 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1294 {
1295 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1296 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1297 { /* half likely, or at least it's a line shorter. */ }
1298 else if (rc == VINF_SUCCESS)
1299 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1300 else
1301 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1302 }
1303
1304 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1305 {
1306 if ( rc > VINF_EM_NO_MEMORY
1307 && rc <= VINF_EM_LAST)
1308 rc = VINF_EM_NO_MEMORY;
1309 }
1310
1311 return rc;
1312}
1313
1314
1315#if !defined(VBOX_VMM_TARGET_ARMV8)
1316/**
1317 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1318 *
1319 * @returns VBox status code.
1320 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1321 * @param pVCpu The cross context virtual CPU structure.
1322 */
1323static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1324{
1325#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1326 /* Handle the "external interrupt" VM-exit intercept. */
1327 if ( CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1328 && !CPUMIsGuestVmxExitCtlsSet(&pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1329 {
1330 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1331 AssertMsg( rcStrict != VINF_VMX_VMEXIT /* VM-exit should have been converted to VINF_SUCCESS. */
1332 && rcStrict != VINF_NO_CHANGE
1333 && rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1334 return VBOXSTRICTRC_VAL(rcStrict);
1335 }
1336#else
1337 RT_NOREF(pVCpu);
1338#endif
1339 return VINF_NO_CHANGE;
1340}
1341
1342
1343/**
1344 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1345 *
1346 * @returns VBox status code.
1347 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1348 * @param pVCpu The cross context virtual CPU structure.
1349 */
1350static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1351{
1352#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1353 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1354 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1355 {
1356 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1357 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1358 if (RT_SUCCESS(rcStrict))
1359 {
1360 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1361 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1362 return VBOXSTRICTRC_VAL(rcStrict);
1363 }
1364
1365 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1366 return VINF_EM_TRIPLE_FAULT;
1367 }
1368#else
1369 NOREF(pVCpu);
1370#endif
1371 return VINF_NO_CHANGE;
1372}
1373
1374
1375/**
1376 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1377 *
1378 * @returns VBox status code.
1379 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1380 * @param pVCpu The cross context virtual CPU structure.
1381 */
1382static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1383{
1384#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1385 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1386 {
1387 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1388 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1389 if (RT_SUCCESS(rcStrict))
1390 {
1391 Assert(rcStrict != VINF_SVM_VMEXIT);
1392 return VBOXSTRICTRC_VAL(rcStrict);
1393 }
1394 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1395 return VINF_EM_TRIPLE_FAULT;
1396 }
1397#else
1398 NOREF(pVCpu);
1399#endif
1400 return VINF_NO_CHANGE;
1401}
1402#endif
1403
1404
1405/**
1406 * Executes all pending forced actions.
1407 *
1408 * Forced actions can cause execution delays and execution
1409 * rescheduling. The first we deal with using action priority, so
1410 * that for instance pending timers aren't scheduled and ran until
1411 * right before execution. The rescheduling we deal with using
1412 * return codes. The same goes for VM termination, only in that case
1413 * we exit everything.
1414 *
1415 * @returns VBox status code of equal or greater importance/severity than rc.
1416 * The most important ones are: VINF_EM_RESCHEDULE,
1417 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1418 *
1419 * @param pVM The cross context VM structure.
1420 * @param pVCpu The cross context virtual CPU structure.
1421 * @param rc The current rc.
1422 *
1423 */
1424int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1425{
1426 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1427#ifdef VBOX_STRICT
1428 int rcIrq = VINF_SUCCESS;
1429#endif
1430 int rc2;
1431#define UPDATE_RC() \
1432 do { \
1433 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1434 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1435 break; \
1436 if (!rc || rc2 < rc) \
1437 rc = rc2; \
1438 } while (0)
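 /* Worked example of the UPDATE_RC() priority merge (illustrative note, not in
    the original file): with rc already holding VINF_EM_RESCHEDULE and a force
    action returning rc2 = VINF_EM_SUSPEND, the numerically lower (more urgent)
    VINF_EM_SUSPEND wins; an rc2 of VINF_SUCCESS never overrides a pending
    status, and once rc holds a failure status nothing is merged over it. */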
1439 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1440
1441 /*
1442 * Post execution chunk first.
1443 */
1444 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1445 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1446 {
1447 /*
1448 * EMT Rendezvous (must be serviced before termination).
1449 */
1450 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1451 {
1452 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1453 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1454 UPDATE_RC();
1455 /** @todo HACK ALERT! The following test is to make sure EM+TM
1456 * thinks the VM is stopped/reset before the next VM state change
1457 * is made. We need a better solution for this, or at least make it
1458 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1459 * VINF_EM_SUSPEND). */
1460 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1461 {
1462 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1463 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1464 return rc;
1465 }
1466 }
1467
1468 /*
1469 * State change request (cleared by vmR3SetStateLocked).
1470 */
1471 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1472 {
1473 VMSTATE enmState = VMR3GetState(pVM);
1474 switch (enmState)
1475 {
1476 case VMSTATE_FATAL_ERROR:
1477 case VMSTATE_FATAL_ERROR_LS:
1478 case VMSTATE_GURU_MEDITATION:
1479 case VMSTATE_GURU_MEDITATION_LS:
1480 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1481 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1482 return VINF_EM_SUSPEND;
1483
1484 case VMSTATE_DESTROYING:
1485 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1486 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1487 return VINF_EM_TERMINATE;
1488
1489 default:
1490 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1491 }
1492 }
1493
1494 /*
1495 * Debugger Facility polling.
1496 */
1497 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1498 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1499 {
1500 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1501 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1502 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1503 * somewhere before we get here, I would think. */
1504 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1505 rc = rc2;
1506 else
1507 UPDATE_RC();
1508 }
1509
1510 /*
1511 * Postponed reset request.
1512 */
1513 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1514 {
1515 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1516 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1517 UPDATE_RC();
1518 }
1519
1520#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
1521 /*
1522 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1523 */
1524 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1525 {
1526 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1527 UPDATE_RC();
1528 if (rc == VINF_EM_NO_MEMORY)
1529 return rc;
1530 }
1531#endif
1532
1533 /* check that we got them all */
1534 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1535 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1536 }
1537
1538 /*
1539 * Normal priority then.
1540 * (Executed in no particular order.)
1541 */
1542 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1543 {
1544 /*
1545 * PDM Queues are pending.
1546 */
1547 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1548 PDMR3QueueFlushAll(pVM);
1549
1550 /*
1551 * PDM DMA transfers are pending.
1552 */
1553 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1554 PDMR3DmaRun(pVM);
1555
1556 /*
1557 * EMT Rendezvous (make sure they are handled before the requests).
1558 */
1559 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1560 {
1561 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1562 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1563 UPDATE_RC();
1564 /** @todo HACK ALERT! The following test is to make sure EM+TM
1565 * thinks the VM is stopped/reset before the next VM state change
1566 * is made. We need a better solution for this, or at least make it
1567 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1568 * VINF_EM_SUSPEND). */
1569 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1570 {
1571 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1572 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1573 return rc;
1574 }
1575 }
1576
1577 /*
1578 * Requests from other threads.
1579 */
1580 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1581 {
1582 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1583 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1584 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1585 {
1586 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1587 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1588 return rc2;
1589 }
1590 UPDATE_RC();
1591 /** @todo HACK ALERT! The following test is to make sure EM+TM
1592 * thinks the VM is stopped/reset before the next VM state change
1593 * is made. We need a better solution for this, or at least make it
1594 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1595 * VINF_EM_SUSPEND). */
1596 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1597 {
1598 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1599 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1600 return rc;
1601 }
1602 }
1603
1604 /* check that we got them all */
1605 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1606 }
1607
1608 /*
1609 * Normal priority then. (per-VCPU)
1610 * (Executed in no particular order.)
1611 */
1612 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1613 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1614 {
1615 /*
1616 * Requests from other threads.
1617 */
1618 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1619 {
1620 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1621 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1622 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1623 {
1624 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1625 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1626 return rc2;
1627 }
1628 UPDATE_RC();
1629 /** @todo HACK ALERT! The following test is to make sure EM+TM
1630 * thinks the VM is stopped/reset before the next VM state change
1631 * is made. We need a better solution for this, or at least make it
1632 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1633 * VINF_EM_SUSPEND). */
1634 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1635 {
1636 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1637 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1638 return rc;
1639 }
1640 }
1641
1642 /* check that we got them all */
1643 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1644 }
1645
1646 /*
1647 * High priority pre execution chunk last.
1648 * (Executed in ascending priority order.)
1649 */
1650 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1651 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1652 {
1653 /*
1654 * Timers before interrupts.
1655 */
1656 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1657 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1658 TMR3TimerQueuesDo(pVM);
1659
1660#if !defined(VBOX_VMM_TARGET_ARMV8)
1661 /*
1662 * Pick up asynchronously posted interrupts into the APIC.
1663 */
1664 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1665 PDMApicUpdatePendingInterrupts(pVCpu);
1666
1667 /*
1668 * The instruction following an emulated STI should *always* be executed!
1669 *
1670 * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
1671 * the eip is the same as the inhibited instr address. Before we
1672 * are able to execute this instruction in raw mode (iret to
1673 * guest code) an external interrupt might force a world switch
1674 * again, possibly allowing a guest interrupt to be dispatched
1675 * in the process. This could break the guest. Sounds very
1676 * unlikely, but such timing-sensitive problems are not as rare as
1677 * you might think.
1678 *
1679 * Note! This used to be a force action flag. Can probably ditch this code.
1680 */
1681 /** @todo r=bird: the clearing case will *never* be taken here as
1682 * CPUMIsInInterruptShadow already makes sure the RIPs match. */
1683 if ( CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1684 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1685 {
1686 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
1687 if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
1688 {
1689 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
1690 Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
1691 (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
1692 }
1693 else
1694 Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1695 }
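/* A concrete instance of such an interrupt shadow: a guest executing "sti; hlt"
   relies on the hlt being reached before any interrupt is delivered, because x86
   holds off maskable interrupts for exactly one instruction after sti (and
   likewise after mov ss / pop ss). */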
1696
1697 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1698 * delivered. */
1699
1700# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1701 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
1702 | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW))
1703 {
1704 /*
1705 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1706 * Takes priority over even SMI and INIT signals.
1707 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1708 */
1709 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1710 {
1711 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1712 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1713 UPDATE_RC();
1714 }
1715
1716 /*
1717 * APIC write emulation MAY have caused a VM-exit.
1718 * If it did cause a VM-exit, there's no point checking the other VMX non-root mode FFs here.
1719 */
1720 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
1721 {
1722 /*
1723 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1724 * Takes priority over "Traps on the previous instruction".
1725 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1726 */
1727 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1728 {
1729 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1730 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1731 UPDATE_RC();
1732 }
1733 /*
1734 * VMX Nested-guest preemption timer VM-exit.
1735 * Takes priority over NMI-window VM-exits.
1736 */
1737 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1738 {
1739 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1740 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1741 UPDATE_RC();
1742 }
1743 /*
1744 * VMX interrupt-window and NMI-window VM-exits.
1745 * Takes priority over non-maskable interrupts (NMIs) and external interrupts respectively.
1746 * If we are in an interrupt shadow or are already in the process of delivering
1747 * an event then these VM-exits cannot occur.
1748 *
1749 * Interrupt shadows block NMI-window VM-exits.
1750 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1751 *
1752 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1753 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1754 * See Intel spec. 6.7 "Nonmaskable Interrupt (NMI)".
1755 */
1756 else if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1757 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx)
1758 && !TRPMHasTrap(pVCpu))
1759 {
1760 /*
1761 * VMX NMI-window VM-exit.
1762 */
1763 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1764 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1765 {
1766 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1767 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1768 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1769 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1770 && rc2 != VINF_VMX_VMEXIT
1771 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1772 UPDATE_RC();
1773 }
1774 /*
1775 * VMX interrupt-window VM-exit.
1776 * This is a bit messy with the way the code below is currently structured,
1777 * but checking VMCPU_FF_INTERRUPT_NMI here (combined with CPUMAreInterruptsInhibitedByNmi
1778 * already checked at this point) should allow a pending NMI to be delivered prior to
1779 * causing an interrupt-window VM-exit.
1780 */
1781 /** @todo Restructure this later to happen after injecting NMI/causing NMI-exit, see
1782 * code in VMX R0 event delivery. */
1783 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1784 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1785 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1786 {
1787 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1788 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1789 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1790 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1791 && rc2 != VINF_VMX_VMEXIT
1792 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1793 UPDATE_RC();
1794 }
1795 }
1796 }
1797
1798 /*
1799 * Interrupt-window and NMI-window force flags might still be pending if we didn't actually cause
1800 * a VM-exit above. They will get cleared eventually when ANY nested-guest VM-exit occurs.
1801 * However, the force flags asserted below MUST have been cleared at this point.
1802 */
1803 Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
1804 }
1805# endif
1806
1807 /*
1808 * Guest event injection.
1809 */
1810 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1811 bool fWakeupPending = false;
1812 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
1813 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
1814 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1815 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1816 && (!rc || rc >= VINF_EM_RESCHEDULE_EXEC_ENGINE)
1817 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) /* Interrupt shadows block both NMIs and interrupts. */
1818 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1819 {
1820 if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
1821 {
1822 bool fInVmxNonRootMode;
1823 bool fInSvmHwvirtMode;
1824 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
1825 {
1826 fInVmxNonRootMode = false;
1827 fInSvmHwvirtMode = false;
1828 }
1829 else
1830 {
1831 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1832 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1833 }
1834
1835 /*
1836 * NMIs (take priority over external interrupts).
1837 */
1838 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1839 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1840 {
1841# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1842 if ( fInVmxNonRootMode
1843 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1844 {
1845 /* We MUST clear the NMI force-flag here, see @bugref{10318#c19}. */
1846 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1847 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1848 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1849 UPDATE_RC();
1850 }
1851 else
1852# endif
1853# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1854 if ( fInSvmHwvirtMode
1855 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1856 {
1857 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1858 AssertMsg( rc2 != VINF_SVM_VMEXIT
1859 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1860 UPDATE_RC();
1861 }
1862 else
1863# endif
1864 {
1865 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_NMI);
1866 if (rc2 == VINF_SUCCESS)
1867 {
1868 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1869 fWakeupPending = true;
1870# if 0 /* HMR3IsActive is not reliable (esp. after restore), just return VINF_EM_RESCHEDULE. */
1871 if (pVM->em.s.fIemExecutesAll)
1872 rc2 = VINF_EM_RESCHEDULE;
1873 else
1874 {
1875 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1876 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1877 : VINF_EM_RESCHEDULE_REM;
1878 }
1879# else
1880 rc2 = VINF_EM_RESCHEDULE;
1881# endif
1882 }
1883 UPDATE_RC();
1884 }
1885 }
1886# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1887 /** @todo NSTSVM: Handle this for SVM here too later, rather than only when an
1888 * interrupt is actually pending like we currently do. */
1889# endif
1890 /*
1891 * External interrupts.
1892 */
1893 else
1894 {
1895 /*
1896 * VMX: virtual interrupts take priority over physical interrupts.
1897 * SVM: physical interrupts take priority over virtual interrupts.
1898 */
1899 if ( fInVmxNonRootMode
1900 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1901 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1902 {
1903 /** @todo NSTVMX: virtual-interrupt delivery. */
1904 rc2 = VINF_SUCCESS;
1905 }
1906 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1907 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1908 {
1909 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1910 if (fInVmxNonRootMode)
1911 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1912 else if (fInSvmHwvirtMode)
1913 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1914 else
1915 rc2 = VINF_NO_CHANGE;
1916
1917 if (rc2 == VINF_NO_CHANGE)
1918 {
1919 bool fInjected = false;
1920 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1921 /** @todo this really isn't nice, should properly handle this */
1922 /* Note! This can still cause a VM-exit (on Intel). */
1923 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
1924 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
1925 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
1926 fWakeupPending = true;
1927 if ( pVM->em.s.fIemExecutesAll
1928 && ( rc2 == VINF_EM_RESCHEDULE_REM
1929 || rc2 == VINF_EM_RESCHEDULE_EXEC_ENGINE))
1930 rc2 = VINF_EM_RESCHEDULE;
1931# ifdef VBOX_STRICT
1932 if (fInjected)
1933 rcIrq = rc2;
1934# endif
1935 }
1936 UPDATE_RC();
1937 }
1938 else if ( fInSvmHwvirtMode
1939 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1940 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
1941 {
1942 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
1943 if (rc2 == VINF_NO_CHANGE)
1944 {
1945 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1946 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
1947 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
1948 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1949 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1950 rc2 = VINF_EM_RESCHEDULE;
1951# ifdef VBOX_STRICT
1952 rcIrq = rc2;
1953# endif
1954 }
1955 UPDATE_RC();
1956 }
1957 }
1958 } /* CPUMGetGuestGif */
1959 }
1960
1961#else /* VBOX_VMM_TARGET_ARMV8 */
1962 bool fWakeupPending = false;
1963
1964 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VTIMER_ACTIVATED))
1965 {
1966 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VTIMER_ACTIVATED);
1967
1968 fWakeupPending = true;
1969 rc2 = VINF_EM_RESCHEDULE;
1970 UPDATE_RC();
1971 }
1972#endif /* VBOX_VMM_TARGET_ARMV8 */
1973
1974#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
1975 /*
1976 * Allocate handy pages.
1977 */
1978 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1979 {
1980 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1981 UPDATE_RC();
1982 }
1983#endif
1984
1985 /*
1986 * Debugger Facility request.
1987 */
1988 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1989 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1990 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
1991 {
1992 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1993 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1994 UPDATE_RC();
1995 }
1996
1997 /*
1998 * EMT Rendezvous (must be serviced before termination).
1999 */
2000 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2001 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2002 {
2003 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2004 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2005 UPDATE_RC();
2006 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2007 * stopped/reset before the next VM state change is made. We need a better
2008 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2009 * && rc <= VINF_EM_SUSPEND). */
2010 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2011 {
2012 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2013 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2014 return rc;
2015 }
2016 }
2017
2018 /*
2019 * State change request (cleared by vmR3SetStateLocked).
2020 */
2021 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2022 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2023 {
2024 VMSTATE enmState = VMR3GetState(pVM);
2025 switch (enmState)
2026 {
2027 case VMSTATE_FATAL_ERROR:
2028 case VMSTATE_FATAL_ERROR_LS:
2029 case VMSTATE_GURU_MEDITATION:
2030 case VMSTATE_GURU_MEDITATION_LS:
2031 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2032 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2033 return VINF_EM_SUSPEND;
2034
2035 case VMSTATE_DESTROYING:
2036 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2037 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2038 return VINF_EM_TERMINATE;
2039
2040 default:
2041 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2042 }
2043 }
2044
2045 /*
2046 * Out of memory? Since most of our fellow high priority actions may cause us
2047 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2048 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2049 * than us since we can terminate without allocating more memory.
2050 */
2051 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2052 {
2053#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
2054 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2055#else
2056 rc2 = VINF_EM_NO_MEMORY;
2057#endif
2058 UPDATE_RC();
2059 if (rc == VINF_EM_NO_MEMORY)
2060 return rc;
2061 }
2062
2063 /*
2064 * If the virtual sync clock is still stopped, make TM restart it.
2065 */
2066 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2067 TMR3VirtualSyncFF(pVM, pVCpu);
2068
2069#ifdef DEBUG
2070 /*
2071 * Debug, pause the VM.
2072 */
2073 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2074 {
2075 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2076 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2077 return VINF_EM_SUSPEND;
2078 }
2079#endif
2080
2081 /* check that we got them all */
2082 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2083#if defined(VBOX_VMM_TARGET_ARMV8)
2084 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_DBGF));
2085#else
2086 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2087#endif
2088 }
2089
2090#undef UPDATE_RC
2091 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2092 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2093 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2094 return rc;
2095}
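/*
 * A note on the rc/rc2 + UPDATE_RC() pattern used throughout emR3ForcedActions
 * above: the macro (defined near the top of the function, outside this excerpt)
 * folds each handler's rc2 into the running rc so that the more urgent scheduling
 * request survives. The fragment below is only a rough sketch of that idea,
 * assuming lower VINF_EM_* values mean higher priority; it is not the literal
 * macro body.
 */
#if 0
    if (   rc2 != VINF_SUCCESS               /* a new request was raised, */
        && rc >= VINF_SUCCESS                /* no error is pending already, */
        && (rc == VINF_SUCCESS || rc2 < rc)) /* and it outranks the current one */
        rc = rc2;
#endif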
2096
2097
2098/**
2099 * Check if the preset execution time cap restricts guest execution scheduling.
2100 *
2101 * @returns true if allowed, false otherwise
2102 * @param pVM The cross context VM structure.
2103 * @param pVCpu The cross context virtual CPU structure.
2104 */
2105bool emR3IsExecutionAllowedSlow(PVM pVM, PVMCPU pVCpu)
2106{
2107 Assert(pVM->uCpuExecutionCap != 100);
2108 uint64_t cMsUserTime;
2109 uint64_t cMsKernelTime;
2110 if (RT_SUCCESS(RTThreadGetExecutionTimeMilli(&cMsKernelTime, &cMsUserTime)))
2111 {
2112 uint64_t const msTimeNow = RTTimeMilliTS();
2113 if (pVCpu->em.s.msTimeSliceStart + EM_TIME_SLICE < msTimeNow)
2114 {
2115 /* New time slice. */
2116 pVCpu->em.s.msTimeSliceStart = msTimeNow;
2117 pVCpu->em.s.cMsTimeSliceStartExec = cMsKernelTime + cMsUserTime;
2118 pVCpu->em.s.cMsTimeSliceExec = 0;
2119 }
2120 pVCpu->em.s.cMsTimeSliceExec = cMsKernelTime + cMsUserTime - pVCpu->em.s.cMsTimeSliceStartExec;
2121
2122 bool const fRet = pVCpu->em.s.cMsTimeSliceExec < (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100;
2123 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.msTimeSliceStart,
2124 pVCpu->em.s.cMsTimeSliceStartExec, pVCpu->em.s.cMsTimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2125 return fRet;
2126 }
2127 return true;
2128}
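/*
 * Worked example of the cap arithmetic above (illustrative numbers, assuming the
 * usual 100 ms EM_TIME_SLICE): with uCpuExecutionCap = 40 the per-slice budget is
 * (EM_TIME_SLICE * 40) / 100 = 40 ms of combined kernel+user thread time.  Once
 * cMsTimeSliceExec reaches that budget the function returns false and the EMT is
 * held back until msTimeSliceStart + EM_TIME_SLICE has passed and a new slice,
 * with a fresh budget, begins.
 */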
2129
2130
2131/**
2132 * Execute VM.
2133 *
2134 * This function is the main loop of the VM. The emulation thread
2135 * calls this function when the VM has been successfully constructed
2136 * and we're ready to execute the VM.
2137 *
2138 * Returning from this function means that the VM is turned off or
2139 * suspended (state already saved) and deconstruction is next in line.
2140 *
2141 * All interaction with other threads is done using forced actions
2142 * and signalling of the wait object.
2143 *
2144 * @returns VBox status code, informational status codes may indicate failure.
2145 * @param pVM The cross context VM structure.
2146 * @param pVCpu The cross context virtual CPU structure.
2147 */
2148VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2149{
2150 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2151 pVM,
2152 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2153 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2154 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2155 VM_ASSERT_EMT(pVM);
2156 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2157 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2158 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2159 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2160
2161 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2162 if (rc == 0)
2163 {
2164 /*
2165 * Start the virtual time.
2166 */
2167 TMR3NotifyResume(pVM, pVCpu);
2168
2169 /*
2170 * The Outer Main Loop.
2171 */
2172 bool fFFDone = false;
2173
2174 /* Reschedule right away to start in the right state. */
2175 rc = VINF_SUCCESS;
2176
2177 /* If resuming after a pause or a state load, restore the previous
2178 state or else we'll start executing code. Else, just reschedule. */
2179 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2180 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2181 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2182 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2183 else
2184 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2185 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2186
2187 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2188 for (;;)
2189 {
2190 /*
2191 * Before we can schedule anything (we're here because
2192 * scheduling is required) we must service any pending
2193 * forced actions to avoid any pending action causing
2194 * immediate rescheduling upon entering an inner loop
2195 *
2196 * Do forced actions.
2197 */
2198 if ( !fFFDone
2199 && RT_SUCCESS(rc)
2200 && rc != VINF_EM_TERMINATE
2201 && rc != VINF_EM_OFF
2202 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2203 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2204 {
2205 rc = emR3ForcedActions(pVM, pVCpu, rc);
2206 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2207 }
2208 else if (fFFDone)
2209 fFFDone = false;
2210
2211#if defined(VBOX_STRICT) && !defined(VBOX_VMM_TARGET_ARMV8)
2212 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
2213#endif
2214
2215 /*
2216 * Now what to do?
2217 */
2218 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2219 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2220 switch (rc)
2221 {
2222 /*
2223 * Keep doing what we're currently doing.
2224 */
2225 case VINF_SUCCESS:
2226 break;
2227
2228 /*
2229 * Reschedule - to main execution engine (HM, NEM, IEM/REM).
2230 */
2231 case VINF_EM_RESCHEDULE_EXEC_ENGINE:
2232 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2233 if (!pVM->em.s.fIemExecutesAll)
2234 {
2235#if !defined(VBOX_VMM_TARGET_ARMV8)
2236 if (VM_IS_HM_ENABLED(pVM))
2237 {
2238 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
2239 {
2240 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2241 pVCpu->em.s.enmState = EMSTATE_HM;
2242 break;
2243 }
2244 }
2245 else
2246#endif
2247 if (VM_IS_NEM_ENABLED(pVM) && NEMR3CanExecuteGuest(pVM, pVCpu))
2248 {
2249 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2250 pVCpu->em.s.enmState = EMSTATE_NEM;
2251 break;
2252 }
2253 }
2254
2255 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_RECOMPILER)\n", enmOldState, EMSTATE_RECOMPILER));
2256 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2257 break;
2258
2259 /*
2260 * Reschedule - to recompiled execution.
2261 */
2262 case VINF_EM_RESCHEDULE_REM:
2263 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2264 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_RECOMPILER)\n",
2265 enmOldState, EMSTATE_RECOMPILER));
2266 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2267 break;
2268
2269 /*
2270 * Resume.
2271 */
2272 case VINF_EM_RESUME:
2273 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2274 /* Don't reschedule in the halted or wait-for-SIPI cases. */
2275 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2276 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2277 {
2278 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2279 break;
2280 }
2281 /* fall through and get scheduled. */
2282 RT_FALL_THRU();
2283
2284 /*
2285 * Reschedule.
2286 */
2287 case VINF_EM_RESCHEDULE:
2288 {
2289 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2290 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2291 pVCpu->em.s.enmState = enmState;
2292 break;
2293 }
2294
2295 /*
2296 * Halted.
2297 */
2298 case VINF_EM_HALT:
2299 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2300 pVCpu->em.s.enmState = EMSTATE_HALTED;
2301 break;
2302
2303 /*
2304 * Switch to the wait for SIPI state (application processor only)
2305 */
2306 case VINF_EM_WAIT_SIPI:
2307 Assert(pVCpu->idCpu != 0);
2308 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2309 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2310 break;
2311
2312
2313 /*
2314 * Suspend.
2315 */
2316 case VINF_EM_SUSPEND:
2317 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2318 Assert(enmOldState != EMSTATE_SUSPENDED);
2319 pVCpu->em.s.enmPrevState = enmOldState;
2320 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2321 break;
2322
2323 /*
2324 * Reset.
2325 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2326 */
2327 case VINF_EM_RESET:
2328 {
2329 if (pVCpu->idCpu == 0)
2330 {
2331 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2332 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2333 pVCpu->em.s.enmState = enmState;
2334 }
2335 else
2336 {
2337 /* All other VCPUs go into the wait for SIPI state. */
2338 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2339 }
2340 break;
2341 }
2342
2343 /*
2344 * Power Off.
2345 */
2346 case VINF_EM_OFF:
2347 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2348 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2349 TMR3NotifySuspend(pVM, pVCpu);
2350 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2351 return rc;
2352
2353 /*
2354 * Terminate the VM.
2355 */
2356 case VINF_EM_TERMINATE:
2357 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2358 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2359 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2360 TMR3NotifySuspend(pVM, pVCpu);
2361 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2362 return rc;
2363
2364
2365 /*
2366 * Out of memory, suspend the VM and stuff.
2367 */
2368 case VINF_EM_NO_MEMORY:
2369 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2370 Assert(enmOldState != EMSTATE_SUSPENDED);
2371 pVCpu->em.s.enmPrevState = enmOldState;
2372 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2373 TMR3NotifySuspend(pVM, pVCpu);
2374 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2375
2376 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2377 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2378 if (rc != VINF_EM_SUSPEND)
2379 {
2380 if (RT_SUCCESS_NP(rc))
2381 {
2382 AssertLogRelMsgFailed(("%Rrc\n", rc));
2383 rc = VERR_EM_INTERNAL_ERROR;
2384 }
2385 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2386 }
2387 return rc;
2388
2389 /*
2390 * Guest debug events.
2391 */
2392 case VINF_EM_DBG_STEPPED:
2393 case VINF_EM_DBG_STOP:
2394 case VINF_EM_DBG_EVENT:
2395 case VINF_EM_DBG_BREAKPOINT:
2396 case VINF_EM_DBG_STEP:
2397 if (enmOldState == EMSTATE_HM)
2398 {
2399 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2400 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2401 }
2402 else if (enmOldState == EMSTATE_NEM)
2403 {
2404 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2405 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2406 }
2407 else if (enmOldState == EMSTATE_RECOMPILER)
2408 {
2409 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RECOMPILER));
2410 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RECOMPILER;
2411 }
2412 else
2413 {
2414#ifdef VBOX_VMM_TARGET_ARMV8
2415 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2416 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM; /** @todo No IEM yet and this gets selected if enmOldState == EMSTATE_HALTED. */
2417#else
2418 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2419 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2420#endif
2421 }
2422 break;
2423
2424 /*
2425 * Hypervisor debug events.
2426 */
2427 case VINF_EM_DBG_HYPER_STEPPED:
2428 case VINF_EM_DBG_HYPER_BREAKPOINT:
2429 case VINF_EM_DBG_HYPER_ASSERTION:
2430 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2431 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2432 break;
2433
2434 /*
2435 * Triple fault.
2436 */
2437 case VINF_EM_TRIPLE_FAULT:
2438 if (!pVM->em.s.fGuruOnTripleFault)
2439 {
2440 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2441 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2442 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2443 continue;
2444 }
2445 /* Else fall through and trigger a guru. */
2446 RT_FALL_THRU();
2447
2448 case VERR_VMM_RING0_ASSERTION:
2449 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2450 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2451 break;
2452
2453 /*
2454 * Any error code showing up here other than the ones we
2455 * know and process above is considered to be FATAL.
2456 *
2457 * Unknown warnings and informational status codes are also
2458 * included in this.
2459 */
2460 default:
2461 if (RT_SUCCESS_NP(rc))
2462 {
2463 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2464 rc = VERR_EM_INTERNAL_ERROR;
2465 }
2466 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2467 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2468 break;
2469 }
2470
2471 /*
2472 * Act on state transition.
2473 */
2474 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2475 if (enmOldState != enmNewState)
2476 {
2477 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2478
2479 /* Clear MWait flags and the unhalt FF. */
2480 if ( enmOldState == EMSTATE_HALTED
2481 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2482 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2483 && ( enmNewState == EMSTATE_HM
2484 || enmNewState == EMSTATE_NEM
2485 || enmNewState == EMSTATE_RECOMPILER
2486 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2487 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2488 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2489 || enmNewState == EMSTATE_DEBUG_GUEST_RECOMPILER) )
2490 {
2491 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2492 {
2493 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2494 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2495 }
2496 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2497 {
2498 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2499 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2500 }
2501 }
2502 }
2503 else
2504 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2505
2506 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2507 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2508
2509 /*
2510 * Act on the new state.
2511 */
2512 switch (enmNewState)
2513 {
2514 /*
2515 * Execute hardware accelerated raw.
2516 */
2517 case EMSTATE_HM:
2518#ifdef VBOX_WITH_HWVIRT
2519 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2520#else
2521 AssertReleaseFailedStmt(rc = VERR_EM_INTERNAL_ERROR); /* Should never get here. */
2522#endif
2523 break;
2524
2525 /*
2526 * Execute hardware accelerated via NEM (native/host hypervisor API).
2527 */
2528 case EMSTATE_NEM:
2529 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2530 break;
2531
2532 /*
2533 * Execute recompiled.
2534 */
2535 case EMSTATE_RECOMPILER:
2536 rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, enmOldState == EMSTATE_HALTED, &fFFDone));
2537 Log2(("EMR3ExecuteVM: emR3RecompilerExecute -> %Rrc\n", rc));
2538 break;
2539
2540 /*
2541 * Execute in the interpreter.
2542 */
2543 case EMSTATE_IEM:
2544 {
2545#if 0 /* For comparing HM and IEM (@bugref{10464}). */
2546 PCPUMCTX const pCtx = &pVCpu->cpum.GstCtx;
2547 PCX86FXSTATE const pX87 = &pCtx->XState.x87;
2548 Log11(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
2549 "eip=%08x esp=%08x ebp=%08x eflags=%08x\n"
2550 "cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x\n"
2551 "fsw=%04x fcw=%04x ftw=%02x top=%u%s%s%s%s%s%s%s%s%s\n"
2552 "st0=%.10Rhxs st1=%.10Rhxs st2=%.10Rhxs st3=%.10Rhxs\n"
2553 "st4=%.10Rhxs st5=%.10Rhxs st6=%.10Rhxs st7=%.10Rhxs\n",
2554 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
2555 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.u,
2556 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel, pCtx->fs.Sel, pCtx->gs.Sel,
2557 pX87->FSW, pX87->FCW, pX87->FTW, X86_FSW_TOP_GET(pX87->FSW),
2558 pX87->FSW & X86_FSW_ES ? " ES!" : "",
2559 pX87->FSW & X86_FSW_IE ? " IE" : "",
2560 pX87->FSW & X86_FSW_DE ? " DE" : "",
2561 pX87->FSW & X86_FSW_SF ? " SF" : "",
2562 pX87->FSW & X86_FSW_B ? " B!" : "",
2563 pX87->FSW & X86_FSW_C0 ? " C0" : "",
2564 pX87->FSW & X86_FSW_C1 ? " C1" : "",
2565 pX87->FSW & X86_FSW_C2 ? " C2" : "",
2566 pX87->FSW & X86_FSW_C3 ? " C3" : "",
2567 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(0)],
2568 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(1)],
2569 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(2)],
2570 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(3)],
2571 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(4)],
2572 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(5)],
2573 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(6)],
2574 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(7)]));
2575 DBGFR3DisasInstrCurrentLogInternal(pVCpu, NULL);
2576#endif
2577
2578 uint32_t cInstructions = 0;
2579#if 0 /* For testing purposes. */
2580 //STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2581 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2582 //STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2583 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_EXEC_ENGINE || rc == VINF_EM_RESCHEDULE_REM)
2584 rc = VINF_SUCCESS;
2585 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2586#endif
2587 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2588 if (pVM->em.s.fIemExecutesAll)
2589 {
2590 Assert(rc != VINF_EM_RESCHEDULE_REM);
2591 Assert(rc != VINF_EM_RESCHEDULE_EXEC_ENGINE);
2592#ifdef VBOX_HIGH_RES_TIMERS_HACK
2593 if (cInstructions < 2048)
2594 TMTimerPollVoid(pVM, pVCpu);
2595#endif
2596 }
2597 else if (rc == VINF_SUCCESS)
2598 rc = VINF_EM_RESCHEDULE; /* Need to check whether we can run in HM or NEM again. */
2599#ifndef VBOX_VMM_TARGET_ARMV8
2600 if (rc != VINF_EM_EMULATE_SPLIT_LOCK)
2601 { /* likely */ }
2602 else
2603 rc = VBOXSTRICTRC_TODO(emR3ExecuteSplitLockInstruction(pVM, pVCpu));
2604#endif
2605 fFFDone = false;
2606 break;
2607 }
2608
2609 /*
2610 * Application processor execution halted until SIPI.
2611 */
2612 case EMSTATE_WAIT_SIPI:
2613 /* no break */
2614 /*
2615 * hlt - execution halted until interrupt.
2616 */
2617 case EMSTATE_HALTED:
2618 {
2619 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2620 /* If HM (or someone else) stored a pending interrupt in
2621 TRPM, it must be dispatched ASAP without any halting.
2622 Anything pending in TRPM has been accepted and the CPU
2623 should already be in the right state to receive it. */
2624 if (TRPMHasTrap(pVCpu))
2625 rc = VINF_EM_RESCHEDULE;
2626#if !defined(VBOX_VMM_TARGET_ARMV8)
2627 /* MWAIT has a special extension where it's woken up when
2628 an interrupt is pending even when IF=0. */
2629 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2630 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2631 {
2632 rc = VMR3WaitHalted(pVM, pVCpu, 0 /*fFlags*/);
2633 if (rc == VINF_SUCCESS)
2634 {
2635 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2636 PDMApicUpdatePendingInterrupts(pVCpu);
2637
2638 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2639 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2640 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2641 {
2642 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2643 rc = VINF_EM_RESCHEDULE;
2644 }
2645
2646 }
2647 }
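 /* Note: EMMWAIT_FLAG_BREAKIRQIF0 corresponds to the guest having executed
    MWAIT with ECX bit 0 set, the "treat interrupts as break events" extension
    that wakes the processor on a pending interrupt even while RFLAGS.IF is
    clear, which is why the interrupt force-flags are checked explicitly above
    rather than relying on IF. */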
2648#endif
2649 else
2650 {
2651#if defined(VBOX_VMM_TARGET_ARMV8)
2652 const uint32_t fWaitHalted = 0; /* WFI/WFE always return when an interrupt happens. */
2653#else
2654 const uint32_t fWaitHalted = (CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF) ? 0 : VMWAITHALTED_F_IGNORE_IRQS;
2655#endif
2656 rc = VMR3WaitHalted(pVM, pVCpu, fWaitHalted);
2657 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2658 check VMCPU_FF_UPDATE_APIC here. */
2659 if ( rc == VINF_SUCCESS
2660#if defined(VBOX_VMM_TARGET_ARMV8)
2661 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_VTIMER_ACTIVATED
2662 | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_INTERRUPT_IRQ)
2663#else
2664 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT)
2665#endif
2666 )
2667 {
2668 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2669 rc = VINF_EM_RESCHEDULE;
2670 }
2671 }
2672
2673 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2674 break;
2675 }
2676
2677 /*
2678 * Suspended - return to VM.cpp.
2679 */
2680 case EMSTATE_SUSPENDED:
2681 TMR3NotifySuspend(pVM, pVCpu);
2682 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2683 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2684 return VINF_EM_SUSPEND;
2685
2686 /*
2687 * Debugging in the guest.
2688 */
2689 case EMSTATE_DEBUG_GUEST_RAW:
2690 case EMSTATE_DEBUG_GUEST_HM:
2691 case EMSTATE_DEBUG_GUEST_NEM:
2692 case EMSTATE_DEBUG_GUEST_IEM:
2693 case EMSTATE_DEBUG_GUEST_RECOMPILER:
2694 TMR3NotifySuspend(pVM, pVCpu);
2695 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2696 TMR3NotifyResume(pVM, pVCpu);
2697 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2698 break;
2699
2700 /*
2701 * Debugging in the hypervisor.
2702 */
2703 case EMSTATE_DEBUG_HYPER:
2704 {
2705 TMR3NotifySuspend(pVM, pVCpu);
2706 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2707
2708 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2709 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2710 if (rc != VINF_SUCCESS)
2711 {
2712 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2713 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2714 else
2715 {
2716 /* switch to guru meditation mode */
2717 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2718 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2719 VMMR3FatalDump(pVM, pVCpu, rc);
2720 }
2721 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2722 return rc;
2723 }
2724
2725 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2726 TMR3NotifyResume(pVM, pVCpu);
2727 break;
2728 }
2729
2730 /*
2731 * Guru meditation takes place in the debugger.
2732 */
2733 case EMSTATE_GURU_MEDITATION:
2734 {
2735 TMR3NotifySuspend(pVM, pVCpu);
2736 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2737 VMMR3FatalDump(pVM, pVCpu, rc);
2738 emR3Debug(pVM, pVCpu, rc);
2739 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2740 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2741 return rc;
2742 }
2743
2744 /*
2745 * The states we don't expect here.
2746 */
2747 case EMSTATE_NONE:
2748 case EMSTATE_RAW_OBSOLETE:
2749 case EMSTATE_IEM_THEN_REM_OBSOLETE:
2750 case EMSTATE_TERMINATING:
2751 default:
2752 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2753 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2754 TMR3NotifySuspend(pVM, pVCpu);
2755 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2756 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2757 return VERR_EM_INTERNAL_ERROR;
2758 }
2759 } /* The Outer Main Loop */
2760 }
2761 else
2762 {
2763 /*
2764 * Fatal error.
2765 */
2766 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2767 TMR3NotifySuspend(pVM, pVCpu);
2768 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2769 VMMR3FatalDump(pVM, pVCpu, rc);
2770 emR3Debug(pVM, pVCpu, rc);
2771 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2772 /** @todo change the VM state! */
2773 return rc;
2774 }
2775
2776 /* not reached */
2777}
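/*
 * For reference, a hedged sketch of how the per-VCPU emulation thread typically
 * drives EMR3ExecuteVM (the real caller lives in VMEmt.cpp; the surrounding
 * control flow and logging below are illustrative, not a verbatim copy):
 */
#if 0
    /* ...once the VM has been fully constructed and this EMT is told to run... */
    int rc = EMR3ExecuteVM(pVM, pVCpu); /* blocks until power-off/terminate/suspend */
    Log(("EMT%u: EMR3ExecuteVM returned %Rrc\n", pVCpu->idCpu, rc));
    /* On return the VM is powered off or suspended and teardown comes next. */
#endif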
2778