VirtualBox

source: vbox/trunk/src/VBox/VMM/EM.cpp@ 25816

Last change on this file since 25816 was 25816, checked in by vboxsync, 15 years ago

CPU hotplug: Merge the first patch. Resets a CPU state if a CPU was removed from the VM

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 97.3 KB
1/* $Id: EM.cpp 25816 2010-01-13 21:05:35Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_em EM - The Execution Monitor / Manager
23 *
24 * The Execution Monitor/Manager is responsible for running the VM, scheduling
25 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
26 * Interpreted), and keeping the CPU states in sync. The function
27 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
28 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
29 * emR3RemExecute).
30 *
31 * The interpreted execution is only used to avoid switching between
32 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
33 * The interpretation is thus implemented as part of EM.
34 *
35 * @see grp_em
36 */
37
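/*
 * Illustrative sketch (not part of the original source): the outer loop in
 * EMR3ExecuteVM() conceptually dispatches to the per-mode inner loops based
 * on the current EMSTATE, roughly like this:
 *
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW:   rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HWACC: rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_REM:   rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             default: break; // halted, suspended and debug states elided
 *         }
 *         // ...forced actions and rescheduling handled here...
 *     }
 *
 * The real loop lives in EMR3ExecuteVM() further down in this file.
 */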
38/*******************************************************************************
39* Header Files *
40*******************************************************************************/
41#define LOG_GROUP LOG_GROUP_EM
42#include <VBox/em.h>
43#include <VBox/vmm.h>
44#ifdef VBOX_WITH_VMI
45# include <VBox/parav.h>
46#endif
47#include <VBox/patm.h>
48#include <VBox/csam.h>
49#include <VBox/selm.h>
50#include <VBox/trpm.h>
51#include <VBox/iom.h>
52#include <VBox/dbgf.h>
53#include <VBox/pgm.h>
54#include <VBox/rem.h>
55#include <VBox/tm.h>
56#include <VBox/mm.h>
57#include <VBox/ssm.h>
58#include <VBox/pdmapi.h>
59#include <VBox/pdmcritsect.h>
60#include <VBox/pdmqueue.h>
61#include <VBox/hwaccm.h>
62#include <VBox/patm.h>
63#include "EMInternal.h"
64#include <VBox/vm.h>
65#include <VBox/cpumdis.h>
66#include <VBox/dis.h>
67#include <VBox/disopcode.h>
68#include <VBox/dbgf.h>
69
70#include <iprt/string.h>
71#include <iprt/stream.h>
72
73
74/*******************************************************************************
75* Defined Constants And Macros *
76*******************************************************************************/
77#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
78#define EM_NOTIFY_HWACCM
79#endif
80
81
82/*******************************************************************************
83* Internal Functions *
84*******************************************************************************/
85static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87static const char *emR3GetStateName(EMSTATE enmState);
88static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc);
89static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
90static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
91DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
92int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
93
94
95/**
96 * Initializes the EM.
97 *
98 * @returns VBox status code.
99 * @param pVM The VM to operate on.
100 */
101VMMR3DECL(int) EMR3Init(PVM pVM)
102{
103 LogFlow(("EMR3Init\n"));
104 /*
105 * Assert alignment and sizes.
106 */
107 AssertCompileMemberAlignment(VM, em.s, 32);
108 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
109 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
110 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
111
112 /*
113 * Init the structure.
114 */
115 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
116 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &pVM->fRawR3Enabled);
117 if (RT_FAILURE(rc))
118 pVM->fRawR3Enabled = true;
119 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &pVM->fRawR0Enabled);
120 if (RT_FAILURE(rc))
121 pVM->fRawR0Enabled = true;
122 Log(("EMR3Init: fRawR3Enabled=%d fRawR0Enabled=%d\n", pVM->fRawR3Enabled, pVM->fRawR0Enabled));
123
124 /*
125 * Initialize the REM critical section.
126 */
127 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
128 AssertRCReturn(rc, rc);
129
130 /*
131 * Saved state.
132 */
133 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
134 NULL, NULL, NULL,
135 NULL, emR3Save, NULL,
136 NULL, emR3Load, NULL);
137 if (RT_FAILURE(rc))
138 return rc;
139
140 for (VMCPUID i = 0; i < pVM->cCpus; i++)
141 {
142 PVMCPU pVCpu = &pVM->aCpus[i];
143
144 pVCpu->em.s.offVMCPU = RT_OFFSETOF(VMCPU, em.s);
145
146 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
147 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
148 pVCpu->em.s.fForceRAW = false;
149
150 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
151 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
152 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
153
154# define EM_REG_COUNTER(a, b, c) \
155 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
156 AssertRC(rc);
157
158# define EM_REG_COUNTER_USED(a, b, c) \
159 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
160 AssertRC(rc);
161
162# define EM_REG_PROFILE(a, b, c) \
163 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
164 AssertRC(rc);
165
166# define EM_REG_PROFILE_ADV(a, b, c) \
167 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
168 AssertRC(rc);
169
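/* Usage sketch (illustrative): a registration such as
 *     EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "...");
 * expands to a STAMR3RegisterF() call that substitutes the per-VCPU index
 * 'i' for the %d in the sample name, giving each VCPU its own STAM node.
 */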
170 /*
171 * Statistics.
172 */
173#ifdef VBOX_WITH_STATISTICS
174 PEMSTATS pStats;
175 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
176 if (RT_FAILURE(rc))
177 return rc;
178
179 pVCpu->em.s.pStatsR3 = pStats;
180 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
181 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
182
183 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
184 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
185
186 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
187 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
188
189 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
190 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
191 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
192 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
193 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
194 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
195 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
196 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
197 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
198 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
199 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
200 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
201 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
202 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
203 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
204 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
205 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
206 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
207 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
208 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
209 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
210 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
211 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
212 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
213 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
214 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
215 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
216 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
217 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
218 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
219 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
220 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
221 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
222 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
223 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
224 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
225 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
226 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
227 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
228 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
260 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
261
262 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
263 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
264
265 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
302 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
303 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
304 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
305 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
314 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
315
316 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
317 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
318 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
344
345 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
346 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
347 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
348 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
349
350 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
351 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
352 EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
353 EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
354 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "Number of restarted i/o instructions.");
355 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
356 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
357 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
358 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
359 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
360 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
361 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
362 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
363 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
364 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
365 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
366 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
367 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
368 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
369 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
370 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
371 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
372 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
373 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
374 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
375 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
376 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
377
378 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
379 pVCpu->em.s.pCliStatTree = 0;
380
381 /* these should be considered for release statistics. */
382 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
383 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
384 EM_REG_COUNTER(&pVCpu->em.s.StatMiscEmu, "/PROF/CPU%d/EM/Emulation/Misc", "Profiling of emR3RawExecuteInstruction.");
385 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccEntry, "/PROF/CPU%d/EM/HwAccEnter", "Profiling Hardware Accelerated Mode entry overhead.");
386 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccExec, "/PROF/CPU%d/EM/HwAccExec", "Profiling Hardware Accelerated Mode execution.");
387 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
388 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
389 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
390 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
391 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
392 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
393
394#endif /* VBOX_WITH_STATISTICS */
395
396 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
397 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
398 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
399 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
400
401 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
402 }
403
404 return VINF_SUCCESS;
405}
406
407
408/**
409 * Initializes the per-VCPU EM.
410 *
411 * @returns VBox status code.
412 * @param pVM The VM to operate on.
413 */
414VMMR3DECL(int) EMR3InitCPU(PVM pVM)
415{
416 LogFlow(("EMR3InitCPU\n"));
417 return VINF_SUCCESS;
418}
419
420
421/**
422 * Applies relocations to data and code managed by this
423 * component. This function will be called at init and
424 * whenever the VMM needs to relocate itself inside the GC.
425 *
426 * @param pVM The VM.
427 */
428VMMR3DECL(void) EMR3Relocate(PVM pVM)
429{
430 LogFlow(("EMR3Relocate\n"));
431 for (VMCPUID i = 0; i < pVM->cCpus; i++)
432 {
433 PVMCPU pVCpu = &pVM->aCpus[i];
434 if (pVCpu->em.s.pStatsR3)
435 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
436 }
437}
438
439VMMR3DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
440{
441 pVCpu->em.s.fForceRAW = false;
442
443 /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
444 out of the HALTED state here so that enmPrevState doesn't end up as
445 HALTED when EMR3Execute returns. */
446 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
447 {
448 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
449 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
450 }
451}
452
453/**
454 * Reset notification.
455 *
456 * @param pVM The VM handle.
457 */
458VMMR3DECL(void) EMR3Reset(PVM pVM)
459{
460 Log(("EMR3Reset: \n"));
461 for (VMCPUID i = 0; i < pVM->cCpus; i++)
462 {
463 PVMCPU pVCpu = &pVM->aCpus[i];
464 EMR3ResetCpu(pVCpu);
465 }
466}
467
468
469/**
470 * Terminates the EM.
471 *
472 * Termination means cleaning up and freeing all resources;
473 * the VM itself is at this point powered off or suspended.
474 *
475 * @returns VBox status code.
476 * @param pVM The VM to operate on.
477 */
478VMMR3DECL(int) EMR3Term(PVM pVM)
479{
480 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
481
482 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
483 return VINF_SUCCESS;
484}
485
486/**
487 * Terminates the per-VCPU EM.
488 *
489 * Termination means cleaning up and freeing all resources;
490 * the VM itself is at this point powered off or suspended.
491 *
492 * @returns VBox status code.
493 * @param pVM The VM to operate on.
494 */
495VMMR3DECL(int) EMR3TermCPU(PVM pVM)
496{
497 return 0;
498}
499
500/**
501 * Execute state save operation.
502 *
503 * @returns VBox status code.
504 * @param pVM VM Handle.
505 * @param pSSM SSM operation handle.
506 */
507static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
508{
509 for (VMCPUID i = 0; i < pVM->cCpus; i++)
510 {
511 PVMCPU pVCpu = &pVM->aCpus[i];
512
513 int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
514 AssertRCReturn(rc, rc);
515
516 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
517 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
518 rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
519 AssertRCReturn(rc, rc);
520 }
521 return VINF_SUCCESS;
522}
523
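/* Per-VCPU saved-state layout (sketch, derived from the code above): one
 * bool (fForceRAW) followed by one uint32_t (enmPrevState); emR3Load()
 * below consumes the fields in the same order.
 */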
524
525/**
526 * Execute state load operation.
527 *
528 * @returns VBox status code.
529 * @param pVM VM Handle.
530 * @param pSSM SSM operation handle.
531 * @param uVersion Data layout version.
532 * @param uPass The data pass.
533 */
534static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
535{
536 /*
537 * Validate version.
538 */
539 if ( uVersion != EM_SAVED_STATE_VERSION
540 && uVersion != EM_SAVED_STATE_VERSION_PRE_SMP)
541 {
542 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
543 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
544 }
545 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
546
547 /*
548 * Load the saved state.
549 */
550 for (VMCPUID i = 0; i < pVM->cCpus; i++)
551 {
552 PVMCPU pVCpu = &pVM->aCpus[i];
553
554 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
555 if (RT_FAILURE(rc))
556 pVCpu->em.s.fForceRAW = false;
557 AssertRCReturn(rc, rc);
558
559 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
560 {
561 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
562 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
563 AssertRCReturn(rc, rc);
564 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
565
566 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
567 }
568 Assert(!pVCpu->em.s.pCliStatTree);
569 }
570 return VINF_SUCCESS;
571}
572
573
574/**
575 * Raise a fatal error.
576 *
577 * Safely terminate the VM with full state report and stuff. This function
578 * will naturally never return.
579 *
580 * @param pVCpu VMCPU handle.
581 * @param rc VBox status code.
582 */
583VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
584{
585 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
586 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
587 AssertReleaseMsgFailed(("longjmp returned!\n"));
588}
589
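/* Sketch of the matching catch site (illustrative, not verbatim): the
 * executor performs a setjmp() on the same buffer before entering the
 * inner loops, so the longjmp() above unwinds straight back to it:
 *
 *     int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
 *     if (rc)
 *     {
 *         // arrived here via EMR3FatalError(); rc is the fatal status code
 *     }
 *
 * See EMR3ExecuteVM() for the actual use of the FatalLongJump buffer.
 */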
590
591/**
592 * Gets the EM state name.
593 *
594 * @returns Pointer to the read-only state name.
595 * @param enmState The state.
596 */
597static const char *emR3GetStateName(EMSTATE enmState)
598{
599 switch (enmState)
600 {
601 case EMSTATE_NONE: return "EMSTATE_NONE";
602 case EMSTATE_RAW: return "EMSTATE_RAW";
603 case EMSTATE_HWACC: return "EMSTATE_HWACC";
604 case EMSTATE_REM: return "EMSTATE_REM";
605 case EMSTATE_PARAV: return "EMSTATE_PARAV";
606 case EMSTATE_HALTED: return "EMSTATE_HALTED";
607 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
608 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
609 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
610 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
611 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
612 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
613 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
614 default: return "Unknown!";
615 }
616}
617
618
619#ifdef VBOX_WITH_STATISTICS
620/**
621 * Just a braindead function to keep track of cli addresses.
622 * @param pVM VM handle.
623 * @param pVCpu VMCPU handle.
624 * @param GCPtrInstr The EIP of the cli instruction.
625 */
626static void emR3RecordCli(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrInstr)
627{
628 PCLISTAT pRec;
629
630 pRec = (PCLISTAT)RTAvlPVGet(&pVCpu->em.s.pCliStatTree, (AVLPVKEY)GCPtrInstr);
631 if (!pRec)
632 {
633 /* New cli instruction; insert into the tree. */
634 pRec = (PCLISTAT)MMR3HeapAllocZ(pVM, MM_TAG_EM, sizeof(*pRec));
635 Assert(pRec);
636 if (!pRec)
637 return;
638 pRec->Core.Key = (AVLPVKEY)GCPtrInstr;
639
640 char szCliStatName[32];
641 RTStrPrintf(szCliStatName, sizeof(szCliStatName), "/EM/Cli/0x%RGv", GCPtrInstr);
642 STAM_REG(pVM, &pRec->Counter, STAMTYPE_COUNTER, szCliStatName, STAMUNIT_OCCURENCES, "Number of times cli was executed.");
643
644 bool fRc = RTAvlPVInsert(&pVCpu->em.s.pCliStatTree, &pRec->Core);
645 Assert(fRc); NOREF(fRc);
646 }
647 STAM_COUNTER_INC(&pRec->Counter);
648 STAM_COUNTER_INC(&pVCpu->em.s.StatTotalClis);
649}
650#endif /* VBOX_WITH_STATISTICS */
651
652
653/**
654 * Debug loop.
655 *
656 * @returns VBox status code for EM.
657 * @param pVM VM handle.
658 * @param pVCpu VMCPU handle.
659 * @param rc Current EM VBox status code.
660 */
661static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
662{
663 for (;;)
664 {
665 Log(("emR3Debug: rc=%Rrc\n", rc));
666 const int rcLast = rc;
667
668 /*
669 * Debug related RC.
670 */
671 switch (rc)
672 {
673 /*
674 * Single step an instruction.
675 */
676 case VINF_EM_DBG_STEP:
677 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
678 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
679 || pVCpu->em.s.fForceRAW /* paranoia */)
680 rc = emR3RawStep(pVM, pVCpu);
681 else
682 {
683 Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
684 rc = emR3RemStep(pVM, pVCpu);
685 }
686 break;
687
688 /*
689 * Simple events: stepped, breakpoint, stop/assertion.
690 */
691 case VINF_EM_DBG_STEPPED:
692 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
693 break;
694
695 case VINF_EM_DBG_BREAKPOINT:
696 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
697 break;
698
699 case VINF_EM_DBG_STOP:
700 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
701 break;
702
703 case VINF_EM_DBG_HYPER_STEPPED:
704 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
705 break;
706
707 case VINF_EM_DBG_HYPER_BREAKPOINT:
708 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
709 break;
710
711 case VINF_EM_DBG_HYPER_ASSERTION:
712 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
713 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
714 break;
715
716 /*
717 * Guru meditation.
718 */
719 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
720 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
721 break;
722 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
723 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
724 break;
725
726 default: /** @todo don't use default for guru, but make special error codes! */
727 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
728 break;
729 }
730
731 /*
732 * Process the result.
733 */
734 do
735 {
736 switch (rc)
737 {
738 /*
739 * Continue the debugging loop.
740 */
741 case VINF_EM_DBG_STEP:
742 case VINF_EM_DBG_STOP:
743 case VINF_EM_DBG_STEPPED:
744 case VINF_EM_DBG_BREAKPOINT:
745 case VINF_EM_DBG_HYPER_STEPPED:
746 case VINF_EM_DBG_HYPER_BREAKPOINT:
747 case VINF_EM_DBG_HYPER_ASSERTION:
748 break;
749
750 /*
751 * Resuming execution (in some form) has to be done here if we got
752 * a hypervisor debug event.
753 */
754 case VINF_SUCCESS:
755 case VINF_EM_RESUME:
756 case VINF_EM_SUSPEND:
757 case VINF_EM_RESCHEDULE:
758 case VINF_EM_RESCHEDULE_RAW:
759 case VINF_EM_RESCHEDULE_REM:
760 case VINF_EM_HALT:
761 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
762 {
763 rc = emR3RawResumeHyper(pVM, pVCpu);
764 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
765 continue;
766 }
767 if (rc == VINF_SUCCESS)
768 rc = VINF_EM_RESCHEDULE;
769 return rc;
770
771 /*
772 * The debugger isn't attached.
773 * We'll simply turn the thing off since that's the easiest thing to do.
774 */
775 case VERR_DBGF_NOT_ATTACHED:
776 switch (rcLast)
777 {
778 case VINF_EM_DBG_HYPER_STEPPED:
779 case VINF_EM_DBG_HYPER_BREAKPOINT:
780 case VINF_EM_DBG_HYPER_ASSERTION:
781 case VERR_TRPM_PANIC:
782 case VERR_TRPM_DONT_PANIC:
783 case VERR_VMM_RING0_ASSERTION:
784 case VERR_VMM_HYPER_CR3_MISMATCH:
785 case VERR_VMM_RING3_CALL_DISABLED:
786 return rcLast;
787 }
788 return VINF_EM_OFF;
789
790 /*
791 * Status codes terminating the VM in one or another sense.
792 */
793 case VINF_EM_TERMINATE:
794 case VINF_EM_OFF:
795 case VINF_EM_RESET:
796 case VINF_EM_NO_MEMORY:
797 case VINF_EM_RAW_STALE_SELECTOR:
798 case VINF_EM_RAW_IRET_TRAP:
799 case VERR_TRPM_PANIC:
800 case VERR_TRPM_DONT_PANIC:
801 case VERR_VMM_RING0_ASSERTION:
802 case VERR_VMM_HYPER_CR3_MISMATCH:
803 case VERR_VMM_RING3_CALL_DISABLED:
804 case VERR_INTERNAL_ERROR:
805 case VERR_INTERNAL_ERROR_2:
806 case VERR_INTERNAL_ERROR_3:
807 case VERR_INTERNAL_ERROR_4:
808 case VERR_INTERNAL_ERROR_5:
809 case VERR_IPE_UNEXPECTED_STATUS:
810 case VERR_IPE_UNEXPECTED_INFO_STATUS:
811 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
812 return rc;
813
814 /*
815 * The rest is unexpected, and will keep us here.
816 */
817 default:
818 AssertMsgFailed(("Unexpected rc %Rrc!\n", rc));
819 break;
820 }
821 } while (false);
822 } /* debug for ever */
823}
824
825/**
826 * Steps recompiled code.
827 *
828 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
829 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
830 *
831 * @param pVM VM handle.
832 * @param pVCpu VMCPU handle.
833 */
834static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
835{
836 LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
837
838 EMRemLock(pVM);
839
840 /*
841 * Switch to REM, step instruction, switch back.
842 */
843 int rc = REMR3State(pVM, pVCpu);
844 if (RT_SUCCESS(rc))
845 {
846 rc = REMR3Step(pVM, pVCpu);
847 REMR3StateBack(pVM, pVCpu);
848 }
849 EMRemUnlock(pVM);
850
851 LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
852 return rc;
853}
854
855
856/**
857 * Executes recompiled code.
858 *
859 * This function contains the recompiler version of the inner
860 * execution loop (the outer loop being in EMR3ExecuteVM()).
861 *
862 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
863 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
864 *
865 * @param pVM VM handle.
866 * @param pVCpu VMCPU handle.
867 * @param pfFFDone Where to store an indicator telling whether or not
868 * FFs were done before returning.
869 *
870 */
871static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
872{
873#ifdef LOG_ENABLED
874 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
875 uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
876
877 if (pCtx->eflags.Bits.u1VM)
878 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF));
879 else
880 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0));
881#endif
882 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
883
884#if defined(VBOX_STRICT) && defined(DEBUG_bird)
885 AssertMsg( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
886 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo #1419 - get flat address. */
887 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
888#endif
889
890 /* Big lock, but you are not supposed to own any lock when coming in here. */
891 EMRemLock(pVM);
892
893 /*
894 * Spin till we get a forced action which returns anything but VINF_SUCCESS
895 * or the REM suggests raw-mode execution.
896 */
897 *pfFFDone = false;
898 bool fInREMState = false;
899 int rc = VINF_SUCCESS;
900
901 /* Flush the recompiler TLB if the VCPU has changed. */
902 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
903 {
904 REMFlushTBs(pVM);
905 /* Also sync the entire state. */
906 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
907 }
908 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
909
910 for (;;)
911 {
912 /*
913 * Update REM state if not already in sync.
914 */
915 if (!fInREMState)
916 {
917 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
918 rc = REMR3State(pVM, pVCpu);
919 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
920 if (RT_FAILURE(rc))
921 break;
922 fInREMState = true;
923
924 /*
925 * We might have missed the raising of VMREQ, TIMER and some other
926 * important FFs while we were busy switching the state. So, check again.
927 */
928 if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET)
929 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
930 {
931 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
932 goto l_REMDoForcedActions;
933 }
934 }
935
936
937 /*
938 * Execute REM.
939 */
940 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
941 rc = REMR3Run(pVM, pVCpu);
942 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
943
944
945 /*
946 * Deal with high priority post execution FFs before doing anything else.
947 */
948 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
949 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
950 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
951
952 /*
953 * Process the returned status code.
954 * (Try to keep this short! Call functions!)
955 */
956 if (rc != VINF_SUCCESS)
957 {
958 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
959 break;
960 if (rc != VINF_REM_INTERRUPED_FF)
961 {
962 /*
963 * Anything which is not known to us means an internal error
964 * and the termination of the VM!
965 */
966 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
967 break;
968 }
969 }
970
971
972 /*
973 * Check and execute forced actions.
974 * Sync back the VM state before calling any of these.
975 */
976#ifdef VBOX_HIGH_RES_TIMERS_HACK
977 TMTimerPollVoid(pVM, pVCpu);
978#endif
979 AssertCompile((VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)) & VMCPU_FF_TIMER);
980 if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
981 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
982 {
983l_REMDoForcedActions:
984 if (fInREMState)
985 {
986 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, d);
987 REMR3StateBack(pVM, pVCpu);
988 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, d);
989 fInREMState = false;
990 }
991 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
992 rc = emR3ForcedActions(pVM, pVCpu, rc);
993 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
994 if ( rc != VINF_SUCCESS
995 && rc != VINF_EM_RESCHEDULE_REM)
996 {
997 *pfFFDone = true;
998 break;
999 }
1000 }
1001
1002 } /* The Inner Loop, recompiled execution mode version. */
1003
1004
1005 /*
1006 * Returning. Sync back the VM state if required.
1007 */
1008 if (fInREMState)
1009 {
1010 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, e);
1011 REMR3StateBack(pVM, pVCpu);
1012 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, e);
1013 }
1014 EMRemUnlock(pVM);
1015
1016 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1017 return rc;
1018}
1019
1020
1021#ifdef DEBUG
1022
1023int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1024{
1025 EMSTATE enmOldState = pVCpu->em.s.enmState;
1026
1027 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1028
1029 Log(("Single step BEGIN:\n"));
1030 for (uint32_t i = 0; i < cIterations; i++)
1031 {
1032 DBGFR3PrgStep(pVCpu);
1033 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1034 emR3RemStep(pVM, pVCpu);
1035 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1036 break;
1037 }
1038 Log(("Single step END:\n"));
1039 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1040 pVCpu->em.s.enmState = enmOldState;
1041 return VINF_EM_RESCHEDULE;
1042}
1043
1044#endif /* DEBUG */
1045
1046
1047/**
1048 * Decides whether to execute RAW, HWACC or REM.
1049 *
1050 * @returns new EM state
1051 * @param pVM The VM.
1052 * @param pVCpu The VMCPU handle.
1053 * @param pCtx The CPU context.
1054 */
1055EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1056{
1057 /*
1058 * When forcing raw-mode execution, things are simple.
1059 */
1060 if (pVCpu->em.s.fForceRAW)
1061 return EMSTATE_RAW;
1062
1063 /*
1064 * We stay in the wait for SIPI state unless explicitly told otherwise.
1065 */
1066 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1067 return EMSTATE_WAIT_SIPI;
1068
1069 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1070 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1071 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1072
1073 X86EFLAGS EFlags = pCtx->eflags;
1074 if (HWACCMIsEnabled(pVM))
1075 {
1076 /* Hardware accelerated raw-mode:
1077 *
1078 * Typically only 32-bit protected-mode code, with paging enabled, is allowed here.
1079 */
1080 if (HWACCMR3CanExecuteGuest(pVM, pCtx) == true)
1081 return EMSTATE_HWACC;
1082
1083 /* Note: Raw mode and hw accelerated mode are incompatible. The latter turns
1084 * off monitoring features essential for raw mode! */
1085 return EMSTATE_REM;
1086 }
1087
1088 /*
1089 * Standard raw-mode:
1090 *
1091 * Here we only support 16 & 32-bit protected mode ring-3 code that has no IO privileges,
1092 * or 32-bit protected mode ring-0 code.
1093 *
1094 * The tests are ordered by the likelihood of being true during normal execution.
1095 */
1096 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1097 {
1098 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1099 return EMSTATE_REM;
1100 }
1101
1102#ifndef VBOX_RAW_V86
1103 if (EFlags.u32 & X86_EFL_VM) {
1104 Log2(("raw mode refused: VM_MASK\n"));
1105 return EMSTATE_REM;
1106 }
1107#endif
1108
1109 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1110 uint32_t u32CR0 = pCtx->cr0;
1111 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1112 {
1113 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1114 return EMSTATE_REM;
1115 }
1116
1117 if (pCtx->cr4 & X86_CR4_PAE)
1118 {
1119 uint32_t u32Dummy, u32Features;
1120
1121 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1122 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1123 return EMSTATE_REM;
1124 }
1125
1126 unsigned uSS = pCtx->ss;
1127 if ( pCtx->eflags.Bits.u1VM
1128 || (uSS & X86_SEL_RPL) == 3)
1129 {
1130 if (!EMIsRawRing3Enabled(pVM))
1131 return EMSTATE_REM;
1132
1133 if (!(EFlags.u32 & X86_EFL_IF))
1134 {
1135 Log2(("raw mode refused: IF (RawR3)\n"));
1136 return EMSTATE_REM;
1137 }
1138
1139 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1140 {
1141 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1142 return EMSTATE_REM;
1143 }
1144 }
1145 else
1146 {
1147 if (!EMIsRawRing0Enabled(pVM))
1148 return EMSTATE_REM;
1149
1150 /* Only ring 0 supervisor code. */
1151 if ((uSS & X86_SEL_RPL) != 0)
1152 {
1153 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1154 return EMSTATE_REM;
1155 }
1156
1157 // Let's start with pure 32 bits ring 0 code first
1158 /** @todo What's pure 32-bit mode? flat? */
1159 if ( !(pCtx->ssHid.Attr.n.u1DefBig)
1160 || !(pCtx->csHid.Attr.n.u1DefBig))
1161 {
1162 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1163 return EMSTATE_REM;
1164 }
1165
1166 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1167 if (!(u32CR0 & X86_CR0_WP))
1168 {
1169 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1170 return EMSTATE_REM;
1171 }
1172
1173 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1174 {
1175 Log2(("raw r0 mode forced: patch code\n"));
1176 return EMSTATE_RAW;
1177 }
1178
1179#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1180 if (!(EFlags.u32 & X86_EFL_IF))
1181 {
1182 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1183 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1184 return EMSTATE_REM;
1185 }
1186#endif
1187
1188 /** @todo still necessary??? */
1189 if (EFlags.Bits.u2IOPL != 0)
1190 {
1191 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1192 return EMSTATE_REM;
1193 }
1194 }
1195
1196 Assert(PGMPhysIsA20Enabled(pVCpu));
1197 return EMSTATE_RAW;
1198}
1199
1200
1201/**
1202 * Executes all high priority post execution force actions.
1203 *
1204 * @returns rc or a fatal status code.
1205 *
1206 * @param pVM VM handle.
1207 * @param pVCpu VMCPU handle.
1208 * @param rc The current rc.
1209 */
1210int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1211{
1212 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1213 PDMCritSectFF(pVCpu);
1214
1215 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1216 CSAMR3DoPendingAction(pVM, pVCpu);
1217
1218 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1219 {
1220 if ( rc > VINF_EM_NO_MEMORY
1221 && rc <= VINF_EM_LAST)
1222 rc = VINF_EM_NO_MEMORY;
1223 }
1224
1225 return rc;
1226}
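
/*
 * Illustrative call pattern (editor's sketch, not part of the original
 * source): the inner execution loops invoke this right after returning from
 * guest execution, merging the high priority post FFs into the status code
 * they are carrying:
 *
 *     if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
 *         ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
 *         rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
 */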
1227
1228
1229/**
1230 * Executes all pending forced actions.
1231 *
 1232 * Forced actions can cause execution delays and execution
 1233 * rescheduling. The former we deal with using action priority, so
 1234 * that for instance pending timers aren't scheduled and run until
 1235 * right before execution. The rescheduling we deal with using
 1236 * return codes; the same goes for VM termination, only in that case
 1237 * we exit everything.
1238 *
1239 * @returns VBox status code of equal or greater importance/severity than rc.
1240 * The most important ones are: VINF_EM_RESCHEDULE,
1241 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1242 *
1243 * @param pVM VM handle.
1244 * @param pVCpu VMCPU handle.
1245 * @param rc The current rc.
1246 *
1247 */
1248int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1249{
1250 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1251#ifdef VBOX_STRICT
1252 int rcIrq = VINF_SUCCESS;
1253#endif
1254 int rc2;
1255#define UPDATE_RC() \
1256 do { \
1257 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1258 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1259 break; \
1260 if (!rc || rc2 < rc) \
1261 rc = rc2; \
1262 } while (0)
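    /* UPDATE_RC() merges rc2 into rc, keeping the more important status:
       within the VINF_EM_* range a numerically smaller code wins, and an rc
       that already signals an error is never overwritten.  Illustrative
       sequence (editor's sketch, assuming the usual VINF_EM_* ordering):
           rc2 = VINF_EM_RESCHEDULE; UPDATE_RC();  // rc: VINF_SUCCESS -> VINF_EM_RESCHEDULE
           rc2 = VINF_EM_SUSPEND;    UPDATE_RC();  // rc: -> VINF_EM_SUSPEND (more important)
     */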
1263
1264 /*
1265 * Post execution chunk first.
1266 */
1267 if ( VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1268 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK))
1269 {
1270 /*
1271 * EMT Rendezvous (must be serviced before termination).
1272 */
1273 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1274 {
1275 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1276 UPDATE_RC();
 1277 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1278 * stopped/reset before the next VM state change is made. We need a better
1279 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1280 * && rc >= VINF_EM_SUSPEND). */
1281 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1282 {
1283 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1284 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1285 return rc;
1286 }
1287 }
1288
1289 /*
1290 * Termination request.
1291 */
1292 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
1293 {
1294 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
1295 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1296 return VINF_EM_TERMINATE;
1297 }
1298
1299 /*
1300 * Debugger Facility polling.
1301 */
1302 if (VM_FF_ISPENDING(pVM, VM_FF_DBGF))
1303 {
1304 rc2 = DBGFR3VMMForcedAction(pVM);
1305 UPDATE_RC();
1306 }
1307
1308 /*
1309 * Postponed reset request.
1310 */
1311 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
1312 {
1313 rc2 = VMR3Reset(pVM);
1314 UPDATE_RC();
1315 }
1316
1317 /*
1318 * CSAM page scanning.
1319 */
1320 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1321 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1322 {
1323 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1324
 1325 /** @todo Check for 16 or 32-bit code! (D bit in the code selector.) */
1326 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1327
1328 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1329 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1330 }
1331
1332 /*
1333 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1334 */
1335 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1336 {
1337 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1338 UPDATE_RC();
1339 if (rc == VINF_EM_NO_MEMORY)
1340 return rc;
1341 }
1342
1343 /* check that we got them all */
1344 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1345 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_CSAM_SCAN_PAGE);
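        /* The AssertCompiles above break the build if a new force action flag
           is added to the normal priority post masks without being handled in
           this block. */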
1346 }
1347
1348 /*
1349 * Normal priority then.
1350 * (Executed in no particular order.)
1351 */
1352 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1353 {
1354 /*
1355 * PDM Queues are pending.
1356 */
1357 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1358 PDMR3QueueFlushAll(pVM);
1359
1360 /*
1361 * PDM DMA transfers are pending.
1362 */
1363 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1364 PDMR3DmaRun(pVM);
1365
1366 /*
1367 * EMT Rendezvous (make sure they are handled before the requests).
1368 */
1369 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1370 {
1371 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1372 UPDATE_RC();
 1373 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1374 * stopped/reset before the next VM state change is made. We need a better
1375 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1376 * && rc >= VINF_EM_SUSPEND). */
1377 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1378 {
1379 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1380 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1381 return rc;
1382 }
1383 }
1384
1385 /*
1386 * Requests from other threads.
1387 */
1388 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1389 {
1390 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY);
1391 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1392 {
1393 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1394 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1395 return rc2;
1396 }
1397 UPDATE_RC();
 1398 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1399 * stopped/reset before the next VM state change is made. We need a better
1400 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1401 * && rc >= VINF_EM_SUSPEND). */
1402 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1403 {
1404 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1405 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1406 return rc;
1407 }
1408 }
1409
1410 /* Replay the handler notification changes. */
1411 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1412 {
 1413 /* Try not to cause deadlocks: only take the REM lock if this is a
         single-CPU VM or we do not already own the PGM or IOM locks. */
1414 if ( pVM->cCpus == 1
1415 || ( !PGMIsLockOwner(pVM)
1416 && !IOMIsLockOwner(pVM))
1417 )
1418 {
1419 EMRemLock(pVM);
1420 REMR3ReplayHandlerNotifications(pVM);
1421 EMRemUnlock(pVM);
1422 }
1423 }
1424
1425 /* check that we got them all */
1426 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1427 }
1428
1429 /*
1430 * Normal priority then. (per-VCPU)
1431 * (Executed in no particular order.)
1432 */
1433 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1434 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1435 {
1436 /*
1437 * Requests from other threads.
1438 */
1439 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
1440 {
1441 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu);
1442 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1443 {
1444 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1445 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1446 return rc2;
1447 }
1448 UPDATE_RC();
 1449 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1450 * stopped/reset before the next VM state change is made. We need a better
1451 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1452 * && rc >= VINF_EM_SUSPEND). */
1453 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1454 {
1455 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1456 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1457 return rc;
1458 }
1459 }
1460
1461 /* check that we got them all */
1462 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
1463 }
1464
1465 /*
1466 * High priority pre execution chunk last.
1467 * (Executed in ascending priority order.)
1468 */
1469 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1470 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1471 {
1472 /*
1473 * Timers before interrupts.
1474 */
1475 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)
1476 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1477 TMR3TimerQueuesDo(pVM);
1478
1479 /*
1480 * The instruction following an emulated STI should *always* be executed!
1481 */
1482 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1483 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1484 {
1485 Log(("VM_FF_EMULATED_STI at %RGv successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1486 if (CPUMGetGuestEIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1487 {
1488 /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
1489 * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
1490 * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
 1491 * break the guest. Sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
1492 */
1493 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1494 }
1495 if (HWACCMR3IsActive(pVCpu))
1496 rc2 = VINF_EM_RESCHEDULE_HWACC;
1497 else
1498 rc2 = PATMAreInterruptsEnabled(pVM) ? VINF_EM_RESCHEDULE_RAW : VINF_EM_RESCHEDULE_REM;
1499
1500 UPDATE_RC();
1501 }
1502
 1503 /*
 1504 * Interrupts: inject a pending PIC/APIC interrupt unless delivery is currently blocked.
 1505 */
1506 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1507 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1508 && (!rc || rc >= VINF_EM_RESCHEDULE_HWACC)
1509 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1510 && PATMAreInterruptsEnabled(pVM)
1511 && !HWACCMR3IsEventPending(pVCpu))
1512 {
1513 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1514 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1515 {
1516 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1517 /** @todo this really isn't nice, should properly handle this */
1518 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1519#ifdef VBOX_STRICT
1520 rcIrq = rc2;
1521#endif
1522 UPDATE_RC();
1523 }
1524 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1525 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1526 {
1527 rc2 = VINF_EM_RESCHEDULE_REM;
1528 UPDATE_RC();
1529 }
1530 }
1531
1532 /*
1533 * Allocate handy pages.
1534 */
1535 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1536 {
1537 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1538 UPDATE_RC();
1539 }
1540
1541 /*
1542 * Debugger Facility request.
1543 */
1544 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1545 {
1546 rc2 = DBGFR3VMMForcedAction(pVM);
1547 UPDATE_RC();
1548 }
1549
1550 /*
1551 * EMT Rendezvous (must be serviced before termination).
1552 */
1553 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1554 {
1555 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1556 UPDATE_RC();
 1557 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1558 * stopped/reset before the next VM state change is made. We need a better
1559 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1560 * && rc >= VINF_EM_SUSPEND). */
1561 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1562 {
1563 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1564 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1565 return rc;
1566 }
1567 }
1568
1569 /*
1570 * Termination request.
1571 */
1572 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
1573 {
1574 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
1575 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1576 return VINF_EM_TERMINATE;
1577 }
1578
1579 /*
1580 * Out of memory? Since most of our fellow high priority actions may cause us
1581 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
1582 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
1583 * than us since we can terminate without allocating more memory.
1584 */
1585 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1586 {
1587 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1588 UPDATE_RC();
1589 if (rc == VINF_EM_NO_MEMORY)
1590 return rc;
1591 }
1592
1593 /*
1594 * If the virtual sync clock is still stopped, make TM restart it.
1595 */
1596 if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
1597 TMR3VirtualSyncFF(pVM, pVCpu);
1598
1599#ifdef DEBUG
1600 /*
1601 * Debug, pause the VM.
1602 */
1603 if (VM_FF_ISPENDING(pVM, VM_FF_DEBUG_SUSPEND))
1604 {
1605 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
1606 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
1607 return VINF_EM_SUSPEND;
1608 }
1609#endif
1610
1611 /* check that we got them all */
1612 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1613 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS));
1614 }
1615
1616#undef UPDATE_RC
1617 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1618 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1619 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
1620 return rc;
1621}
1622
1623/**
 1624 * Release the REM lock if owned by the current VCPU.
1625 *
1626 * @param pVM The VM to operate on.
1627 */
1628VMMR3DECL(void) EMR3ReleaseOwnedLocks(PVM pVM)
1629{
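    /* The REM critical section can be entered recursively, so keep leaving it
       until this VCPU no longer owns it. */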
1630 while (PDMCritSectIsOwner(&pVM->em.s.CritSectREM))
1631 PDMCritSectLeave(&pVM->em.s.CritSectREM);
1632}
1633
1634
1635/**
1636 * Execute VM.
1637 *
 1638 * This function is the main loop of the VM. The emulation thread
 1639 * calls this function when the VM has been successfully constructed
 1640 * and we're ready to execute it.
 1641 *
 1642 * Returning from this function means that the VM is turned off or
 1643 * suspended (state already saved) and deconstruction is next in line.
 1644 *
 1645 * All interaction from other threads is done using forced actions
 1646 * and signaling of the wait object.
1647 *
 1648 * @returns VBox status code; informational status codes may indicate failure.
1649 * @param pVM The VM to operate on.
1650 * @param pVCpu The VMCPU to operate on.
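 *
 * @remarks Illustrative call site (editor's sketch; the real caller, the
 *          emulation thread, wraps this in additional VM state handling):
 * @code
 *     int rc = EMR3ExecuteVM(pVM, pVCpu);
 *     if (rc == VINF_EM_SUSPEND)
 *         LogRel(("EM: VM suspended, state already saved.\n"));
 * @endcode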
1651 */
1652VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
1653{
1654 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
1655 pVM,
1656 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
1657 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
1658 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
1659 pVCpu->em.s.fForceRAW));
1660 VM_ASSERT_EMT(pVM);
1661 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
1662 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
1663 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
1664 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
1665
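    /* setjmp returns 0 on the direct call; a non-zero rc here means an inner
       loop longjmp'ed back with a fatal status code, handled in the else
       branch at the bottom of this function. */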
1666 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
1667 if (rc == 0)
1668 {
1669 /*
1670 * Start the virtual time.
1671 */
1672 TMR3NotifyResume(pVM, pVCpu);
1673
1674 /*
1675 * The Outer Main Loop.
1676 */
1677 bool fFFDone = false;
1678
1679 /* Reschedule right away to start in the right state. */
1680 rc = VINF_SUCCESS;
1681
1682 /* If resuming after a pause or a state load, restore the previous
1683 state or else we'll start executing code. Else, just reschedule. */
1684 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
1685 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1686 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
1687 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
1688 else
1689 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1690
1691 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
1692 for (;;)
1693 {
1694 /*
1695 * Before we can schedule anything (we're here because
1696 * scheduling is required) we must service any pending
1697 * forced actions to avoid any pending action causing
 1698 * immediate rescheduling upon entering an inner loop.
1699 *
1700 * Do forced actions.
1701 */
1702 if ( !fFFDone
1703 && rc != VINF_EM_TERMINATE
1704 && rc != VINF_EM_OFF
1705 && ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
1706 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK)))
1707 {
1708 rc = emR3ForcedActions(pVM, pVCpu, rc);
1709 if ( ( rc == VINF_EM_RESCHEDULE_REM
1710 || rc == VINF_EM_RESCHEDULE_HWACC)
1711 && pVCpu->em.s.fForceRAW)
1712 rc = VINF_EM_RESCHEDULE_RAW;
1713 }
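            /* fFFDone set means the inner loop already serviced the pending
               forced actions before returning; skip them once and clear the
               flag so they get checked again on the next iteration. */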
1714 else if (fFFDone)
1715 fFFDone = false;
1716
1717 /*
1718 * Now what to do?
1719 */
1720 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
1721 switch (rc)
1722 {
1723 /*
1724 * Keep doing what we're currently doing.
1725 */
1726 case VINF_SUCCESS:
1727 break;
1728
1729 /*
1730 * Reschedule - to raw-mode execution.
1731 */
1732 case VINF_EM_RESCHEDULE_RAW:
1733 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", pVCpu->em.s.enmState, EMSTATE_RAW));
1734 pVCpu->em.s.enmState = EMSTATE_RAW;
1735 break;
1736
1737 /*
1738 * Reschedule - to hardware accelerated raw-mode execution.
1739 */
1740 case VINF_EM_RESCHEDULE_HWACC:
1741 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", pVCpu->em.s.enmState, EMSTATE_HWACC));
1742 Assert(!pVCpu->em.s.fForceRAW);
1743 pVCpu->em.s.enmState = EMSTATE_HWACC;
1744 break;
1745
1746 /*
1747 * Reschedule - to recompiled execution.
1748 */
1749 case VINF_EM_RESCHEDULE_REM:
1750 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", pVCpu->em.s.enmState, EMSTATE_REM));
1751 pVCpu->em.s.enmState = EMSTATE_REM;
1752 break;
1753
1754#ifdef VBOX_WITH_VMI
1755 /*
1756 * Reschedule - parav call.
1757 */
1758 case VINF_EM_RESCHEDULE_PARAV:
1759 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_PARAV: %d -> %d (EMSTATE_PARAV)\n", pVCpu->em.s.enmState, EMSTATE_PARAV));
1760 pVCpu->em.s.enmState = EMSTATE_PARAV;
1761 break;
1762#endif
1763
1764 /*
1765 * Resume.
1766 */
1767 case VINF_EM_RESUME:
1768 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", pVCpu->em.s.enmState));
1769 /* Don't reschedule in the halted or wait for SIPI case. */
1770 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1771 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
1772 break;
1773 /* fall through and get scheduled. */
1774
1775 /*
1776 * Reschedule.
1777 */
1778 case VINF_EM_RESCHEDULE:
1779 {
1780 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1781 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, emR3GetStateName(enmState)));
1782 pVCpu->em.s.enmState = enmState;
1783 break;
1784 }
1785
1786 /*
1787 * Halted.
1788 */
1789 case VINF_EM_HALT:
1790 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_HALTED));
1791 pVCpu->em.s.enmState = EMSTATE_HALTED;
1792 break;
1793
1794 /*
1795 * Switch to the wait for SIPI state (application processor only)
1796 */
1797 case VINF_EM_WAIT_SIPI:
1798 Assert(pVCpu->idCpu != 0);
1799 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_WAIT_SIPI));
1800 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
1801 break;
1802
1803
1804 /*
1805 * Suspend.
1806 */
1807 case VINF_EM_SUSPEND:
1808 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
1809 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1810 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
1811 break;
1812
1813 /*
1814 * Reset.
 1815 * We might end up doing a double reset for now; we'll have to clean up the mess later.
1816 */
1817 case VINF_EM_RESET:
1818 {
1819 if (pVCpu->idCpu == 0)
1820 {
1821 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1822 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, emR3GetStateName(enmState)));
1823 pVCpu->em.s.enmState = enmState;
1824 }
1825 else
1826 {
1827 /* All other VCPUs go into the wait for SIPI state. */
1828 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
1829 }
1830 break;
1831 }
1832
1833 /*
1834 * Power Off.
1835 */
1836 case VINF_EM_OFF:
 1837 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
 1838 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
1839 TMR3NotifySuspend(pVM, pVCpu);
1840 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1841 return rc;
1842
1843 /*
1844 * Terminate the VM.
1845 */
1846 case VINF_EM_TERMINATE:
 1847 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
 1848 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
1849 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
1850 TMR3NotifySuspend(pVM, pVCpu);
1851 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1852 return rc;
1853
1854
1855 /*
1856 * Out of memory, suspend the VM and stuff.
1857 */
1858 case VINF_EM_NO_MEMORY:
1859 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
1860 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1861 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
1862 TMR3NotifySuspend(pVM, pVCpu);
1863 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1864
1865 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
1866 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
1867 if (rc != VINF_EM_SUSPEND)
1868 {
1869 if (RT_SUCCESS_NP(rc))
1870 {
1871 AssertLogRelMsgFailed(("%Rrc\n", rc));
1872 rc = VERR_EM_INTERNAL_ERROR;
1873 }
1874 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1875 }
1876 return rc;
1877
1878 /*
1879 * Guest debug events.
1880 */
1881 case VINF_EM_DBG_STEPPED:
1882 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
1883 case VINF_EM_DBG_STOP:
1884 case VINF_EM_DBG_BREAKPOINT:
1885 case VINF_EM_DBG_STEP:
1886 if (pVCpu->em.s.enmState == EMSTATE_RAW)
1887 {
1888 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_RAW));
1889 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
1890 }
1891 else
1892 {
1893 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_REM));
1894 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1895 }
1896 break;
1897
1898 /*
1899 * Hypervisor debug events.
1900 */
1901 case VINF_EM_DBG_HYPER_STEPPED:
1902 case VINF_EM_DBG_HYPER_BREAKPOINT:
1903 case VINF_EM_DBG_HYPER_ASSERTION:
1904 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_HYPER));
1905 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
1906 break;
1907
1908 /*
 1909 * Guru meditations.
1910 */
1911 case VERR_VMM_RING0_ASSERTION:
1912 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
1913 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1914 break;
1915
1916 /*
1917 * Any error code showing up here other than the ones we
1918 * know and process above are considered to be FATAL.
1919 *
1920 * Unknown warnings and informational status codes are also
1921 * included in this.
1922 */
1923 default:
1924 if (RT_SUCCESS_NP(rc))
1925 {
1926 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
1927 rc = VERR_EM_INTERNAL_ERROR;
1928 }
1929 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
1930 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1931 break;
1932 }
1933
1934 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
1935 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
1936
1937 /*
1938 * Act on the state.
1939 */
1940 switch (pVCpu->em.s.enmState)
1941 {
1942 /*
1943 * Execute raw.
1944 */
1945 case EMSTATE_RAW:
1946 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
1947 break;
1948
1949 /*
1950 * Execute hardware accelerated raw.
1951 */
1952 case EMSTATE_HWACC:
1953 rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone);
1954 break;
1955
1956 /*
1957 * Execute recompiled.
1958 */
1959 case EMSTATE_REM:
1960 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
1961 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
1962 break;
1963
1964#ifdef VBOX_WITH_VMI
1965 /*
1966 * Execute PARAV function.
1967 */
1968 case EMSTATE_PARAV:
1969 rc = PARAVCallFunction(pVM);
1970 pVCpu->em.s.enmState = EMSTATE_REM;
1971 break;
1972#endif
1973
1974 /*
1975 * Application processor execution halted until SIPI.
1976 */
1977 case EMSTATE_WAIT_SIPI:
1978 /* no break */
1979 /*
1980 * hlt - execution halted until interrupt.
1981 */
1982 case EMSTATE_HALTED:
1983 {
1984 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
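                /* Block until a forced action wakes us up; the last argument
                   tells VMR3WaitHalted whether guest interrupts are disabled
                   (EFLAGS.IF clear) and thus cannot end the halt. */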
1985 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
1986 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
1987 break;
1988 }
1989
1990 /*
1991 * Suspended - return to VM.cpp.
1992 */
1993 case EMSTATE_SUSPENDED:
1994 TMR3NotifySuspend(pVM, pVCpu);
1995 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1996 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
1997 return VINF_EM_SUSPEND;
1998
1999 /*
2000 * Debugging in the guest.
2001 */
2002 case EMSTATE_DEBUG_GUEST_REM:
2003 case EMSTATE_DEBUG_GUEST_RAW:
2004 TMR3NotifySuspend(pVM, pVCpu);
2005 rc = emR3Debug(pVM, pVCpu, rc);
2006 TMR3NotifyResume(pVM, pVCpu);
2007 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2008 break;
2009
2010 /*
2011 * Debugging in the hypervisor.
2012 */
2013 case EMSTATE_DEBUG_HYPER:
2014 {
2015 TMR3NotifySuspend(pVM, pVCpu);
2016 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2017
2018 rc = emR3Debug(pVM, pVCpu, rc);
2019 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2020 if (rc != VINF_SUCCESS)
2021 {
2022 /* switch to guru meditation mode */
2023 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2024 VMMR3FatalDump(pVM, pVCpu, rc);
2025 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2026 return rc;
2027 }
2028
2029 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2030 TMR3NotifyResume(pVM, pVCpu);
2031 break;
2032 }
2033
2034 /*
2035 * Guru meditation takes place in the debugger.
2036 */
2037 case EMSTATE_GURU_MEDITATION:
2038 {
2039 TMR3NotifySuspend(pVM, pVCpu);
2040 VMMR3FatalDump(pVM, pVCpu, rc);
2041 emR3Debug(pVM, pVCpu, rc);
2042 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2043 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2044 return rc;
2045 }
2046
2047 /*
2048 * The states we don't expect here.
2049 */
2050 case EMSTATE_NONE:
2051 case EMSTATE_TERMINATING:
2052 default:
2053 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2054 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2055 TMR3NotifySuspend(pVM, pVCpu);
2056 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2057 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2058 return VERR_EM_INTERNAL_ERROR;
2059 }
2060 } /* The Outer Main Loop */
2061 }
2062 else
2063 {
2064 /*
2065 * Fatal error.
2066 */
2067 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2068 TMR3NotifySuspend(pVM, pVCpu);
2069 VMMR3FatalDump(pVM, pVCpu, rc);
2070 emR3Debug(pVM, pVCpu, rc);
2071 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2072 /** @todo change the VM state! */
2073 return rc;
2074 }
2075
2076 /* (won't ever get here). */
2077 AssertFailed();
2078}
2079