VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@58535

Last change on this file since 58535 was 58535, checked in by vboxsync, 10 years ago

REMR3EmulateInstruction: Corrected assertion.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 186.2 KB
1/* $Id: VBoxRecompiler.c 58535 2015-10-30 13:51:27Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_rem REM - Recompiled Execution Manager.
19 *
20 * The recompiled execution manager (REM) serves as the final fallback for guest
21 * execution, after HM / raw-mode and IEM have given up.
22 *
23 * The REM is QEMU with a whole bunch of VBox-specific customizations for
24 * interfacing with PATM, CSAM, PGM and other components.
25 *
26 * @sa @ref grp_rem
27 */
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_REM
33#include <stdio.h> /* FILE */
34#include "osdep.h"
35#include "config.h"
36#include "cpu.h"
37#include "exec-all.h"
38#include "ioport.h"
39
40#include <VBox/vmm/rem.h>
41#include <VBox/vmm/vmapi.h>
42#include <VBox/vmm/tm.h>
43#include <VBox/vmm/ssm.h>
44#include <VBox/vmm/em.h>
45#include <VBox/vmm/trpm.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/vmm/mm.h>
48#include <VBox/vmm/pgm.h>
49#include <VBox/vmm/pdm.h>
50#include <VBox/vmm/dbgf.h>
51#include <VBox/dbg.h>
52#include <VBox/vmm/hm.h>
53#include <VBox/vmm/patm.h>
54#include <VBox/vmm/csam.h>
55#include "REMInternal.h"
56#include <VBox/vmm/vm.h>
57#include <VBox/vmm/uvm.h>
58#include <VBox/param.h>
59#include <VBox/err.h>
60
61#include <VBox/log.h>
62#include <iprt/alloca.h>
63#include <iprt/semaphore.h>
64#include <iprt/asm.h>
65#include <iprt/assert.h>
66#include <iprt/thread.h>
67#include <iprt/string.h>
68
69/* Don't wanna include everything. */
70extern void cpu_exec_init_all(uintptr_t tb_size);
71extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
72extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
73extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
74extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
75extern void tlb_flush(CPUX86State *env, int flush_global);
76extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
77extern void sync_ldtr(CPUX86State *env1, int selector);
78
79#ifdef VBOX_STRICT
80ram_addr_t get_phys_page_offset(target_ulong addr);
81#endif
82
83
84/*********************************************************************************************************************************
85* Defined Constants And Macros *
86*********************************************************************************************************************************/
87
88/** Copy 80-bit fpu register at pSrc to pDst.
89 * This is probably faster than *calling* memcpy.
90 */
91#define REM_COPY_FPU_REG(pDst, pSrc) \
92 do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
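/* A minimal usage sketch for REM_COPY_FPU_REG (illustrative only; the helper
 * below is hypothetical): copying an array of 80-bit FPU/MMX registers. */
#if 0 /* example */
static void remExampleCopyFpuRegs(X86FPUMMX *paDst, const X86FPUMMX *paSrc, unsigned cRegs)
{
    unsigned i;
    for (i = 0; i < cRegs; i++)
        REM_COPY_FPU_REG(&paDst[i], &paSrc[i]);
}
#endif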
93
94/** How remR3RunLoggingStep operates. */
95#define REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
96
97
98/** Selector flag shift between qemu and VBox.
99 * VBox shifts the qemu bits to the right. */
100#define SEL_FLAGS_SHIFT (8)
101/** Mask applied to the shifted qemu selector flags to get the attributes VBox
102 * (VT-x) needs. */
103#define SEL_FLAGS_SMASK UINT32_C(0x1F0FF)
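/* Illustrative sketch of the conversion these two constants implement: a QEMU
 * segment 'flags' word is shifted and masked into the VBox (VT-x) attribute
 * format, exactly as done for each segment register in remR3CanExecuteRaw()
 * below. The helper name is hypothetical. */
#if 0 /* example */
static uint32_t remExampleQemuFlagsToVBoxAttr(uint32_t fQemuFlags)
{
    return (fQemuFlags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
}
#endif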
104
105
106/*********************************************************************************************************************************
107* Internal Functions *
108*********************************************************************************************************************************/
109static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
110static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
111static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
112static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
113
114static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys);
115static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys);
116static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys);
117static void remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
118static void remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
119static void remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32);
120
121static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
122static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
123static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
124static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
125static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
126static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
127
128static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
129static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
130static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
131
132
133/*********************************************************************************************************************************
134* Global Variables *
135*********************************************************************************************************************************/
136
137/** @todo Move stats to REM::s some rainy day when we have nothing to do. */
138#ifdef VBOX_WITH_STATISTICS
139static STAMPROFILEADV gStatExecuteSingleInstr;
140static STAMPROFILEADV gStatCompilationQEmu;
141static STAMPROFILEADV gStatRunCodeQEmu;
142static STAMPROFILEADV gStatTotalTimeQEmu;
143static STAMPROFILEADV gStatTimers;
144static STAMPROFILEADV gStatTBLookup;
145static STAMPROFILEADV gStatIRQ;
146static STAMPROFILEADV gStatRawCheck;
147static STAMPROFILEADV gStatMemRead;
148static STAMPROFILEADV gStatMemWrite;
149static STAMPROFILE gStatGCPhys2HCVirt;
150static STAMCOUNTER gStatCpuGetTSC;
151static STAMCOUNTER gStatRefuseTFInhibit;
152static STAMCOUNTER gStatRefuseVM86;
153static STAMCOUNTER gStatRefusePaging;
154static STAMCOUNTER gStatRefusePAE;
155static STAMCOUNTER gStatRefuseIOPLNot0;
156static STAMCOUNTER gStatRefuseIF0;
157static STAMCOUNTER gStatRefuseCode16;
158static STAMCOUNTER gStatRefuseWP0;
159static STAMCOUNTER gStatRefuseRing1or2;
160static STAMCOUNTER gStatRefuseCanExecute;
161static STAMCOUNTER gaStatRefuseStale[6];
162static STAMCOUNTER gStatREMGDTChange;
163static STAMCOUNTER gStatREMIDTChange;
164static STAMCOUNTER gStatREMLDTRChange;
165static STAMCOUNTER gStatREMTRChange;
166static STAMCOUNTER gStatSelOutOfSync[6];
167static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
168static STAMCOUNTER gStatFlushTBs;
169#endif
170/* in exec.c */
171extern uint32_t tlb_flush_count;
172extern uint32_t tb_flush_count;
173extern uint32_t tb_phys_invalidate_count;
174
175/*
176 * Global stuff.
177 */
178
179/** MMIO read callbacks. */
180CPUReadMemoryFunc *g_apfnMMIORead[3] =
181{
182 remR3MMIOReadU8,
183 remR3MMIOReadU16,
184 remR3MMIOReadU32
185};
186
187/** MMIO write callbacks. */
188CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
189{
190 remR3MMIOWriteU8,
191 remR3MMIOWriteU16,
192 remR3MMIOWriteU32
193};
194
195/** Handler read callbacks. */
196CPUReadMemoryFunc *g_apfnHandlerRead[3] =
197{
198 remR3HandlerReadU8,
199 remR3HandlerReadU16,
200 remR3HandlerReadU32
201};
202
203/** Handler write callbacks. */
204CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
205{
206 remR3HandlerWriteU8,
207 remR3HandlerWriteU16,
208 remR3HandlerWriteU32
209};
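/* Illustrative note: QEMU indexes these callback tables by access-size shift,
 * so slot 0 serves byte, slot 1 word and slot 2 dword accesses. A hypothetical
 * dispatch of a 32-bit MMIO read would thus look like this (the opaque
 * argument matches what REMR3Init passes to cpu_register_io_memory()): */
#if 0 /* example */
static uint32_t remExampleMmioRead32(PVM pVM, target_phys_addr_t GCPhys)
{
    return g_apfnMMIORead[2](&pVM->rem.s.Env, GCPhys); /* slot 2: 1 << 2 = 4 bytes */
}
#endif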
210
211
212#ifdef VBOX_WITH_DEBUGGER
213/*
214 * Debugger commands.
215 */
216static FNDBGCCMD remR3CmdDisasEnableStepping;
217
218/** '.remstep' arguments. */
219static const DBGCVARDESC g_aArgRemStep[] =
220{
221 /* cTimesMin, cTimesMax, enmCategory, fFlags, pszName, pszDescription */
222 { 0, ~0U, DBGCVAR_CAT_NUMBER, 0, "on/off", "Boolean value/mnemonic indicating the new state." },
223};
224
225/** Command descriptors. */
226static const DBGCCMD g_aCmds[] =
227{
228 {
229 .pszCmd ="remstep",
230 .cArgsMin = 0,
231 .cArgsMax = 1,
232 .paArgDescs = &g_aArgRemStep[0],
233 .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
234 .fFlags = 0,
235 .pfnHandler = remR3CmdDisasEnableStepping,
236 .pszSyntax = "[on/off]",
237 .pszDescription = "Enable or disable single stepping with logged disassembly. "
238 "If no argument is given, the current state is shown."
239 }
240};
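/* Example debugger console usage of the command described above:
 *   .remstep on    - enable single stepping with logged disassembly
 *   .remstep off   - disable it again
 *   .remstep       - show the current state
 */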
241#endif
242
243/** Prologue code, must be in lower 4G to simplify jumps to/from generated code.
244 * @todo huh??? That cannot be the case on the mac... So, this
245 * point is probably not valid any longer. */
246uint8_t *code_gen_prologue;
247
248
249/*********************************************************************************************************************************
250* Internal Functions *
251*********************************************************************************************************************************/
252void remAbort(int rc, const char *pszTip);
253extern int testmath(void);
254
255/* Put them here to avoid unused variable warning. */
256AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
257#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
258//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
259/* Why did this have to be identical?? */
260AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
261#else
262AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
263#endif
264
265
266/**
267 * Initializes the REM.
268 *
269 * @returns VBox status code.
270 * @param pVM The VM to operate on.
271 */
272REMR3DECL(int) REMR3Init(PVM pVM)
273{
274 PREMHANDLERNOTIFICATION pCur;
275 uint32_t u32Dummy;
276 int rc;
277 unsigned i;
278
279#ifdef VBOX_ENABLE_VBOXREM64
280 LogRel(("Using 64-bit aware REM\n"));
281#endif
282
283 /*
284 * Assert sanity.
285 */
286 AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
287 AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
288 AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
289#if 0 /* just an annoyance at the moment. */
290#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
291 Assert(!testmath());
292#endif
293#endif
294
295 /*
296 * Init some internal data members.
297 */
298 pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
299 pVM->rem.s.Env.pVM = pVM;
300#ifdef CPU_RAW_MODE_INIT
301 pVM->rem.s.state |= CPU_RAW_MODE_INIT;
302#endif
303
304 /*
305 * Initialize the REM critical section.
306 *
307 * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
308 * is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
309 * deadlocks. (mostly pgm vs rem locking)
310 */
311 rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, RT_SRC_POS, "REM-Register");
312 AssertRCReturn(rc, rc);
313
314 /* ctx. */
315 pVM->rem.s.pCtx = NULL; /* set when executing code. */
316 AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));
317
318 /* ignore all notifications */
319 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
320
321 code_gen_prologue = RTMemExecAlloc(_1K);
322 AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);
323
324 cpu_exec_init_all(0);
325
326 /*
327 * Init the recompiler.
328 */
329 if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
330 {
331 AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
332 return VERR_GENERAL_FAILURE;
333 }
334 PVMCPU pVCpu = VMMGetCpu(pVM);
335 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
336 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);
337
338 EMRemLock(pVM);
339 cpu_reset(&pVM->rem.s.Env);
340 EMRemUnlock(pVM);
341
342 /* allocate code buffer for single instruction emulation. */
343 pVM->rem.s.Env.cbCodeBuffer = 4096;
344 pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
345 AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);
346
347 /* Finally, set the cpu_single_env global. */
348 cpu_single_env = &pVM->rem.s.Env;
349
350 /* Nothing is pending by default */
351 pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
352
353 /*
354 * Register ram types.
355 */
356 pVM->rem.s.iMMIOMemType = cpu_register_io_memory(g_apfnMMIORead, g_apfnMMIOWrite, &pVM->rem.s.Env);
357 AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
358 pVM->rem.s.iHandlerMemType = cpu_register_io_memory(g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
359 AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
360 Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));
361
362 /* stop ignoring. */
363 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
364
365 /*
366 * Register the saved state data unit.
367 */
368 rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
369 NULL, NULL, NULL,
370 NULL, remR3Save, NULL,
371 NULL, remR3Load, NULL);
372 if (RT_FAILURE(rc))
373 return rc;
374
375#ifdef VBOX_WITH_DEBUGGER
376 /*
377 * Debugger commands.
378 */
379 static bool fRegisteredCmds = false;
380 if (!fRegisteredCmds)
381 {
382 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
383 if (RT_SUCCESS(rc))
384 fRegisteredCmds = true;
385 }
386#endif
387
388#ifdef VBOX_WITH_STATISTICS
389 /*
390 * Statistics.
391 */
392 STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
393 STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
394 STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
395 STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
396 STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer queue processing.");
397 STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling translation block lookup.");
398 STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling IRQ delivery.");
399 STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling remR3CanExecuteRaw calls.");
400 STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
401 STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
402 STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory conversion (PGMR3PhysTlbGCPhys2Ptr).");
403
404 STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");
405
406 STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
407 STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
408 STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
409 STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
410 STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
411 STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
412 STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
413 STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
414 STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
415 STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
416 STAM_REG(pVM, &gaStatRefuseStale[R_ES], STAMTYPE_COUNTER, "/REM/Refuse/StaleES", STAMUNIT_OCCURENCES, "Raw mode refused because of stale ES");
417 STAM_REG(pVM, &gaStatRefuseStale[R_CS], STAMTYPE_COUNTER, "/REM/Refuse/StaleCS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale CS");
418 STAM_REG(pVM, &gaStatRefuseStale[R_SS], STAMTYPE_COUNTER, "/REM/Refuse/StaleSS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale SS");
419 STAM_REG(pVM, &gaStatRefuseStale[R_DS], STAMTYPE_COUNTER, "/REM/Refuse/StaleDS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale DS");
420 STAM_REG(pVM, &gaStatRefuseStale[R_FS], STAMTYPE_COUNTER, "/REM/Refuse/StaleFS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale FS");
421 STAM_REG(pVM, &gaStatRefuseStale[R_GS], STAMTYPE_COUNTER, "/REM/Refuse/StaleGS", STAMUNIT_OCCURENCES, "Raw mode refused because of stale GS");
422 STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");
423
424 STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
425 STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
426 STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
427 STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");
428
429 STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
430 STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
431 STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
432 STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
433 STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
434 STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
435
436 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
437 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
438 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
439 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
440 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
441 STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");
442
443 STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
444#endif /* VBOX_WITH_STATISTICS */
445 AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 4);
446 AssertCompileMemberAlignment(CPUX86State, StatTbFlush, 8);
447
448 STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
449 STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
450 STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");
451
452
453#ifdef DEBUG_ALL_LOGGING
454 loglevel = ~0;
455#endif
456
457 /*
458 * Init the handler notification lists.
459 */
460 pVM->rem.s.idxPendingList = UINT32_MAX;
461 pVM->rem.s.idxFreeList = 0;
462
463 for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications); i++)
464 {
465 pCur = &pVM->rem.s.aHandlerNotifications[i];
466 pCur->idxNext = i + 1;
467 pCur->idxSelf = i;
468 }
469 pCur->idxNext = UINT32_MAX; /* the last record. */
470
471 return rc;
472}
473
474
475/**
476 * Finalizes the REM initialization.
477 *
478 * This is called after all components, devices and drivers have
479 * been initialized. Its main purpose is to finish the RAM-related
480 * initialization.
481 *
482 * @returns VBox status code.
483 *
484 * @param pVM The VM handle.
485 */
486REMR3DECL(int) REMR3InitFinalize(PVM pVM)
487{
488 int rc;
489
490 /*
491 * Ram size & dirty bit map.
492 */
493 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
494 pVM->rem.s.fGCPhysLastRamFixed = true;
495#ifdef RT_STRICT
496 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
497#else
498 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
499#endif
500 return rc;
501}
502
503/**
504 * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
505 *
506 * @returns VBox status code.
507 * @param pVM The VM handle.
508 * @param fGuarded Whether to guard the map.
509 */
510static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
511{
512 int rc = VINF_SUCCESS;
513 RTGCPHYS cb;
514
515 AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);
516
517 cb = pVM->rem.s.GCPhysLastRam + 1;
518 AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
519 ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
520 VERR_OUT_OF_RANGE);
521
522 ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
523 AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
524
525 if (!fGuarded)
526 {
527 ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
528 AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
529 }
530 else
531 {
532 /*
533 * Fill it up to the nearest 4GB of RAM and leave at least _64K of guard after it.
534 */
535 uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
536 uint32_t cbBitmapFull = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
537 if (cbBitmapFull == cbBitmapAligned)
538 cbBitmapFull += _4G >> PAGE_SHIFT;
539 else if (cbBitmapFull - cbBitmapAligned < _64K)
540 cbBitmapFull += _64K;
541
542 ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
543 AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
544
545 rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
546 if (RT_FAILURE(rc))
547 {
548 RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
549 AssertLogRelRCReturn(rc, rc);
550 }
551
552 ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
553 }
554
555 /* initialize it. */
556 memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
557 return rc;
558}
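/* A worked example of the guarded sizing above, assuming 512 MB of guest RAM:
 * cb = 0x20000000, so phys_dirty_size = cb >> PAGE_SHIFT = 0x20000 (one byte
 * per page). cbBitmapAligned = RT_ALIGN_32(0x20000, PAGE_SIZE) = 0x20000,
 * while cbBitmapFull = RT_ALIGN_32(0x20000, _4G >> PAGE_SHIFT) = 0x100000,
 * leaving 0xE0000 bytes after the bitmap that RTMemProtect() turns into an
 * inaccessible guard area. */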
559
560
561/**
562 * Terminates the REM.
563 *
564 * Termination means cleaning up and freeing all resources;
565 * the VM itself is at this point powered off or suspended.
566 *
567 * @returns VBox status code.
568 * @param pVM The VM to operate on.
569 */
570REMR3DECL(int) REMR3Term(PVM pVM)
571{
572 /*
573 * Statistics.
574 */
575 STAMR3Deregister(pVM->pUVM, "/PROF/REM/*");
576 STAMR3Deregister(pVM->pUVM, "/REM/*");
577
578 return VINF_SUCCESS;
579}
580
581
582/**
583 * The VM is being reset.
584 *
585 * For the REM component this means to call the cpu_reset() and
586 * reinitialize some state variables.
587 *
588 * @param pVM VM handle.
589 */
590REMR3DECL(void) REMR3Reset(PVM pVM)
591{
592 EMRemLock(pVM); /* Only pro forma, we're in a rendezvous. */
593
594 /*
595 * Reset the REM cpu.
596 */
597 Assert(pVM->rem.s.cIgnoreAll == 0);
598 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
599 cpu_reset(&pVM->rem.s.Env);
600 pVM->rem.s.cInvalidatedPages = 0;
601 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
602 Assert(pVM->rem.s.cIgnoreAll == 0);
603
604 /* Clear raw ring 0 init state */
605 pVM->rem.s.Env.state &= ~CPU_RAW_RING0;
606
607 /* Flush the TBs the next time we execute code here. */
608 pVM->rem.s.fFlushTBs = true;
609
610 EMRemUnlock(pVM);
611}
612
613
614/**
615 * Execute state save operation.
616 *
617 * @returns VBox status code.
618 * @param pVM VM Handle.
619 * @param pSSM SSM operation handle.
620 */
621static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
622{
623 PREM pRem = &pVM->rem.s;
624
625 /*
626 * Save the required CPU Env bits.
627 * (Not much because we're never in REM when doing the save.)
628 */
629 LogFlow(("remR3Save:\n"));
630 Assert(!pRem->fInREM);
631 SSMR3PutU32(pSSM, pRem->Env.hflags);
632 SSMR3PutU32(pSSM, ~0); /* separator */
633
634 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
635 SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
636 SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);
637
638 return SSMR3PutU32(pSSM, ~0); /* terminator */
639}
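/* The resulting saved-state layout for the current version is therefore
 * (summarizing the puts above):
 *   uint32_t   Env.hflags
 *   uint32_t   ~0                               separator
 *   uint32_t   !!(Env.state & CPU_RAW_RING0)
 *   uint32_t   u32PendingInterrupt
 *   uint32_t   ~0                               terminator
 */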
640
641
642/**
643 * Execute state load operation.
644 *
645 * @returns VBox status code.
646 * @param pVM VM Handle.
647 * @param pSSM SSM operation handle.
648 * @param uVersion Data layout version.
649 * @param uPass The data pass.
650 */
651static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
652{
653 uint32_t u32Dummy;
654 uint32_t fRawRing0 = false;
655 uint32_t u32Sep;
656 uint32_t i;
657 int rc;
658 PREM pRem;
659
660 LogFlow(("remR3Load:\n"));
661 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
662
663 /*
664 * Validate version.
665 */
666 if ( uVersion != REM_SAVED_STATE_VERSION
667 && uVersion != REM_SAVED_STATE_VERSION_VER1_6)
668 {
669 AssertMsgFailed(("remR3Load: Invalid version uVersion=%d!\n", uVersion));
670 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
671 }
672
673 /*
674 * Do a reset to be on the safe side...
675 */
676 REMR3Reset(pVM);
677
678 /*
679 * Ignore all ignorable notifications.
680 * (Not doing this will cause serious trouble.)
681 */
682 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
683
684 /*
685 * Load the required CPU Env bits.
686 * (Not much because we're never in REM when doing the save.)
687 */
688 pRem = &pVM->rem.s;
689 Assert(!pRem->fInREM);
690 SSMR3GetU32(pSSM, &pRem->Env.hflags);
691 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
692 {
693 /* Redundant REM CPU state has to be loaded, but can be ignored. */
694 CPUX86State_Ver16 temp;
695 SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
696 }
697
698 rc = SSMR3GetU32(pSSM, &u32Sep); /* separator */
699 if (RT_FAILURE(rc))
700 return rc;
701 if (u32Sep != ~0U)
702 {
703 AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
704 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
705 }
706
707 /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
708 SSMR3GetUInt(pSSM, &fRawRing0);
709 if (fRawRing0)
710 pRem->Env.state |= CPU_RAW_RING0;
711
712 if (uVersion == REM_SAVED_STATE_VERSION_VER1_6)
713 {
714 /*
715 * Load the REM stuff.
716 */
717 /** @todo r=bird: We should just drop all these items, restoring doesn't make
718 * sense. */
719 rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
720 if (RT_FAILURE(rc))
721 return rc;
722 if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
723 {
724 AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
725 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
726 }
727 for (i = 0; i < pRem->cInvalidatedPages; i++)
728 SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
729 }
730
731 rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
732 if (RT_FAILURE(rc))
733 return rc;
734
735 /* check the terminator. */
736 rc = SSMR3GetU32(pSSM, &u32Sep);
737 if (RT_FAILURE(rc))
738 return rc;
739 if (u32Sep != ~0U)
740 {
741 AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
742 return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
743 }
744
745 /*
746 * Get the CPUID features.
747 */
748 PVMCPU pVCpu = VMMGetCpu(pVM);
749 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
750 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
751
752 /*
753 * Stop ignoring ignorable notifications.
754 */
755 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
756
757 /*
758 * Sync the whole CPU state when executing code in the recompiler.
759 */
760 for (i = 0; i < pVM->cCpus; i++)
761 {
762 PVMCPU pVCpu = &pVM->aCpus[i];
763 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
764 }
765 return VINF_SUCCESS;
766}
767
768
769
770#undef LOG_GROUP
771#define LOG_GROUP LOG_GROUP_REM_RUN
772
773/**
774 * Single steps an instruction in recompiled mode.
775 *
776 * Before calling this function the REM state needs to be in sync with
777 * the VM. Call REMR3State() to perform the sync. It's only necessary
778 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
779 * and after calling REMR3StateBack().
780 *
781 * @returns VBox status code.
782 *
783 * @param pVM VM Handle.
784 * @param pVCpu VMCPU Handle.
785 */
786REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
787{
788 int rc, interrupt_request;
789 RTGCPTR GCPtrPC;
790 bool fBp;
791
792 /*
793 * Lock the REM - we don't wanna have anyone interrupting us
794 * while stepping - and enable single stepping. We also ignore
795 * pending interrupts and suchlike.
796 */
797 interrupt_request = pVM->rem.s.Env.interrupt_request;
798 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB | CPU_INTERRUPT_EXTERNAL_TIMER)));
799 pVM->rem.s.Env.interrupt_request = 0;
800 cpu_single_step(&pVM->rem.s.Env, 1);
801
802 /*
803 * If we're standing at a breakpoint, it has to be disabled before we start stepping.
804 */
805 GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
806 fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC, BP_GDB);
807
808 /*
809 * Execute and handle the return code.
810 * We execute without enabling the cpu tick, so on success we'll
811 * just flip it on and off to make sure it moves.
812 */
813 rc = cpu_exec(&pVM->rem.s.Env);
814 if (rc == EXCP_DEBUG)
815 {
816 TMR3NotifyResume(pVM, pVCpu);
817 TMR3NotifySuspend(pVM, pVCpu);
818 rc = VINF_EM_DBG_STEPPED;
819 }
820 else
821 {
822 switch (rc)
823 {
824 case EXCP_INTERRUPT: rc = VINF_SUCCESS; break;
825 case EXCP_HLT:
826 case EXCP_HALTED: rc = VINF_EM_HALT; break;
827 case EXCP_RC:
828 rc = pVM->rem.s.rc;
829 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
830 break;
831 case EXCP_EXECUTE_RAW:
832 case EXCP_EXECUTE_HM:
833 /** @todo: is it correct? No! */
834 rc = VINF_SUCCESS;
835 break;
836 default:
837 AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
838 rc = VERR_INTERNAL_ERROR;
839 break;
840 }
841 }
842
843 /*
844 * Restore the stuff we changed to prevent interruption.
845 * Unlock the REM.
846 */
847 if (fBp)
848 {
849 int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC, BP_GDB, NULL);
850 Assert(rc2 == 0); NOREF(rc2);
851 }
852 cpu_single_step(&pVM->rem.s.Env, 0);
853 pVM->rem.s.Env.interrupt_request = interrupt_request;
854
855 return rc;
856}
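/* A minimal caller sketch (hypothetical helper) for the sync contract in the
 * function docs above: sync the VM state into REM, step one instruction, then
 * flush the state back. Error handling is simplified. */
#if 0 /* example */
static int remExampleSingleStep(PVM pVM, PVMCPU pVCpu)
{
    int rc = REMR3State(pVM, pVCpu);            /* VM -> REM */
    if (RT_SUCCESS(rc))
    {
        int rc2;
        rc  = REMR3Step(pVM, pVCpu);            /* execute exactly one instruction */
        rc2 = REMR3StateBack(pVM, pVCpu);       /* REM -> VM */
        if (RT_SUCCESS(rc))
            rc = rc2;
    }
    return rc;
}
#endif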
857
858
859/**
860 * Set a breakpoint using the REM facilities.
861 *
862 * @returns VBox status code.
863 * @param pVM The VM handle.
864 * @param Address The breakpoint address.
865 * @thread The emulation thread.
866 */
867REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
868{
869 VM_ASSERT_EMT(pVM);
870 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address, BP_GDB, NULL))
871 {
872 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
873 return VINF_SUCCESS;
874 }
875 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
876 return VERR_REM_NO_MORE_BP_SLOTS;
877}
878
879
880/**
881 * Clears a breakpoint set by REMR3BreakpointSet().
882 *
883 * @returns VBox status code.
884 * @param pVM The VM handle.
885 * @param Address The breakpoint address.
886 * @thread The emulation thread.
887 */
888REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
889{
890 VM_ASSERT_EMT(pVM);
891 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address, BP_GDB))
892 {
893 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
894 return VINF_SUCCESS;
895 }
896 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
897 return VERR_REM_BP_NOT_FOUND;
898}
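/* Illustrative pairing of the two breakpoint APIs above (hypothetical helper
 * name and address): */
#if 0 /* example */
static int remExampleArmBreakpoint(PVM pVM, RTGCUINTPTR GCPtrBp)
{
    int rc = REMR3BreakpointSet(pVM, GCPtrBp);
    if (RT_SUCCESS(rc))
    {
        /* ... run until REMR3Run()/REMR3Step() reports VINF_EM_DBG_BREAKPOINT ... */
        rc = REMR3BreakpointClear(pVM, GCPtrBp);
    }
    return rc;
}
#endif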
899
900
901/**
902 * Emulate an instruction.
903 *
904 * This function executes one instruction without letting anyone
905 * interrupt it. It is intended to be called while in raw mode
906 * and thus will take care of all the state syncing between
907 * REM and the rest.
908 *
909 * @returns VBox status code.
910 * @param pVM VM handle.
911 * @param pVCpu VMCPU Handle.
912 */
913REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
914{
915 bool fFlushTBs;
916
917 int rc, rc2;
918 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
919
920 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
921 * CPU_RAW_HM makes sure we never execute interrupt handlers in the recompiler.
922 */
923 if (HMIsEnabled(pVM))
924 pVM->rem.s.Env.state |= CPU_RAW_HM;
925
926 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
927 fFlushTBs = pVM->rem.s.fFlushTBs;
928 pVM->rem.s.fFlushTBs = false;
929
930 /*
931 * Sync the state and enable single instruction / single stepping.
932 */
933 rc = REMR3State(pVM, pVCpu);
934 pVM->rem.s.fFlushTBs = fFlushTBs;
935 if (RT_SUCCESS(rc))
936 {
937 int interrupt_request = pVM->rem.s.Env.interrupt_request;
938 /* Ignore CPU_INTERRUPT_HARD as it only reflects FF. */
939 /* Ignore CPU_INTERRUPT_TIMER as it doesn't seem to be set anywhere anymore and would reflect a FF. */
940 /* Ignore CPU_INTERRUPT_EXTERNAL_HARD as it is subject to races and reflects a FF. */
941 /* Ignore CPU_INTERRUPT_EXTERNAL_TIMER as it is subject to races and reflects a FF. */
942 /* Ignore CPU_INTERRUPT_EXTERNAL_EXIT as it is subject to races and reflects one or more FFs. */
943 Assert(!(interrupt_request & ~(/*CPU_INTERRUPT_HARD |*/ CPU_INTERRUPT_EXITTB /*| CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT*/ | CPU_INTERRUPT_EXTERNAL_FLUSH_TLB /*| CPU_INTERRUPT_EXTERNAL_TIMER*/)));
944#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
945 cpu_single_step(&pVM->rem.s.Env, 0);
946#endif
947 Assert(!pVM->rem.s.Env.singlestep_enabled);
948
949 /*
950 * Now we set the execute single instruction flag and enter the cpu_exec loop.
951 */
952 TMNotifyStartOfExecution(pVCpu);
953 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
954 rc = cpu_exec(&pVM->rem.s.Env);
955 TMNotifyEndOfExecution(pVCpu);
956 switch (rc)
957 {
958 /*
959 * Executed without anything out of the way happening.
960 */
961 case EXCP_SINGLE_INSTR:
962 rc = VINF_EM_RESCHEDULE;
963 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
964 break;
965
966 /*
967 * If we take a trap or start servicing a pending interrupt, we might end up here.
968 * (Timer thread or some other thread wishing EMT's attention.)
969 */
970 case EXCP_INTERRUPT:
971 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
972 rc = VINF_EM_RESCHEDULE;
973 break;
974
975 /*
976 * Single step, we assume!
977 * If there was a breakpoint there we're fucked now.
978 */
979 case EXCP_DEBUG:
980 if (pVM->rem.s.Env.watchpoint_hit)
981 {
982 /** @todo deal with watchpoints */
983 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
984 rc = VINF_EM_DBG_BREAKPOINT;
985 }
986 else
987 {
988 CPUBreakpoint *pBP;
989 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
990 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
991 if (pBP->pc == GCPtrPC)
992 break;
993 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
994 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
995 }
996 break;
997
998 /*
999 * hlt instruction.
1000 */
1001 case EXCP_HLT:
1002 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1003 rc = VINF_EM_HALT;
1004 break;
1005
1006 /*
1007 * The VM has halted.
1008 */
1009 case EXCP_HALTED:
1010 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1011 rc = VINF_EM_HALT;
1012 break;
1013
1014 /*
1015 * Switch to RAW-mode.
1016 */
1017 case EXCP_EXECUTE_RAW:
1018 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1019 rc = VINF_EM_RESCHEDULE_RAW;
1020 break;
1021
1022 /*
1023 * Switch to hardware accelerated RAW-mode.
1024 */
1025 case EXCP_EXECUTE_HM:
1026 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HM\n"));
1027 rc = VINF_EM_RESCHEDULE_HM;
1028 break;
1029
1030 /*
1031 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1032 */
1033 case EXCP_RC:
1034 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1035 rc = pVM->rem.s.rc;
1036 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1037 break;
1038
1039 /*
1040 * Figure out the rest when they arrive....
1041 */
1042 default:
1043 AssertMsgFailed(("rc=%d\n", rc));
1044 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1045 rc = VINF_EM_RESCHEDULE;
1046 break;
1047 }
1048
1049 /*
1050 * Switch back the state.
1051 */
1052 pVM->rem.s.Env.interrupt_request = interrupt_request;
1053 rc2 = REMR3StateBack(pVM, pVCpu);
1054 AssertRC(rc2);
1055 }
1056
1057 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1058 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1059 return rc;
1060}
1061
1062
1063/**
1064 * Used by REMR3Run to handle the case where CPU_EMULATE_SINGLE_STEP is set.
1065 *
1066 * @returns VBox status code.
1067 *
1068 * @param pVM The VM handle.
1069 * @param pVCpu The Virtual CPU handle.
1070 */
1071static int remR3RunLoggingStep(PVM pVM, PVMCPU pVCpu)
1072{
1073 int rc;
1074
1075 Assert(pVM->rem.s.fInREM);
1076#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1077 cpu_single_step(&pVM->rem.s.Env, 1);
1078#else
1079 Assert(!pVM->rem.s.Env.singlestep_enabled);
1080#endif
1081
1082 /*
1083 * Now we set the execute single instruction flag and enter the cpu_exec loop.
1084 */
1085 for (;;)
1086 {
1087 char szBuf[256];
1088
1089 /*
1090 * Log the current registers state and instruction.
1091 */
1092 remR3StateUpdate(pVM, pVCpu);
1093 DBGFR3Info(pVM->pUVM, "cpumguest", NULL, NULL);
1094 szBuf[0] = '\0';
1095 rc = DBGFR3DisasInstrEx(pVM->pUVM,
1096 pVCpu->idCpu,
1097 0, /* Sel */ 0, /* GCPtr */
1098 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
1099 szBuf,
1100 sizeof(szBuf),
1101 NULL);
1102 if (RT_FAILURE(rc))
1103 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
1104 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
1105
1106 /*
1107 * Execute the instruction.
1108 */
1109 TMNotifyStartOfExecution(pVCpu);
1110
1111 if ( pVM->rem.s.Env.exception_index < 0
1112 || pVM->rem.s.Env.exception_index > 256)
1113 pVM->rem.s.Env.exception_index = -1; /** @todo We need to do similar stuff elsewhere, I think. */
1114
1115#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1116 pVM->rem.s.Env.interrupt_request = 0;
1117#else
1118 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
1119#endif
1120 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1121 || pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
1122 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
1123 RTLogPrintf("remR3RunLoggingStep: interrupt_request=%#x halted=%d exception_index=%#x\n",
1124 pVM->rem.s.Env.interrupt_request,
1125 pVM->rem.s.Env.halted,
1126 pVM->rem.s.Env.exception_index
1127 );
1128
1129 rc = cpu_exec(&pVM->rem.s.Env);
1130
1131 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %#x interrupt_request=%#x halted=%d exception_index=%#x\n", rc,
1132 pVM->rem.s.Env.interrupt_request,
1133 pVM->rem.s.Env.halted,
1134 pVM->rem.s.Env.exception_index
1135 );
1136
1137 TMNotifyEndOfExecution(pVCpu);
1138
1139 switch (rc)
1140 {
1141#ifndef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1142 /*
1143 * The normal exit.
1144 */
1145 case EXCP_SINGLE_INSTR:
1146 if ( !VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1147 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1148 continue;
1149 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1150 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1151 rc = VINF_SUCCESS;
1152 break;
1153
1154#else
1155 /*
1156 * The normal exit, check for breakpoints at PC just to be sure.
1157 */
1158#endif
1159 case EXCP_DEBUG:
1160 if (pVM->rem.s.Env.watchpoint_hit)
1161 {
1162 /** @todo deal with watchpoints */
1163 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1164 rc = VINF_EM_DBG_BREAKPOINT;
1165 }
1166 else
1167 {
1168 CPUBreakpoint *pBP;
1169 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1170 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1171 if (pBP->pc == GCPtrPC)
1172 break;
1173 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1174 Log2(("remR3RunLoggingStep: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1175 }
1176#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1177 if (rc == VINF_EM_DBG_STEPPED)
1178 {
1179 if ( !VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1180 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1181 continue;
1182
1183 RTLogPrintf("remR3RunLoggingStep: rc=VINF_SUCCESS w/ FFs (%#x/%#x)\n",
1184 pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions);
1185 rc = VINF_SUCCESS;
1186 }
1187#endif
1188 break;
1189
1190 /*
1191 * If we take a trap or start servicing a pending interrupt, we might end up here.
1192 * (Timer thread or some other thread wishing EMT's attention.)
1193 */
1194 case EXCP_INTERRUPT:
1195 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_INTERRUPT rc=VINF_SUCCESS\n");
1196 rc = VINF_SUCCESS;
1197 break;
1198
1199 /*
1200 * hlt instruction.
1201 */
1202 case EXCP_HLT:
1203 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HLT rc=VINF_EM_HALT\n");
1204 rc = VINF_EM_HALT;
1205 break;
1206
1207 /*
1208 * The VM has halted.
1209 */
1210 case EXCP_HALTED:
1211 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_HALTED rc=VINF_EM_HALT\n");
1212 rc = VINF_EM_HALT;
1213 break;
1214
1215 /*
1216 * Switch to RAW-mode.
1217 */
1218 case EXCP_EXECUTE_RAW:
1219 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_RAW rc=VINF_EM_RESCHEDULE_RAW\n");
1220 rc = VINF_EM_RESCHEDULE_RAW;
1221 break;
1222
1223 /*
1224 * Switch to hardware accelerated RAW-mode.
1225 */
1226 case EXCP_EXECUTE_HM:
1227 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_EXECUTE_HM rc=VINF_EM_RESCHEDULE_HM\n");
1228 rc = VINF_EM_RESCHEDULE_HM;
1229 break;
1230
1231 /*
1232 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1233 */
1234 case EXCP_RC:
1235 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc);
1236 rc = pVM->rem.s.rc;
1237 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1238 break;
1239
1240 /*
1241 * Figure out the rest when they arrive....
1242 */
1243 default:
1244 AssertMsgFailed(("rc=%d\n", rc));
1245 RTLogPrintf("remR3RunLoggingStep: cpu_exec -> %d rc=VINF_EM_RESCHEDULE\n", rc);
1246 rc = VINF_EM_RESCHEDULE;
1247 break;
1248 }
1249 break;
1250 }
1251
1252#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
1253// cpu_single_step(&pVM->rem.s.Env, 0);
1254#else
1255 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
1256#endif
1257 return rc;
1258}
1259
1260
1261/**
1262 * Runs code in recompiled mode.
1263 *
1264 * Before calling this function the REM state needs to be in sync with
1265 * the VM. Call REMR3State() to perform the sync. It's only necessary
1266 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1267 * and after calling REMR3StateBack().
1268 *
1269 * @returns VBox status code.
1270 *
1271 * @param pVM VM Handle.
1272 * @param pVCpu VMCPU Handle.
1273 */
1274REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1275{
1276 int rc;
1277
1278 if (RT_UNLIKELY(pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP))
1279 return remR3RunLoggingStep(pVM, pVCpu);
1280
1281 Assert(pVM->rem.s.fInREM);
1282 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1283
1284 TMNotifyStartOfExecution(pVCpu);
1285 rc = cpu_exec(&pVM->rem.s.Env);
1286 TMNotifyEndOfExecution(pVCpu);
1287 switch (rc)
1288 {
1289 /*
1290 * This happens when the execution was interrupted
1291 * by an external event, like pending timers.
1292 */
1293 case EXCP_INTERRUPT:
1294 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1295 rc = VINF_SUCCESS;
1296 break;
1297
1298 /*
1299 * hlt instruction.
1300 */
1301 case EXCP_HLT:
1302 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1303 rc = VINF_EM_HALT;
1304 break;
1305
1306 /*
1307 * The VM has halted.
1308 */
1309 case EXCP_HALTED:
1310 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1311 rc = VINF_EM_HALT;
1312 break;
1313
1314 /*
1315 * Breakpoint/single step.
1316 */
1317 case EXCP_DEBUG:
1318 if (pVM->rem.s.Env.watchpoint_hit)
1319 {
1320 /** @todo deal with watchpoints */
1321 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc !watchpoint_hit!\n", rc));
1322 rc = VINF_EM_DBG_BREAKPOINT;
1323 }
1324 else
1325 {
1326 CPUBreakpoint *pBP;
1327 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1328 QTAILQ_FOREACH(pBP, &pVM->rem.s.Env.breakpoints, entry)
1329 if (pBP->pc == GCPtrPC)
1330 break;
1331 rc = pBP ? VINF_EM_DBG_BREAKPOINT : VINF_EM_DBG_STEPPED;
1332 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc pBP=%p GCPtrPC=%RGv\n", rc, pBP, GCPtrPC));
1333 }
1334 break;
1335
1336 /*
1337 * Switch to RAW-mode.
1338 */
1339 case EXCP_EXECUTE_RAW:
1340 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW pc=%RGv\n", pVM->rem.s.Env.eip));
1341 rc = VINF_EM_RESCHEDULE_RAW;
1342 break;
1343
1344 /*
1345 * Switch to hardware accelerated RAW-mode.
1346 */
1347 case EXCP_EXECUTE_HM:
1348 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HM\n"));
1349 rc = VINF_EM_RESCHEDULE_HM;
1350 break;
1351
1352 /*
1353 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1354 */
1355 case EXCP_RC:
1356 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1357 rc = pVM->rem.s.rc;
1358 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1359 break;
1360
1361 /*
1362 * Figure out the rest when they arrive....
1363 */
1364 default:
1365 AssertMsgFailed(("rc=%d\n", rc));
1366 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1367 rc = VINF_SUCCESS;
1368 break;
1369 }
1370
1371 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1372 return rc;
1373}
1374
1375
1376/**
1377 * Check if the cpu state is suitable for Raw execution.
1378 *
1379 * @returns true if RAW/HWACC mode is ok, false if we should stay in REM.
1380 *
1381 * @param env The CPU env struct.
1382 * @param eip The EIP to check this for (might differ from env->eip).
1383 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1384 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1385 *
1386 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1387 */
1388bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
1389{
1390 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1391 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1392 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1393 uint32_t u32CR0;
1394
1395#ifdef IEM_VERIFICATION_MODE
1396 return false;
1397#endif
1398
1399 /* Update counter. */
1400 env->pVM->rem.s.cCanExecuteRaw++;
1401
1402 /* Never when single stepping+logging guest code. */
1403 if (env->state & CPU_EMULATE_SINGLE_STEP)
1404 return false;
1405
1406 if (HMIsEnabled(env->pVM))
1407 {
1408#ifdef RT_OS_WINDOWS
1409 PCPUMCTX pCtx = alloca(sizeof(*pCtx));
1410#else
1411 CPUMCTX Ctx;
1412 PCPUMCTX pCtx = &Ctx;
1413#endif
1414
1415 env->state |= CPU_RAW_HM;
1416
1417 /*
1418 * The simple check first...
1419 */
1420 if (!EMIsHwVirtExecutionEnabled(env->pVM))
1421 return false;
1422
1423 /*
1424 * Create partial context for HMR3CanExecuteGuest
1425 */
1426 pCtx->cr0 = env->cr[0];
1427 pCtx->cr3 = env->cr[3];
1428 pCtx->cr4 = env->cr[4];
1429
1430 pCtx->tr.Sel = env->tr.selector;
1431 pCtx->tr.ValidSel = env->tr.selector;
1432 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
1433 pCtx->tr.u64Base = env->tr.base;
1434 pCtx->tr.u32Limit = env->tr.limit;
1435 pCtx->tr.Attr.u = (env->tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1436
1437 pCtx->ldtr.Sel = env->ldt.selector;
1438 pCtx->ldtr.ValidSel = env->ldt.selector;
1439 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
1440 pCtx->ldtr.u64Base = env->ldt.base;
1441 pCtx->ldtr.u32Limit = env->ldt.limit;
1442 pCtx->ldtr.Attr.u = (env->ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1443
1444 pCtx->idtr.cbIdt = env->idt.limit;
1445 pCtx->idtr.pIdt = env->idt.base;
1446
1447 pCtx->gdtr.cbGdt = env->gdt.limit;
1448 pCtx->gdtr.pGdt = env->gdt.base;
1449
1450 pCtx->rsp = env->regs[R_ESP];
1451 pCtx->rip = env->eip;
1452
1453 pCtx->eflags.u32 = env->eflags;
1454
1455 pCtx->cs.Sel = env->segs[R_CS].selector;
1456 pCtx->cs.ValidSel = env->segs[R_CS].selector;
1457 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1458 pCtx->cs.u64Base = env->segs[R_CS].base;
1459 pCtx->cs.u32Limit = env->segs[R_CS].limit;
1460 pCtx->cs.Attr.u = (env->segs[R_CS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1461
1462 pCtx->ds.Sel = env->segs[R_DS].selector;
1463 pCtx->ds.ValidSel = env->segs[R_DS].selector;
1464 pCtx->ds.fFlags = CPUMSELREG_FLAGS_VALID;
1465 pCtx->ds.u64Base = env->segs[R_DS].base;
1466 pCtx->ds.u32Limit = env->segs[R_DS].limit;
1467 pCtx->ds.Attr.u = (env->segs[R_DS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1468
1469 pCtx->es.Sel = env->segs[R_ES].selector;
1470 pCtx->es.ValidSel = env->segs[R_ES].selector;
1471 pCtx->es.fFlags = CPUMSELREG_FLAGS_VALID;
1472 pCtx->es.u64Base = env->segs[R_ES].base;
1473 pCtx->es.u32Limit = env->segs[R_ES].limit;
1474 pCtx->es.Attr.u = (env->segs[R_ES].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1475
1476 pCtx->fs.Sel = env->segs[R_FS].selector;
1477 pCtx->fs.ValidSel = env->segs[R_FS].selector;
1478 pCtx->fs.fFlags = CPUMSELREG_FLAGS_VALID;
1479 pCtx->fs.u64Base = env->segs[R_FS].base;
1480 pCtx->fs.u32Limit = env->segs[R_FS].limit;
1481 pCtx->fs.Attr.u = (env->segs[R_FS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1482
1483 pCtx->gs.Sel = env->segs[R_GS].selector;
1484 pCtx->gs.ValidSel = env->segs[R_GS].selector;
1485 pCtx->gs.fFlags = CPUMSELREG_FLAGS_VALID;
1486 pCtx->gs.u64Base = env->segs[R_GS].base;
1487 pCtx->gs.u32Limit = env->segs[R_GS].limit;
1488 pCtx->gs.Attr.u = (env->segs[R_GS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1489
1490 pCtx->ss.Sel = env->segs[R_SS].selector;
1491 pCtx->ss.ValidSel = env->segs[R_SS].selector;
1492 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1493 pCtx->ss.u64Base = env->segs[R_SS].base;
1494 pCtx->ss.u32Limit = env->segs[R_SS].limit;
1495 pCtx->ss.Attr.u = (env->segs[R_SS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
1496
1497 pCtx->msrEFER = env->efer;
1498
1499 /* Hardware accelerated raw-mode:
1500 *
1501 * Typically only 32-bit protected-mode code with paging enabled is allowed here.
1502 */
1503 if (HMR3CanExecuteGuest(env->pVM, pCtx))
1504 {
1505 *piException = EXCP_EXECUTE_HM;
1506 return true;
1507 }
1508 return false;
1509 }
1510
1511 /*
1512 * Here we only support 16 and 32 bit protected mode ring-3 code that has no I/O privileges,
1513 * or 32-bit protected mode ring-0 code.
1514 *
1515 * The tests are ordered by the likelihood of being true during normal execution.
1516 */
1517 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1518 {
1519 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1520 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1521 return false;
1522 }
1523
1524#ifndef VBOX_RAW_V86
1525 if (fFlags & VM_MASK) {
1526 STAM_COUNTER_INC(&gStatRefuseVM86);
1527 Log2(("raw mode refused: VM_MASK\n"));
1528 return false;
1529 }
1530#endif
1531
1532 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1533 {
1534#ifndef DEBUG_bird
1535 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1536#endif
1537 return false;
1538 }
1539
1540 if (env->singlestep_enabled)
1541 {
1542 //Log2(("raw mode refused: Single step\n"));
1543 return false;
1544 }
1545
1546 if (!QTAILQ_EMPTY(&env->breakpoints))
1547 {
1548 //Log2(("raw mode refused: Breakpoints\n"));
1549 return false;
1550 }
1551
1552 if (!QTAILQ_EMPTY(&env->watchpoints))
1553 {
1554 //Log2(("raw mode refused: Watchpoints\n"));
1555 return false;
1556 }
1557
1558 u32CR0 = env->cr[0];
1559 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1560 {
1561 STAM_COUNTER_INC(&gStatRefusePaging);
1562 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1563 return false;
1564 }
1565
1566 if (env->cr[4] & CR4_PAE_MASK)
1567 {
1568 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1569 {
1570 STAM_COUNTER_INC(&gStatRefusePAE);
1571 return false;
1572 }
1573 }
1574
1575 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1576 {
1577 if (!EMIsRawRing3Enabled(env->pVM))
1578 return false;
1579
1580 if (!(env->eflags & IF_MASK))
1581 {
1582 STAM_COUNTER_INC(&gStatRefuseIF0);
1583 Log2(("raw mode refused: IF (RawR3)\n"));
1584 return false;
1585 }
1586
1587 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1588 {
1589 STAM_COUNTER_INC(&gStatRefuseWP0);
1590 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1591 return false;
1592 }
1593 }
1594 else
1595 {
1596 if (!EMIsRawRing0Enabled(env->pVM))
1597 return false;
1598
1599 // Let's start with pure 32 bits ring 0 code first
1600 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1601 {
1602 STAM_COUNTER_INC(&gStatRefuseCode16);
1603 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1604 return false;
1605 }
1606
1607 if (EMIsRawRing1Enabled(env->pVM))
1608 {
1609 /* Only ring 0 and 1 supervisor code. */
1610 if (((fFlags >> HF_CPL_SHIFT) & 3) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1611 {
1612 Log2(("raw r0 mode refused: CPL %d\n", (fFlags >> HF_CPL_SHIFT) & 3));
1613 return false;
1614 }
1615 }
1616 /* Only R0. */
1617 else if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1618 {
1619 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1620 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1621 return false;
1622 }
1623
1624 if (!(u32CR0 & CR0_WP_MASK))
1625 {
1626 STAM_COUNTER_INC(&gStatRefuseWP0);
1627 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1628 return false;
1629 }
1630
1631#ifdef VBOX_WITH_RAW_MODE
1632 if (PATMIsPatchGCAddr(env->pVM, eip))
1633 {
1634 Log2(("raw r0 mode forced: patch code\n"));
1635 *piException = EXCP_EXECUTE_RAW;
1636 return true;
1637 }
1638#endif
1639
1640#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1641 if (!(env->eflags & IF_MASK))
1642 {
1643 STAM_COUNTER_INC(&gStatRefuseIF0);
1644 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1645 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1646 return false;
1647 }
1648#endif
1649
1650#ifndef VBOX_WITH_RAW_RING1
1651 if (((env->eflags >> IOPL_SHIFT) & 3) != 0)
1652 {
1653 Log2(("raw r0 mode refused: IOPL %d\n", ((env->eflags >> IOPL_SHIFT) & 3)));
1654 return false;
1655 }
1656#endif
1657 env->state |= CPU_RAW_RING0;
1658 }
1659
1660 /*
1661 * Don't reschedule the first time we're called, because there might be
1662     * special reasons why we're here that are not covered by the above checks.
1663 */
1664 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1665 {
1666 Log2(("raw mode refused: first scheduling\n"));
1667 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1668 return false;
1669 }
1670
1671 /*
1672 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1673 */
1674 if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1675 {
1676 Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector));
1677 STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]);
1678 return false;
1679 }
1680 if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1681 {
1682 Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector));
1683 STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]);
1684 return false;
1685 }
1686 if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1687 {
1688 Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector));
1689 STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]);
1690 return false;
1691 }
1692 if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1693 {
1694 Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector));
1695 STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]);
1696 return false;
1697 }
1698 if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1699 {
1700 Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector));
1701 STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]);
1702 return false;
1703 }
1704 if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
1705 {
1706 Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector));
1707 STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]);
1708 return false;
1709 }
1710
1711/* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
1712 *piException = EXCP_EXECUTE_RAW;
1713 return true;
1714}
1715
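/*
 * Example (sketch only; assumes the enclosing predicate is the
 * remR3CanExecuteRaw check used from the QEmu cpu_exec glue): a caller is
 * expected to leave the emulation loop with the exception code set above,
 * roughly like this:
 *
 *     int iException;
 *     if (remR3CanExecuteRaw(env, env->eip, env->hflags, &iException))
 *         // exit recompiled execution with iException, which is either
 *         // EXCP_EXECUTE_RAW or EXCP_EXECUTE_HM, so EM can reschedule.
 */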
1716
1717#ifdef VBOX_WITH_RAW_MODE
1718/**
1719 * Fetches a code byte.
1720 *
1721 * @returns Success indicator (bool) for ease of use.
1722 * @param env The CPU environment structure.
1723 * @param GCPtrInstr Where to fetch code.
1724 * @param pu8Byte Where to store the byte on success
1725 */
1726bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1727{
1728 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1729 if (RT_SUCCESS(rc))
1730 return true;
1731 return false;
1732}
1733#endif /* VBOX_WITH_RAW_MODE */
1734
1735
1736/**
1737 * Flush (or invalidate if you like) page table/dir entry.
1738 *
1739 * (invlpg instruction; tlb_flush_page)
1740 *
1741 * @param env Pointer to cpu environment.
1742 * @param   GCPtr   The virtual address whose page table/directory entry should be invalidated.
1743 */
1744void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
1745{
1746 PVM pVM = env->pVM;
1747 PCPUMCTX pCtx;
1748 int rc;
1749
1750 Assert(EMRemIsLockOwner(env->pVM));
1751
1752 /*
1753 * When we're replaying invlpg instructions or restoring a saved
1754 * state we disable this path.
1755 */
1756 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1757 return;
1758 LogFlow(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1759 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1760
1761 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1762
1763 /*
1764 * Update the control registers before calling PGMFlushPage.
1765 */
1766 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1767 Assert(pCtx);
1768 pCtx->cr0 = env->cr[0];
1769 pCtx->cr3 = env->cr[3];
1770#ifdef VBOX_WITH_RAW_MODE
1771 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1772 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1773#endif
1774 pCtx->cr4 = env->cr[4];
1775
1776 /*
1777 * Let PGM do the rest.
1778 */
1779 Assert(env->pVCpu);
1780 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1781 if (RT_FAILURE(rc))
1782 {
1783 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1784 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1785 }
1786 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1787}
1788
1789
1790#ifndef REM_PHYS_ADDR_IN_TLB
1791/** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
1792void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
1793{
1794 void *pv;
1795 int rc;
1796
1797
1798 /* Address must be aligned enough to fiddle with lower bits */
1799 Assert((physAddr & 0x3) == 0);
1800 /*AssertMsg((env1->a20_mask & physAddr) == physAddr, ("%llx\n", (uint64_t)physAddr));*/
1801
1802 STAM_PROFILE_START(&gStatGCPhys2HCVirt, a);
1803 rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
1804 STAM_PROFILE_STOP(&gStatGCPhys2HCVirt, a);
1805 Assert( rc == VINF_SUCCESS
1806 || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
1807 || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
1808 || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
1809 if (RT_FAILURE(rc))
1810 return (void *)1;
1811 if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
1812 return (void *)((uintptr_t)pv | 2);
1813 return pv;
1814}
1815#endif /* REM_PHYS_ADDR_IN_TLB */
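/*
 * Sketch (assumption, not the actual TLB consumer): the two low bits of the
 * pointer returned above encode the lookup outcome, so a caller could decode
 * it along these lines:
 *
 *     void *pv = remR3TlbGCPhys2Ptr(env, GCPhys, fWritable);
 *     if ((uintptr_t)pv == 1)          // unassigned or catch-all page
 *         take the MMIO/handler path;
 *     else if ((uintptr_t)pv & 2)      // VINF_PGM_PHYS_TLB_CATCH_WRITE
 *         read via ((uintptr_t)pv & ~(uintptr_t)3), but trap writes;
 *     else                             // plain RAM
 *         access the page directly through pv.
 */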
1816
1817
1818/**
1819 * Called from tlb_protect_code in order to write monitor a code page.
1820 *
1821 * @param env Pointer to the CPU environment.
1822 * @param GCPtr Code page to monitor
1823 */
1824void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
1825{
1826#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1827 Assert(env->pVM->rem.s.fInREM);
1828 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1829 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1830 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1831 && !(env->eflags & VM_MASK) /* no V86 mode */
1832 && !HMIsEnabled(env->pVM))
1833 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1834#endif
1835}
1836
1837
1838/**
1839 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1840 *
1841 * @param env Pointer to the CPU environment.
1842 * @param GCPtr Code page to monitor
1843 */
1844void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
1845{
1846 Assert(env->pVM->rem.s.fInREM);
1847#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1848 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1849 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1850 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1851 && !(env->eflags & VM_MASK) /* no V86 mode */
1852 && !HMIsEnabled(env->pVM))
1853 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1854#endif
1855}
1856
1857
1858/**
1859 * Called when the CPU is initialized, any of the CRx registers are changed or
1860 * when the A20 line is modified.
1861 *
1862 * @param env Pointer to the CPU environment.
1863 * @param fGlobal Set if the flush is global.
1864 */
1865void remR3FlushTLB(CPUX86State *env, bool fGlobal)
1866{
1867 PVM pVM = env->pVM;
1868 PCPUMCTX pCtx;
1869 Assert(EMRemIsLockOwner(pVM));
1870
1871 /*
1872 * When we're replaying invlpg instructions or restoring a saved
1873 * state we disable this path.
1874 */
1875 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1876 return;
1877 Assert(pVM->rem.s.fInREM);
1878
1879 /*
1880 * The caller doesn't check cr4, so we have to do that for ourselves.
1881 */
1882 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1883 fGlobal = true;
1884 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1885
1886 /*
1887 * Update the control registers before calling PGMR3FlushTLB.
1888 */
1889 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1890 Assert(pCtx);
1891 pCtx->cr0 = env->cr[0];
1892 pCtx->cr3 = env->cr[3];
1893#ifdef VBOX_WITH_RAW_MODE
1894 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1895 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1896#endif
1897 pCtx->cr4 = env->cr[4];
1898
1899 /*
1900 * Let PGM do the rest.
1901 */
1902 Assert(env->pVCpu);
1903 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1904}
1905
1906
1907/**
1908 * Called when any of the cr0, cr4 or efer registers is updated.
1909 *
1910 * @param env Pointer to the CPU environment.
1911 */
1912void remR3ChangeCpuMode(CPUX86State *env)
1913{
1914 PVM pVM = env->pVM;
1915 uint64_t efer;
1916 PCPUMCTX pCtx;
1917 int rc;
1918
1919 /*
1920 * When we're replaying loads or restoring a saved
1921 * state this path is disabled.
1922 */
1923 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1924 return;
1925 Assert(pVM->rem.s.fInREM);
1926
1927 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1928 Assert(pCtx);
1929
1930 /*
1931 * Notify PGM about WP0 being enabled (like CPUSetGuestCR0 does).
1932 */
1933 if (((env->cr[0] ^ pCtx->cr0) & X86_CR0_WP) && (env->cr[0] & X86_CR0_WP))
1934 PGMCr0WpEnabled(env->pVCpu);
1935
1936 /*
1937 * Update the control registers before calling PGMChangeMode()
1938 * as it may need to map whatever cr3 is pointing to.
1939 */
1940 pCtx->cr0 = env->cr[0];
1941 pCtx->cr3 = env->cr[3];
1942#ifdef VBOX_WITH_RAW_MODE
1943 if (((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
1944 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1945#endif
1946 pCtx->cr4 = env->cr[4];
1947#ifdef TARGET_X86_64
1948 efer = env->efer;
1949 pCtx->msrEFER = efer;
1950#else
1951 efer = 0;
1952#endif
1953 Assert(env->pVCpu);
1954 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1955 if (rc != VINF_SUCCESS)
1956 {
1957 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1958 {
1959 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1960 remR3RaiseRC(env->pVM, rc);
1961 }
1962 else
1963 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1964 }
1965}
1966
1967
1968/**
1969 * Called from compiled code to run dma.
1970 *
1971 * @param env Pointer to the CPU environment.
1972 */
1973void remR3DmaRun(CPUX86State *env)
1974{
1975 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1976 PDMR3DmaRun(env->pVM);
1977 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1978}
1979
1980
1981/**
1982 * Called from compiled code to schedule pending timers in VMM
1983 *
1984 * @param env Pointer to the CPU environment.
1985 */
1986void remR3TimersRun(CPUX86State *env)
1987{
1988 LogFlow(("remR3TimersRun:\n"));
1989 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1990 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1991 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1992 TMR3TimerQueuesDo(env->pVM);
1993 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1994 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1995}
1996
1997
1998/**
1999 * Record a trap occurrence.
2000 *
2001 * @returns VBox status code
2002 * @param env Pointer to the CPU environment.
2003 * @param uTrap Trap number
2004 * @param uErrorCode Error code
2005 * @param pvNextEIP Next EIP
2006 */
2007int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
2008{
2009 PVM pVM = env->pVM;
2010#ifdef VBOX_WITH_STATISTICS
2011 static STAMCOUNTER s_aStatTrap[255];
2012 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
2013#endif
2014
2015#ifdef VBOX_WITH_STATISTICS
2016 if (uTrap < 255)
2017 {
2018 if (!s_aRegisters[uTrap])
2019 {
2020 char szStatName[64];
2021 s_aRegisters[uTrap] = true;
2022 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
2023 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
2024 }
2025 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
2026 }
2027#endif
2028 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2029    if (   uTrap < 0x20
2030 && (env->cr[0] & X86_CR0_PE)
2031 && !(env->eflags & X86_EFL_VM))
2032 {
2033#ifdef DEBUG
2034 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
2035#endif
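        /* Guard against trap loops: if the very same trap (vector, EIP and
           CR2 all unchanged) repeats more than 512 times, raise
           VERR_REM_TOO_MANY_TRAPS so the caller can fall back. */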
2036        if (pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
2037 {
2038 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
2039 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
2040 return VERR_REM_TOO_MANY_TRAPS;
2041 }
2042        if (pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
2043 {
2044 Log(("remR3NotifyTrap: uTrap=%#x set as pending\n", uTrap));
2045 pVM->rem.s.cPendingExceptions = 1;
2046 }
2047 pVM->rem.s.uPendingException = uTrap;
2048 pVM->rem.s.uPendingExcptEIP = env->eip;
2049 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2050 }
2051 else
2052 {
2053 pVM->rem.s.cPendingExceptions = 0;
2054 pVM->rem.s.uPendingException = uTrap;
2055 pVM->rem.s.uPendingExcptEIP = env->eip;
2056 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
2057 }
2058 return VINF_SUCCESS;
2059}
2060
2061
2062/**
2063 * Clear the current active trap.
2064 *
2065 * @param pVM VM Handle.
2066 */
2067void remR3TrapClear(PVM pVM)
2068{
2069 pVM->rem.s.cPendingExceptions = 0;
2070 pVM->rem.s.uPendingException = 0;
2071 pVM->rem.s.uPendingExcptEIP = 0;
2072 pVM->rem.s.uPendingExcptCR2 = 0;
2073}
2074
2075
2076/**
2077 * Record previous call instruction addresses.
2078 *
2079 * @param env Pointer to the CPU environment.
2080 */
2081void remR3RecordCall(CPUX86State *env)
2082{
2083#ifdef VBOX_WITH_RAW_MODE
2084 CSAMR3RecordCallAddress(env->pVM, env->eip);
2085#endif
2086}
2087
2088
2089/**
2090 * Syncs the internal REM state with the VM.
2091 *
2092 * This must be called before REMR3Run() is invoked whenever the REM
2093 * state is not up to date. Calling it several times in a row is not
2094 * permitted.
2095 *
2096 * @returns VBox status code.
2097 *
2098 * @param pVM VM Handle.
2099 * @param pVCpu VMCPU Handle.
2100 *
2101 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
2102 *        not do this since the majority of the callers don't want any unnecessary events
2103 * pending that would immediately interrupt execution.
2104 */
2105REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
2106{
2107 register const CPUMCTX *pCtx;
2108 register unsigned fFlags;
2109 unsigned i;
2110 TRPMEVENT enmType;
2111 uint8_t u8TrapNo;
2112 uint32_t uCpl;
2113 int rc;
2114
2115 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
2116 Log2(("REMR3State:\n"));
2117
2118 pVM->rem.s.Env.pVCpu = pVCpu;
2119 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
2120
2121 Assert(!pVM->rem.s.fInREM);
2122 pVM->rem.s.fInStateSync = true;
2123
2124 /*
2125 * If we have to flush TBs, do that immediately.
2126 */
2127 if (pVM->rem.s.fFlushTBs)
2128 {
2129 STAM_COUNTER_INC(&gStatFlushTBs);
2130 tb_flush(&pVM->rem.s.Env);
2131 pVM->rem.s.fFlushTBs = false;
2132 }
2133
2134 /*
2135 * Copy the registers which require no special handling.
2136 */
2137#ifdef TARGET_X86_64
2138    /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
2139 Assert(R_EAX == 0);
2140 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
2141 Assert(R_ECX == 1);
2142 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
2143 Assert(R_EDX == 2);
2144 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
2145 Assert(R_EBX == 3);
2146 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
2147 Assert(R_ESP == 4);
2148 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
2149 Assert(R_EBP == 5);
2150 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
2151 Assert(R_ESI == 6);
2152 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
2153 Assert(R_EDI == 7);
2154 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
2155 pVM->rem.s.Env.regs[8] = pCtx->r8;
2156 pVM->rem.s.Env.regs[9] = pCtx->r9;
2157 pVM->rem.s.Env.regs[10] = pCtx->r10;
2158 pVM->rem.s.Env.regs[11] = pCtx->r11;
2159 pVM->rem.s.Env.regs[12] = pCtx->r12;
2160 pVM->rem.s.Env.regs[13] = pCtx->r13;
2161 pVM->rem.s.Env.regs[14] = pCtx->r14;
2162 pVM->rem.s.Env.regs[15] = pCtx->r15;
2163
2164 pVM->rem.s.Env.eip = pCtx->rip;
2165
2166 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
2167#else
2168 Assert(R_EAX == 0);
2169 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
2170 Assert(R_ECX == 1);
2171 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
2172 Assert(R_EDX == 2);
2173 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
2174 Assert(R_EBX == 3);
2175 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
2176 Assert(R_ESP == 4);
2177 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
2178 Assert(R_EBP == 5);
2179 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
2180 Assert(R_ESI == 6);
2181 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
2182 Assert(R_EDI == 7);
2183 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
2184 pVM->rem.s.Env.eip = pCtx->eip;
2185
2186 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
2187#endif
2188
2189 pVM->rem.s.Env.cr[2] = pCtx->cr2;
2190
2191 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
2192    for (i = 0; i < 8; i++)
2193 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
2194
2195#ifdef HF_HALTED_MASK /** @todo remove me when we're up to date again. */
2196 /*
2197 * Clear the halted hidden flag (the interrupt waking up the CPU can
2198 * have been dispatched in raw mode).
2199 */
2200 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
2201#endif
2202
2203 /*
2204 * Replay invlpg? Only if we're not flushing the TLB.
2205 */
2206 fFlags = CPUMR3RemEnter(pVCpu, &uCpl);
2207 LogFlow(("CPUMR3RemEnter %x %x\n", fFlags, uCpl));
2208 if (pVM->rem.s.cInvalidatedPages)
2209 {
2210 if (!(fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH))
2211 {
2212 RTUINT i;
2213
2214 pVM->rem.s.fIgnoreCR3Load = true;
2215 pVM->rem.s.fIgnoreInvlPg = true;
2216 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
2217 {
2218 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
2219 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
2220 }
2221 pVM->rem.s.fIgnoreInvlPg = false;
2222 pVM->rem.s.fIgnoreCR3Load = false;
2223 }
2224 pVM->rem.s.cInvalidatedPages = 0;
2225 }
2226
2227 /* Replay notification changes. */
2228 REMR3ReplayHandlerNotifications(pVM);
2229
2230 /* Update MSRs; before CRx registers! */
2231 pVM->rem.s.Env.efer = pCtx->msrEFER;
2232 pVM->rem.s.Env.star = pCtx->msrSTAR;
2233 pVM->rem.s.Env.pat = pCtx->msrPAT;
2234#ifdef TARGET_X86_64
2235 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
2236 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
2237 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
2238 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
2239
2240 /* Update the internal long mode activate flag according to the new EFER value. */
2241 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
2242 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
2243 else
2244 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
2245#endif
2246
2247 /* Update the inhibit IRQ mask. */
2248 pVM->rem.s.Env.hflags &= ~HF_INHIBIT_IRQ_MASK;
2249 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2250 {
2251 RTGCPTR InhibitPC = EMGetInhibitInterruptsPC(pVCpu);
2252 if (InhibitPC == pCtx->rip)
2253 pVM->rem.s.Env.hflags |= HF_INHIBIT_IRQ_MASK;
2254 else
2255 {
2256 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#1)\n", (RTGCPTR)pCtx->rip, InhibitPC));
2257 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2258 }
2259 }
2260
2261 /* Update the inhibit NMI mask. */
2262 pVM->rem.s.Env.hflags2 &= ~HF2_NMI_MASK;
2263 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2264 pVM->rem.s.Env.hflags2 |= HF2_NMI_MASK;
2265
2266 /*
2267 * Sync the A20 gate.
2268 */
2269 bool fA20State = PGMPhysIsA20Enabled(pVCpu);
2270 if (fA20State != RT_BOOL(pVM->rem.s.Env.a20_mask & RT_BIT(20)))
2271 {
2272 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2273 cpu_x86_set_a20(&pVM->rem.s.Env, fA20State);
2274 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2275 }
2276
2277 /*
2278 * Registers which are rarely changed and require special handling / order when changed.
2279 */
2280 if (fFlags & ( CPUM_CHANGED_GLOBAL_TLB_FLUSH
2281 | CPUM_CHANGED_CR4
2282 | CPUM_CHANGED_CR0
2283 | CPUM_CHANGED_CR3
2284 | CPUM_CHANGED_GDTR
2285 | CPUM_CHANGED_IDTR
2286 | CPUM_CHANGED_SYSENTER_MSR
2287 | CPUM_CHANGED_LDTR
2288 | CPUM_CHANGED_CPUID
2289 | CPUM_CHANGED_FPU_REM
2290 )
2291 )
2292 {
2293 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
2294 {
2295 pVM->rem.s.fIgnoreCR3Load = true;
2296 tlb_flush(&pVM->rem.s.Env, true);
2297 pVM->rem.s.fIgnoreCR3Load = false;
2298 }
2299
2300 /* CR4 before CR0! */
2301 if (fFlags & CPUM_CHANGED_CR4)
2302 {
2303 pVM->rem.s.fIgnoreCR3Load = true;
2304 pVM->rem.s.fIgnoreCpuMode = true;
2305 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
2306 pVM->rem.s.fIgnoreCpuMode = false;
2307 pVM->rem.s.fIgnoreCR3Load = false;
2308 }
2309
2310 if (fFlags & CPUM_CHANGED_CR0)
2311 {
2312 pVM->rem.s.fIgnoreCR3Load = true;
2313 pVM->rem.s.fIgnoreCpuMode = true;
2314 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
2315 pVM->rem.s.fIgnoreCpuMode = false;
2316 pVM->rem.s.fIgnoreCR3Load = false;
2317 }
2318
2319 if (fFlags & CPUM_CHANGED_CR3)
2320 {
2321 pVM->rem.s.fIgnoreCR3Load = true;
2322 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
2323 pVM->rem.s.fIgnoreCR3Load = false;
2324 }
2325
2326 if (fFlags & CPUM_CHANGED_GDTR)
2327 {
2328 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
2329 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
2330 }
2331
2332 if (fFlags & CPUM_CHANGED_IDTR)
2333 {
2334 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
2335 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
2336 }
2337
2338 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
2339 {
2340 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
2341 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
2342 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
2343 }
2344
2345 if (fFlags & CPUM_CHANGED_LDTR)
2346 {
2347 if (pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2348 {
2349 pVM->rem.s.Env.ldt.selector = pCtx->ldtr.Sel;
2350 pVM->rem.s.Env.ldt.newselector = 0;
2351 pVM->rem.s.Env.ldt.fVBoxFlags = pCtx->ldtr.fFlags;
2352 pVM->rem.s.Env.ldt.base = pCtx->ldtr.u64Base;
2353 pVM->rem.s.Env.ldt.limit = pCtx->ldtr.u32Limit;
2354 pVM->rem.s.Env.ldt.flags = (pCtx->ldtr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;
2355 }
2356 else
2357 {
2358 AssertFailed(); /* Shouldn't happen, see cpumR3LoadExec. */
2359 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr.Sel);
2360 }
2361 }
2362
2363 if (fFlags & CPUM_CHANGED_CPUID)
2364 {
2365 uint32_t u32Dummy;
2366
2367 /*
2368 * Get the CPUID features.
2369 */
2370 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2371 CPUMGetGuestCpuId(pVCpu, 0x80000001, 0, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2372 }
2373
2374 /* Sync FPU state after CR4, CPUID and EFER (!). */
2375 if (fFlags & CPUM_CHANGED_FPU_REM)
2376 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87); /* 'save' is an excellent name. */
2377 }
2378
2379 /*
2380 * Sync TR unconditionally to make life simpler.
2381 */
2382 pVM->rem.s.Env.tr.selector = pCtx->tr.Sel;
2383 pVM->rem.s.Env.tr.newselector = 0;
2384 pVM->rem.s.Env.tr.fVBoxFlags = pCtx->tr.fFlags;
2385 pVM->rem.s.Env.tr.base = pCtx->tr.u64Base;
2386 pVM->rem.s.Env.tr.limit = pCtx->tr.u32Limit;
2387 pVM->rem.s.Env.tr.flags = (pCtx->tr.Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT;
2388 /* Note! do_interrupt will fault if the busy flag is still set... */ /** @todo so fix do_interrupt then! */
2389 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2390
2391 /*
2392 * Update selector registers.
2393 *
2394 * This must be done *after* we've synced gdt, ldt and crX registers
2395     * since we're reading the GDT/LDT in sync_seg. This will happen with
2396 * saved state which takes a quick dip into rawmode for instance.
2397 *
2398     * CPL/Stack; Note! Check this one first as the CPL might have changed.
2399 * The wrong CPL can cause QEmu to raise an exception in sync_seg!!
2400 */
2401 cpu_x86_set_cpl(&pVM->rem.s.Env, uCpl);
2402 /* Note! QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2403#define SYNC_IN_SREG(a_pEnv, a_SReg, a_pRemSReg, a_pVBoxSReg) \
2404 do \
2405 { \
2406 if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, a_pVBoxSReg)) \
2407 { \
2408 cpu_x86_load_seg_cache(a_pEnv, R_##a_SReg, \
2409 (a_pVBoxSReg)->Sel, \
2410 (a_pVBoxSReg)->u64Base, \
2411 (a_pVBoxSReg)->u32Limit, \
2412 ((a_pVBoxSReg)->Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT); \
2413 (a_pRemSReg)->fVBoxFlags = (a_pVBoxSReg)->fFlags; \
2414 } \
2415 /* This only-reload-if-changed stuff is the old approach, we should ditch it. */ \
2416 else if ((a_pRemSReg)->selector != (a_pVBoxSReg)->Sel) \
2417 { \
2418 Log2(("REMR3State: " #a_SReg " changed from %04x to %04x!\n", \
2419 (a_pRemSReg)->selector, (a_pVBoxSReg)->Sel)); \
2420 sync_seg(a_pEnv, R_##a_SReg, (a_pVBoxSReg)->Sel); \
2421 if ((a_pRemSReg)->newselector) \
2422 STAM_COUNTER_INC(&gStatSelOutOfSync[R_##a_SReg]); \
2423 } \
2424 else \
2425 (a_pRemSReg)->newselector = 0; \
2426 } while (0)
2427
2428 SYNC_IN_SREG(&pVM->rem.s.Env, CS, &pVM->rem.s.Env.segs[R_CS], &pCtx->cs);
2429 SYNC_IN_SREG(&pVM->rem.s.Env, SS, &pVM->rem.s.Env.segs[R_SS], &pCtx->ss);
2430 SYNC_IN_SREG(&pVM->rem.s.Env, DS, &pVM->rem.s.Env.segs[R_DS], &pCtx->ds);
2431 SYNC_IN_SREG(&pVM->rem.s.Env, ES, &pVM->rem.s.Env.segs[R_ES], &pCtx->es);
2432 SYNC_IN_SREG(&pVM->rem.s.Env, FS, &pVM->rem.s.Env.segs[R_FS], &pCtx->fs);
2433 SYNC_IN_SREG(&pVM->rem.s.Env, GS, &pVM->rem.s.Env.segs[R_GS], &pCtx->gs);
2434 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2435 * be the same but not the base/limit. */
2436
2437 /*
2438 * Check for traps.
2439 */
2440 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2441 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2442 if (RT_SUCCESS(rc))
2443 {
2444#ifdef DEBUG
2445 if (u8TrapNo == 0x80)
2446 {
2447 remR3DumpLnxSyscall(pVCpu);
2448 remR3DumpOBsdSyscall(pVCpu);
2449 }
2450#endif
2451
2452 pVM->rem.s.Env.exception_index = u8TrapNo;
2453 if (enmType != TRPM_SOFTWARE_INT)
2454 {
2455 pVM->rem.s.Env.exception_is_int = 0;
2456#ifdef IEM_VERIFICATION_MODE /* Ugly hack, needs proper fixing. */
2457 pVM->rem.s.Env.exception_is_int = enmType == TRPM_HARDWARE_INT ? 0x42 : 0;
2458#endif
2459 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2460 }
2461 else
2462 {
2463 /*
2464             * There are two 1-byte opcodes and one 2-byte opcode for software interrupts.
2465             * We ASSUME that there are no prefixes, set the default to 2 bytes, and check
2466             * for int3 and into.
2467 */
2468 pVM->rem.s.Env.exception_is_int = 1;
2469 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
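            /* (The +2 default above assumes the 2-byte 0xCD imm8 INT n encoding.) */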
2470 /* int 3 may be generated by one-byte 0xcc */
2471 if (u8TrapNo == 3)
2472 {
2473 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2474 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2475 }
2476 /* int 4 may be generated by one-byte 0xce */
2477 else if (u8TrapNo == 4)
2478 {
2479 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2480 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2481 }
2482 }
2483
2484 /* get error code and cr2 if needed. */
2485 if (enmType == TRPM_TRAP)
2486 {
2487 switch (u8TrapNo)
2488 {
2489 case X86_XCPT_PF:
2490 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2491 /* fallthru */
2492 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2493 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2494 break;
2495
2496 case X86_XCPT_AC: case X86_XCPT_DF:
2497 default:
2498 pVM->rem.s.Env.error_code = 0;
2499 break;
2500 }
2501 }
2502 else
2503 pVM->rem.s.Env.error_code = 0;
2504
2505 /*
2506 * We can now reset the active trap since the recompiler is gonna have a go at it.
2507 */
2508 rc = TRPMResetTrap(pVCpu);
2509 AssertRC(rc);
2510 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2511 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2512 }
2513
2514 /*
2515 * Clear old interrupt request flags; Check for pending hardware interrupts.
2516 * (See @remark for why we don't check for other FFs.)
2517 */
2518 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2519 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2520 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2521 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2522
2523 /*
2524 * We're now in REM mode.
2525 */
2526 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2527 pVM->rem.s.fInREM = true;
2528 pVM->rem.s.fInStateSync = false;
2529 pVM->rem.s.cCanExecuteRaw = 0;
2530 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2531 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2532 return VINF_SUCCESS;
2533}
2534
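/*
 * Usage sketch (assumption; the real caller is the EM execution loop): the
 * intended protocol around REMR3Run() is
 *
 *     rc = REMR3State(pVM, pVCpu);       // sync VMM state into REM, once
 *     if (RT_SUCCESS(rc))
 *         rc = REMR3Run(pVM, pVCpu);     // execute recompiled code
 *     rc2 = REMR3StateBack(pVM, pVCpu);  // sync the REM state back, once
 *
 * Neither REMR3State nor REMR3StateBack may be called several times in a
 * row; see their function documentation.
 */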
2535
2536/**
2537 * Syncs back changes in the REM state to the VM state.
2538 *
2539 * This must be called after invoking REMR3Run().
2540 * Calling it several times in a row is not permitted.
2541 *
2542 * @returns VBox status code.
2543 *
2544 * @param pVM VM Handle.
2545 * @param pVCpu VMCPU Handle.
2546 */
2547REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
2548{
2549 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2550 Assert(pCtx);
2551 unsigned i;
2552
2553 STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
2554 Log2(("REMR3StateBack:\n"));
2555 Assert(pVM->rem.s.fInREM);
2556
2557 /*
2558 * Copy back the registers.
2559 * This is done in the order they are declared in the CPUMCTX structure.
2560 */
2561
2562 /** @todo FOP */
2563 /** @todo FPUIP */
2564 /** @todo CS */
2565 /** @todo FPUDP */
2566 /** @todo DS */
2567
2568 /** @todo check if FPU/XMM was actually used in the recompiler */
2569 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->pXStateR3->x87);
2570//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2571
2572#ifdef TARGET_X86_64
2573    /* Note that the high dwords of 64-bit registers are undefined in 32-bit mode and after a mode change. */
2574 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2575 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2576 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2577 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2578 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2579 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2580 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2581 pCtx->r8 = pVM->rem.s.Env.regs[8];
2582 pCtx->r9 = pVM->rem.s.Env.regs[9];
2583 pCtx->r10 = pVM->rem.s.Env.regs[10];
2584 pCtx->r11 = pVM->rem.s.Env.regs[11];
2585 pCtx->r12 = pVM->rem.s.Env.regs[12];
2586 pCtx->r13 = pVM->rem.s.Env.regs[13];
2587 pCtx->r14 = pVM->rem.s.Env.regs[14];
2588 pCtx->r15 = pVM->rem.s.Env.regs[15];
2589
2590 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2591
2592#else
2593 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2594 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2595 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2596 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2597 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2598 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2599 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2600
2601 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2602#endif
2603
2604#define SYNC_BACK_SREG(a_sreg, a_SREG) \
2605 do \
2606 { \
2607 pCtx->a_sreg.Sel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2608        if (!pVM->rem.s.Env.segs[R_##a_SREG].newselector) \
2609 { \
2610 pCtx->a_sreg.ValidSel = pVM->rem.s.Env.segs[R_##a_SREG].selector; \
2611 pCtx->a_sreg.fFlags = CPUMSELREG_FLAGS_VALID; \
2612 pCtx->a_sreg.u64Base = pVM->rem.s.Env.segs[R_##a_SREG].base; \
2613 pCtx->a_sreg.u32Limit = pVM->rem.s.Env.segs[R_##a_SREG].limit; \
2614 /* Note! QEmu saves the 2nd dword of the descriptor; we (VT-x/AMD-V) keep only the attributes! */ \
2615 pCtx->a_sreg.Attr.u = (pVM->rem.s.Env.segs[R_##a_SREG].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK; \
2616 } \
2617 else \
2618 { \
2619 pCtx->a_sreg.fFlags = 0; \
2620 STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_##a_SREG]); \
2621 } \
2622 } while (0)
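    /* Note: this is the inverse of SYNC_IN_SREG above; the attribute word
       travels into QEmu as ((Attr.u & SEL_FLAGS_SMASK) << SEL_FLAGS_SHIFT)
       and is recovered here with the matching shift and mask. */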
2623
2624 SYNC_BACK_SREG(es, ES);
2625 SYNC_BACK_SREG(cs, CS);
2626 SYNC_BACK_SREG(ss, SS);
2627 SYNC_BACK_SREG(ds, DS);
2628 SYNC_BACK_SREG(fs, FS);
2629 SYNC_BACK_SREG(gs, GS);
2630
2631#ifdef TARGET_X86_64
2632 pCtx->rip = pVM->rem.s.Env.eip;
2633 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2634#else
2635 pCtx->eip = pVM->rem.s.Env.eip;
2636 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2637#endif
2638
2639 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2640 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2641 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2642#ifdef VBOX_WITH_RAW_MODE
2643 if (((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
2644 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2645#endif
2646 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2647
2648 for (i = 0; i < 8; i++)
2649 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2650
2651 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2652 if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
2653 {
2654 pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
2655 STAM_COUNTER_INC(&gStatREMGDTChange);
2656#ifdef VBOX_WITH_RAW_MODE
2657 if (!HMIsEnabled(pVM))
2658 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2659#endif
2660 }
2661
2662 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2663 if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
2664 {
2665 pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
2666 STAM_COUNTER_INC(&gStatREMIDTChange);
2667#ifdef VBOX_WITH_RAW_MODE
2668 if (!HMIsEnabled(pVM))
2669 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2670#endif
2671 }
2672
2673 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2674 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2675 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2676 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2677 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2678 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2679 )
2680 {
2681 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2682 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2683 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2684 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2685 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2686 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2687 STAM_COUNTER_INC(&gStatREMLDTRChange);
2688#ifdef VBOX_WITH_RAW_MODE
2689 if (!HMIsEnabled(pVM))
2690 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2691#endif
2692 }
2693
2694 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2695 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2696 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2697 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2698 /* Qemu and AMD/Intel have different ideas about the busy flag ... */ /** @todo just fix qemu! */
2699 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2700 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT
2701 : 0)
2702 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2703 )
2704 {
2705 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2706 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2707 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2708 (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2709 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT : 0));
2710 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2711 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2712 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2713 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2714 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2715 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2716 if (pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE)
2717 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> SEL_FLAGS_SHIFT;
2718 STAM_COUNTER_INC(&gStatREMTRChange);
2719#ifdef VBOX_WITH_RAW_MODE
2720 if (!HMIsEnabled(pVM))
2721 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2722#endif
2723 }
2724
2725 /* Sysenter MSR */
2726 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2727 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2728 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2729
2730 /* System MSRs. */
2731 pCtx->msrEFER = pVM->rem.s.Env.efer;
2732 pCtx->msrSTAR = pVM->rem.s.Env.star;
2733 pCtx->msrPAT = pVM->rem.s.Env.pat;
2734#ifdef TARGET_X86_64
2735 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2736 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2737 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2738 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2739#endif
2740
2741 /* Inhibit interrupt flag. */
2742 if (pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
2743 {
2744        Log(("Setting VMCPU_FF_INHIBIT_INTERRUPTS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2745 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2746 VMCPU_FF_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2747 }
2748 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2749 {
2750 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv (REM#2)\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2751 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2752 }
2753
2754 /* Inhibit NMI flag. */
2755 if (pVM->rem.s.Env.hflags2 & HF2_NMI_MASK)
2756 {
2757        Log(("Setting VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2758 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
2759 }
2760 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2761 {
2762 Log(("Clearing VMCPU_FF_BLOCK_NMIS at %RGv (REM)\n", (RTGCPTR)pCtx->rip));
2763 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);
2764 }
2765
2766 remR3TrapClear(pVM);
2767
2768 /*
2769 * Check for traps.
2770 */
2771 if ( pVM->rem.s.Env.exception_index >= 0
2772 && pVM->rem.s.Env.exception_index < 256)
2773 {
2774 /* This cannot be a hardware-interrupt because exception_index < EXCP_INTERRUPT. */
2775 int rc;
2776
2777 Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
2778 TRPMEVENT enmType = pVM->rem.s.Env.exception_is_int ? TRPM_SOFTWARE_INT : TRPM_TRAP;
2779 rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, enmType);
2780 AssertRC(rc);
2781 if (enmType == TRPM_TRAP)
2782 {
2783 switch (pVM->rem.s.Env.exception_index)
2784 {
2785 case X86_XCPT_PF:
2786 TRPMSetFaultAddress(pVCpu, pCtx->cr2);
2787 /* fallthru */
2788 case X86_XCPT_TS: case X86_XCPT_NP: case X86_XCPT_SS: case X86_XCPT_GP:
2789 case X86_XCPT_AC: case X86_XCPT_DF: /* 0 */
2790 TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
2791 break;
2792 }
2793 }
2794 }
2795
2796 /*
2797     * We're no longer in REM mode.
2798 */
2799 CPUMR3RemLeave(pVCpu,
2800 HMIsEnabled(pVM)
2801 || ( pVM->rem.s.Env.segs[R_SS].newselector
2802 | pVM->rem.s.Env.segs[R_GS].newselector
2803 | pVM->rem.s.Env.segs[R_FS].newselector
2804 | pVM->rem.s.Env.segs[R_ES].newselector
2805 | pVM->rem.s.Env.segs[R_DS].newselector
2806 | pVM->rem.s.Env.segs[R_CS].newselector) == 0
2807 );
2808 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
2809 pVM->rem.s.fInREM = false;
2810 pVM->rem.s.pCtx = NULL;
2811 pVM->rem.s.Env.pVCpu = NULL;
2812 STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
2813 Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
2814 return VINF_SUCCESS;
2815}
2816
2817
2818/**
2819 * This is called by the disassembler when it wants to update the cpu state
2820 * before, for instance, doing a register dump.
2821 */
2822static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2823{
2824 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2825 unsigned i;
2826
2827 Assert(pVM->rem.s.fInREM);
2828
2829 /*
2830 * Copy back the registers.
2831 * This is done in the order they are declared in the CPUMCTX structure.
2832 */
2833
2834 PX86FXSTATE pFpuCtx = &pCtx->pXStateR3->x87;
2835 /** @todo FOP */
2836 /** @todo FPUIP */
2837 /** @todo CS */
2838 /** @todo FPUDP */
2839 /** @todo DS */
2840 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2841 pFpuCtx->MXCSR = 0;
2842 pFpuCtx->MXCSR_MASK = 0;
2843
2844 /** @todo check if FPU/XMM was actually used in the recompiler */
2845 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)pFpuCtx);
2846//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2847
2848#ifdef TARGET_X86_64
2849 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2850 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2851 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2852 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2853 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2854 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2855 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2856 pCtx->r8 = pVM->rem.s.Env.regs[8];
2857 pCtx->r9 = pVM->rem.s.Env.regs[9];
2858 pCtx->r10 = pVM->rem.s.Env.regs[10];
2859 pCtx->r11 = pVM->rem.s.Env.regs[11];
2860 pCtx->r12 = pVM->rem.s.Env.regs[12];
2861 pCtx->r13 = pVM->rem.s.Env.regs[13];
2862 pCtx->r14 = pVM->rem.s.Env.regs[14];
2863 pCtx->r15 = pVM->rem.s.Env.regs[15];
2864
2865 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2866#else
2867 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2868 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2869 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2870 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2871 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2872 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2873 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2874
2875 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2876#endif
2877
2878 SYNC_BACK_SREG(es, ES);
2879 SYNC_BACK_SREG(cs, CS);
2880 SYNC_BACK_SREG(ss, SS);
2881 SYNC_BACK_SREG(ds, DS);
2882 SYNC_BACK_SREG(fs, FS);
2883 SYNC_BACK_SREG(gs, GS);
2884
2885#ifdef TARGET_X86_64
2886 pCtx->rip = pVM->rem.s.Env.eip;
2887 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2888#else
2889 pCtx->eip = pVM->rem.s.Env.eip;
2890 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2891#endif
2892
2893 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2894 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2895 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2896#ifdef VBOX_WITH_RAW_MODE
2897 if (((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME) && !HMIsEnabled(pVM))
2898 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2899#endif
2900 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2901
2902 for (i = 0; i < 8; i++)
2903 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2904
2905 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2906 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2907 {
2908 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2909 STAM_COUNTER_INC(&gStatREMGDTChange);
2910#ifdef VBOX_WITH_RAW_MODE
2911 if (!HMIsEnabled(pVM))
2912 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2913#endif
2914 }
2915
2916 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2917 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2918 {
2919 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2920 STAM_COUNTER_INC(&gStatREMIDTChange);
2921#ifdef VBOX_WITH_RAW_MODE
2922 if (!HMIsEnabled(pVM))
2923 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2924#endif
2925 }
2926
2927 if ( pCtx->ldtr.Sel != pVM->rem.s.Env.ldt.selector
2928 || pCtx->ldtr.ValidSel != pVM->rem.s.Env.ldt.selector
2929 || pCtx->ldtr.u64Base != pVM->rem.s.Env.ldt.base
2930 || pCtx->ldtr.u32Limit != pVM->rem.s.Env.ldt.limit
2931 || pCtx->ldtr.Attr.u != ((pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK)
2932 || !(pCtx->ldtr.fFlags & CPUMSELREG_FLAGS_VALID)
2933 )
2934 {
2935 pCtx->ldtr.Sel = pVM->rem.s.Env.ldt.selector;
2936 pCtx->ldtr.ValidSel = pVM->rem.s.Env.ldt.selector;
2937 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2938 pCtx->ldtr.u64Base = pVM->rem.s.Env.ldt.base;
2939 pCtx->ldtr.u32Limit = pVM->rem.s.Env.ldt.limit;
2940 pCtx->ldtr.Attr.u = (pVM->rem.s.Env.ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2941 STAM_COUNTER_INC(&gStatREMLDTRChange);
2942#ifdef VBOX_WITH_RAW_MODE
2943 if (!HMIsEnabled(pVM))
2944 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2945#endif
2946 }
2947
2948 if ( pCtx->tr.Sel != pVM->rem.s.Env.tr.selector
2949 || pCtx->tr.ValidSel != pVM->rem.s.Env.tr.selector
2950 || pCtx->tr.u64Base != pVM->rem.s.Env.tr.base
2951 || pCtx->tr.u32Limit != pVM->rem.s.Env.tr.limit
2952 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2953 || pCtx->tr.Attr.u != ( (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2954 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT
2955 : 0)
2956 || !(pCtx->tr.fFlags & CPUMSELREG_FLAGS_VALID)
2957 )
2958 {
2959 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2960 pCtx->tr.Sel, pCtx->tr.u64Base, pCtx->tr.u32Limit, pCtx->tr.Attr.u,
2961 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2962 (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & (SEL_FLAGS_SMASK & ~DESC_INTEL_UNUSABLE)
2963 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> SEL_FLAGS_SHIFT : 0));
2964 pCtx->tr.Sel = pVM->rem.s.Env.tr.selector;
2965 pCtx->tr.ValidSel = pVM->rem.s.Env.tr.selector;
2966 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2967 pCtx->tr.u64Base = pVM->rem.s.Env.tr.base;
2968 pCtx->tr.u32Limit = pVM->rem.s.Env.tr.limit;
2969 pCtx->tr.Attr.u = (pVM->rem.s.Env.tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;
2970 if (pCtx->tr.Attr.u & ~DESC_INTEL_UNUSABLE)
2971 pCtx->tr.Attr.u |= DESC_TSS_BUSY_MASK >> SEL_FLAGS_SHIFT;
2972 STAM_COUNTER_INC(&gStatREMTRChange);
2973#ifdef VBOX_WITH_RAW_MODE
2974 if (!HMIsEnabled(pVM))
2975 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2976#endif
2977 }
2978
2979 /* Sysenter MSR */
2980 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2981 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2982 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2983
2984 /* System MSRs. */
2985 pCtx->msrEFER = pVM->rem.s.Env.efer;
2986 pCtx->msrSTAR = pVM->rem.s.Env.star;
2987 pCtx->msrPAT = pVM->rem.s.Env.pat;
2988#ifdef TARGET_X86_64
2989 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2990 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2991 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2992 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2993#endif
2994
2995}
2996
2997
2998/**
2999 * Update the VMM state information if we're currently in REM.
3000 *
3001 * This method is used by the DBGF and PDMDevice when there is any uncertainty about whether
3002 * we're currently executing in REM and the VMM state is invalid. This method will of
3003 * course check that we're executing in REM before syncing any data over to the VMM.
3004 *
3005 * @param pVM The VM handle.
3006 * @param pVCpu The VMCPU handle.
3007 */
3008REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
3009{
3010 if (pVM->rem.s.fInREM)
3011 remR3StateUpdate(pVM, pVCpu);
3012}
3013
3014
3015#undef LOG_GROUP
3016#define LOG_GROUP LOG_GROUP_REM
3017
3018
3019/**
3020 * Notify the recompiler about Address Gate 20 state change.
3021 *
3022 * This notification is required since A20 gate changes are
3023 * initiated from a device driver and the VM might just as
3024 * well be in REM mode as in RAW mode.
3025 *
3026 * @param pVM VM handle.
3027 * @param pVCpu VMCPU handle.
3028 * @param fEnable True if the gate should be enabled.
3029 * False if the gate should be disabled.
3030 */
3031REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
3032{
3033 LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
3034 VM_ASSERT_EMT(pVM);
3035
3036 /** @todo SMP and the A20 gate... */
3037 if (pVM->rem.s.Env.pVCpu == pVCpu)
3038 {
3039 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3040 cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
3041 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3042 }
3043}
3044
3045
3046/**
3047 * Replays the handler notification changes.
3048 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
3049 *
3050 * @param pVM VM handle.
3051 */
3052REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
3053{
3054 /*
3055 * Replay the flushes.
3056 */
3057 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
3058 VM_ASSERT_EMT(pVM);
3059
3060 /** @todo this isn't ensuring correct replay order. */
3061 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY))
3062 {
3063 uint32_t idxNext;
3064 uint32_t idxRevHead;
3065 uint32_t idxHead;
3066#ifdef VBOX_STRICT
3067 int32_t c = 0;
3068#endif
3069
3070 /* Lockless purging of pending notifications. */
3071 idxHead = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
3072 if (idxHead == UINT32_MAX)
3073 return;
3074 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3075
3076 /*
3077         * Reverse the list to process it in FIFO order (it is built by pushing onto the head, i.e. in LIFO order).
3078 */
3079 idxRevHead = UINT32_MAX;
3080 do
3081 {
3082 /* Save the index of the next rec. */
3083 idxNext = pVM->rem.s.aHandlerNotifications[idxHead].idxNext;
3084 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || idxNext == UINT32_MAX);
3085 /* Push the record onto the reversed list. */
3086 pVM->rem.s.aHandlerNotifications[idxHead].idxNext = idxRevHead;
3087 idxRevHead = idxHead;
3088 Assert(++c <= RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
3089 /* Advance. */
3090 idxHead = idxNext;
3091 } while (idxHead != UINT32_MAX);
3092
3093 /*
3094         * Loop thru the list, reinserting the records into the free list as they are
3095         * processed, to avoid having other EMTs run out of entries while we're flushing.
3096 */
3097 idxHead = idxRevHead;
3098 do
3099 {
3100 PREMHANDLERNOTIFICATION pCur = &pVM->rem.s.aHandlerNotifications[idxHead];
3101 uint32_t idxCur;
3102 Assert(--c >= 0);
3103
3104 switch (pCur->enmKind)
3105 {
3106 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
3107 remR3NotifyHandlerPhysicalRegister(pVM,
3108 pCur->u.PhysicalRegister.enmKind,
3109 pCur->u.PhysicalRegister.GCPhys,
3110 pCur->u.PhysicalRegister.cb,
3111 pCur->u.PhysicalRegister.fHasHCHandler);
3112 break;
3113
3114 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
3115 remR3NotifyHandlerPhysicalDeregister(pVM,
3116 pCur->u.PhysicalDeregister.enmKind,
3117 pCur->u.PhysicalDeregister.GCPhys,
3118 pCur->u.PhysicalDeregister.cb,
3119 pCur->u.PhysicalDeregister.fHasHCHandler,
3120 pCur->u.PhysicalDeregister.fRestoreAsRAM);
3121 break;
3122
3123 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
3124 remR3NotifyHandlerPhysicalModify(pVM,
3125 pCur->u.PhysicalModify.enmKind,
3126 pCur->u.PhysicalModify.GCPhysOld,
3127 pCur->u.PhysicalModify.GCPhysNew,
3128 pCur->u.PhysicalModify.cb,
3129 pCur->u.PhysicalModify.fHasHCHandler,
3130 pCur->u.PhysicalModify.fRestoreAsRAM);
3131 break;
3132
3133 default:
3134 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
3135 break;
3136 }
3137
3138 /*
3139 * Advance idxHead.
3140 */
3141 idxCur = idxHead;
3142 idxHead = pCur->idxNext;
3143 Assert(idxHead < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) || (idxHead == UINT32_MAX && c == 0));
3144
3145 /*
3146 * Put the record back into the free list.
3147 */
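            /* Lockless LIFO push: point the record at the current free-list
               head and CAS the head over to the record, retrying if another
               EMT races us. */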
3148 do
3149 {
3150 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
3151 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
3152 ASMCompilerBarrier();
3153 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, idxCur, idxNext));
3154 } while (idxHead != UINT32_MAX);
3155
3156#ifdef VBOX_STRICT
3157 if (pVM->cCpus == 1)
3158 {
3159 unsigned c;
3160 /* Check that all records are now on the free list. */
3161 for (c = 0, idxNext = pVM->rem.s.idxFreeList; idxNext != UINT32_MAX;
3162 idxNext = pVM->rem.s.aHandlerNotifications[idxNext].idxNext)
3163 c++;
3164 AssertReleaseMsg(c == RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("%#x != %#x, idxFreeList=%#x\n", c, RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), pVM->rem.s.idxFreeList));
3165 }
3166#endif
3167 }
3168}
3169
3170
3171/**
3172 * Notify REM about changed code page.
3173 *
3174 * @returns VBox status code.
3175 * @param pVM VM handle.
3176 * @param pVCpu VMCPU handle.
3177 * @param pvCodePage Code page address
3178 */
3179REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
3180{
3181#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
3182 int rc;
3183 RTGCPHYS PhysGC;
3184 uint64_t flags;
3185
3186 VM_ASSERT_EMT(pVM);
3187
3188 /*
3189 * Get the physical page address.
3190 */
3191 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
3192 if (rc == VINF_SUCCESS)
3193 {
3194 /*
3195 * Sync the required registers and flush the whole page.
3196         * (It's easier to do the whole page than to notify about each
3197         * physical byte that was changed.)
3198 */
3199 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
3200 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
3201 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
3202 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
3203
3204 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
3205 }
3206#endif
3207 return VINF_SUCCESS;
3208}
3209
3210
3211/**
3212 * Notification about a successful MMR3PhysRegister() call.
3213 *
3214 * @param pVM VM handle.
3215 * @param GCPhys The physical address of the RAM.
3216 * @param cb Size of the memory.
3217 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
3218 */
3219REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
3220{
3221 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
3222 VM_ASSERT_EMT(pVM);
3223
3224 /*
3225 * Validate input - we trust the caller.
3226 */
3227 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3228 Assert(cb);
3229 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3230 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("%#x\n", fFlags));
3231
3232 /*
3233 * Base ram? Update GCPhysLastRam.
3234 */
3235 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
3236 {
3237 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
3238 {
3239 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
3240 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
3241 }
3242 }
3243
3244 /*
3245 * Register the ram.
3246 */
3247 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3248
3249 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3250 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3251 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3252
3253 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3254}
3255
3256
3257/**
3258 * Notification about a successful MMR3PhysRomRegister() call.
3259 *
3260 * @param pVM VM handle.
3261 * @param GCPhys The physical address of the ROM.
3262 * @param cb The size of the ROM.
3263 * @param pvCopy Pointer to the ROM copy.
3264 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
3265 * This function will be called whenever the protection of the
3266 * shadow ROM changes (at reset and end of POST).
3267 */
3268REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
3269{
3270 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
3271 VM_ASSERT_EMT(pVM);
3272
3273 /*
3274 * Validate input - we trust the caller.
3275 */
3276 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3277 Assert(cb);
3278 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3279
3280 /*
3281 * Register the rom.
3282 */
3283 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3284
3285 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
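    /* Writable shadow ROM is registered as ordinary RAM; only read-only ROM
       gets the IO_MEM_ROM memory type so that guest writes don't go straight
       to the backing pages. */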
3286 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM), GCPhys);
3287 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3288
3289 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3290}
3291
3292
3293/**
3294 * Notification about a successful memory deregistration or reservation.
3295 *
3296 * @param pVM VM Handle.
3297 * @param GCPhys Start physical address.
3298 * @param cb The size of the range.
3299 */
3300REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
3301{
3302 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
3303 VM_ASSERT_EMT(pVM);
3304
3305 /*
3306 * Validate input - we trust the caller.
3307 */
3308 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3309 Assert(cb);
3310 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
3311
3312 /*
3313 * Unassign the memory.
3314 */
3315 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3316
3317 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3318 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3319 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3320
3321 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3322}
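
/* Illustration (GCPhysMmio2 and cbMmio2 are made-up names): unmapping an
 * MMIO2 region previously registered with REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2
 * simply flips the whole range back to unassigned: */
#if 0
    REMR3NotifyPhysRamDeregister(pVM, GCPhysMmio2, cbMmio2);
#endif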
3323
3324
3325/**
3326 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3327 *
3328 * @param pVM VM Handle.
3329 * @param enmKind Kind of access handler.
3330 * @param GCPhys Handler range address.
3331 * @param cb Size of the handler range.
3332 * @param fHasHCHandler Set if the handler has a HC callback function.
3333 *
3334 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3335 * Handler memory type to memory which has no HC handler.
3336 */
3337static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3338 bool fHasHCHandler)
3339{
3340 Log(("REMR3NotifyHandlerPhysicalRegister: enmKind=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3341 enmKind, GCPhys, cb, fHasHCHandler));
3342
3343 VM_ASSERT_EMT(pVM);
3344 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3345 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3346
3347
3348 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3349
3350 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3351 if (enmKind == PGMPHYSHANDLERKIND_MMIO)
3352 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iMMIOMemType, GCPhys);
3353 else if (fHasHCHandler)
3354 cpu_register_physical_memory_offset(GCPhys, cb, pVM->rem.s.iHandlerMemType, GCPhys);
3355 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3356
3357 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3358}
3359
3360/**
3361 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3362 *
3363 * @param pVM VM Handle.
3364 * @param enmKind Kind of access handler.
3365 * @param GCPhys Handler range address.
3366 * @param cb Size of the handler range.
3367 * @param fHasHCHandler Set if the handler has a HC callback function.
3368 *
3369 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3370 * Handler memory type to memory which has no HC handler.
3371 */
3372REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3373 bool fHasHCHandler)
3374{
3375 REMR3ReplayHandlerNotifications(pVM);
3376
3377 remR3NotifyHandlerPhysicalRegister(pVM, enmKind, GCPhys, cb, fHasHCHandler);
3378}
3379
3380/**
3381 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3382 *
3383 * @param pVM VM Handle.
3384 * @param enmKind Kind of access handler.
3385 * @param GCPhys Handler range address.
3386 * @param cb Size of the handler range.
3387 * @param fHasHCHandler Set if the handler has a HC callback function.
3388 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3389 */
3390static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb,
3391 bool fHasHCHandler, bool fRestoreAsRAM)
3392{
3393 Log(("REMR3NotifyHandlerPhysicalDeregister: enmKind=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3394 enmKind, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3395 VM_ASSERT_EMT(pVM);
3396
3397
3398 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3399
3400 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3401 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3402 if (enmKind == PGMPHYSHANDLERKIND_MMIO)
3403 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3404 else if (fHasHCHandler)
3405 {
3406 if (!fRestoreAsRAM)
3407 {
3408 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3409 cpu_register_physical_memory_offset(GCPhys, cb, IO_MEM_UNASSIGNED, GCPhys);
3410 }
3411 else
3412 {
3413 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3414 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3415 cpu_register_physical_memory_offset(GCPhys, cb, GCPhys, GCPhys);
3416 }
3417 }
3418 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3419
3420 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3421}
3422
3423/**
3424 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3425 *
3426 * @param pVM VM Handle.
3427 * @param enmKind Kind of access handler.
3428 * @param GCPhys Handler range address.
3429 * @param cb Size of the handler range.
3430 * @param fHasHCHandler Set if the handler has a HC callback function.
3431 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3432 */
3433REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3434{
3435 REMR3ReplayHandlerNotifications(pVM);
3436 remR3NotifyHandlerPhysicalDeregister(pVM, enmKind, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3437}
3438
3439
3440/**
3441 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3442 *
3443 * @param pVM VM Handle.
3444 * @param enmKind Kind of access handler.
3445 * @param GCPhysOld Old handler range address.
3446 * @param GCPhysNew New handler range address.
3447 * @param cb Size of the handler range.
3448 * @param fHasHCHandler Set if the handler has a HC callback function.
3449 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3450 */
3451static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3452{
3453 Log(("REMR3NotifyHandlerPhysicalModify: enmKind=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3454 enmKind, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3455 VM_ASSERT_EMT(pVM);
3456 AssertReleaseMsg(enmKind != PGMPHYSHANDLERKIND_MMIO, ("enmKind=%d\n", enmKind));
3457
3458 if (fHasHCHandler)
3459 {
3460 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3461
3462 /*
3463 * Reset the old page.
3464 */
3465 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3466 if (!fRestoreAsRAM)
3467 cpu_register_physical_memory_offset(GCPhysOld, cb, IO_MEM_UNASSIGNED, GCPhysOld);
3468 else
3469 {
3470 /* This is not perfect, but it'll do for PD monitoring... */
3471 Assert(cb == PAGE_SIZE);
3472 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3473 cpu_register_physical_memory_offset(GCPhysOld, cb, GCPhysOld, GCPhysOld);
3474 }
3475
3476 /*
3477 * Update the new page.
3478 */
3479 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3480 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3481 cpu_register_physical_memory_offset(GCPhysNew, cb, pVM->rem.s.iHandlerMemType, GCPhysNew);
3482 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3483
3484 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3485 }
3486}
3487
3488/**
3489 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3490 *
3491 * @param pVM VM Handle.
3492 * @param enmKind Kind of access handler.
3493 * @param GCPhysOld Old handler range address.
3494 * @param GCPhysNew New handler range address.
3495 * @param cb Size of the handler range.
3496 * @param fHasHCHandler Set if the handler has a HC callback function.
3497 * @param fRestoreAsRAM Whether to restore it as normal RAM or as unassigned memory.
3498 */
3499REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERKIND enmKind, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3500{
3501 REMR3ReplayHandlerNotifications(pVM);
3502
3503 remR3NotifyHandlerPhysicalModify(pVM, enmKind, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3504}
3505
3506/**
3507 * Checks if we're handling access to this page or not.
3508 *
3509 * @returns true if we're trapping access.
3510 * @returns false if we aren't.
3511 * @param pVM The VM handle.
3512 * @param GCPhys The physical address.
3513 *
3514 * @remark This function will only work correctly in VBOX_STRICT builds!
3515 */
3516REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3517{
3518#ifdef VBOX_STRICT
3519 ram_addr_t off;
3520 REMR3ReplayHandlerNotifications(pVM);
3521
3522 off = get_phys_page_offset(GCPhys);
3523 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3524 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3525 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3526#else
3527 return false;
3528#endif
3529}
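
/* Since this only works in VBOX_STRICT builds (it returns false otherwise),
 * callers would typically use it inside an assertion; a hypothetical sanity
 * check (GCPhysMmioPage is an illustrative name) could look like: */
#if 0
    Assert(REMR3IsPageAccessHandled(pVM, GCPhysMmioPage)); /* strict builds only */
#endif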
3530
3531
3532/**
3533 * Deals with a rare case in get_phys_addr_code where the code
3534 * is being monitored.
3535 *
3536 * It could also be an MMIO page, in which case we will raise a fatal error.
3537 *
3538 * @returns The physical address corresponding to addr.
3539 * @param env The cpu environment.
3540 * @param addr The virtual address.
3541 * @param pTLBEntry The TLB entry.
3542 * @param IoTlbEntry The I/O TLB entry.
3543 */
3544target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
3545 target_ulong addr,
3546 CPUTLBEntry *pTLBEntry,
3547 target_phys_addr_t IoTlbEntry)
3548{
3549 PVM pVM = env->pVM;
3550
3551 if ((IoTlbEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3552 {
3553 /* If code memory is being monitored, the corresponding IOTLB entry has the
3554 handler IO type, and its addend provides the real physical address whether
3555 or not we store the VA in the TLB, as handlers are always passed the PA. */
3556 target_ulong ret = (IoTlbEntry & TARGET_PAGE_MASK) + addr;
3557 return ret;
3558 }
3559 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3560 "*** handlers\n",
3561 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)IoTlbEntry));
3562 DBGFR3Info(pVM->pUVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3563 LogRel(("*** mmio\n"));
3564 DBGFR3Info(pVM->pUVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3565 LogRel(("*** phys\n"));
3566 DBGFR3Info(pVM->pUVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3567 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3568 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3569 AssertFatalFailed();
3570}
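
/* Worked example of the addend math above (illustrative numbers, 4 KiB
 * pages): if the guest executes at virtual address 0xc0101234 and the IOTLB
 * entry holds (PhysPage - VirtPage) | iHandlerMemType, e.g.
 * (0x00345000 - 0xc0101000) | type, then masking off the low type bits with
 * TARGET_PAGE_MASK and adding the full VA reproduces the physical address:
 *     (0x00345000 - 0xc0101000) + 0xc0101234 = 0x00345234 (mod 2^32)
 * i.e. the page-aligned addend plus the VA yields the PA, page offset
 * included. */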
3571
3572/**
3573 * Read guest RAM and ROM.
3574 *
3575 * @param SrcGCPhys The source address (guest physical).
3576 * @param pvDst The destination address.
3577 * @param cb Number of bytes to read.
3578 */
3579void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3580{
3581 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3582 VBOX_CHECK_ADDR(SrcGCPhys);
3583 VBOXSTRICTRC rcStrict = PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb, PGMACCESSORIGIN_REM);
3584 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3585#ifdef VBOX_DEBUG_PHYS
3586 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3587#endif
3588 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3589}
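
/* A small usage sketch (illustrative only; GCPhysGdt and uSel are
 * hypothetical locals): the sized helpers below serve scalar accesses from
 * recompiled code, while this routine handles arbitrary-length copies such
 * as fetching a whole guest descriptor: */
#if 0
    X86DESC Desc;
    remR3PhysRead(GCPhysGdt + (uSel & X86_SEL_MASK), &Desc, sizeof(Desc));
#endif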
3590
3591
3592/**
3593 * Read guest RAM and ROM, unsigned 8-bit.
3594 *
3595 * @param SrcGCPhys The source address (guest physical).
3596 */
3597RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3598{
3599 uint8_t val;
3600 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3601 VBOX_CHECK_ADDR(SrcGCPhys);
3602 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3603 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3604#ifdef VBOX_DEBUG_PHYS
3605 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3606#endif
3607 return val;
3608}
3609
3610
3611/**
3612 * Read guest RAM and ROM, signed 8-bit.
3613 *
3614 * @param SrcGCPhys The source address (guest physical).
3615 */
3616RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3617{
3618 int8_t val;
3619 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3620 VBOX_CHECK_ADDR(SrcGCPhys);
3621 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3622 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3623#ifdef VBOX_DEBUG_PHYS
3624 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3625#endif
3626 return val;
3627}
3628
3629
3630/**
3631 * Read guest RAM and ROM, unsigned 16-bit.
3632 *
3633 * @param SrcGCPhys The source address (guest physical).
3634 */
3635RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3636{
3637 uint16_t val;
3638 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3639 VBOX_CHECK_ADDR(SrcGCPhys);
3640 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3641 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3642#ifdef VBOX_DEBUG_PHYS
3643 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3644#endif
3645 return val;
3646}
3647
3648
3649/**
3650 * Read guest RAM and ROM, signed 16-bit.
3651 *
3652 * @param SrcGCPhys The source address (guest physical).
3653 */
3654RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3655{
3656 int16_t val;
3657 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3658 VBOX_CHECK_ADDR(SrcGCPhys);
3659 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3660 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3661#ifdef VBOX_DEBUG_PHYS
3662 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3663#endif
3664 return val;
3665}
3666
3667
3668/**
3669 * Read guest RAM and ROM, unsigned 32-bit.
3670 *
3671 * @param SrcGCPhys The source address (guest physical).
3672 */
3673RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3674{
3675 uint32_t val;
3676 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3677 VBOX_CHECK_ADDR(SrcGCPhys);
3678 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3679 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3680#ifdef VBOX_DEBUG_PHYS
3681 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3682#endif
3683 return val;
3684}
3685
3686
3687/**
3688 * Read guest RAM and ROM, signed 32-bit.
3689 *
3690 * @param SrcGCPhys The source address (guest physical).
3691 */
3692RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3693{
3694 int32_t val;
3695 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3696 VBOX_CHECK_ADDR(SrcGCPhys);
3697 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3698 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3699#ifdef VBOX_DEBUG_PHYS
3700 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3701#endif
3702 return val;
3703}
3704
3705
3706/**
3707 * Read guest RAM and ROM, unsigned 64-bit.
3708 *
3709 * @param SrcGCPhys The source address (guest physical).
3710 */
3711uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3712{
3713 uint64_t val;
3714 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3715 VBOX_CHECK_ADDR(SrcGCPhys);
3716 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3717 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3718#ifdef VBOX_DEBUG_PHYS
3719 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3720#endif
3721 return val;
3722}
3723
3724
3725/**
3726 * Read guest RAM and ROM, signed 64-bit.
3727 *
3728 * @param SrcGCPhys The source address (guest physical).
3729 */
3730int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3731{
3732 int64_t val;
3733 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3734 VBOX_CHECK_ADDR(SrcGCPhys);
3735 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys, PGMACCESSORIGIN_REM);
3736 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3737#ifdef VBOX_DEBUG_PHYS
3738 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3739#endif
3740 return val;
3741}
3742
3743
3744/**
3745 * Write guest RAM.
3746 *
3747 * @param DstGCPhys The destination address (guest physical).
3748 * @param pvSrc The source address.
3749 * @param cb Number of bytes to write
3750 */
3751void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3752{
3753 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3754 VBOX_CHECK_ADDR(DstGCPhys);
3755 VBOXSTRICTRC rcStrict = PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb, PGMACCESSORIGIN_REM);
3756 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3757 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3758#ifdef VBOX_DEBUG_PHYS
3759 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3760#endif
3761}
3762
3763
3764/**
3765 * Write guest RAM, unsigned 8-bit.
3766 *
3767 * @param DstGCPhys The destination address (guest physical).
3768 * @param val Value
3769 */
3770void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3771{
3772 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3773 VBOX_CHECK_ADDR(DstGCPhys);
3774 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3775 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3776#ifdef VBOX_DEBUG_PHYS
3777 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3778#endif
3779}
3780
3781
3782/**
3783 * Write guest RAM, unsigned 16-bit.
3784 *
3785 * @param DstGCPhys The destination address (guest physical).
3786 * @param val Value
3787 */
3788void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3789{
3790 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3791 VBOX_CHECK_ADDR(DstGCPhys);
3792 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3793 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3794#ifdef VBOX_DEBUG_PHYS
3795 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3796#endif
3797}
3798
3799
3800/**
3801 * Write guest RAM, unsigned 32-bit.
3802 *
3803 * @param DstGCPhys The destination address (guest physical).
3804 * @param val Value
3805 */
3806void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3807{
3808 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3809 VBOX_CHECK_ADDR(DstGCPhys);
3810 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3811 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3812#ifdef VBOX_DEBUG_PHYS
3813 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3814#endif
3815}
3816
3817
3818/**
3819 * Write guest RAM, unsigned 64-bit.
3820 *
3821 * @param DstGCPhys The destination address (guest physical).
3822 * @param val Value
3823 */
3824void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3825{
3826 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3827 VBOX_CHECK_ADDR(DstGCPhys);
3828 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val, PGMACCESSORIGIN_REM);
3829 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3830#ifdef VBOX_DEBUG_PHYS
3831 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)DstGCPhys));
3832#endif
3833}
3834
3835#undef LOG_GROUP
3836#define LOG_GROUP LOG_GROUP_REM_MMIO
3837
3838/** Read MMIO memory. */
3839static uint32_t remR3MMIOReadU8(void *pvEnv, target_phys_addr_t GCPhys)
3840{
3841 CPUX86State *env = (CPUX86State *)pvEnv;
3842 uint32_t u32 = 0;
3843 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 1);
3844 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3845 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", (RTGCPHYS)GCPhys, u32));
3846 return u32;
3847}
3848
3849/** Read MMIO memory. */
3850static uint32_t remR3MMIOReadU16(void *pvEnv, target_phys_addr_t GCPhys)
3851{
3852 CPUX86State *env = (CPUX86State *)pvEnv;
3853 uint32_t u32 = 0;
3854 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 2);
3855 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3856 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", (RTGCPHYS)GCPhys, u32));
3857 return u32;
3858}
3859
3860/** Read MMIO memory. */
3861static uint32_t remR3MMIOReadU32(void *pvEnv, target_phys_addr_t GCPhys)
3862{
3863 CPUX86State *env = (CPUX86State *)pvEnv;
3864 uint32_t u32 = 0;
3865 int rc = IOMMMIORead(env->pVM, env->pVCpu, GCPhys, &u32, 4);
3866 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3867 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", (RTGCPHYS)GCPhys, u32));
3868 return u32;
3869}
3870
3871/** Write to MMIO memory. */
3872static void remR3MMIOWriteU8(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3873{
3874 CPUX86State *env = (CPUX86State *)pvEnv;
3875 int rc;
3876 Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3877 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 1);
3878 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3879}
3880
3881/** Write to MMIO memory. */
3882static void remR3MMIOWriteU16(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3883{
3884 CPUX86State *env = (CPUX86State *)pvEnv;
3885 int rc;
3886 Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3887 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 2);
3888 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3889}
3890
3891/** Write to MMIO memory. */
3892static void remR3MMIOWriteU32(void *pvEnv, target_phys_addr_t GCPhys, uint32_t u32)
3893{
3894 CPUX86State *env = (CPUX86State *)pvEnv;
3895 int rc;
3896 Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3897 rc = IOMMMIOWrite(env->pVM, env->pVCpu, GCPhys, u32, 4);
3898 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3899}
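
/* How these callbacks plug in (a sketch, not the actual registration code,
 * which lives in this file's init path): qemu dispatches by access size
 * through tables of three read and three write functions, and the opaque
 * pointer registered alongside them comes back as pvEnv above. The table
 * names here are illustrative. */
#if 0
    static CPUReadMemoryFunc * const s_apfnMmioRead[3] =
        { remR3MMIOReadU8, remR3MMIOReadU16, remR3MMIOReadU32 };
    static CPUWriteMemoryFunc * const s_apfnMmioWrite[3] =
        { remR3MMIOWriteU8, remR3MMIOWriteU16, remR3MMIOWriteU32 };
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(s_apfnMmioRead, s_apfnMmioWrite, &pVM->rem.s.Env);
#endif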
3900
3901
3902#undef LOG_GROUP
3903#define LOG_GROUP LOG_GROUP_REM_HANDLER
3904
3905/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3906
3907static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3908{
3909 uint8_t u8;
3910 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3911 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8), PGMACCESSORIGIN_REM);
3912 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3913 return u8;
3914}
3915
3916static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3917{
3918 uint16_t u16;
3919 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3920 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16), PGMACCESSORIGIN_REM);
3921 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3922 return u16;
3923}
3924
3925static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3926{
3927 uint32_t u32;
3928 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", (RTGCPHYS)GCPhys));
3929 VBOXSTRICTRC rcStrict = PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32), PGMACCESSORIGIN_REM);
3930 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3931 return u32;
3932}
3933
3934static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3935{
3936 Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3937 VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t), PGMACCESSORIGIN_REM);
3938 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3939}
3940
3941static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3942{
3943 Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3944 VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t), PGMACCESSORIGIN_REM);
3945 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3946}
3947
3948static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
3949{
3950 Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", (RTGCPHYS)GCPhys, u32));
3951 VBOXSTRICTRC rcStrict = PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t), PGMACCESSORIGIN_REM);
3952 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); NOREF(rcStrict);
3953}
3954
3955/* -+- disassembly -+- */
3956
3957#undef LOG_GROUP
3958#define LOG_GROUP LOG_GROUP_REM_DISAS
3959
3960
3961/**
3962 * Enables or disables single-stepped disassembly.
3963 *
3964 * @returns VBox status code.
3965 * @param pVM VM handle.
3966 * @param fEnable To enable set this flag, to disable clear it.
3967 */
3968static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3969{
3970 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3971 VM_ASSERT_EMT(pVM);
3972
3973 if (fEnable)
3974 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3975 else
3976 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3977#ifdef REM_USE_QEMU_SINGLE_STEP_FOR_LOGGING
3978 cpu_single_step(&pVM->rem.s.Env, fEnable);
3979#endif
3980 return VINF_SUCCESS;
3981}
3982
3983
3984/**
3985 * Enables or disables single-stepped disassembly.
3986 *
3987 * @returns VBox status code.
3988 * @param pVM VM handle.
3989 * @param fEnable To enable set this flag, to disable clear it.
3990 */
3991REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3992{
3993 int rc;
3994
3995 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3996 if (VM_IS_EMT(pVM))
3997 return remR3DisasEnableStepping(pVM, fEnable);
3998
3999 rc = VMR3ReqPriorityCallWait(pVM, VMCPUID_ANY, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
4000 AssertRC(rc);
4001 return rc;
4002}
4003
4004
4005#ifdef VBOX_WITH_DEBUGGER
4006/**
4007 * External Debugger Command: .remstep [on|off|1|0]
4008 */
4009static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PUVM pUVM,
4010 PCDBGCVAR paArgs, unsigned cArgs)
4011{
4012 int rc;
4013 PVM pVM = pUVM->pVM;
4014
4015 if (cArgs == 0)
4016 /*
4017 * Print the current status.
4018 */
4019 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping is %s\n",
4020 pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");
4021 else
4022 {
4023 /*
4024 * Convert the argument and change the mode.
4025 */
4026 bool fEnable;
4027 rc = DBGCCmdHlpVarToBool(pCmdHlp, &paArgs[0], &fEnable);
4028 if (RT_SUCCESS(rc))
4029 {
4030 rc = REMR3DisasEnableStepping(pVM, fEnable);
4031 if (RT_SUCCESS(rc))
4032 rc = DBGCCmdHlpPrintf(pCmdHlp, "DisasStepping was %s\n", fEnable ? "enabled" : "disabled");
4033 else
4034 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "REMR3DisasEnableStepping");
4035 }
4036 else
4037 rc = DBGCCmdHlpFailRc(pCmdHlp, pCmd, rc, "DBGCCmdHlpVarToBool");
4038 }
4039 return rc;
4040}
4041#endif /* VBOX_WITH_DEBUGGER */
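
/* Example debugger session (the prompt is hypothetical, the output strings
 * match the DBGCCmdHlpPrintf calls above):
 *   VBoxDbg> .remstep
 *   DisasStepping is disabled
 *   VBoxDbg> .remstep on
 *   DisasStepping was enabled
 */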
4042
4043
4044/**
4045 * Disassembles one instruction and prints it to the log.
4046 *
4047 * @returns Success indicator.
4048 * @param env Pointer to the recompiler CPU structure.
4049 * @param f32BitCode Indicates whether the code should be disassembled
4050 * as 16 or 32 bit code. If -1, the CS selector
4051 * will be inspected.
4052 * @param pszPrefix Optional string to prefix the log output with.
4053 */
4054bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
4055{
4056 PVM pVM = env->pVM;
4057 const bool fLog = LogIsEnabled();
4058 const bool fLog2 = LogIs2Enabled();
4059 int rc = VINF_SUCCESS;
4060
4061 /*
4062 * Don't bother if there ain't any log output to do.
4063 */
4064 if (!fLog && !fLog2)
4065 return true;
4066
4067 /*
4068 * Update the state so DBGF reads the correct register values.
4069 */
4070 remR3StateUpdate(pVM, env->pVCpu);
4071
4072 /*
4073 * Log registers if requested.
4074 */
4075 if (fLog2)
4076 DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
4077
4078 /*
4079 * Disassemble to log.
4080 */
4081 if (fLog)
4082 {
4083 PVMCPU pVCpu = VMMGetCpu(pVM);
4084 char szBuf[256];
4085 szBuf[0] = '\0';
4086 rc = DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM,
4087 pVCpu->idCpu,
4088 0, /* Sel */ 0, /* GCPtr */
4089 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4090 szBuf,
4091 sizeof(szBuf),
4092 NULL);
4093 if (RT_FAILURE(rc))
4094 RTStrPrintf(szBuf, sizeof(szBuf), "DBGFR3DisasInstrEx failed with rc=%Rrc\n", rc);
4095 if (pszPrefix && *pszPrefix)
4096 RTLogPrintf("%s-CPU%d: %s\n", pszPrefix, pVCpu->idCpu, szBuf);
4097 else
4098 RTLogPrintf("CPU%d: %s\n", pVCpu->idCpu, szBuf);
4099 }
4100
4101 return RT_SUCCESS(rc);
4102}
4103
4104
4105/**
4106 * Disassemble recompiled code.
4107 *
4108 * @param phFileIgnored Ignored; usually the logfile.
4109 * @param pvCode Pointer to the code block.
4110 * @param cb Size of the code block.
4111 */
4112void disas(FILE *phFileIgnored, void *pvCode, unsigned long cb)
4113{
4114 if (LogIs2Enabled())
4115 {
4116 unsigned off = 0;
4117 char szOutput[256];
4118 DISCPUSTATE Cpu;
4119#ifdef RT_ARCH_X86
4120 DISCPUMODE enmCpuMode = DISCPUMODE_32BIT;
4121#else
4122 DISCPUMODE enmCpuMode = DISCPUMODE_64BIT;
4123#endif
4124
4125 RTLogPrintf("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
4126 while (off < cb)
4127 {
4128 uint32_t cbInstr;
4129 int rc = DISInstrToStr((uint8_t const *)pvCode + off, enmCpuMode,
4130 &Cpu, &cbInstr, szOutput, sizeof(szOutput));
4131 if (RT_SUCCESS(rc))
4132 RTLogPrintf("%s", szOutput);
4133 else
4134 {
4135 RTLogPrintf("disas error %Rrc\n", rc);
4136 cbInstr = 1;
4137 }
4138 off += cbInstr;
4139 }
4140 }
4141}
4142
4143
4144/**
4145 * Disassemble guest code.
4146 *
4147 * @param phFileIgnored Ignored; usually the logfile.
4148 * @param uCode The guest address of the code to disassemble. (flat?)
4149 * @param cb Number of bytes to disassemble.
4150 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
4151 */
4152void target_disas(FILE *phFileIgnored, target_ulong uCode, target_ulong cb, int fFlags)
4153{
4154 if (LogIs2Enabled())
4155 {
4156 PVM pVM = cpu_single_env->pVM;
4157 PVMCPU pVCpu = cpu_single_env->pVCpu;
4158 RTSEL cs;
4159 RTGCUINTPTR eip;
4160
4161 Assert(pVCpu);
4162
4163 /*
4164 * Update the state so DBGF reads the correct register values (flags).
4165 */
4166 remR3StateUpdate(pVM, pVCpu);
4167
4168 /*
4169 * Do the disassembling.
4170 */
4171 RTLogPrintf("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
4172 cs = cpu_single_env->segs[R_CS].selector;
4173 eip = uCode - cpu_single_env->segs[R_CS].base;
4174 for (;;)
4175 {
4176 char szBuf[256];
4177 uint32_t cbInstr;
4178 int rc = DBGFR3DisasInstrEx(pVM->pUVM,
4179 pVCpu->idCpu,
4180 cs,
4181 eip,
4182 DBGF_DISAS_FLAGS_DEFAULT_MODE,
4183 szBuf, sizeof(szBuf),
4184 &cbInstr);
4185 if (RT_SUCCESS(rc))
4186 RTLogPrintf("%llx %s\n", (uint64_t)uCode, szBuf);
4187 else
4188 {
4189 RTLogPrintf("%llx %04x:%llx: disas error %Rrc\n", (uint64_t)uCode, cs, (uint64_t)eip, rc);
4190 cbInstr = 1;
4191 }
4192
4193 /* next */
4194 if (cb <= cbInstr)
4195 break;
4196 cb -= cbInstr;
4197 uCode += cbInstr;
4198 eip += cbInstr;
4199 }
4200 }
4201}
4202
4203
4204/**
4205 * Looks up a guest symbol.
4206 *
4207 * @returns Pointer to symbol name. This is a static buffer.
4208 * @param orig_addr The address in question.
4209 */
4210const char *lookup_symbol(target_ulong orig_addr)
4211{
4212 PVM pVM = cpu_single_env->pVM;
4213 RTGCINTPTR off = 0;
4214 RTDBGSYMBOL Sym;
4215 DBGFADDRESS Addr;
4216
4217 int rc = DBGFR3AsSymbolByAddr(pVM->pUVM, DBGF_AS_GLOBAL, DBGFR3AddrFromFlat(pVM->pUVM, &Addr, orig_addr),
4218 RTDBGSYMADDR_FLAGS_LESS_OR_EQUAL, &off, &Sym, NULL /*phMod*/);
4219 if (RT_SUCCESS(rc))
4220 {
4221 static char szSym[sizeof(Sym.szName) + 48];
4222 if (!off)
4223 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
4224 else if (off > 0)
4225 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
4226 else
4227 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
4228 return szSym;
4229 }
4230 return "<N/A>";
4231}
4232
4233
4234#undef LOG_GROUP
4235#define LOG_GROUP LOG_GROUP_REM
4236
4237
4238/* -+- FF notifications -+- */
4239
4240
4241/**
4242 * Notification about a pending interrupt.
4243 *
4244 * @param pVM VM Handle.
4245 * @param pVCpu VMCPU Handle.
4246 * @param u8Interrupt The pending interrupt.
4247 * @thread The emulation thread.
4248 */
4249REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
4250{
4251 Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
4252 pVM->rem.s.u32PendingInterrupt = u8Interrupt;
4253}
4254
4255/**
4256 * Notification about a pending interrupt.
4257 *
4258 * @returns Pending interrupt or REM_NO_PENDING_IRQ
4259 * @param pVM VM Handle.
4260 * @param pVCpu VMCPU Handle.
4261 * @thread The emulation thread.
4262 */
4263REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
4264{
4265 return pVM->rem.s.u32PendingInterrupt;
4266}
4267
4268/**
4269 * Notification about the interrupt FF being set.
4270 *
4271 * @param pVM VM Handle.
4272 * @param pVCpu VMCPU Handle.
4273 * @thread The emulation thread.
4274 */
4275REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
4276{
4277#ifndef IEM_VERIFICATION_MODE
4278 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
4279 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
4280 if (pVM->rem.s.fInREM)
4281 {
4282 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4283 CPU_INTERRUPT_EXTERNAL_HARD);
4284 }
4285#endif
4286}
4287
4288
4289/**
4290 * Notification about the interrupt FF being cleared.
4291 *
4292 * @param pVM VM Handle.
4293 * @param pVCpu VMCPU Handle.
4294 * @thread Any.
4295 */
4296REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
4297{
4298 LogFlow(("REMR3NotifyInterruptClear:\n"));
4299 if (pVM->rem.s.fInREM)
4300 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
4301}
4302
4303
4304/**
4305 * Notification about pending timer(s).
4306 *
4307 * @param pVM VM Handle.
4308 * @param pVCpuDst The target cpu for this notification.
4309 * TM will not broadcast pending timer events, but use
4310 * a dedicated EMT for them. So, only interrupt REM
4311 * execution if the given CPU is executing in REM.
4312 * @thread Any.
4313 */
4314REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
4315{
4316#ifndef IEM_VERIFICATION_MODE
4317#ifndef DEBUG_bird
4318 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
4319#endif
4320 if (pVM->rem.s.fInREM)
4321 {
4322 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
4323 {
4324 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
4325 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
4326 CPU_INTERRUPT_EXTERNAL_TIMER);
4327 }
4328 else
4329 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
4330 }
4331 else
4332 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
4333#endif
4334}
4335
4336
4337/**
4338 * Notification about pending DMA transfers.
4339 *
4340 * @param pVM VM Handle.
4341 * @thread Any.
4342 */
4343REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
4344{
4345#ifndef IEM_VERIFICATION_MODE
4346 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
4347 if (pVM->rem.s.fInREM)
4348 {
4349 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4350 CPU_INTERRUPT_EXTERNAL_DMA);
4351 }
4352#endif
4353}
4354
4355
4356/**
4357 * Notification about pending queue items.
4358 *
4359 * @param pVM VM Handle.
4360 * @thread Any.
4361 */
4362REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
4363{
4364#ifndef IEM_VERIFICATION_MODE
4365 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
4366 if (pVM->rem.s.fInREM)
4367 {
4368 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4369 CPU_INTERRUPT_EXTERNAL_EXIT);
4370 }
4371#endif
4372}
4373
4374
4375/**
4376 * Notification about pending FF set by an external thread.
4377 *
4378 * @param pVM VM handle.
4379 * @thread Any.
4380 */
4381REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4382{
4383#ifndef IEM_VERIFICATION_MODE
4384 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4385 if (pVM->rem.s.fInREM)
4386 {
4387 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4388 CPU_INTERRUPT_EXTERNAL_EXIT);
4389 }
4390#endif
4391}
4392
4393
4394#ifdef VBOX_WITH_STATISTICS
4395void remR3ProfileStart(int statcode)
4396{
4397 STAMPROFILEADV *pStat;
4398 switch(statcode)
4399 {
4400 case STATS_EMULATE_SINGLE_INSTR:
4401 pStat = &gStatExecuteSingleInstr;
4402 break;
4403 case STATS_QEMU_COMPILATION:
4404 pStat = &gStatCompilationQEmu;
4405 break;
4406 case STATS_QEMU_RUN_EMULATED_CODE:
4407 pStat = &gStatRunCodeQEmu;
4408 break;
4409 case STATS_QEMU_TOTAL:
4410 pStat = &gStatTotalTimeQEmu;
4411 break;
4412 case STATS_QEMU_RUN_TIMERS:
4413 pStat = &gStatTimers;
4414 break;
4415 case STATS_TLB_LOOKUP:
4416 pStat = &gStatTBLookup;
4417 break;
4418 case STATS_IRQ_HANDLING:
4419 pStat = &gStatIRQ;
4420 break;
4421 case STATS_RAW_CHECK:
4422 pStat = &gStatRawCheck;
4423 break;
4424
4425 default:
4426 AssertMsgFailed(("unknown stat %d\n", statcode));
4427 return;
4428 }
4429 STAM_PROFILE_ADV_START(pStat, a);
4430}
4431
4432
4433void remR3ProfileStop(int statcode)
4434{
4435 STAMPROFILEADV *pStat;
4436 switch(statcode)
4437 {
4438 case STATS_EMULATE_SINGLE_INSTR:
4439 pStat = &gStatExecuteSingleInstr;
4440 break;
4441 case STATS_QEMU_COMPILATION:
4442 pStat = &gStatCompilationQEmu;
4443 break;
4444 case STATS_QEMU_RUN_EMULATED_CODE:
4445 pStat = &gStatRunCodeQEmu;
4446 break;
4447 case STATS_QEMU_TOTAL:
4448 pStat = &gStatTotalTimeQEmu;
4449 break;
4450 case STATS_QEMU_RUN_TIMERS:
4451 pStat = &gStatTimers;
4452 break;
4453 case STATS_TLB_LOOKUP:
4454 pStat = &gStatTBLookup;
4455 break;
4456 case STATS_IRQ_HANDLING:
4457 pStat = &gStatIRQ;
4458 break;
4459 case STATS_RAW_CHECK:
4460 pStat = &gStatRawCheck;
4461 break;
4462 default:
4463 AssertMsgFailed(("unknown stat %d\n", statcode));
4464 return;
4465 }
4466 STAM_PROFILE_ADV_STOP(pStat, a);
4467}
4468#endif
4469
4470/**
4471 * Raise an RC, force rem exit.
4472 *
4473 * @param pVM VM handle.
4474 * @param rc The rc.
4475 */
4476void remR3RaiseRC(PVM pVM, int rc)
4477{
4478 Log(("remR3RaiseRC: rc=%Rrc\n", rc));
4479 Assert(pVM->rem.s.fInREM);
4480 VM_ASSERT_EMT(pVM);
4481 pVM->rem.s.rc = rc;
4482 cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
4483}
4484
4485
4486/* -+- timers -+- */
4487
4488uint64_t cpu_get_tsc(CPUX86State *env)
4489{
4490 STAM_COUNTER_INC(&gStatCpuGetTSC);
4491 return TMCpuTickGet(env->pVCpu);
4492}
4493
4494
4495/* -+- interrupts -+- */
4496
4497void cpu_set_ferr(CPUX86State *env)
4498{
4499 int rc = PDMIsaSetIrq(env->pVM, 13, 1, 0 /*uTagSrc*/);
4500 LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
4501}
4502
4503int cpu_get_pic_interrupt(CPUX86State *env)
4504{
4505 uint8_t u8Interrupt;
4506 int rc;
4507
4508 /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
4509 * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
4510 * with the (a)pic.
4511 */
4512 /* Note! We assume we will go directly to the recompiler to handle the pending interrupt! */
4513 /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
4514 * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
4515 * remove this kludge. */
4516 if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
4517 {
4518 rc = VINF_SUCCESS;
4519 Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
4520 u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
4521 env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
4522 }
4523 else
4524 rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);
4525
4526 LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc pc=%04x:%08llx ~flags=%08llx\n",
4527 u8Interrupt, rc, env->segs[R_CS].selector, (uint64_t)env->eip, (uint64_t)env->eflags));
4528 if (RT_SUCCESS(rc))
4529 {
4530 if (VMCPU_FF_IS_PENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
4531 env->interrupt_request |= CPU_INTERRUPT_HARD;
4532 return u8Interrupt;
4533 }
4534 return -1;
4535}
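
/* Sketch of the handoff the kludge above implements (illustrative flow):
 * when raw mode gives up on an interrupt it has already fetched, and hence
 * acknowledged, at the PIC/APIC, EM parks the vector in REM before
 * rescheduling: */
#if 0
    REMR3NotifyPendingInterrupt(pVM, pVCpu, u8Interrupt); /* stashed; PDM already acked it */
    /* ... EM reschedules to REM, qemu asks for the vector ... */
    /* cpu_get_pic_interrupt() then returns the stashed vector without
       calling PDMGetInterrupt() a second time. */
#endif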
4536
4537
4538/* -+- local apic -+- */
4539
4540#if 0 /* CPUMSetGuestMsr does this now. */
4541void cpu_set_apic_base(CPUX86State *env, uint64_t val)
4542{
4543 int rc = PDMApicSetBase(env->pVM, val);
4544 LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
4545}
4546#endif
4547
4548uint64_t cpu_get_apic_base(CPUX86State *env)
4549{
4550 uint64_t u64;
4551 VBOXSTRICTRC rcStrict = CPUMQueryGuestMsr(env->pVCpu, MSR_IA32_APICBASE, &u64);
4552 if (RT_SUCCESS(rcStrict))
4553 {
4554 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4555 return u64;
4556 }
4557 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict)));
4558 return 0;
4559}
4560
4561void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
4562{
4563 int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4564 LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
4565}
4566
4567uint8_t cpu_get_apic_tpr(CPUX86State *env)
4568{
4569 uint8_t u8;
4570 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL, NULL);
4571 if (RT_SUCCESS(rc))
4572 {
4573 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4574 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4575 }
4576 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4577 return 0;
4578}
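
/* Worked example of the CR8 <-> TPR mapping used above (illustrative
 * values): a guest MOV CR8 with value 0x9 becomes TPR = 0x9 << 4 = 0x90 in
 * the APIC task priority register, and reading a TPR of 0x90 back yields
 * CR8 = 0x90 >> 4 = 0x9; CR8 bits 3:0 correspond to TPR bits 7:4. */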
4579
4580/**
4581 * Read an MSR.
4582 *
4583 * @retval 0 success.
4584 * @retval -1 failure, raise \#GP(0).
4585 * @param env The cpu state.
4586 * @param idMsr The MSR to read.
4587 * @param puValue Where to return the value.
4588 */
4589int cpu_rdmsr(CPUX86State *env, uint32_t idMsr, uint64_t *puValue)
4590{
4591 Assert(env->pVCpu);
4592 return CPUMQueryGuestMsr(env->pVCpu, idMsr, puValue) == VINF_SUCCESS ? 0 : -1;
4593}
4594
4595/**
4596 * Write to an MSR.
4597 *
4598 * @retval 0 success.
4599 * @retval -1 failure, raise \#GP(0).
4600 * @param env The cpu state.
4601 * @param idMsr The MSR to write to.
4602 * @param uValue The value to write.
4603 */
4604int cpu_wrmsr(CPUX86State *env, uint32_t idMsr, uint64_t uValue)
4605{
4606 Assert(env->pVCpu);
4607 return CPUMSetGuestMsr(env->pVCpu, idMsr, uValue) == VINF_SUCCESS ? 0 : -1;
4608}
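
/* Usage sketch (illustrative; raise_exception/EXCP0D_GPF are assumed qemu
 * helper names, not confirmed by this file): qemu's RDMSR/WRMSR helpers
 * call these and raise #GP(0) on a -1 return, roughly like: */
#if 0
    uint64_t uValue;
    if (cpu_rdmsr(env, MSR_IA32_TSC, &uValue) != 0)
        raise_exception(EXCP0D_GPF);
#endif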
4609
4610/* -+- I/O Ports -+- */
4611
4612#undef LOG_GROUP
4613#define LOG_GROUP LOG_GROUP_REM_IOPORT
4614
4615void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
4616{
4617 int rc;
4618
4619 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4620 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4621
4622 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 1);
4623 if (RT_LIKELY(rc == VINF_SUCCESS))
4624 return;
4625 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4626 {
4627 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4628 remR3RaiseRC(env->pVM, rc);
4629 return;
4630 }
4631 remAbort(rc, __FUNCTION__);
4632}
4633
4634void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
4635{
4636 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4637 int rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 2);
4638 if (RT_LIKELY(rc == VINF_SUCCESS))
4639 return;
4640 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4641 {
4642 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4643 remR3RaiseRC(env->pVM, rc);
4644 return;
4645 }
4646 remAbort(rc, __FUNCTION__);
4647}
4648
4649void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
4650{
4651 int rc;
4652 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4653 rc = IOMIOPortWrite(env->pVM, env->pVCpu, (RTIOPORT)addr, val, 4);
4654 if (RT_LIKELY(rc == VINF_SUCCESS))
4655 return;
4656 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4657 {
4658 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4659 remR3RaiseRC(env->pVM, rc);
4660 return;
4661 }
4662 remAbort(rc, __FUNCTION__);
4663}
4664
4665uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
4666{
4667 uint32_t u32 = 0;
4668 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 1);
4669 if (RT_LIKELY(rc == VINF_SUCCESS))
4670 {
4671 if (/*addr != 0x61 && */addr != 0x71)
4672 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4673 return (uint8_t)u32;
4674 }
4675 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4676 {
4677 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4678 remR3RaiseRC(env->pVM, rc);
4679 return (uint8_t)u32;
4680 }
4681 remAbort(rc, __FUNCTION__);
4682 return UINT8_C(0xff);
4683}
4684
4685uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
4686{
4687 uint32_t u32 = 0;
4688 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 2);
4689 if (RT_LIKELY(rc == VINF_SUCCESS))
4690 {
4691 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4692 return (uint16_t)u32;
4693 }
4694 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4695 {
4696 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4697 remR3RaiseRC(env->pVM, rc);
4698 return (uint16_t)u32;
4699 }
4700 remAbort(rc, __FUNCTION__);
4701 return UINT16_C(0xffff);
4702}
4703
4704uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
4705{
4706 uint32_t u32 = 0;
4707 int rc = IOMIOPortRead(env->pVM, env->pVCpu, (RTIOPORT)addr, &u32, 4);
4708 if (RT_LIKELY(rc == VINF_SUCCESS))
4709 {
4710 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4711 return u32;
4712 }
4713 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4714 {
4715 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4716 remR3RaiseRC(env->pVM, rc);
4717 return u32;
4718 }
4719 remAbort(rc, __FUNCTION__);
4720 return UINT32_C(0xffffffff);
4721}
4722
4723#undef LOG_GROUP
4724#define LOG_GROUP LOG_GROUP_REM
4725
4726
4727/* -+- helpers and misc other interfaces -+- */
4728
4729/**
4730 * Perform the CPUID instruction.
4731 *
4732 * @param env Pointer to the recompiler CPU structure.
4733 * @param idx The CPUID leaf (eax).
4734 * @param idxSub The CPUID sub-leaf (ecx) where applicable.
4735 * @param pEAX Where to store eax.
4736 * @param pEBX Where to store ebx.
4737 * @param pECX Where to store ecx.
4738 * @param pEDX Where to store edx.
4739 */
4740void cpu_x86_cpuid(CPUX86State *env, uint32_t idx, uint32_t idxSub,
4741 uint32_t *pEAX, uint32_t *pEBX, uint32_t *pECX, uint32_t *pEDX)
4742{
4743 NOREF(idxSub);
4744 CPUMGetGuestCpuId(env->pVCpu, idx, idxSub, pEAX, pEBX, pECX, pEDX);
4745}
4746
4747
4748#if 0 /* not used */
4749/**
4750 * Interface for qemu hardware to report back fatal errors.
4751 */
4752void hw_error(const char *pszFormat, ...)
4753{
4754 /*
4755 * Bitch about it.
4756 */
4757 /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
4758 * this in my Odin32 tree at home! */
4759 va_list args;
4760 va_start(args, pszFormat);
4761 RTLogPrintf("fatal error in virtual hardware:");
4762 RTLogPrintfV(pszFormat, args);
4763 va_end(args);
4764 AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));
4765
4766 /*
4767 * If we're in REM context we'll sync back the state before 'jumping' to
4768 * the EMs failure handling.
4769 */
4770 PVM pVM = cpu_single_env->pVM;
4771 if (pVM->rem.s.fInREM)
4772 REMR3StateBack(pVM);
4773 EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
4774 AssertMsgFailed(("EMR3FatalError returned!\n"));
4775}
4776#endif
4777
4778/**
4779 * Interface for the qemu cpu to report unhandled situation
4780 * raising a fatal VM error.
4781 */
4782void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
4783{
4784 va_list va;
4785 PVM pVM;
4786 PVMCPU pVCpu;
4787 char szMsg[256];
4788
4789 /*
4790 * Bitch about it.
4791 */
4792 RTLogFlags(NULL, "nodisabled nobuffered");
4793 RTLogFlush(NULL);
4794
4795 va_start(va, pszFormat);
4796#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
4797 /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
4798 unsigned cArgs = 0;
4799 uintptr_t auArgs[6] = {0,0,0,0,0,0};
4800 const char *psz = strchr(pszFormat, '%');
4801 while (psz && cArgs < 6)
4802 {
4803 auArgs[cArgs++] = va_arg(va, uintptr_t);
4804 psz = strchr(psz + 1, '%');
4805 }
4806 switch (cArgs)
4807 {
4808 case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
4809 case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
4810 case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
4811 case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
4812 case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
4813 case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
4814 default:
4815 case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
4816 }
4817#else
4818 RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
4819#endif
4820 va_end(va);
4821
4822 RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4823 RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);
4824
4825 /*
4826 * If we're in REM context we'll sync back the state before 'jumping' to
4827 * the EMs failure handling.
4828 */
4829 pVM = cpu_single_env->pVM;
4830 pVCpu = cpu_single_env->pVCpu;
4831 Assert(pVCpu);
4832
4833 if (pVM->rem.s.fInREM)
4834 REMR3StateBack(pVM, pVCpu);
4835 EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
4836 AssertMsgFailed(("EMR3FatalError returned!\n"));
4837}
4838
4839
4840/**
4841 * Aborts the VM.
4842 *
4843 * @param rc VBox error code.
4844 * @param pszTip Hint about why/when this happened.
4845 */
4846void remAbort(int rc, const char *pszTip)
4847{
4848 PVM pVM;
4849 PVMCPU pVCpu;
4850
4851 /*
4852 * Bitch about it.
4853 */
4854 RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
4855 AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));
4856
4857 /*
4858 * Jump back to where we entered the recompiler.
4859 */
4860 pVM = cpu_single_env->pVM;
4861 pVCpu = cpu_single_env->pVCpu;
4862 Assert(pVCpu);
4863
4864 if (pVM->rem.s.fInREM)
4865 REMR3StateBack(pVM, pVCpu);
4866
4867 EMR3FatalError(pVCpu, rc);
4868 AssertMsgFailed(("EMR3FatalError returned!\n"));
4869}
4870
4871
4872/**
4873 * Dumps a linux system call.
4874 * @param pVCpu VMCPU handle.
4875 */
4876void remR3DumpLnxSyscall(PVMCPU pVCpu)
4877{
4878 static const char *apsz[] =
4879 {
4880 "sys_restart_syscall", /* 0 - old "setup()" system call, used for restarting */
4881 "sys_exit",
4882 "sys_fork",
4883 "sys_read",
4884 "sys_write",
4885 "sys_open", /* 5 */
4886 "sys_close",
4887 "sys_waitpid",
4888 "sys_creat",
4889 "sys_link",
4890 "sys_unlink", /* 10 */
4891 "sys_execve",
4892 "sys_chdir",
4893 "sys_time",
4894 "sys_mknod",
4895 "sys_chmod", /* 15 */
4896 "sys_lchown16",
4897 "sys_ni_syscall", /* old break syscall holder */
4898 "sys_stat",
4899 "sys_lseek",
4900 "sys_getpid", /* 20 */
4901 "sys_mount",
4902 "sys_oldumount",
4903 "sys_setuid16",
4904 "sys_getuid16",
4905 "sys_stime", /* 25 */
4906 "sys_ptrace",
4907 "sys_alarm",
4908 "sys_fstat",
4909 "sys_pause",
4910 "sys_utime", /* 30 */
4911 "sys_ni_syscall", /* old stty syscall holder */
4912 "sys_ni_syscall", /* old gtty syscall holder */
4913 "sys_access",
4914 "sys_nice",
4915 "sys_ni_syscall", /* 35 - old ftime syscall holder */
4916 "sys_sync",
4917 "sys_kill",
4918 "sys_rename",
4919 "sys_mkdir",
4920 "sys_rmdir", /* 40 */
4921 "sys_dup",
4922 "sys_pipe",
4923 "sys_times",
4924 "sys_ni_syscall", /* old prof syscall holder */
4925 "sys_brk", /* 45 */
4926 "sys_setgid16",
4927 "sys_getgid16",
4928 "sys_signal",
4929 "sys_geteuid16",
4930 "sys_getegid16", /* 50 */
4931 "sys_acct",
4932 "sys_umount", /* recycled never used phys() */
4933 "sys_ni_syscall", /* old lock syscall holder */
4934 "sys_ioctl",
4935 "sys_fcntl", /* 55 */
4936 "sys_ni_syscall", /* old mpx syscall holder */
4937 "sys_setpgid",
4938 "sys_ni_syscall", /* old ulimit syscall holder */
4939 "sys_olduname",
4940 "sys_umask", /* 60 */
4941 "sys_chroot",
4942 "sys_ustat",
4943 "sys_dup2",
4944 "sys_getppid",
4945 "sys_getpgrp", /* 65 */
4946 "sys_setsid",
4947 "sys_sigaction",
4948 "sys_sgetmask",
4949 "sys_ssetmask",
4950 "sys_setreuid16", /* 70 */
4951 "sys_setregid16",
4952 "sys_sigsuspend",
4953 "sys_sigpending",
4954 "sys_sethostname",
4955 "sys_setrlimit", /* 75 */
4956 "sys_old_getrlimit",
4957 "sys_getrusage",
4958 "sys_gettimeofday",
4959 "sys_settimeofday",
4960 "sys_getgroups16", /* 80 */
4961 "sys_setgroups16",
4962 "old_select",
4963 "sys_symlink",
4964 "sys_lstat",
4965 "sys_readlink", /* 85 */
4966 "sys_uselib",
4967 "sys_swapon",
4968 "sys_reboot",
4969 "old_readdir",
4970 "old_mmap", /* 90 */
4971 "sys_munmap",
4972 "sys_truncate",
4973 "sys_ftruncate",
4974 "sys_fchmod",
4975 "sys_fchown16", /* 95 */
4976 "sys_getpriority",
4977 "sys_setpriority",
4978 "sys_ni_syscall", /* old profil syscall holder */
4979 "sys_statfs",
4980 "sys_fstatfs", /* 100 */
4981 "sys_ioperm",
4982 "sys_socketcall",
4983 "sys_syslog",
4984 "sys_setitimer",
4985 "sys_getitimer", /* 105 */
4986 "sys_newstat",
4987 "sys_newlstat",
4988 "sys_newfstat",
4989 "sys_uname",
4990 "sys_iopl", /* 110 */
4991 "sys_vhangup",
4992 "sys_ni_syscall", /* old "idle" system call */
4993 "sys_vm86old",
4994 "sys_wait4",
4995 "sys_swapoff", /* 115 */
4996 "sys_sysinfo",
4997 "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone", /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect", /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall", /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall", /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs", /* 135 */
        "sys_personality",
        "sys_ni_syscall", /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek", /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv", /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock", /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam", /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min", /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16", /* 165 */
        "sys_vm86",
        "sys_ni_syscall", /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16", /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask", /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64", /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset", /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall", /* reserved for streams1 */
        "sys_ni_syscall", /* reserved for streams2 */
        "sys_vfork", /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64", /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid", /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups", /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid", /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid", /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64", /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall", /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead", /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr", /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr", /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex", /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup", /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64", /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl", /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime", /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime", /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill", /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall" /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
                     CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
            break;
    }
}
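
/*
 * Illustrative sketch, not part of the original source: the switch above is
 * shaped to take per-syscall cases ahead of the generic default.  A dedicated
 * dump for sys_open (number 5), relying on the i386 int 0x80 convention of
 * passing arguments in EBX, ECX, EDX, ..., could be slotted in like this
 * (the pszPath/fFlags labels are just hypothetical format names):
 *
 *     case 5: // open(pathname, flags, mode)
 *         Log(("REM: linux syscall open: pszPath=%08x fFlags=%#x\n",
 *              CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu)));
 *         break;
 */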


/**
 * Dumps an OpenBSD system call.
 * @param   pVCpu       VMCPU handle.
 */
void remR3DumpOBsdSyscall(PVMCPU pVCpu)
{
    static const char *apsz[] =
    {
        "SYS_syscall", //0
        "SYS_exit", //1
        "SYS_fork", //2
        "SYS_read", //3
        "SYS_write", //4
        "SYS_open", //5
        "SYS_close", //6
        "SYS_wait4", //7
        "SYS_8",
        "SYS_link", //9
        "SYS_unlink", //10
        "SYS_11",
        "SYS_chdir", //12
        "SYS_fchdir", //13
        "SYS_mknod", //14
        "SYS_chmod", //15
        "SYS_chown", //16
        "SYS_break", //17
        "SYS_18",
        "SYS_19",
        "SYS_getpid", //20
        "SYS_mount", //21
        "SYS_unmount", //22
        "SYS_setuid", //23
        "SYS_getuid", //24
        "SYS_geteuid", //25
        "SYS_ptrace", //26
        "SYS_recvmsg", //27
        "SYS_sendmsg", //28
        "SYS_recvfrom", //29
        "SYS_accept", //30
        "SYS_getpeername", //31
        "SYS_getsockname", //32
        "SYS_access", //33
        "SYS_chflags", //34
        "SYS_fchflags", //35
        "SYS_sync", //36
        "SYS_kill", //37
        "SYS_38",
        "SYS_getppid", //39
        "SYS_40",
        "SYS_dup", //41
        "SYS_opipe", //42
        "SYS_getegid", //43
        "SYS_profil", //44
        "SYS_ktrace", //45
        "SYS_sigaction", //46
        "SYS_getgid", //47
        "SYS_sigprocmask", //48
        "SYS_getlogin", //49
        "SYS_setlogin", //50
        "SYS_acct", //51
        "SYS_sigpending", //52
        "SYS_osigaltstack", //53
        "SYS_ioctl", //54
        "SYS_reboot", //55
        "SYS_revoke", //56
        "SYS_symlink", //57
        "SYS_readlink", //58
        "SYS_execve", //59
        "SYS_umask", //60
        "SYS_chroot", //61
        "SYS_62",
        "SYS_63",
        "SYS_64",
        "SYS_65",
        "SYS_vfork", //66
        "SYS_67",
        "SYS_68",
        "SYS_sbrk", //69
        "SYS_sstk", //70
        "SYS_71",
        "SYS_vadvise", //72
        "SYS_munmap", //73
        "SYS_mprotect", //74
        "SYS_madvise", //75
        "SYS_76",
        "SYS_77",
        "SYS_mincore", //78
        "SYS_getgroups", //79
        "SYS_setgroups", //80
        "SYS_getpgrp", //81
        "SYS_setpgid", //82
        "SYS_setitimer", //83
        "SYS_84",
        "SYS_85",
        "SYS_getitimer", //86
        "SYS_87",
        "SYS_88",
        "SYS_89",
        "SYS_dup2", //90
        "SYS_91",
        "SYS_fcntl", //92
        "SYS_select", //93
        "SYS_94",
        "SYS_fsync", //95
        "SYS_setpriority", //96
        "SYS_socket", //97
        "SYS_connect", //98
        "SYS_99",
        "SYS_getpriority", //100
        "SYS_101",
        "SYS_102",
        "SYS_sigreturn", //103
        "SYS_bind", //104
        "SYS_setsockopt", //105
        "SYS_listen", //106
        "SYS_107",
        "SYS_108",
        "SYS_109",
        "SYS_110",
        "SYS_sigsuspend", //111
        "SYS_112",
        "SYS_113",
        "SYS_114",
        "SYS_115",
        "SYS_gettimeofday", //116
        "SYS_getrusage", //117
        "SYS_getsockopt", //118
        "SYS_119",
        "SYS_readv", //120
        "SYS_writev", //121
        "SYS_settimeofday", //122
        "SYS_fchown", //123
        "SYS_fchmod", //124
        "SYS_125",
        "SYS_setreuid", //126
        "SYS_setregid", //127
        "SYS_rename", //128
        "SYS_129",
        "SYS_130",
        "SYS_flock", //131
        "SYS_mkfifo", //132
        "SYS_sendto", //133
        "SYS_shutdown", //134
        "SYS_socketpair", //135
        "SYS_mkdir", //136
        "SYS_rmdir", //137
        "SYS_utimes", //138
        "SYS_139",
        "SYS_adjtime", //140
        "SYS_141",
        "SYS_142",
        "SYS_143",
        "SYS_144",
        "SYS_145",
        "SYS_146",
        "SYS_setsid", //147
        "SYS_quotactl", //148
        "SYS_149",
        "SYS_150",
        "SYS_151",
        "SYS_152",
        "SYS_153",
        "SYS_154",
        "SYS_nfssvc", //155
        "SYS_156",
        "SYS_157",
        "SYS_158",
        "SYS_159",
        "SYS_160",
        "SYS_getfh", //161
        "SYS_162",
        "SYS_163",
        "SYS_164",
        "SYS_sysarch", //165
        "SYS_166",
        "SYS_167",
        "SYS_168",
        "SYS_169",
        "SYS_170",
        "SYS_171",
        "SYS_172",
        "SYS_pread", //173
        "SYS_pwrite", //174
        "SYS_175",
        "SYS_176",
        "SYS_177",
        "SYS_178",
        "SYS_179",
        "SYS_180",
        "SYS_setgid", //181
        "SYS_setegid", //182
        "SYS_seteuid", //183
        "SYS_lfs_bmapv", //184
        "SYS_lfs_markv", //185
        "SYS_lfs_segclean", //186
        "SYS_lfs_segwait", //187
        "SYS_188",
        "SYS_189",
        "SYS_190",
        "SYS_pathconf", //191
        "SYS_fpathconf", //192
        "SYS_swapctl", //193
        "SYS_getrlimit", //194
        "SYS_setrlimit", //195
        "SYS_getdirentries", //196
        "SYS_mmap", //197
        "SYS___syscall", //198
        "SYS_lseek", //199
        "SYS_truncate", //200
        "SYS_ftruncate", //201
        "SYS___sysctl", //202
        "SYS_mlock", //203
        "SYS_munlock", //204
        "SYS_205",
        "SYS_futimes", //206
        "SYS_getpgid", //207
        "SYS_xfspioctl", //208
        "SYS_209",
        "SYS_210",
        "SYS_211",
        "SYS_212",
        "SYS_213",
        "SYS_214",
        "SYS_215",
        "SYS_216",
        "SYS_217",
        "SYS_218",
        "SYS_219",
        "SYS_220",
        "SYS_semget", //221
        "SYS_222",
        "SYS_223",
        "SYS_224",
        "SYS_msgget", //225
        "SYS_msgsnd", //226
        "SYS_msgrcv", //227
        "SYS_shmat", //228
        "SYS_229",
        "SYS_shmdt", //230
        "SYS_231",
        "SYS_clock_gettime", //232
        "SYS_clock_settime", //233
        "SYS_clock_getres", //234
        "SYS_235",
        "SYS_236",
        "SYS_237",
        "SYS_238",
        "SYS_239",
        "SYS_nanosleep", //240
        "SYS_241",
        "SYS_242",
        "SYS_243",
        "SYS_244",
        "SYS_245",
        "SYS_246",
        "SYS_247",
        "SYS_248",
        "SYS_249",
        "SYS_minherit", //250
        "SYS_rfork", //251
        "SYS_poll", //252
        "SYS_issetugid", //253
        "SYS_lchown", //254
        "SYS_getsid", //255
        "SYS_msync", //256
        "SYS_257",
        "SYS_258",
        "SYS_259",
        "SYS_getfsstat", //260
        "SYS_statfs", //261
        "SYS_fstatfs", //262
        "SYS_pipe", //263
        "SYS_fhopen", //264
        "SYS_265",
        "SYS_fhstatfs", //266
        "SYS_preadv", //267
        "SYS_pwritev", //268
        "SYS_kqueue", //269
        "SYS_kevent", //270
        "SYS_mlockall", //271
        "SYS_munlockall", //272
        "SYS_getpeereid", //273
        "SYS_274",
        "SYS_275",
        "SYS_276",
        "SYS_277",
        "SYS_278",
        "SYS_279",
        "SYS_280",
        "SYS_getresuid", //281
        "SYS_setresuid", //282
        "SYS_getresgid", //283
        "SYS_setresgid", //284
        "SYS_285",
        "SYS_mquery", //286
        "SYS_closefrom", //287
        "SYS_sigaltstack", //288
        "SYS_shmget", //289
        "SYS_semop", //290
        "SYS_stat", //291
        "SYS_fstat", //292
        "SYS_lstat", //293
        "SYS_fhstat", //294
        "SYS___semctl", //295
        "SYS_shmctl", //296
        "SYS_msgctl", //297
        "SYS_MAXSYSCALL", //298
        //299
        //300
    };
    uint32_t uEAX;
    if (!LogIsEnabled())
        return;
    uEAX = CPUMGetGuestEAX(pVCpu);
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
            {
                /* OpenBSD passes syscall arguments on the guest stack; grab the
                   first 8 dwords at ESP for the log.  A failed read is ignored
                   and leaves the zero-initialized buffer as-is. */
                uint32_t au32Args[8] = {0};
                PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
                RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
                            uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
                            au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
            }
            else
                RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
            break;
    }
}
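
/*
 * Illustrative sketch, not part of the original source: a caller that has
 * identified the guest OS would pick the matching dumper along these lines
 * (the enmGuestOs variable and REMGUESTOS enum are hypothetical):
 *
 *     if (LogIsEnabled())
 *     {
 *         if (enmGuestOs == REMGUESTOS_LINUX)
 *             remR3DumpLnxSyscall(pVCpu);
 *         else if (enmGuestOs == REMGUESTOS_OPENBSD)
 *             remR3DumpOBsdSyscall(pVCpu);
 *     }
 */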


#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
/**
 * The Dll main entry point (stub).
 */
bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
{
    return true;
}

/** Minimal byte-wise memcpy for the no-CRT build. */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}

#endif

void cpu_smm_update(CPUX86State *env)
{
    /* Intentionally a no-op: the recompiler has nothing to update here. */
}