VirtualBox

source: vbox/trunk/src/recompiler/VBoxRecompiler.c@ 20845

Last change on this file since 20845 was 20845, checked in by vboxsync, 16 years ago

VBoxRecompiler.c: minor cleanup + @todo.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 164.7 KB
Line 
1/* $Id: VBoxRecompiler.c 20845 2009-06-23 14:55:25Z vboxsync $ */
2/** @file
3 * VBox Recompiler - QEMU.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_REM
27#include "vl.h"
28#include "osdep.h"
29#include "exec-all.h"
30#include "config.h"
31#include "cpu-all.h"
32
33#include <VBox/rem.h>
34#include <VBox/vmapi.h>
35#include <VBox/tm.h>
36#include <VBox/ssm.h>
37#include <VBox/em.h>
38#include <VBox/trpm.h>
39#include <VBox/iom.h>
40#include <VBox/mm.h>
41#include <VBox/pgm.h>
42#include <VBox/pdm.h>
43#include <VBox/dbgf.h>
44#include <VBox/dbg.h>
45#include <VBox/hwaccm.h>
46#include <VBox/patm.h>
47#include <VBox/csam.h>
48#include "REMInternal.h"
49#include <VBox/vm.h>
50#include <VBox/param.h>
51#include <VBox/err.h>
52
53#include <VBox/log.h>
54#include <iprt/semaphore.h>
55#include <iprt/asm.h>
56#include <iprt/assert.h>
57#include <iprt/thread.h>
58#include <iprt/string.h>
59
60/* Don't wanna include everything. */
61extern void cpu_exec_init_all(unsigned long tb_size);
62extern void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
63extern void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
64extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
65extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
66extern void tlb_flush(CPUState *env, int flush_global);
67extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
68extern void sync_ldtr(CPUX86State *env1, int selector);
69
70#ifdef VBOX_STRICT
71unsigned long get_phys_page_offset(target_ulong addr);
72#endif
73
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78
/** Copy 80-bit fpu register at pSrc to pDst.
 * This is probably faster than *calling* memcpy.
 * Implemented as a structure assignment through the X86FPUMMX overlay type,
 * which the compiler can inline as a fixed-size copy.
 */
#define REM_COPY_FPU_REG(pDst, pSrc) \
    do { *(PX86FPUMMX)(pDst) = *(const X86FPUMMX *)(pSrc); } while (0)
84
85
86/*******************************************************************************
87* Internal Functions *
88*******************************************************************************/
89static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM);
90static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
91static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu);
92static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded);
93
94static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys);
95static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys);
96static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys);
97static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
98static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
99static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
100
101static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys);
102static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys);
103static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys);
104static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
105static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
106static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32);
107
108static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
109static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler);
110static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM);
111
112/*******************************************************************************
113* Global Variables *
114*******************************************************************************/
115
116/** @todo Move stats to REM::s some rainy day we have nothing do to. */
117#ifdef VBOX_WITH_STATISTICS
118static STAMPROFILEADV gStatExecuteSingleInstr;
119static STAMPROFILEADV gStatCompilationQEmu;
120static STAMPROFILEADV gStatRunCodeQEmu;
121static STAMPROFILEADV gStatTotalTimeQEmu;
122static STAMPROFILEADV gStatTimers;
123static STAMPROFILEADV gStatTBLookup;
124static STAMPROFILEADV gStatIRQ;
125static STAMPROFILEADV gStatRawCheck;
126static STAMPROFILEADV gStatMemRead;
127static STAMPROFILEADV gStatMemWrite;
128static STAMPROFILE gStatGCPhys2HCVirt;
129static STAMPROFILE gStatHCVirt2GCPhys;
130static STAMCOUNTER gStatCpuGetTSC;
131static STAMCOUNTER gStatRefuseTFInhibit;
132static STAMCOUNTER gStatRefuseVM86;
133static STAMCOUNTER gStatRefusePaging;
134static STAMCOUNTER gStatRefusePAE;
135static STAMCOUNTER gStatRefuseIOPLNot0;
136static STAMCOUNTER gStatRefuseIF0;
137static STAMCOUNTER gStatRefuseCode16;
138static STAMCOUNTER gStatRefuseWP0;
139static STAMCOUNTER gStatRefuseRing1or2;
140static STAMCOUNTER gStatRefuseCanExecute;
141static STAMCOUNTER gStatREMGDTChange;
142static STAMCOUNTER gStatREMIDTChange;
143static STAMCOUNTER gStatREMLDTRChange;
144static STAMCOUNTER gStatREMTRChange;
145static STAMCOUNTER gStatSelOutOfSync[6];
146static STAMCOUNTER gStatSelOutOfSyncStateBack[6];
147static STAMCOUNTER gStatFlushTBs;
148#endif
149/* in exec.c */
150extern uint32_t tlb_flush_count;
151extern uint32_t tb_flush_count;
152extern uint32_t tb_phys_invalidate_count;
153
/*
 * Global stuff.
 */

/** MMIO read callbacks.
 * Three entries for the 1, 2 and 4 byte access sizes, in that order,
 * as expected by cpu_register_io_memory(). */
CPUReadMemoryFunc *g_apfnMMIORead[3] =
{
    remR3MMIOReadU8,
    remR3MMIOReadU16,
    remR3MMIOReadU32
};

/** MMIO write callbacks (1, 2 and 4 byte access sizes). */
CPUWriteMemoryFunc *g_apfnMMIOWrite[3] =
{
    remR3MMIOWriteU8,
    remR3MMIOWriteU16,
    remR3MMIOWriteU32
};

/** Access handler read callbacks (1, 2 and 4 byte access sizes). */
CPUReadMemoryFunc *g_apfnHandlerRead[3] =
{
    remR3HandlerReadU8,
    remR3HandlerReadU16,
    remR3HandlerReadU32
};

/** Access handler write callbacks (1, 2 and 4 byte access sizes). */
CPUWriteMemoryFunc *g_apfnHandlerWrite[3] =
{
    remR3HandlerWriteU8,
    remR3HandlerWriteU16,
    remR3HandlerWriteU32
};
189
190
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/*
 * Debugger commands.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);

/** '.remstep' arguments.
 * A single optional numeric/boolean argument selecting the new state. */
static const DBGCVARDESC g_aArgRemStep[] =
{
    /* cTimesMin,   cTimesMax,  enmCategory,            fFlags, pszName,    pszDescription */
    {  0,           ~0,         DBGCVAR_CAT_NUMBER,     0,      "on/off",   "Boolean value/mnemonic indicating the new state." },
};

/** Command descriptors.
 * Registered once with DBGCRegisterCommands() from REMR3Init(). */
static const DBGCCMD g_aCmds[] =
{
    {
        .pszCmd ="remstep",
        .cArgsMin = 0,
        .cArgsMax = 1,
        .paArgDescs = &g_aArgRemStep[0],
        .cArgDescs = RT_ELEMENTS(g_aArgRemStep),
        .pResultDesc = NULL,
        .fFlags = 0,
        .pfnHandler = remR3CmdDisasEnableStepping,
        .pszSyntax = "[on/off]",
        .pszDescription = "Enable or disable the single stepping with logged disassembly. "
                          "If no arguments show the current state."
    }
};
#endif
222
223/** Prologue code, must be in lower 4G to simplify jumps to/from generated code. */
224uint8_t *code_gen_prologue;
225
226
227/*******************************************************************************
228* Internal Functions *
229*******************************************************************************/
230void remAbort(int rc, const char *pszTip);
231extern int testmath(void);
232
233/* Put them here to avoid unused variable warning. */
234AssertCompile(RT_SIZEOFMEMB(VM, rem.padding) >= RT_SIZEOFMEMB(VM, rem.s));
235#if !defined(IPRT_NO_CRT) && (defined(RT_OS_LINUX) || defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS))
236//AssertCompileMemberSize(REM, Env, REM_ENV_SIZE);
237/* Why did this have to be identical?? */
238AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
239#else
240AssertCompile(RT_SIZEOFMEMB(REM, Env) <= REM_ENV_SIZE);
241#endif
242
243
/**
 * Initializes the REM.
 *
 * Sets up the recompiler CPU environment, registers the MMIO and access
 * handler memory callbacks, the saved state unit, optional debugger
 * commands and statistics, and initializes the handler notification
 * free list.
 *
 * @returns VBox status code.
 * @param   pVM The VM to operate on.
 */
REMR3DECL(int) REMR3Init(PVM pVM)
{
    uint32_t u32Dummy;
    int rc;

#ifdef VBOX_ENABLE_VBOXREM64
    LogRel(("Using 64-bit aware REM\n"));
#endif

    /*
     * Assert sanity.
     */
    AssertReleaseMsg(sizeof(pVM->rem.padding) >= sizeof(pVM->rem.s), ("%#x >= %#x; sizeof(Env)=%#x\n", sizeof(pVM->rem.padding), sizeof(pVM->rem.s), sizeof(pVM->rem.s.Env)));
    AssertReleaseMsg(sizeof(pVM->rem.s.Env) <= REM_ENV_SIZE, ("%#x == %#x\n", sizeof(pVM->rem.s.Env), REM_ENV_SIZE));
    AssertReleaseMsg(!(RT_OFFSETOF(VM, rem) & 31), ("off=%#x\n", RT_OFFSETOF(VM, rem)));
#if defined(DEBUG) && !defined(RT_OS_SOLARIS) && !defined(RT_OS_FREEBSD) /// @todo fix the solaris and freebsd math stuff.
    Assert(!testmath());
#endif

    /*
     * Init some internal data members.
     */
    pVM->rem.s.offVM = RT_OFFSETOF(VM, rem.s);
    pVM->rem.s.Env.pVM = pVM;
#ifdef CPU_RAW_MODE_INIT
    pVM->rem.s.state |= CPU_RAW_MODE_INIT;
#endif

    /*
     * Initialize the REM critical section.
     *
     * Note: This is not a 100% safe solution as updating the internal memory state while another VCPU
     *       is executing code could be dangerous. Taking the REM lock is not an option due to the danger of
     *       deadlocks. (mostly pgm vs rem locking)
     */
    rc = PDMR3CritSectInit(pVM, &pVM->rem.s.CritSectRegister, "REM-Register");
    AssertRCReturn(rc, rc);

    /* ctx. */
    pVM->rem.s.pCtx = NULL;     /* set when executing code. */
    AssertMsg(MMR3PhysGetRamSize(pVM) == 0, ("Init order has changed! REM depends on notification about ALL physical memory registrations\n"));

    /* ignore all notifications */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /* The prologue must live in executable memory; see code_gen_prologue. */
    code_gen_prologue = RTMemExecAlloc(_1K);
    AssertLogRelReturn(code_gen_prologue, VERR_NO_MEMORY);

    cpu_exec_init_all(0);

    /*
     * Init the recompiler.
     */
    if (!cpu_x86_init(&pVM->rem.s.Env, "vbox"))
    {
        AssertMsgFailed(("cpu_x86_init failed - impossible!\n"));
        return VERR_GENERAL_FAILURE;
    }
    /* Mirror the guest CPUID feature bits (std leaf 1, ext leaf 0x80000001)
       into the recompiler CPU environment. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu,          1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext3_features, &pVM->rem.s.Env.cpuid_ext2_features);

    /* allocate code buffer for single instruction emulation. */
    pVM->rem.s.Env.cbCodeBuffer = 4096;
    pVM->rem.s.Env.pvCodeBuffer = RTMemExecAlloc(pVM->rem.s.Env.cbCodeBuffer);
    AssertMsgReturn(pVM->rem.s.Env.pvCodeBuffer, ("Failed to allocate code buffer!\n"), VERR_NO_MEMORY);

    /* finally, set the cpu_single_env global. */
    cpu_single_env = &pVM->rem.s.Env;

    /* Nothing is pending by default */
    pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;

    /*
     * Register ram types.
     */
    pVM->rem.s.iMMIOMemType = cpu_register_io_memory(-1, g_apfnMMIORead, g_apfnMMIOWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iMMIOMemType >= 0, ("pVM->rem.s.iMMIOMemType=%d\n", pVM->rem.s.iMMIOMemType));
    pVM->rem.s.iHandlerMemType = cpu_register_io_memory(-1, g_apfnHandlerRead, g_apfnHandlerWrite, pVM);
    AssertReleaseMsg(pVM->rem.s.iHandlerMemType >= 0, ("pVM->rem.s.iHandlerMemType=%d\n", pVM->rem.s.iHandlerMemType));
    Log2(("REM: iMMIOMemType=%d iHandlerMemType=%d\n", pVM->rem.s.iMMIOMemType, pVM->rem.s.iHandlerMemType));

    /* stop ignoring. */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Register the saved state data unit.
     */
    rc = SSMR3RegisterInternal(pVM, "rem", 1, REM_SAVED_STATE_VERSION, sizeof(uint32_t) * 10,
                               NULL, remR3Save, NULL,
                               NULL, remR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
    /*
     * Debugger commands.  Registered only once per process; a registration
     * failure is non-fatal (the result only controls latching the flag).
     */
    static bool fRegisteredCmds = false;
    if (!fRegisteredCmds)
    {
        int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
        if (RT_SUCCESS(rc))
            fRegisteredCmds = true;
    }
#endif

#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_REG(pVM, &gStatExecuteSingleInstr, STAMTYPE_PROFILE, "/PROF/REM/SingleInstr",STAMUNIT_TICKS_PER_CALL, "Profiling single instruction emulation.");
    STAM_REG(pVM, &gStatCompilationQEmu, STAMTYPE_PROFILE, "/PROF/REM/Compile", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu compilation.");
    STAM_REG(pVM, &gStatRunCodeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Runcode", STAMUNIT_TICKS_PER_CALL, "Profiling QEmu code execution.");
    STAM_REG(pVM, &gStatTotalTimeQEmu, STAMTYPE_PROFILE, "/PROF/REM/Emulate", STAMUNIT_TICKS_PER_CALL, "Profiling code emulation.");
    STAM_REG(pVM, &gStatTimers, STAMTYPE_PROFILE, "/PROF/REM/Timers", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatTBLookup, STAMTYPE_PROFILE, "/PROF/REM/TBLookup", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatIRQ, STAMTYPE_PROFILE, "/PROF/REM/IRQ", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatRawCheck, STAMTYPE_PROFILE, "/PROF/REM/RawCheck", STAMUNIT_TICKS_PER_CALL, "Profiling timer scheduling.");
    STAM_REG(pVM, &gStatMemRead, STAMTYPE_PROFILE, "/PROF/REM/MemRead", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatMemWrite, STAMTYPE_PROFILE, "/PROF/REM/MemWrite", STAMUNIT_TICKS_PER_CALL, "Profiling memory access.");
    STAM_REG(pVM, &gStatHCVirt2GCPhys, STAMTYPE_PROFILE, "/PROF/REM/HCVirt2GCPhys", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");
    STAM_REG(pVM, &gStatGCPhys2HCVirt, STAMTYPE_PROFILE, "/PROF/REM/GCPhys2HCVirt", STAMUNIT_TICKS_PER_CALL, "Profiling memory convertion.");

    STAM_REG(pVM, &gStatCpuGetTSC, STAMTYPE_COUNTER, "/REM/CpuGetTSC", STAMUNIT_OCCURENCES, "cpu_get_tsc calls");

    STAM_REG(pVM, &gStatRefuseTFInhibit, STAMTYPE_COUNTER, "/REM/Refuse/TFInibit", STAMUNIT_OCCURENCES, "Raw mode refused because of TF or irq inhibit");
    STAM_REG(pVM, &gStatRefuseVM86, STAMTYPE_COUNTER, "/REM/Refuse/VM86", STAMUNIT_OCCURENCES, "Raw mode refused because of VM86");
    STAM_REG(pVM, &gStatRefusePaging, STAMTYPE_COUNTER, "/REM/Refuse/Paging", STAMUNIT_OCCURENCES, "Raw mode refused because of disabled paging/pm");
    STAM_REG(pVM, &gStatRefusePAE, STAMTYPE_COUNTER, "/REM/Refuse/PAE", STAMUNIT_OCCURENCES, "Raw mode refused because of PAE");
    STAM_REG(pVM, &gStatRefuseIOPLNot0, STAMTYPE_COUNTER, "/REM/Refuse/IOPLNot0", STAMUNIT_OCCURENCES, "Raw mode refused because of IOPL != 0");
    STAM_REG(pVM, &gStatRefuseIF0, STAMTYPE_COUNTER, "/REM/Refuse/IF0", STAMUNIT_OCCURENCES, "Raw mode refused because of IF=0");
    STAM_REG(pVM, &gStatRefuseCode16, STAMTYPE_COUNTER, "/REM/Refuse/Code16", STAMUNIT_OCCURENCES, "Raw mode refused because of 16 bit code");
    STAM_REG(pVM, &gStatRefuseWP0, STAMTYPE_COUNTER, "/REM/Refuse/WP0", STAMUNIT_OCCURENCES, "Raw mode refused because of WP=0");
    STAM_REG(pVM, &gStatRefuseRing1or2, STAMTYPE_COUNTER, "/REM/Refuse/Ring1or2", STAMUNIT_OCCURENCES, "Raw mode refused because of ring 1/2 execution");
    STAM_REG(pVM, &gStatRefuseCanExecute, STAMTYPE_COUNTER, "/REM/Refuse/CanExecuteRaw", STAMUNIT_OCCURENCES, "Raw mode refused because of cCanExecuteRaw");
    STAM_REG(pVM, &gStatFlushTBs, STAMTYPE_COUNTER, "/REM/FlushTB", STAMUNIT_OCCURENCES, "Number of TB flushes");

    STAM_REG(pVM, &gStatREMGDTChange, STAMTYPE_COUNTER, "/REM/Change/GDTBase", STAMUNIT_OCCURENCES, "GDT base changes");
    STAM_REG(pVM, &gStatREMLDTRChange, STAMTYPE_COUNTER, "/REM/Change/LDTR", STAMUNIT_OCCURENCES, "LDTR changes");
    STAM_REG(pVM, &gStatREMIDTChange, STAMTYPE_COUNTER, "/REM/Change/IDTBase", STAMUNIT_OCCURENCES, "IDT base changes");
    STAM_REG(pVM, &gStatREMTRChange, STAMTYPE_COUNTER, "/REM/Change/TR", STAMUNIT_OCCURENCES, "TR selector changes");

    STAM_REG(pVM, &gStatSelOutOfSync[0], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[1], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[2], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[3], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[4], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSync[5], STAMTYPE_COUNTER, "/REM/State/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[0], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/ES", STAMUNIT_OCCURENCES, "ES out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[1], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/CS", STAMUNIT_OCCURENCES, "CS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[2], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/SS", STAMUNIT_OCCURENCES, "SS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[3], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/DS", STAMUNIT_OCCURENCES, "DS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[4], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/FS", STAMUNIT_OCCURENCES, "FS out of sync");
    STAM_REG(pVM, &gStatSelOutOfSyncStateBack[5], STAMTYPE_COUNTER, "/REM/StateBack/SelOutOfSync/GS", STAMUNIT_OCCURENCES, "GS out of sync");

    STAM_REG(pVM, &pVM->rem.s.Env.StatTbFlush, STAMTYPE_PROFILE, "/REM/TbFlush", STAMUNIT_TICKS_PER_CALL, "profiling tb_flush().");
#endif /* VBOX_WITH_STATISTICS */

    /* Release-build counters exported from exec.c. */
    STAM_REL_REG(pVM, &tb_flush_count, STAMTYPE_U32_RESET, "/REM/TbFlushCount", STAMUNIT_OCCURENCES, "tb_flush() calls");
    STAM_REL_REG(pVM, &tb_phys_invalidate_count, STAMTYPE_U32_RESET, "/REM/TbPhysInvldCount", STAMUNIT_OCCURENCES, "tb_phys_invalidate() calls");
    STAM_REL_REG(pVM, &tlb_flush_count, STAMTYPE_U32_RESET, "/REM/TlbFlushCount", STAMUNIT_OCCURENCES, "tlb_flush() calls");


#ifdef DEBUG_ALL_LOGGING
    loglevel = ~0;
# ifdef DEBUG_TMP_LOGGING
    logfile = fopen("/tmp/vbox-qemu.log", "w");
# endif
#endif

    /*
     * Initialize the handler notification lists: no pending entries, and all
     * records chained into the free list via their idxNext fields.
     */
    PREMHANDLERNOTIFICATION pCur;
    unsigned i;

    pVM->rem.s.idxPendingList = -1;
    pVM->rem.s.idxFreeList = 0;

    for (i = 0 ; i < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1; i++)
    {
        pCur = &pVM->rem.s.aHandlerNotifications[i];
        pCur->idxNext = i + 1;
        pCur->idxSelf = i;
    }

    /* The last record terminates the free list. */
    pCur = &pVM->rem.s.aHandlerNotifications[RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1];
    pCur->idxNext = -1;
    pCur->idxSelf = RT_ELEMENTS(pVM->rem.s.aHandlerNotifications) - 1;

    return rc;      /* rc is the (successful) SSM registration status here. */
}
441
442
443/**
444 * Finalizes the REM initialization.
445 *
446 * This is called after all components, devices and drivers has
447 * been initialized. Its main purpose it to finish the RAM related
448 * initialization.
449 *
450 * @returns VBox status code.
451 *
452 * @param pVM The VM handle.
453 */
454REMR3DECL(int) REMR3InitFinalize(PVM pVM)
455{
456 int rc;
457
458 /*
459 * Ram size & dirty bit map.
460 */
461 Assert(!pVM->rem.s.fGCPhysLastRamFixed);
462 pVM->rem.s.fGCPhysLastRamFixed = true;
463#ifdef RT_STRICT
464 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, true /* fGuarded */);
465#else
466 rc = remR3InitPhysRamSizeAndDirtyMap(pVM, false /* fGuarded */);
467#endif
468 return rc;
469}
470
471
/**
 * Initializes phys_ram_size, phys_ram_dirty and phys_ram_dirty_size.
 *
 * The dirty map uses one byte per guest page.  In guarded mode the map is
 * page-allocated with an inaccessible tail region so out-of-bounds writes
 * fault instead of silently corrupting memory.
 *
 * @returns VBox status code.
 * @param   pVM         The VM handle.
 * @param   fGuarded    Whether to guard the map.
 */
static int remR3InitPhysRamSizeAndDirtyMap(PVM pVM, bool fGuarded)
{
    int rc = VINF_SUCCESS;
    RTGCPHYS cb;

    /* GCPhysLastRam is the address of the last RAM byte; +1 yields the size
       and must not wrap around. */
    cb = pVM->rem.s.GCPhysLastRam + 1;
    AssertLogRelMsgReturn(cb > pVM->rem.s.GCPhysLastRam,
                          ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                          VERR_OUT_OF_RANGE);
    phys_ram_size = cb;
    phys_ram_dirty_size = cb >> PAGE_SHIFT;     /* one dirty byte per page */
    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

    if (!fGuarded)
    {
        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
    }
    else
    {
        /*
         * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
         */
        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
        uint32_t cbBitmapFull = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
        if (cbBitmapFull == cbBitmapAligned)
            cbBitmapFull += _4G >> PAGE_SHIFT;
        else if (cbBitmapFull - cbBitmapAligned < _64K)
            cbBitmapFull += _64K;

        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);

        /* Make the tail of the allocation inaccessible so overruns fault. */
        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
        if (RT_FAILURE(rc))
        {
            RTMemPageFree(phys_ram_dirty);
            AssertLogRelRCReturn(rc, rc);
        }

        /* Shift the base pointer so the map ends right at the guard region. */
        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
    }

    /* initialize it: mark all pages dirty. */
    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
    return rc;
}
526
527
/**
 * Terminates the REM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * Currently this only deregisters the statistics registered by REMR3Init();
 * the deregistration order mirrors the registration order there.
 *
 * @returns VBox status code.
 * @param   pVM The VM to operate on.
 */
REMR3DECL(int) REMR3Term(PVM pVM)
{
#ifdef VBOX_WITH_STATISTICS
    /*
     * Statistics.
     */
    STAM_DEREG(pVM, &gStatExecuteSingleInstr);
    STAM_DEREG(pVM, &gStatCompilationQEmu);
    STAM_DEREG(pVM, &gStatRunCodeQEmu);
    STAM_DEREG(pVM, &gStatTotalTimeQEmu);
    STAM_DEREG(pVM, &gStatTimers);
    STAM_DEREG(pVM, &gStatTBLookup);
    STAM_DEREG(pVM, &gStatIRQ);
    STAM_DEREG(pVM, &gStatRawCheck);
    STAM_DEREG(pVM, &gStatMemRead);
    STAM_DEREG(pVM, &gStatMemWrite);
    STAM_DEREG(pVM, &gStatHCVirt2GCPhys);
    STAM_DEREG(pVM, &gStatGCPhys2HCVirt);

    STAM_DEREG(pVM, &gStatCpuGetTSC);

    STAM_DEREG(pVM, &gStatRefuseTFInhibit);
    STAM_DEREG(pVM, &gStatRefuseVM86);
    STAM_DEREG(pVM, &gStatRefusePaging);
    STAM_DEREG(pVM, &gStatRefusePAE);
    STAM_DEREG(pVM, &gStatRefuseIOPLNot0);
    STAM_DEREG(pVM, &gStatRefuseIF0);
    STAM_DEREG(pVM, &gStatRefuseCode16);
    STAM_DEREG(pVM, &gStatRefuseWP0);
    STAM_DEREG(pVM, &gStatRefuseRing1or2);
    STAM_DEREG(pVM, &gStatRefuseCanExecute);
    STAM_DEREG(pVM, &gStatFlushTBs);

    STAM_DEREG(pVM, &gStatREMGDTChange);
    STAM_DEREG(pVM, &gStatREMLDTRChange);
    STAM_DEREG(pVM, &gStatREMIDTChange);
    STAM_DEREG(pVM, &gStatREMTRChange);

    STAM_DEREG(pVM, &gStatSelOutOfSync[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSync[5]);

    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[0]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[1]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[2]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[3]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[4]);
    STAM_DEREG(pVM, &gStatSelOutOfSyncStateBack[5]);

    STAM_DEREG(pVM, &pVM->rem.s.Env.StatTbFlush);
#endif /* VBOX_WITH_STATISTICS */

    STAM_REL_DEREG(pVM, &tb_flush_count);
    STAM_REL_DEREG(pVM, &tb_phys_invalidate_count);
    STAM_REL_DEREG(pVM, &tlb_flush_count);

    return VINF_SUCCESS;
}
598
599
/**
 * The VM is being reset.
 *
 * For the REM component this means to call the cpu_reset() and
 * reinitialize some state variables.
 *
 * @param   pVM     VM handle.
 */
REMR3DECL(void) REMR3Reset(PVM pVM)
{
    /*
     * Reset the REM cpu.  Notifications triggered by the reset itself are
     * suppressed by bumping cIgnoreAll around the cpu_reset() call.
     */
    Assert(pVM->rem.s.cIgnoreAll == 0);
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_reset(&pVM->rem.s.Env);
    pVM->rem.s.cInvalidatedPages = 0;
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
    Assert(pVM->rem.s.cIgnoreAll == 0);

    /* Clear raw ring 0 init state */
    pVM->rem.s.Env.state &= ~CPU_RAW_RING0;

    /* Flush the TBs the next time we execute code here. */
    pVM->rem.s.fFlushTBs = true;
}
626
627
/**
 * Execute state save operation.
 *
 * The on-disk format is: hflags, ~0 separator, CPU_RAW_RING0 flag,
 * pending interrupt, ~0 terminator — matched exactly by remR3Load().
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) remR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    PREM pRem = &pVM->rem.s;

    /*
     * Save the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    LogFlow(("remR3Save:\n"));
    Assert(!pRem->fInREM);
    SSMR3PutU32(pSSM, pRem->Env.hflags);
    SSMR3PutU32(pSSM, ~0);      /* separator */

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3PutU32(pSSM, !!(pRem->Env.state & CPU_RAW_RING0));
    SSMR3PutU32(pSSM, pVM->rem.s.u32PendingInterrupt);

    return SSMR3PutU32(pSSM, ~0);       /* terminator */
}
654
655
/**
 * Execute state load operation.
 *
 * Reads back the stream written by remR3Save() (with extra, now ignored,
 * fields for version 1.6 states), then re-syncs the recompiler by flushing
 * the TLB and marking all CPUM state changed on every VCPU.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) remR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    uint32_t u32Dummy;
    uint32_t fRawRing0 = false;     /* uint32_t, not bool: read via SSMR3GetUInt below */
    uint32_t u32Sep;
    unsigned i;
    int rc;
    PREM pRem;
    LogFlow(("remR3Load:\n"));

    /*
     * Validate version.
     */
    if (    u32Version != REM_SAVED_STATE_VERSION
        &&  u32Version != REM_SAVED_STATE_VERSION_VER1_6)
    {
        AssertMsgFailed(("remR3Load: Invalid version u32Version=%d!\n", u32Version));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do a reset to be on the safe side...
     */
    REMR3Reset(pVM);

    /*
     * Ignore all ignorable notifications.
     * (Not doing this will cause serious trouble.)
     */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Load the required CPU Env bits.
     * (Not much because we're never in REM when doing the save.)
     */
    pRem = &pVM->rem.s;
    Assert(!pRem->fInREM);
    SSMR3GetU32(pSSM, &pRem->Env.hflags);
    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /* Redundant REM CPU state has to be loaded, but can be ignored. */
        CPUX86State_Ver16 temp;
        SSMR3GetMem(pSSM, &temp, RT_OFFSETOF(CPUX86State_Ver16, jmp_env));
    }

    rc = SSMR3GetU32(pSSM, &u32Sep);        /* separator */
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /* Remember if we've entered raw mode (vital for ring 1 checks in e.g. iret emulation). */
    SSMR3GetUInt(pSSM, &fRawRing0);
    if (fRawRing0)
        pRem->Env.state |= CPU_RAW_RING0;

    if (u32Version == REM_SAVED_STATE_VERSION_VER1_6)
    {
        /*
         * Load the REM stuff.
         */
        /** @todo r=bird: We should just drop all these items, restoring doesn't make
         *        sense. */
        rc = SSMR3GetU32(pSSM, (uint32_t *)&pRem->cInvalidatedPages);
        if (RT_FAILURE(rc))
            return rc;
        if (pRem->cInvalidatedPages > RT_ELEMENTS(pRem->aGCPtrInvalidatedPages))
        {
            AssertMsgFailed(("cInvalidatedPages=%#x\n", pRem->cInvalidatedPages));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        for (i = 0; i < pRem->cInvalidatedPages; i++)
            SSMR3GetGCPtr(pSSM, &pRem->aGCPtrInvalidatedPages[i]);
    }

    rc = SSMR3GetUInt(pSSM, &pVM->rem.s.u32PendingInterrupt);
    if (RT_FAILURE(rc))
        return rc;

    /* check the terminator. */
    rc = SSMR3GetU32(pSSM, &u32Sep);
    if (RT_FAILURE(rc))
        return rc;
    if (u32Sep != ~0U)
    {
        AssertMsgFailed(("u32Sep=%#x (term)\n", u32Sep));
        return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
    }

    /*
     * Get the CPUID features.
     */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
    CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);

    /*
     * Flush the whole TLB so the loaded state starts from a clean slate.
     */
    tlb_flush(&pRem->Env, 1);

    /*
     * Stop ignoring ignorable notifications.
     */
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);

    /*
     * Sync the whole CPU state when executing code in the recompiler.
     */
    for (i=0;i<pVM->cCPUs;i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];  /* intentionally shadows the outer pVCpu */

        CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
    }
    return VINF_SUCCESS;
}
784
785
786
787#undef LOG_GROUP
788#define LOG_GROUP LOG_GROUP_REM_RUN
789
/**
 * Single steps an instruction in recompiled mode.
 *
 * Before calling this function the REM state needs to be in sync with
 * the VM. Call REMR3State() to perform the sync. It's only necessary
 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
 * and after calling REMR3StateBack().
 *
 * @returns VBox status code.
 *
 * @param   pVM     VM Handle.
 * @param   pVCpu   VMCPU Handle.
 */
REMR3DECL(int) REMR3Step(PVM pVM, PVMCPU pVCpu)
{
    int rc, interrupt_request;
    RTGCPTR GCPtrPC;
    bool fBp;

    /*
     * Lock the REM - we don't wanna have anyone interrupting us
     * while stepping - and enabled single stepping. We also ignore
     * pending interrupts and suchlike.  The original interrupt_request
     * value is saved and restored at the end.
     */
    interrupt_request = pVM->rem.s.Env.interrupt_request;
    Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
    pVM->rem.s.Env.interrupt_request = 0;
    cpu_single_step(&pVM->rem.s.Env, 1);

    /*
     * If we're standing at a breakpoint, that have to be disabled before we start stepping.
     * (fBp records whether one was removed so it can be re-inserted below.)
     */
    GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
    fBp = !cpu_breakpoint_remove(&pVM->rem.s.Env, GCPtrPC);

    /*
     * Execute and handle the return code.
     * We execute without enabling the cpu tick, so on success we'll
     * just flip it on and off to make sure it moves
     */
    rc = cpu_exec(&pVM->rem.s.Env);
    if (rc == EXCP_DEBUG)
    {
        /* The step completed; tick the clock once via resume+suspend. */
        TMR3NotifyResume(pVM, pVCpu);
        TMR3NotifySuspend(pVM, pVCpu);
        rc = VINF_EM_DBG_STEPPED;
    }
    else
    {
        /* Map the recompiler exit code to a VBox status code. */
        switch (rc)
        {
            case EXCP_INTERRUPT:    rc = VINF_SUCCESS; break;
            case EXCP_HLT:
            case EXCP_HALTED:       rc = VINF_EM_HALT; break;
            case EXCP_RC:
                /* A VBox status was stashed in pVM->rem.s.rc; consume it. */
                rc = pVM->rem.s.rc;
                pVM->rem.s.rc = VERR_INTERNAL_ERROR;
                break;
            case EXCP_EXECUTE_RAW:
            case EXCP_EXECUTE_HWACC:
                /** @todo: is it correct? No! */
                rc = VINF_SUCCESS;
                break;
            default:
                AssertReleaseMsgFailed(("This really shouldn't happen, rc=%d!\n", rc));
                rc = VERR_INTERNAL_ERROR;
                break;
        }
    }

    /*
     * Restore the stuff we changed to prevent interruption.
     * Unlock the REM.
     */
    if (fBp)
    {
        int rc2 = cpu_breakpoint_insert(&pVM->rem.s.Env, GCPtrPC);
        Assert(rc2 == 0); NOREF(rc2);
    }
    cpu_single_step(&pVM->rem.s.Env, 0);
    pVM->rem.s.Env.interrupt_request = interrupt_request;

    return rc;
}
874
875
876/**
877 * Set a breakpoint using the REM facilities.
878 *
879 * @returns VBox status code.
880 * @param pVM The VM handle.
881 * @param Address The breakpoint address.
882 * @thread The emulation thread.
883 */
884REMR3DECL(int) REMR3BreakpointSet(PVM pVM, RTGCUINTPTR Address)
885{
886 VM_ASSERT_EMT(pVM);
887 if (!cpu_breakpoint_insert(&pVM->rem.s.Env, Address))
888 {
889 LogFlow(("REMR3BreakpointSet: Address=%RGv\n", Address));
890 return VINF_SUCCESS;
891 }
892 LogFlow(("REMR3BreakpointSet: Address=%RGv - failed!\n", Address));
893 return VERR_REM_NO_MORE_BP_SLOTS;
894}
895
896
897/**
898 * Clears a breakpoint set by REMR3BreakpointSet().
899 *
900 * @returns VBox status code.
901 * @param pVM The VM handle.
902 * @param Address The breakpoint address.
903 * @thread The emulation thread.
904 */
905REMR3DECL(int) REMR3BreakpointClear(PVM pVM, RTGCUINTPTR Address)
906{
907 VM_ASSERT_EMT(pVM);
908 if (!cpu_breakpoint_remove(&pVM->rem.s.Env, Address))
909 {
910 LogFlow(("REMR3BreakpointClear: Address=%RGv\n", Address));
911 return VINF_SUCCESS;
912 }
913 LogFlow(("REMR3BreakpointClear: Address=%RGv - not found!\n", Address));
914 return VERR_REM_BP_NOT_FOUND;
915}
916
917
918/**
919 * Emulate an instruction.
920 *
921 * This function executes one instruction without letting anyone
922 * interrupt it. This is intended for being called while being in
923 * raw mode and thus will take care of all the state syncing between
924 * REM and the rest.
925 *
926 * @returns VBox status code.
927 * @param pVM VM handle.
928 * @param pVCpu VMCPU Handle.
929 */
930REMR3DECL(int) REMR3EmulateInstruction(PVM pVM, PVMCPU pVCpu)
931{
932 bool fFlushTBs;
933
934 int rc, rc2;
935 Log2(("REMR3EmulateInstruction: (cs:eip=%04x:%08x)\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
936
937 /* Make sure this flag is set; we might never execute remR3CanExecuteRaw in the AMD-V case.
938 * CPU_RAW_HWACC makes sure we never execute interrupt handlers in the recompiler.
939 */
940 if (HWACCMIsEnabled(pVM))
941 pVM->rem.s.Env.state |= CPU_RAW_HWACC;
942
943 /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
944 fFlushTBs = pVM->rem.s.fFlushTBs;
945 pVM->rem.s.fFlushTBs = false;
946
947 /*
948 * Sync the state and enable single instruction / single stepping.
949 */
950 rc = REMR3State(pVM, pVCpu);
951 pVM->rem.s.fFlushTBs = fFlushTBs;
952 if (RT_SUCCESS(rc))
953 {
954 int interrupt_request = pVM->rem.s.Env.interrupt_request;
955 Assert(!(interrupt_request & ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER | CPU_INTERRUPT_EXTERNAL_HARD | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER)));
956 Assert(!pVM->rem.s.Env.singlestep_enabled);
957 /*
958 * Now we set the execute single instruction flag and enter the cpu_exec loop.
959 */
960 TMNotifyStartOfExecution(pVCpu);
961 pVM->rem.s.Env.interrupt_request = CPU_INTERRUPT_SINGLE_INSTR;
962 rc = cpu_exec(&pVM->rem.s.Env);
963 TMNotifyEndOfExecution(pVCpu);
964 switch (rc)
965 {
966 /*
967 * Executed without anything out of the way happening.
968 */
969 case EXCP_SINGLE_INSTR:
970 rc = VINF_EM_RESCHEDULE;
971 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_SINGLE_INSTR\n"));
972 break;
973
974 /*
975 * If we take a trap or start servicing a pending interrupt, we might end up here.
976 * (Timer thread or some other thread wishing EMT's attention.)
977 */
978 case EXCP_INTERRUPT:
979 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_INTERRUPT\n"));
980 rc = VINF_EM_RESCHEDULE;
981 break;
982
983 /*
984 * Single step, we assume!
985 * If there was a breakpoint there we're fucked now.
986 */
987 case EXCP_DEBUG:
988 {
989 /* breakpoint or single step? */
990 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
991 int iBP;
992 rc = VINF_EM_DBG_STEPPED;
993 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
994 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
995 {
996 rc = VINF_EM_DBG_BREAKPOINT;
997 break;
998 }
999 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1000 break;
1001 }
1002
1003 /*
1004 * hlt instruction.
1005 */
1006 case EXCP_HLT:
1007 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HLT\n"));
1008 rc = VINF_EM_HALT;
1009 break;
1010
1011 /*
1012 * The VM has halted.
1013 */
1014 case EXCP_HALTED:
1015 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_HALTED\n"));
1016 rc = VINF_EM_HALT;
1017 break;
1018
1019 /*
1020 * Switch to RAW-mode.
1021 */
1022 case EXCP_EXECUTE_RAW:
1023 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1024 rc = VINF_EM_RESCHEDULE_RAW;
1025 break;
1026
1027 /*
1028 * Switch to hardware accelerated RAW-mode.
1029 */
1030 case EXCP_EXECUTE_HWACC:
1031 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1032 rc = VINF_EM_RESCHEDULE_HWACC;
1033 break;
1034
1035 /*
1036 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1037 */
1038 case EXCP_RC:
1039 Log2(("REMR3EmulateInstruction: cpu_exec -> EXCP_RC\n"));
1040 rc = pVM->rem.s.rc;
1041 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1042 break;
1043
1044 /*
1045 * Figure out the rest when they arrive....
1046 */
1047 default:
1048 AssertMsgFailed(("rc=%d\n", rc));
1049 Log2(("REMR3EmulateInstruction: cpu_exec -> %d\n", rc));
1050 rc = VINF_EM_RESCHEDULE;
1051 break;
1052 }
1053
1054 /*
1055 * Switch back the state.
1056 */
1057 pVM->rem.s.Env.interrupt_request = interrupt_request;
1058 rc2 = REMR3StateBack(pVM, pVCpu);
1059 AssertRC(rc2);
1060 }
1061
1062 Log2(("REMR3EmulateInstruction: returns %Rrc (cs:eip=%04x:%RGv)\n",
1063 rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1064 return rc;
1065}
1066
1067
1068/**
1069 * Runs code in recompiled mode.
1070 *
1071 * Before calling this function the REM state needs to be in sync with
1072 * the VM. Call REMR3State() to perform the sync. It's only necessary
1073 * (and permitted) to sync at the first call to REMR3Step()/REMR3Run()
1074 * and after calling REMR3StateBack().
1075 *
1076 * @returns VBox status code.
1077 *
1078 * @param pVM VM Handle.
1079 * @param pVCpu VMCPU Handle.
1080 */
1081REMR3DECL(int) REMR3Run(PVM pVM, PVMCPU pVCpu)
1082{
1083 int rc;
1084 Log2(("REMR3Run: (cs:eip=%04x:%RGv)\n", pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1085 Assert(pVM->rem.s.fInREM);
1086
1087 TMNotifyStartOfExecution(pVCpu);
1088 rc = cpu_exec(&pVM->rem.s.Env);
1089 TMNotifyEndOfExecution(pVCpu);
1090 switch (rc)
1091 {
1092 /*
1093 * This happens when the execution was interrupted
1094 * by an external event, like pending timers.
1095 */
1096 case EXCP_INTERRUPT:
1097 Log2(("REMR3Run: cpu_exec -> EXCP_INTERRUPT\n"));
1098 rc = VINF_SUCCESS;
1099 break;
1100
1101 /*
1102 * hlt instruction.
1103 */
1104 case EXCP_HLT:
1105 Log2(("REMR3Run: cpu_exec -> EXCP_HLT\n"));
1106 rc = VINF_EM_HALT;
1107 break;
1108
1109 /*
1110 * The VM has halted.
1111 */
1112 case EXCP_HALTED:
1113 Log2(("REMR3Run: cpu_exec -> EXCP_HALTED\n"));
1114 rc = VINF_EM_HALT;
1115 break;
1116
1117 /*
1118 * Breakpoint/single step.
1119 */
1120 case EXCP_DEBUG:
1121 {
1122#if 0//def DEBUG_bird
1123 static int iBP = 0;
1124 printf("howdy, breakpoint! iBP=%d\n", iBP);
1125 switch (iBP)
1126 {
1127 case 0:
1128 cpu_breakpoint_remove(&pVM->rem.s.Env, pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base);
1129 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
1130 //pVM->rem.s.Env.interrupt_request = 0;
1131 //pVM->rem.s.Env.exception_index = -1;
1132 //g_fInterruptDisabled = 1;
1133 rc = VINF_SUCCESS;
1134 asm("int3");
1135 break;
1136 default:
1137 asm("int3");
1138 break;
1139 }
1140 iBP++;
1141#else
1142 /* breakpoint or single step? */
1143 RTGCPTR GCPtrPC = pVM->rem.s.Env.eip + pVM->rem.s.Env.segs[R_CS].base;
1144 int iBP;
1145 rc = VINF_EM_DBG_STEPPED;
1146 for (iBP = 0; iBP < pVM->rem.s.Env.nb_breakpoints; iBP++)
1147 if (pVM->rem.s.Env.breakpoints[iBP] == GCPtrPC)
1148 {
1149 rc = VINF_EM_DBG_BREAKPOINT;
1150 break;
1151 }
1152 Log2(("REMR3Run: cpu_exec -> EXCP_DEBUG rc=%Rrc iBP=%d GCPtrPC=%RGv\n", rc, iBP, GCPtrPC));
1153#endif
1154 break;
1155 }
1156
1157 /*
1158 * Switch to RAW-mode.
1159 */
1160 case EXCP_EXECUTE_RAW:
1161 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_RAW\n"));
1162 rc = VINF_EM_RESCHEDULE_RAW;
1163 break;
1164
1165 /*
1166 * Switch to hardware accelerated RAW-mode.
1167 */
1168 case EXCP_EXECUTE_HWACC:
1169 Log2(("REMR3Run: cpu_exec -> EXCP_EXECUTE_HWACC\n"));
1170 rc = VINF_EM_RESCHEDULE_HWACC;
1171 break;
1172
1173 /** @todo missing VBOX_WITH_VMI/EXECP_PARAV_CALL */
1174 /*
1175 * An EM RC was raised (VMR3Reset/Suspend/PowerOff/some-fatal-error).
1176 */
1177 case EXCP_RC:
1178 Log2(("REMR3Run: cpu_exec -> EXCP_RC rc=%Rrc\n", pVM->rem.s.rc));
1179 rc = pVM->rem.s.rc;
1180 pVM->rem.s.rc = VERR_INTERNAL_ERROR;
1181 break;
1182
1183 /*
1184 * Figure out the rest when they arrive....
1185 */
1186 default:
1187 AssertMsgFailed(("rc=%d\n", rc));
1188 Log2(("REMR3Run: cpu_exec -> %d\n", rc));
1189 rc = VINF_SUCCESS;
1190 break;
1191 }
1192
1193 Log2(("REMR3Run: returns %Rrc (cs:eip=%04x:%RGv)\n", rc, pVM->rem.s.Env.segs[R_CS].selector, (RTGCPTR)pVM->rem.s.Env.eip));
1194 return rc;
1195}
1196
1197
1198/**
1199 * Check if the cpu state is suitable for Raw execution.
1200 *
1201 * @returns boolean
1202 * @param env The CPU env struct.
1203 * @param eip The EIP to check this for (might differ from env->eip).
1204 * @param fFlags hflags OR'ed with IOPL, TF and VM from eflags.
1205 * @param piException Stores EXCP_EXECUTE_RAW/HWACC in case raw mode is supported in this context
1206 *
1207 * @remark This function must be kept in perfect sync with the scheduler in EM.cpp!
1208 */
1209bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
1210{
1211 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1212 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1213 /* !!! THIS MUST BE IN SYNC WITH emR3Reschedule !!! */
1214 uint32_t u32CR0;
1215
1216 /* Update counter. */
1217 env->pVM->rem.s.cCanExecuteRaw++;
1218
1219 if (HWACCMIsEnabled(env->pVM))
1220 {
1221 CPUMCTX Ctx;
1222
1223 env->state |= CPU_RAW_HWACC;
1224
1225 /*
1226 * Create partial context for HWACCMR3CanExecuteGuest
1227 */
1228 Ctx.cr0 = env->cr[0];
1229 Ctx.cr3 = env->cr[3];
1230 Ctx.cr4 = env->cr[4];
1231
1232 Ctx.tr = env->tr.selector;
1233 Ctx.trHid.u64Base = env->tr.base;
1234 Ctx.trHid.u32Limit = env->tr.limit;
1235 Ctx.trHid.Attr.u = (env->tr.flags >> 8) & 0xF0FF;
1236
1237 Ctx.idtr.cbIdt = env->idt.limit;
1238 Ctx.idtr.pIdt = env->idt.base;
1239
1240 Ctx.gdtr.cbGdt = env->gdt.limit;
1241 Ctx.gdtr.pGdt = env->gdt.base;
1242
1243 Ctx.rsp = env->regs[R_ESP];
1244 Ctx.rip = env->eip;
1245
1246 Ctx.eflags.u32 = env->eflags;
1247
1248 Ctx.cs = env->segs[R_CS].selector;
1249 Ctx.csHid.u64Base = env->segs[R_CS].base;
1250 Ctx.csHid.u32Limit = env->segs[R_CS].limit;
1251 Ctx.csHid.Attr.u = (env->segs[R_CS].flags >> 8) & 0xF0FF;
1252
1253 Ctx.ds = env->segs[R_DS].selector;
1254 Ctx.dsHid.u64Base = env->segs[R_DS].base;
1255 Ctx.dsHid.u32Limit = env->segs[R_DS].limit;
1256 Ctx.dsHid.Attr.u = (env->segs[R_DS].flags >> 8) & 0xF0FF;
1257
1258 Ctx.es = env->segs[R_ES].selector;
1259 Ctx.esHid.u64Base = env->segs[R_ES].base;
1260 Ctx.esHid.u32Limit = env->segs[R_ES].limit;
1261 Ctx.esHid.Attr.u = (env->segs[R_ES].flags >> 8) & 0xF0FF;
1262
1263 Ctx.fs = env->segs[R_FS].selector;
1264 Ctx.fsHid.u64Base = env->segs[R_FS].base;
1265 Ctx.fsHid.u32Limit = env->segs[R_FS].limit;
1266 Ctx.fsHid.Attr.u = (env->segs[R_FS].flags >> 8) & 0xF0FF;
1267
1268 Ctx.gs = env->segs[R_GS].selector;
1269 Ctx.gsHid.u64Base = env->segs[R_GS].base;
1270 Ctx.gsHid.u32Limit = env->segs[R_GS].limit;
1271 Ctx.gsHid.Attr.u = (env->segs[R_GS].flags >> 8) & 0xF0FF;
1272
1273 Ctx.ss = env->segs[R_SS].selector;
1274 Ctx.ssHid.u64Base = env->segs[R_SS].base;
1275 Ctx.ssHid.u32Limit = env->segs[R_SS].limit;
1276 Ctx.ssHid.Attr.u = (env->segs[R_SS].flags >> 8) & 0xF0FF;
1277
1278 Ctx.msrEFER = env->efer;
1279
1280 /* Hardware accelerated raw-mode:
1281 *
1282 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
1283 */
1284 if (HWACCMR3CanExecuteGuest(env->pVM, &Ctx) == true)
1285 {
1286 *piException = EXCP_EXECUTE_HWACC;
1287 return true;
1288 }
1289 return false;
1290 }
1291
1292 /*
1293 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1294 * or 32 bits protected mode ring 0 code
1295 *
1296 * The tests are ordered by the likelyhood of being true during normal execution.
1297 */
1298 if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
1299 {
1300 STAM_COUNTER_INC(&gStatRefuseTFInhibit);
1301 Log2(("raw mode refused: fFlags=%#x\n", fFlags));
1302 return false;
1303 }
1304
1305#ifndef VBOX_RAW_V86
1306 if (fFlags & VM_MASK) {
1307 STAM_COUNTER_INC(&gStatRefuseVM86);
1308 Log2(("raw mode refused: VM_MASK\n"));
1309 return false;
1310 }
1311#endif
1312
1313 if (env->state & CPU_EMULATE_SINGLE_INSTR)
1314 {
1315#ifndef DEBUG_bird
1316 Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
1317#endif
1318 return false;
1319 }
1320
1321 if (env->singlestep_enabled)
1322 {
1323 //Log2(("raw mode refused: Single step\n"));
1324 return false;
1325 }
1326
1327 if (env->nb_breakpoints > 0)
1328 {
1329 //Log2(("raw mode refused: Breakpoints\n"));
1330 return false;
1331 }
1332
1333 u32CR0 = env->cr[0];
1334 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1335 {
1336 STAM_COUNTER_INC(&gStatRefusePaging);
1337 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1338 return false;
1339 }
1340
1341 if (env->cr[4] & CR4_PAE_MASK)
1342 {
1343 if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
1344 {
1345 STAM_COUNTER_INC(&gStatRefusePAE);
1346 return false;
1347 }
1348 }
1349
1350 if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
1351 {
1352 if (!EMIsRawRing3Enabled(env->pVM))
1353 return false;
1354
1355 if (!(env->eflags & IF_MASK))
1356 {
1357 STAM_COUNTER_INC(&gStatRefuseIF0);
1358 Log2(("raw mode refused: IF (RawR3)\n"));
1359 return false;
1360 }
1361
1362 if (!(u32CR0 & CR0_WP_MASK) && EMIsRawRing0Enabled(env->pVM))
1363 {
1364 STAM_COUNTER_INC(&gStatRefuseWP0);
1365 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1366 return false;
1367 }
1368 }
1369 else
1370 {
1371 if (!EMIsRawRing0Enabled(env->pVM))
1372 return false;
1373
1374 // Let's start with pure 32 bits ring 0 code first
1375 if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
1376 {
1377 STAM_COUNTER_INC(&gStatRefuseCode16);
1378 Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
1379 return false;
1380 }
1381
1382 // Only R0
1383 if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
1384 {
1385 STAM_COUNTER_INC(&gStatRefuseRing1or2);
1386 Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
1387 return false;
1388 }
1389
1390 if (!(u32CR0 & CR0_WP_MASK))
1391 {
1392 STAM_COUNTER_INC(&gStatRefuseWP0);
1393 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1394 return false;
1395 }
1396
1397 if (PATMIsPatchGCAddr(env->pVM, eip))
1398 {
1399 Log2(("raw r0 mode forced: patch code\n"));
1400 *piException = EXCP_EXECUTE_RAW;
1401 return true;
1402 }
1403
1404#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1405 if (!(env->eflags & IF_MASK))
1406 {
1407 STAM_COUNTER_INC(&gStatRefuseIF0);
1408 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
1409 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1410 return false;
1411 }
1412#endif
1413
1414 env->state |= CPU_RAW_RING0;
1415 }
1416
1417 /*
1418 * Don't reschedule the first time we're called, because there might be
1419 * special reasons why we're here that is not covered by the above checks.
1420 */
1421 if (env->pVM->rem.s.cCanExecuteRaw == 1)
1422 {
1423 Log2(("raw mode refused: first scheduling\n"));
1424 STAM_COUNTER_INC(&gStatRefuseCanExecute);
1425 return false;
1426 }
1427
1428 Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));
1429 *piException = EXCP_EXECUTE_RAW;
1430 return true;
1431}
1432
1433
1434/**
1435 * Fetches a code byte.
1436 *
1437 * @returns Success indicator (bool) for ease of use.
1438 * @param env The CPU environment structure.
1439 * @param GCPtrInstr Where to fetch code.
1440 * @param pu8Byte Where to store the byte on success
1441 */
1442bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
1443{
1444 int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
1445 if (RT_SUCCESS(rc))
1446 return true;
1447 return false;
1448}
1449
1450
1451/**
1452 * Flush (or invalidate if you like) page table/dir entry.
1453 *
1454 * (invlpg instruction; tlb_flush_page)
1455 *
1456 * @param env Pointer to cpu environment.
1457 * @param GCPtr The virtual address which page table/dir entry should be invalidated.
1458 */
1459void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
1460{
1461 PVM pVM = env->pVM;
1462 PCPUMCTX pCtx;
1463 int rc;
1464
1465 /*
1466 * When we're replaying invlpg instructions or restoring a saved
1467 * state we disable this path.
1468 */
1469 if (pVM->rem.s.fIgnoreInvlPg || pVM->rem.s.cIgnoreAll)
1470 return;
1471 Log(("remR3FlushPage: GCPtr=%RGv\n", GCPtr));
1472 Assert(pVM->rem.s.fInREM || pVM->rem.s.fInStateSync);
1473
1474 //RAWEx_ProfileStop(env, STATS_QEMU_TOTAL);
1475
1476 /*
1477 * Update the control registers before calling PGMFlushPage.
1478 */
1479 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1480 Assert(pCtx);
1481 pCtx->cr0 = env->cr[0];
1482 pCtx->cr3 = env->cr[3];
1483 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1484 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1485 pCtx->cr4 = env->cr[4];
1486
1487 /*
1488 * Let PGM do the rest.
1489 */
1490 Assert(env->pVCpu);
1491 rc = PGMInvalidatePage(env->pVCpu, GCPtr);
1492 if (RT_FAILURE(rc))
1493 {
1494 AssertMsgFailed(("remR3FlushPage %RGv failed with %d!!\n", GCPtr, rc));
1495 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_PGM_SYNC_CR3);
1496 }
1497 //RAWEx_ProfileStart(env, STATS_QEMU_TOTAL);
1498}
1499
1500
#ifndef REM_PHYS_ADDR_IN_TLB
/**
 * Wrapper for PGMR3PhysTlbGCPhys2Ptr.
 *
 * The low bits of the returned pointer are used as tags:
 * bit 0 set (value 1) means the lookup failed, bit 1 set means writes
 * must be caught (VINF_PGM_PHYS_TLB_CATCH_WRITE).
 *
 * NOTE(review): the fWritable parameter is ignored - the call below
 * hardcodes true and relies on the catch-write tagging instead; confirm
 * this is intentional.
 *
 * @returns Tagged host pointer as described above.
 * @param   env1        The CPU environment.
 * @param   physAddr    The guest physical address.
 * @param   fWritable   Whether write access is requested (currently unused).
 */
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
{
    void *pv;
    int rc;

    /* Address must be aligned enough to fiddle with lower bits */
    Assert((physAddr & 0x3) == 0);

    rc = PGMR3PhysTlbGCPhys2Ptr(env1->pVM, physAddr, true /*fWritable*/, &pv);
    Assert( rc == VINF_SUCCESS
         || rc == VINF_PGM_PHYS_TLB_CATCH_WRITE
         || rc == VERR_PGM_PHYS_TLB_CATCH_ALL
         || rc == VERR_PGM_PHYS_TLB_UNASSIGNED);
    if (RT_FAILURE(rc))
        return (void *)1;
    if (rc == VINF_PGM_PHYS_TLB_CATCH_WRITE)
        return (void *)((uintptr_t)pv | 2);
    return pv;
}
#endif /* !REM_PHYS_ADDR_IN_TLB */
1523
1524
1525/**
1526 * Called from tlb_protect_code in order to write monitor a code page.
1527 *
1528 * @param env Pointer to the CPU environment.
1529 * @param GCPtr Code page to monitor
1530 */
1531void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
1532{
1533#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1534 Assert(env->pVM->rem.s.fInREM);
1535 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1536 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1537 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1538 && !(env->eflags & VM_MASK) /* no V86 mode */
1539 && !HWACCMIsEnabled(env->pVM))
1540 CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1541#endif
1542}
1543
1544
1545/**
1546 * Called from tlb_unprotect_code in order to clear write monitoring for a code page.
1547 *
1548 * @param env Pointer to the CPU environment.
1549 * @param GCPtr Code page to monitor
1550 */
1551void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
1552{
1553 Assert(env->pVM->rem.s.fInREM);
1554#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
1555 if ( (env->cr[0] & X86_CR0_PG) /* paging must be enabled */
1556 && !(env->state & CPU_EMULATE_SINGLE_INSTR) /* ignore during single instruction execution */
1557 && (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
1558 && !(env->eflags & VM_MASK) /* no V86 mode */
1559 && !HWACCMIsEnabled(env->pVM))
1560 CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
1561#endif
1562}
1563
1564
1565/**
1566 * Called when the CPU is initialized, any of the CRx registers are changed or
1567 * when the A20 line is modified.
1568 *
1569 * @param env Pointer to the CPU environment.
1570 * @param fGlobal Set if the flush is global.
1571 */
1572void remR3FlushTLB(CPUState *env, bool fGlobal)
1573{
1574 PVM pVM = env->pVM;
1575 PCPUMCTX pCtx;
1576
1577 /*
1578 * When we're replaying invlpg instructions or restoring a saved
1579 * state we disable this path.
1580 */
1581 if (pVM->rem.s.fIgnoreCR3Load || pVM->rem.s.cIgnoreAll)
1582 return;
1583 Assert(pVM->rem.s.fInREM);
1584
1585 /*
1586 * The caller doesn't check cr4, so we have to do that for ourselves.
1587 */
1588 if (!fGlobal && !(env->cr[4] & X86_CR4_PGE))
1589 fGlobal = true;
1590 Log(("remR3FlushTLB: CR0=%08RX64 CR3=%08RX64 CR4=%08RX64 %s\n", (uint64_t)env->cr[0], (uint64_t)env->cr[3], (uint64_t)env->cr[4], fGlobal ? " global" : ""));
1591
1592 /*
1593 * Update the control registers before calling PGMR3FlushTLB.
1594 */
1595 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1596 Assert(pCtx);
1597 pCtx->cr0 = env->cr[0];
1598 pCtx->cr3 = env->cr[3];
1599 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1600 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1601 pCtx->cr4 = env->cr[4];
1602
1603 /*
1604 * Let PGM do the rest.
1605 */
1606 Assert(env->pVCpu);
1607 PGMFlushTLB(env->pVCpu, env->cr[3], fGlobal);
1608}
1609
1610
1611/**
1612 * Called when any of the cr0, cr4 or efer registers is updated.
1613 *
1614 * @param env Pointer to the CPU environment.
1615 */
1616void remR3ChangeCpuMode(CPUState *env)
1617{
1618 PVM pVM = env->pVM;
1619 uint64_t efer;
1620 PCPUMCTX pCtx;
1621 int rc;
1622
1623 /*
1624 * When we're replaying loads or restoring a saved
1625 * state this path is disabled.
1626 */
1627 if (pVM->rem.s.fIgnoreCpuMode || pVM->rem.s.cIgnoreAll)
1628 return;
1629 Assert(pVM->rem.s.fInREM);
1630
1631 /*
1632 * Update the control registers before calling PGMChangeMode()
1633 * as it may need to map whatever cr3 is pointing to.
1634 */
1635 pCtx = (PCPUMCTX)pVM->rem.s.pCtx;
1636 Assert(pCtx);
1637 pCtx->cr0 = env->cr[0];
1638 pCtx->cr3 = env->cr[3];
1639 if ((env->cr[4] ^ pCtx->cr4) & X86_CR4_VME)
1640 VMCPU_FF_SET(env->pVCpu, VMCPU_FF_SELM_SYNC_TSS);
1641 pCtx->cr4 = env->cr[4];
1642
1643#ifdef TARGET_X86_64
1644 efer = env->efer;
1645#else
1646 efer = 0;
1647#endif
1648 Assert(env->pVCpu);
1649 rc = PGMChangeMode(env->pVCpu, env->cr[0], env->cr[4], efer);
1650 if (rc != VINF_SUCCESS)
1651 {
1652 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1653 {
1654 Log(("PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc -> remR3RaiseRC\n", env->cr[0], env->cr[4], efer, rc));
1655 remR3RaiseRC(env->pVM, rc);
1656 }
1657 else
1658 cpu_abort(env, "PGMChangeMode(, %RX64, %RX64, %RX64) -> %Rrc\n", env->cr[0], env->cr[4], efer, rc);
1659 }
1660}
1661
1662
1663/**
1664 * Called from compiled code to run dma.
1665 *
1666 * @param env Pointer to the CPU environment.
1667 */
1668void remR3DmaRun(CPUState *env)
1669{
1670 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1671 PDMR3DmaRun(env->pVM);
1672 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1673}
1674
1675
1676/**
1677 * Called from compiled code to schedule pending timers in VMM
1678 *
1679 * @param env Pointer to the CPU environment.
1680 */
1681void remR3TimersRun(CPUState *env)
1682{
1683 LogFlow(("remR3TimersRun:\n"));
1684 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("remR3TimersRun\n"));
1685 remR3ProfileStop(STATS_QEMU_RUN_EMULATED_CODE);
1686 remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
1687 TMR3TimerQueuesDo(env->pVM);
1688 remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
1689 remR3ProfileStart(STATS_QEMU_RUN_EMULATED_CODE);
1690}
1691
1692
1693/**
1694 * Record trap occurance
1695 *
1696 * @returns VBox status code
1697 * @param env Pointer to the CPU environment.
1698 * @param uTrap Trap nr
1699 * @param uErrorCode Error code
1700 * @param pvNextEIP Next EIP
1701 */
1702int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
1703{
1704 PVM pVM = env->pVM;
1705#ifdef VBOX_WITH_STATISTICS
1706 static STAMCOUNTER s_aStatTrap[255];
1707 static bool s_aRegisters[RT_ELEMENTS(s_aStatTrap)];
1708#endif
1709
1710#ifdef VBOX_WITH_STATISTICS
1711 if (uTrap < 255)
1712 {
1713 if (!s_aRegisters[uTrap])
1714 {
1715 char szStatName[64];
1716 s_aRegisters[uTrap] = true;
1717 RTStrPrintf(szStatName, sizeof(szStatName), "/REM/Trap/0x%02X", uTrap);
1718 STAM_REG(env->pVM, &s_aStatTrap[uTrap], STAMTYPE_COUNTER, szStatName, STAMUNIT_OCCURENCES, "Trap stats.");
1719 }
1720 STAM_COUNTER_INC(&s_aStatTrap[uTrap]);
1721 }
1722#endif
1723 Log(("remR3NotifyTrap: uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1724 if( uTrap < 0x20
1725 && (env->cr[0] & X86_CR0_PE)
1726 && !(env->eflags & X86_EFL_VM))
1727 {
1728#ifdef DEBUG
1729 remR3DisasInstr(env, 1, "remR3NotifyTrap: ");
1730#endif
1731 if(pVM->rem.s.uPendingException == uTrap && ++pVM->rem.s.cPendingExceptions > 512)
1732 {
1733 LogRel(("VERR_REM_TOO_MANY_TRAPS -> uTrap=%x error=%x next_eip=%RGv eip=%RGv cr2=%RGv\n", uTrap, uErrorCode, pvNextEIP, (RTGCPTR)env->eip, (RTGCPTR)env->cr[2]));
1734 remR3RaiseRC(env->pVM, VERR_REM_TOO_MANY_TRAPS);
1735 return VERR_REM_TOO_MANY_TRAPS;
1736 }
1737 if(pVM->rem.s.uPendingException != uTrap || pVM->rem.s.uPendingExcptEIP != env->eip || pVM->rem.s.uPendingExcptCR2 != env->cr[2])
1738 pVM->rem.s.cPendingExceptions = 1;
1739 pVM->rem.s.uPendingException = uTrap;
1740 pVM->rem.s.uPendingExcptEIP = env->eip;
1741 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1742 }
1743 else
1744 {
1745 pVM->rem.s.cPendingExceptions = 0;
1746 pVM->rem.s.uPendingException = uTrap;
1747 pVM->rem.s.uPendingExcptEIP = env->eip;
1748 pVM->rem.s.uPendingExcptCR2 = env->cr[2];
1749 }
1750 return VINF_SUCCESS;
1751}
1752
1753
1754/*
1755 * Clear current active trap
1756 *
1757 * @param pVM VM Handle.
1758 */
1759void remR3TrapClear(PVM pVM)
1760{
1761 pVM->rem.s.cPendingExceptions = 0;
1762 pVM->rem.s.uPendingException = 0;
1763 pVM->rem.s.uPendingExcptEIP = 0;
1764 pVM->rem.s.uPendingExcptCR2 = 0;
1765}
1766
1767
1768/*
1769 * Record previous call instruction addresses
1770 *
1771 * @param env Pointer to the CPU environment.
1772 */
1773void remR3RecordCall(CPUState *env)
1774{
1775 CSAMR3RecordCallAddress(env->pVM, env->eip);
1776}
1777
1778
1779/**
1780 * Syncs the internal REM state with the VM.
1781 *
1782 * This must be called before REMR3Run() is invoked whenever when the REM
1783 * state is not up to date. Calling it several times in a row is not
1784 * permitted.
1785 *
1786 * @returns VBox status code.
1787 *
1788 * @param pVM VM Handle.
1789 * @param pVCpu VMCPU Handle.
1790 *
1791 * @remark The caller has to check for important FFs before calling REMR3Run. REMR3State will
1792 * no do this since the majority of the callers don't want any unnecessary of events
1793 * pending that would immediatly interrupt execution.
1794 */
1795REMR3DECL(int) REMR3State(PVM pVM, PVMCPU pVCpu)
1796{
1797 register const CPUMCTX *pCtx;
1798 register unsigned fFlags;
1799 bool fHiddenSelRegsValid;
1800 unsigned i;
1801 TRPMEVENT enmType;
1802 uint8_t u8TrapNo;
1803 int rc;
1804
1805 STAM_PROFILE_START(&pVM->rem.s.StatsState, a);
1806 Log2(("REMR3State:\n"));
1807
1808 pVM->rem.s.Env.pVCpu = pVCpu;
1809 pCtx = pVM->rem.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
1810 fHiddenSelRegsValid = CPUMAreHiddenSelRegsValid(pVM);
1811
1812 Assert(!pVM->rem.s.fInREM);
1813 pVM->rem.s.fInStateSync = true;
1814
1815 /*
1816 * If we have to flush TBs, do that immediately.
1817 */
1818 if (pVM->rem.s.fFlushTBs)
1819 {
1820 STAM_COUNTER_INC(&gStatFlushTBs);
1821 tb_flush(&pVM->rem.s.Env);
1822 pVM->rem.s.fFlushTBs = false;
1823 }
1824
1825 /*
1826 * Copy the registers which require no special handling.
1827 */
1828#ifdef TARGET_X86_64
1829 /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
1830 Assert(R_EAX == 0);
1831 pVM->rem.s.Env.regs[R_EAX] = pCtx->rax;
1832 Assert(R_ECX == 1);
1833 pVM->rem.s.Env.regs[R_ECX] = pCtx->rcx;
1834 Assert(R_EDX == 2);
1835 pVM->rem.s.Env.regs[R_EDX] = pCtx->rdx;
1836 Assert(R_EBX == 3);
1837 pVM->rem.s.Env.regs[R_EBX] = pCtx->rbx;
1838 Assert(R_ESP == 4);
1839 pVM->rem.s.Env.regs[R_ESP] = pCtx->rsp;
1840 Assert(R_EBP == 5);
1841 pVM->rem.s.Env.regs[R_EBP] = pCtx->rbp;
1842 Assert(R_ESI == 6);
1843 pVM->rem.s.Env.regs[R_ESI] = pCtx->rsi;
1844 Assert(R_EDI == 7);
1845 pVM->rem.s.Env.regs[R_EDI] = pCtx->rdi;
1846 pVM->rem.s.Env.regs[8] = pCtx->r8;
1847 pVM->rem.s.Env.regs[9] = pCtx->r9;
1848 pVM->rem.s.Env.regs[10] = pCtx->r10;
1849 pVM->rem.s.Env.regs[11] = pCtx->r11;
1850 pVM->rem.s.Env.regs[12] = pCtx->r12;
1851 pVM->rem.s.Env.regs[13] = pCtx->r13;
1852 pVM->rem.s.Env.regs[14] = pCtx->r14;
1853 pVM->rem.s.Env.regs[15] = pCtx->r15;
1854
1855 pVM->rem.s.Env.eip = pCtx->rip;
1856
1857 pVM->rem.s.Env.eflags = pCtx->rflags.u64;
1858#else
1859 Assert(R_EAX == 0);
1860 pVM->rem.s.Env.regs[R_EAX] = pCtx->eax;
1861 Assert(R_ECX == 1);
1862 pVM->rem.s.Env.regs[R_ECX] = pCtx->ecx;
1863 Assert(R_EDX == 2);
1864 pVM->rem.s.Env.regs[R_EDX] = pCtx->edx;
1865 Assert(R_EBX == 3);
1866 pVM->rem.s.Env.regs[R_EBX] = pCtx->ebx;
1867 Assert(R_ESP == 4);
1868 pVM->rem.s.Env.regs[R_ESP] = pCtx->esp;
1869 Assert(R_EBP == 5);
1870 pVM->rem.s.Env.regs[R_EBP] = pCtx->ebp;
1871 Assert(R_ESI == 6);
1872 pVM->rem.s.Env.regs[R_ESI] = pCtx->esi;
1873 Assert(R_EDI == 7);
1874 pVM->rem.s.Env.regs[R_EDI] = pCtx->edi;
1875 pVM->rem.s.Env.eip = pCtx->eip;
1876
1877 pVM->rem.s.Env.eflags = pCtx->eflags.u32;
1878#endif
1879
1880 pVM->rem.s.Env.cr[2] = pCtx->cr2;
1881
1882 /** @todo we could probably benefit from using a CPUM_CHANGED_DRx flag too! */
1883 for (i=0;i<8;i++)
1884 pVM->rem.s.Env.dr[i] = pCtx->dr[i];
1885
1886 /*
1887 * Clear the halted hidden flag (the interrupt waking up the CPU can
1888 * have been dispatched in raw mode).
1889 */
1890 pVM->rem.s.Env.hflags &= ~HF_HALTED_MASK;
1891
1892 /*
1893 * Replay invlpg?
1894 */
1895 if (pVM->rem.s.cInvalidatedPages)
1896 {
1897 RTUINT i;
1898
1899 pVM->rem.s.fIgnoreInvlPg = true;
1900 for (i = 0; i < pVM->rem.s.cInvalidatedPages; i++)
1901 {
1902 Log2(("REMR3State: invlpg %RGv\n", pVM->rem.s.aGCPtrInvalidatedPages[i]));
1903 tlb_flush_page(&pVM->rem.s.Env, pVM->rem.s.aGCPtrInvalidatedPages[i]);
1904 }
1905 pVM->rem.s.fIgnoreInvlPg = false;
1906 pVM->rem.s.cInvalidatedPages = 0;
1907 }
1908
1909 /* Replay notification changes. */
1910 REMR3ReplayHandlerNotifications(pVM);
1911
1912 /* Update MSRs; before CRx registers! */
1913 pVM->rem.s.Env.efer = pCtx->msrEFER;
1914 pVM->rem.s.Env.star = pCtx->msrSTAR;
1915 pVM->rem.s.Env.pat = pCtx->msrPAT;
1916#ifdef TARGET_X86_64
1917 pVM->rem.s.Env.lstar = pCtx->msrLSTAR;
1918 pVM->rem.s.Env.cstar = pCtx->msrCSTAR;
1919 pVM->rem.s.Env.fmask = pCtx->msrSFMASK;
1920 pVM->rem.s.Env.kernelgsbase = pCtx->msrKERNELGSBASE;
1921
1922 /* Update the internal long mode activate flag according to the new EFER value. */
1923 if (pCtx->msrEFER & MSR_K6_EFER_LMA)
1924 pVM->rem.s.Env.hflags |= HF_LMA_MASK;
1925 else
1926 pVM->rem.s.Env.hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
1927#endif
1928
1929 /*
1930 * Registers which are rarely changed and require special handling / order when changed.
1931 */
1932 fFlags = CPUMGetAndClearChangedFlagsREM(pVCpu);
1933 LogFlow(("CPUMGetAndClearChangedFlagsREM %x\n", fFlags));
1934 if (fFlags & ( CPUM_CHANGED_CR4 | CPUM_CHANGED_CR3 | CPUM_CHANGED_CR0
1935 | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_LDTR
1936 | CPUM_CHANGED_FPU_REM | CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_CPUID))
1937 {
1938 if (fFlags & CPUM_CHANGED_GLOBAL_TLB_FLUSH)
1939 {
1940 pVM->rem.s.fIgnoreCR3Load = true;
1941 tlb_flush(&pVM->rem.s.Env, true);
1942 pVM->rem.s.fIgnoreCR3Load = false;
1943 }
1944
1945 /* CR4 before CR0! */
1946 if (fFlags & CPUM_CHANGED_CR4)
1947 {
1948 pVM->rem.s.fIgnoreCR3Load = true;
1949 pVM->rem.s.fIgnoreCpuMode = true;
1950 cpu_x86_update_cr4(&pVM->rem.s.Env, pCtx->cr4);
1951 pVM->rem.s.fIgnoreCpuMode = false;
1952 pVM->rem.s.fIgnoreCR3Load = false;
1953 }
1954
1955 if (fFlags & CPUM_CHANGED_CR0)
1956 {
1957 pVM->rem.s.fIgnoreCR3Load = true;
1958 pVM->rem.s.fIgnoreCpuMode = true;
1959 cpu_x86_update_cr0(&pVM->rem.s.Env, pCtx->cr0);
1960 pVM->rem.s.fIgnoreCpuMode = false;
1961 pVM->rem.s.fIgnoreCR3Load = false;
1962 }
1963
1964 if (fFlags & CPUM_CHANGED_CR3)
1965 {
1966 pVM->rem.s.fIgnoreCR3Load = true;
1967 cpu_x86_update_cr3(&pVM->rem.s.Env, pCtx->cr3);
1968 pVM->rem.s.fIgnoreCR3Load = false;
1969 }
1970
1971 if (fFlags & CPUM_CHANGED_GDTR)
1972 {
1973 pVM->rem.s.Env.gdt.base = pCtx->gdtr.pGdt;
1974 pVM->rem.s.Env.gdt.limit = pCtx->gdtr.cbGdt;
1975 }
1976
1977 if (fFlags & CPUM_CHANGED_IDTR)
1978 {
1979 pVM->rem.s.Env.idt.base = pCtx->idtr.pIdt;
1980 pVM->rem.s.Env.idt.limit = pCtx->idtr.cbIdt;
1981 }
1982
1983 if (fFlags & CPUM_CHANGED_SYSENTER_MSR)
1984 {
1985 pVM->rem.s.Env.sysenter_cs = pCtx->SysEnter.cs;
1986 pVM->rem.s.Env.sysenter_eip = pCtx->SysEnter.eip;
1987 pVM->rem.s.Env.sysenter_esp = pCtx->SysEnter.esp;
1988 }
1989
1990 if (fFlags & CPUM_CHANGED_LDTR)
1991 {
1992 if (fHiddenSelRegsValid)
1993 {
1994 pVM->rem.s.Env.ldt.selector = pCtx->ldtr;
1995 pVM->rem.s.Env.ldt.base = pCtx->ldtrHid.u64Base;
1996 pVM->rem.s.Env.ldt.limit = pCtx->ldtrHid.u32Limit;
1997 pVM->rem.s.Env.ldt.flags = (pCtx->ldtrHid.Attr.u << 8) & 0xFFFFFF;
1998 }
1999 else
2000 sync_ldtr(&pVM->rem.s.Env, pCtx->ldtr);
2001 }
2002
2003 if (fFlags & CPUM_CHANGED_CPUID)
2004 {
2005 uint32_t u32Dummy;
2006
2007 /*
2008 * Get the CPUID features.
2009 */
2010 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext_features, &pVM->rem.s.Env.cpuid_features);
2011 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Dummy, &u32Dummy, &u32Dummy, &pVM->rem.s.Env.cpuid_ext2_features);
2012 }
2013
2014 /* Sync FPU state after CR4, CPUID and EFER (!). */
2015 if (fFlags & CPUM_CHANGED_FPU_REM)
2016 save_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu); /* 'save' is an excellent name. */
2017 }
2018
2019 /*
2020 * Sync TR unconditionally to make life simpler.
2021 */
2022 pVM->rem.s.Env.tr.selector = pCtx->tr;
2023 pVM->rem.s.Env.tr.base = pCtx->trHid.u64Base;
2024 pVM->rem.s.Env.tr.limit = pCtx->trHid.u32Limit;
2025 pVM->rem.s.Env.tr.flags = (pCtx->trHid.Attr.u << 8) & 0xFFFFFF;
2026 /* Note! do_interrupt will fault if the busy flag is still set... */
2027 pVM->rem.s.Env.tr.flags &= ~DESC_TSS_BUSY_MASK;
2028
2029 /*
2030 * Update selector registers.
2031 * This must be done *after* we've synced gdt, ldt and crX registers
2032 * since we're reading the GDT/LDT om sync_seg. This will happen with
2033 * saved state which takes a quick dip into rawmode for instance.
2034 */
2035 /*
2036 * Stack; Note first check this one as the CPL might have changed. The
2037 * wrong CPL can cause QEmu to raise an exception in sync_seg!!
2038 */
2039
2040 if (fHiddenSelRegsValid)
2041 {
2042 /* The hidden selector registers are valid in the CPU context. */
2043 /** @note QEmu saves the 2nd dword of the descriptor; we should convert the attribute word back! */
2044
2045 /* Set current CPL */
2046 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2047
2048 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_CS, pCtx->cs, pCtx->csHid.u64Base, pCtx->csHid.u32Limit, (pCtx->csHid.Attr.u << 8) & 0xFFFFFF);
2049 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_SS, pCtx->ss, pCtx->ssHid.u64Base, pCtx->ssHid.u32Limit, (pCtx->ssHid.Attr.u << 8) & 0xFFFFFF);
2050 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_DS, pCtx->ds, pCtx->dsHid.u64Base, pCtx->dsHid.u32Limit, (pCtx->dsHid.Attr.u << 8) & 0xFFFFFF);
2051 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_ES, pCtx->es, pCtx->esHid.u64Base, pCtx->esHid.u32Limit, (pCtx->esHid.Attr.u << 8) & 0xFFFFFF);
2052 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_FS, pCtx->fs, pCtx->fsHid.u64Base, pCtx->fsHid.u32Limit, (pCtx->fsHid.Attr.u << 8) & 0xFFFFFF);
2053 cpu_x86_load_seg_cache(&pVM->rem.s.Env, R_GS, pCtx->gs, pCtx->gsHid.u64Base, pCtx->gsHid.u32Limit, (pCtx->gsHid.Attr.u << 8) & 0xFFFFFF);
2054 }
2055 else
2056 {
2057 /* In 'normal' raw mode we don't have access to the hidden selector registers. */
2058 if (pVM->rem.s.Env.segs[R_SS].selector != pCtx->ss)
2059 {
2060 Log2(("REMR3State: SS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_SS].selector, pCtx->ss));
2061
2062 cpu_x86_set_cpl(&pVM->rem.s.Env, CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)));
2063 sync_seg(&pVM->rem.s.Env, R_SS, pCtx->ss);
2064#ifdef VBOX_WITH_STATISTICS
2065 if (pVM->rem.s.Env.segs[R_SS].newselector)
2066 {
2067 STAM_COUNTER_INC(&gStatSelOutOfSync[R_SS]);
2068 }
2069#endif
2070 }
2071 else
2072 pVM->rem.s.Env.segs[R_SS].newselector = 0;
2073
2074 if (pVM->rem.s.Env.segs[R_ES].selector != pCtx->es)
2075 {
2076 Log2(("REMR3State: ES changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_ES].selector, pCtx->es));
2077 sync_seg(&pVM->rem.s.Env, R_ES, pCtx->es);
2078#ifdef VBOX_WITH_STATISTICS
2079 if (pVM->rem.s.Env.segs[R_ES].newselector)
2080 {
2081 STAM_COUNTER_INC(&gStatSelOutOfSync[R_ES]);
2082 }
2083#endif
2084 }
2085 else
2086 pVM->rem.s.Env.segs[R_ES].newselector = 0;
2087
2088 if (pVM->rem.s.Env.segs[R_CS].selector != pCtx->cs)
2089 {
2090 Log2(("REMR3State: CS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_CS].selector, pCtx->cs));
2091 sync_seg(&pVM->rem.s.Env, R_CS, pCtx->cs);
2092#ifdef VBOX_WITH_STATISTICS
2093 if (pVM->rem.s.Env.segs[R_CS].newselector)
2094 {
2095 STAM_COUNTER_INC(&gStatSelOutOfSync[R_CS]);
2096 }
2097#endif
2098 }
2099 else
2100 pVM->rem.s.Env.segs[R_CS].newselector = 0;
2101
2102 if (pVM->rem.s.Env.segs[R_DS].selector != pCtx->ds)
2103 {
2104 Log2(("REMR3State: DS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_DS].selector, pCtx->ds));
2105 sync_seg(&pVM->rem.s.Env, R_DS, pCtx->ds);
2106#ifdef VBOX_WITH_STATISTICS
2107 if (pVM->rem.s.Env.segs[R_DS].newselector)
2108 {
2109 STAM_COUNTER_INC(&gStatSelOutOfSync[R_DS]);
2110 }
2111#endif
2112 }
2113 else
2114 pVM->rem.s.Env.segs[R_DS].newselector = 0;
2115
2116 /** @todo need to find a way to communicate potential GDT/LDT changes and thread switches. The selector might
2117 * be the same but not the base/limit. */
2118 if (pVM->rem.s.Env.segs[R_FS].selector != pCtx->fs)
2119 {
2120 Log2(("REMR3State: FS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_FS].selector, pCtx->fs));
2121 sync_seg(&pVM->rem.s.Env, R_FS, pCtx->fs);
2122#ifdef VBOX_WITH_STATISTICS
2123 if (pVM->rem.s.Env.segs[R_FS].newselector)
2124 {
2125 STAM_COUNTER_INC(&gStatSelOutOfSync[R_FS]);
2126 }
2127#endif
2128 }
2129 else
2130 pVM->rem.s.Env.segs[R_FS].newselector = 0;
2131
2132 if (pVM->rem.s.Env.segs[R_GS].selector != pCtx->gs)
2133 {
2134 Log2(("REMR3State: GS changed from %04x to %04x!\n", pVM->rem.s.Env.segs[R_GS].selector, pCtx->gs));
2135 sync_seg(&pVM->rem.s.Env, R_GS, pCtx->gs);
2136#ifdef VBOX_WITH_STATISTICS
2137 if (pVM->rem.s.Env.segs[R_GS].newselector)
2138 {
2139 STAM_COUNTER_INC(&gStatSelOutOfSync[R_GS]);
2140 }
2141#endif
2142 }
2143 else
2144 pVM->rem.s.Env.segs[R_GS].newselector = 0;
2145 }
2146
2147 /*
2148 * Check for traps.
2149 */
2150 pVM->rem.s.Env.exception_index = -1; /** @todo this won't work :/ */
2151 rc = TRPMQueryTrap(pVCpu, &u8TrapNo, &enmType);
2152 if (RT_SUCCESS(rc))
2153 {
2154#ifdef DEBUG
2155 if (u8TrapNo == 0x80)
2156 {
2157 remR3DumpLnxSyscall(pVCpu);
2158 remR3DumpOBsdSyscall(pVCpu);
2159 }
2160#endif
2161
2162 pVM->rem.s.Env.exception_index = u8TrapNo;
2163 if (enmType != TRPM_SOFTWARE_INT)
2164 {
2165 pVM->rem.s.Env.exception_is_int = 0;
2166 pVM->rem.s.Env.exception_next_eip = pVM->rem.s.Env.eip;
2167 }
2168 else
2169 {
2170 /*
2171 * The there are two 1 byte opcodes and one 2 byte opcode for software interrupts.
2172 * We ASSUME that there are no prefixes and sets the default to 2 byte, and checks
2173 * for int03 and into.
2174 */
2175 pVM->rem.s.Env.exception_is_int = 1;
2176 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 2;
2177 /* int 3 may be generated by one-byte 0xcc */
2178 if (u8TrapNo == 3)
2179 {
2180 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xcc)
2181 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2182 }
2183 /* int 4 may be generated by one-byte 0xce */
2184 else if (u8TrapNo == 4)
2185 {
2186 if (read_byte(&pVM->rem.s.Env, pVM->rem.s.Env.segs[R_CS].base + pCtx->rip) == 0xce)
2187 pVM->rem.s.Env.exception_next_eip = pCtx->rip + 1;
2188 }
2189 }
2190
2191 /* get error code and cr2 if needed. */
2192 switch (u8TrapNo)
2193 {
2194 case 0x0e:
2195 pVM->rem.s.Env.cr[2] = TRPMGetFaultAddress(pVCpu);
2196 /* fallthru */
2197 case 0x0a: case 0x0b: case 0x0c: case 0x0d:
2198 pVM->rem.s.Env.error_code = TRPMGetErrorCode(pVCpu);
2199 break;
2200
2201 case 0x11: case 0x08:
2202 default:
2203 pVM->rem.s.Env.error_code = 0;
2204 break;
2205 }
2206
2207 /*
2208 * We can now reset the active trap since the recompiler is gonna have a go at it.
2209 */
2210 rc = TRPMResetTrap(pVCpu);
2211 AssertRC(rc);
2212 Log2(("REMR3State: trap=%02x errcd=%RGv cr2=%RGv nexteip=%RGv%s\n", pVM->rem.s.Env.exception_index, (RTGCPTR)pVM->rem.s.Env.error_code,
2213 (RTGCPTR)pVM->rem.s.Env.cr[2], (RTGCPTR)pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
2214 }
2215
2216 /*
2217 * Clear old interrupt request flags; Check for pending hardware interrupts.
2218 * (See @remark for why we don't check for other FFs.)
2219 */
2220 pVM->rem.s.Env.interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXIT | CPU_INTERRUPT_EXITTB | CPU_INTERRUPT_TIMER);
2221 if ( pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ
2222 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2223 pVM->rem.s.Env.interrupt_request |= CPU_INTERRUPT_HARD;
2224
2225 /*
2226 * We're now in REM mode.
2227 */
2228 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_REM);
2229 pVM->rem.s.fInREM = true;
2230 pVM->rem.s.fInStateSync = false;
2231 pVM->rem.s.cCanExecuteRaw = 0;
2232 STAM_PROFILE_STOP(&pVM->rem.s.StatsState, a);
2233 Log2(("REMR3State: returns VINF_SUCCESS\n"));
2234 return VINF_SUCCESS;
2235}
2236
2237
2238/**
 * Syncs back changes in the REM state to the VM state.
2240 *
2241 * This must be called after invoking REMR3Run().
2242 * Calling it several times in a row is not permitted.
2243 *
2244 * @returns VBox status code.
2245 *
2246 * @param pVM VM Handle.
2247 * @param pVCpu VMCPU Handle.
2248 */
REMR3DECL(int) REMR3StateBack(PVM pVM, PVMCPU pVCpu)
{
    register PCPUMCTX pCtx = pVM->rem.s.pCtx;
    Assert(pCtx);
    unsigned i;

    STAM_PROFILE_START(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack:\n"));
    Assert(pVM->rem.s.fInREM);

    /*
     * Copy back the registers.
     * This is done in the order they are declared in the CPUMCTX structure.
     */

    /** @todo FOP */
    /** @todo FPUIP */
    /** @todo CS */
    /** @todo FPUDP */
    /** @todo DS */

    /** @todo check if FPU/XMM was actually used in the recompiler */
    restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));

#ifdef TARGET_X86_64
    /* Note that the high dwords of 64 bits registers are undefined in 32 bits mode and are undefined after a mode change. */
    pCtx->rdi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->rsi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->rbp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->rax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->rbx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->rdx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->rcx           = pVM->rem.s.Env.regs[R_ECX];
    pCtx->r8            = pVM->rem.s.Env.regs[8];
    pCtx->r9            = pVM->rem.s.Env.regs[9];
    pCtx->r10           = pVM->rem.s.Env.regs[10];
    pCtx->r11           = pVM->rem.s.Env.regs[11];
    pCtx->r12           = pVM->rem.s.Env.regs[12];
    pCtx->r13           = pVM->rem.s.Env.regs[13];
    pCtx->r14           = pVM->rem.s.Env.regs[14];
    pCtx->r15           = pVM->rem.s.Env.regs[15];

    pCtx->rsp           = pVM->rem.s.Env.regs[R_ESP];

#else
    pCtx->edi           = pVM->rem.s.Env.regs[R_EDI];
    pCtx->esi           = pVM->rem.s.Env.regs[R_ESI];
    pCtx->ebp           = pVM->rem.s.Env.regs[R_EBP];
    pCtx->eax           = pVM->rem.s.Env.regs[R_EAX];
    pCtx->ebx           = pVM->rem.s.Env.regs[R_EBX];
    pCtx->edx           = pVM->rem.s.Env.regs[R_EDX];
    pCtx->ecx           = pVM->rem.s.Env.regs[R_ECX];

    pCtx->esp           = pVM->rem.s.Env.regs[R_ESP];
#endif

    pCtx->ss            = pVM->rem.s.Env.segs[R_SS].selector;

#ifdef VBOX_WITH_STATISTICS
    /* Count selectors that QEMU could not (yet) resolve against the GDT/LDT. */
    if (pVM->rem.s.Env.segs[R_SS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_SS]);
    }
    if (pVM->rem.s.Env.segs[R_GS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_GS]);
    }
    if (pVM->rem.s.Env.segs[R_FS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_FS]);
    }
    if (pVM->rem.s.Env.segs[R_ES].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_ES]);
    }
    if (pVM->rem.s.Env.segs[R_DS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_DS]);
    }
    if (pVM->rem.s.Env.segs[R_CS].newselector)
    {
        STAM_COUNTER_INC(&gStatSelOutOfSyncStateBack[R_CS]);
    }
#endif
    pCtx->gs            = pVM->rem.s.Env.segs[R_GS].selector;
    pCtx->fs            = pVM->rem.s.Env.segs[R_FS].selector;
    pCtx->es            = pVM->rem.s.Env.segs[R_ES].selector;
    pCtx->ds            = pVM->rem.s.Env.segs[R_DS].selector;
    pCtx->cs            = pVM->rem.s.Env.segs[R_CS].selector;

#ifdef TARGET_X86_64
    pCtx->rip           = pVM->rem.s.Env.eip;
    pCtx->rflags.u64    = pVM->rem.s.Env.eflags;
#else
    pCtx->eip           = pVM->rem.s.Env.eip;
    pCtx->eflags.u32    = pVM->rem.s.Env.eflags;
#endif

    pCtx->cr0           = pVM->rem.s.Env.cr[0];
    pCtx->cr2           = pVM->rem.s.Env.cr[2];
    pCtx->cr3           = pVM->rem.s.Env.cr[3];
    /* A CR4.VME toggle changes the virtual-8086 TSS redirection layout, so the TSS must be resynced. */
    if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    pCtx->cr4           = pVM->rem.s.Env.cr[4];

    for (i = 0; i < 8; i++)
        pCtx->dr[i] = pVM->rem.s.Env.dr[i];

    pCtx->gdtr.cbGdt    = pVM->rem.s.Env.gdt.limit;
    if (pCtx->gdtr.pGdt != pVM->rem.s.Env.gdt.base)
    {
        pCtx->gdtr.pGdt = pVM->rem.s.Env.gdt.base;
        STAM_COUNTER_INC(&gStatREMGDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }

    pCtx->idtr.cbIdt    = pVM->rem.s.Env.idt.limit;
    if (pCtx->idtr.pIdt != pVM->rem.s.Env.idt.base)
    {
        pCtx->idtr.pIdt = pVM->rem.s.Env.idt.base;
        STAM_COUNTER_INC(&gStatREMIDTChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
    }

    /* 0xF0FF: the attribute word is bits 8..23 of descriptor dword 2 with the limit 19:16 nibble stripped. */
    if (    pCtx->ldtr != pVM->rem.s.Env.ldt.selector
        ||  pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
        ||  pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
        ||  pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
    {
        pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
        pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
        pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
        pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF;
        STAM_COUNTER_INC(&gStatREMLDTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
    }

    if (    pCtx->tr != pVM->rem.s.Env.tr.selector
        ||  pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
        ||  pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
            /* Qemu and AMD/Intel have different ideas about the busy flag ... */
        ||  pCtx->trHid.Attr.u != (  (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
                                   ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
                                   : 0) )
    {
        Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
             pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
             pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
             (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
        pCtx->tr = pVM->rem.s.Env.tr.selector;
        pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
        pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
        pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
        /* The busy bit was cleared on entry (see REMR3State); restore it if the TSS is valid at all. */
        if (pCtx->trHid.Attr.u)
            pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
        STAM_COUNTER_INC(&gStatREMTRChange);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
    }

    /** @todo These values could still be out of sync! */
    pCtx->csHid.u64Base    = pVM->rem.s.Env.segs[R_CS].base;
    pCtx->csHid.u32Limit   = pVM->rem.s.Env.segs[R_CS].limit;
    /* Note! QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
    pCtx->csHid.Attr.u     = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xF0FF;

    pCtx->dsHid.u64Base    = pVM->rem.s.Env.segs[R_DS].base;
    pCtx->dsHid.u32Limit   = pVM->rem.s.Env.segs[R_DS].limit;
    pCtx->dsHid.Attr.u     = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xF0FF;

    pCtx->esHid.u64Base    = pVM->rem.s.Env.segs[R_ES].base;
    pCtx->esHid.u32Limit   = pVM->rem.s.Env.segs[R_ES].limit;
    pCtx->esHid.Attr.u     = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xF0FF;

    pCtx->fsHid.u64Base    = pVM->rem.s.Env.segs[R_FS].base;
    pCtx->fsHid.u32Limit   = pVM->rem.s.Env.segs[R_FS].limit;
    pCtx->fsHid.Attr.u     = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xF0FF;

    pCtx->gsHid.u64Base    = pVM->rem.s.Env.segs[R_GS].base;
    pCtx->gsHid.u32Limit   = pVM->rem.s.Env.segs[R_GS].limit;
    pCtx->gsHid.Attr.u     = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xF0FF;

    pCtx->ssHid.u64Base    = pVM->rem.s.Env.segs[R_SS].base;
    pCtx->ssHid.u32Limit   = pVM->rem.s.Env.segs[R_SS].limit;
    pCtx->ssHid.Attr.u     = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xF0FF;

    /* Sysenter MSR */
    pCtx->SysEnter.cs      = pVM->rem.s.Env.sysenter_cs;
    pCtx->SysEnter.eip     = pVM->rem.s.Env.sysenter_eip;
    pCtx->SysEnter.esp     = pVM->rem.s.Env.sysenter_esp;

    /* System MSRs. */
    pCtx->msrEFER          = pVM->rem.s.Env.efer;
    pCtx->msrSTAR          = pVM->rem.s.Env.star;
    pCtx->msrPAT           = pVM->rem.s.Env.pat;
#ifdef TARGET_X86_64
    pCtx->msrLSTAR         = pVM->rem.s.Env.lstar;
    pCtx->msrCSTAR         = pVM->rem.s.Env.cstar;
    pCtx->msrSFMASK        = pVM->rem.s.Env.fmask;
    pCtx->msrKERNELGSBASE  = pVM->rem.s.Env.kernelgsbase;
#endif

    remR3TrapClear(pVM);

    /*
     * Check for traps.
     * If the recompiler left an exception pending, hand it back to TRPM so the
     * rest of the VMM can dispatch it.
     */
    if (    pVM->rem.s.Env.exception_index >= 0
        &&  pVM->rem.s.Env.exception_index < 256)
    {
        int rc;

        Log(("REMR3StateBack: Pending trap %x %d\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.exception_is_int));
        rc = TRPMAssertTrap(pVCpu, pVM->rem.s.Env.exception_index, (pVM->rem.s.Env.exception_is_int) ? TRPM_SOFTWARE_INT : TRPM_HARDWARE_INT);
        AssertRC(rc);
        switch (pVM->rem.s.Env.exception_index)
        {
            case 0x0e:
                TRPMSetFaultAddress(pVCpu, pCtx->cr2);
                /* fallthru */
            case 0x0a: case 0x0b: case 0x0c: case 0x0d:
            case 0x11: case 0x08: /* 0 */
                TRPMSetErrorCode(pVCpu, pVM->rem.s.Env.error_code);
                break;
        }

    }

    /*
     * We're no longer in REM mode.
     */
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC_REM);
    pVM->rem.s.fInREM    = false;
    pVM->rem.s.pCtx      = NULL;
    pVM->rem.s.Env.pVCpu = NULL;
    STAM_PROFILE_STOP(&pVM->rem.s.StatsStateBack, a);
    Log2(("REMR3StateBack: returns VINF_SUCCESS\n"));
    return VINF_SUCCESS;
}
2488
2489
2490/**
2491 * This is called by the disassembler when it wants to update the cpu state
2492 * before for instance doing a register dump.
2493 */
2494static void remR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2495{
2496 register PCPUMCTX pCtx = pVM->rem.s.pCtx;
2497 unsigned i;
2498
2499 Assert(pVM->rem.s.fInREM);
2500
2501 /*
2502 * Copy back the registers.
2503 * This is done in the order they are declared in the CPUMCTX structure.
2504 */
2505
2506 /** @todo FOP */
2507 /** @todo FPUIP */
2508 /** @todo CS */
2509 /** @todo FPUDP */
2510 /** @todo DS */
2511 /** @todo Fix MXCSR support in QEMU so we don't overwrite MXCSR with 0 when we shouldn't! */
2512 pCtx->fpu.MXCSR = 0;
2513 pCtx->fpu.MXCSR_MASK = 0;
2514
2515 /** @todo check if FPU/XMM was actually used in the recompiler */
2516 restore_raw_fp_state(&pVM->rem.s.Env, (uint8_t *)&pCtx->fpu);
2517//// dprintf2(("FPU state CW=%04X TT=%04X SW=%04X (%04X)\n", env->fpuc, env->fpstt, env->fpus, pVMCtx->fpu.FSW));
2518
2519#ifdef TARGET_X86_64
2520 pCtx->rdi = pVM->rem.s.Env.regs[R_EDI];
2521 pCtx->rsi = pVM->rem.s.Env.regs[R_ESI];
2522 pCtx->rbp = pVM->rem.s.Env.regs[R_EBP];
2523 pCtx->rax = pVM->rem.s.Env.regs[R_EAX];
2524 pCtx->rbx = pVM->rem.s.Env.regs[R_EBX];
2525 pCtx->rdx = pVM->rem.s.Env.regs[R_EDX];
2526 pCtx->rcx = pVM->rem.s.Env.regs[R_ECX];
2527 pCtx->r8 = pVM->rem.s.Env.regs[8];
2528 pCtx->r9 = pVM->rem.s.Env.regs[9];
2529 pCtx->r10 = pVM->rem.s.Env.regs[10];
2530 pCtx->r11 = pVM->rem.s.Env.regs[11];
2531 pCtx->r12 = pVM->rem.s.Env.regs[12];
2532 pCtx->r13 = pVM->rem.s.Env.regs[13];
2533 pCtx->r14 = pVM->rem.s.Env.regs[14];
2534 pCtx->r15 = pVM->rem.s.Env.regs[15];
2535
2536 pCtx->rsp = pVM->rem.s.Env.regs[R_ESP];
2537#else
2538 pCtx->edi = pVM->rem.s.Env.regs[R_EDI];
2539 pCtx->esi = pVM->rem.s.Env.regs[R_ESI];
2540 pCtx->ebp = pVM->rem.s.Env.regs[R_EBP];
2541 pCtx->eax = pVM->rem.s.Env.regs[R_EAX];
2542 pCtx->ebx = pVM->rem.s.Env.regs[R_EBX];
2543 pCtx->edx = pVM->rem.s.Env.regs[R_EDX];
2544 pCtx->ecx = pVM->rem.s.Env.regs[R_ECX];
2545
2546 pCtx->esp = pVM->rem.s.Env.regs[R_ESP];
2547#endif
2548
2549 pCtx->ss = pVM->rem.s.Env.segs[R_SS].selector;
2550
2551 pCtx->gs = pVM->rem.s.Env.segs[R_GS].selector;
2552 pCtx->fs = pVM->rem.s.Env.segs[R_FS].selector;
2553 pCtx->es = pVM->rem.s.Env.segs[R_ES].selector;
2554 pCtx->ds = pVM->rem.s.Env.segs[R_DS].selector;
2555 pCtx->cs = pVM->rem.s.Env.segs[R_CS].selector;
2556
2557#ifdef TARGET_X86_64
2558 pCtx->rip = pVM->rem.s.Env.eip;
2559 pCtx->rflags.u64 = pVM->rem.s.Env.eflags;
2560#else
2561 pCtx->eip = pVM->rem.s.Env.eip;
2562 pCtx->eflags.u32 = pVM->rem.s.Env.eflags;
2563#endif
2564
2565 pCtx->cr0 = pVM->rem.s.Env.cr[0];
2566 pCtx->cr2 = pVM->rem.s.Env.cr[2];
2567 pCtx->cr3 = pVM->rem.s.Env.cr[3];
2568 if ((pVM->rem.s.Env.cr[4] ^ pCtx->cr4) & X86_CR4_VME)
2569 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2570 pCtx->cr4 = pVM->rem.s.Env.cr[4];
2571
2572 for (i = 0; i < 8; i++)
2573 pCtx->dr[i] = pVM->rem.s.Env.dr[i];
2574
2575 pCtx->gdtr.cbGdt = pVM->rem.s.Env.gdt.limit;
2576 if (pCtx->gdtr.pGdt != (RTGCPTR)pVM->rem.s.Env.gdt.base)
2577 {
2578 pCtx->gdtr.pGdt = (RTGCPTR)pVM->rem.s.Env.gdt.base;
2579 STAM_COUNTER_INC(&gStatREMGDTChange);
2580 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
2581 }
2582
2583 pCtx->idtr.cbIdt = pVM->rem.s.Env.idt.limit;
2584 if (pCtx->idtr.pIdt != (RTGCPTR)pVM->rem.s.Env.idt.base)
2585 {
2586 pCtx->idtr.pIdt = (RTGCPTR)pVM->rem.s.Env.idt.base;
2587 STAM_COUNTER_INC(&gStatREMIDTChange);
2588 VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
2589 }
2590
2591 if ( pCtx->ldtr != pVM->rem.s.Env.ldt.selector
2592 || pCtx->ldtrHid.u64Base != pVM->rem.s.Env.ldt.base
2593 || pCtx->ldtrHid.u32Limit != pVM->rem.s.Env.ldt.limit
2594 || pCtx->ldtrHid.Attr.u != ((pVM->rem.s.Env.ldt.flags >> 8) & 0xF0FF))
2595 {
2596 pCtx->ldtr = pVM->rem.s.Env.ldt.selector;
2597 pCtx->ldtrHid.u64Base = pVM->rem.s.Env.ldt.base;
2598 pCtx->ldtrHid.u32Limit = pVM->rem.s.Env.ldt.limit;
2599 pCtx->ldtrHid.Attr.u = (pVM->rem.s.Env.ldt.flags >> 8) & 0xFFFF;
2600 STAM_COUNTER_INC(&gStatREMLDTRChange);
2601 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
2602 }
2603
2604 if ( pCtx->tr != pVM->rem.s.Env.tr.selector
2605 || pCtx->trHid.u64Base != pVM->rem.s.Env.tr.base
2606 || pCtx->trHid.u32Limit != pVM->rem.s.Env.tr.limit
2607 /* Qemu and AMD/Intel have different ideas about the busy flag ... */
2608 || pCtx->trHid.Attr.u != ( (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF
2609 ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8
2610 : 0) )
2611 {
2612 Log(("REM: TR changed! %#x{%#llx,%#x,%#x} -> %#x{%llx,%#x,%#x}\n",
2613 pCtx->tr, pCtx->trHid.u64Base, pCtx->trHid.u32Limit, pCtx->trHid.Attr.u,
2614 pVM->rem.s.Env.tr.selector, (uint64_t)pVM->rem.s.Env.tr.base, pVM->rem.s.Env.tr.limit,
2615 (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF ? (pVM->rem.s.Env.tr.flags | DESC_TSS_BUSY_MASK) >> 8 : 0));
2616 pCtx->tr = pVM->rem.s.Env.tr.selector;
2617 pCtx->trHid.u64Base = pVM->rem.s.Env.tr.base;
2618 pCtx->trHid.u32Limit = pVM->rem.s.Env.tr.limit;
2619 pCtx->trHid.Attr.u = (pVM->rem.s.Env.tr.flags >> 8) & 0xF0FF;
2620 if (pCtx->trHid.Attr.u)
2621 pCtx->trHid.Attr.u |= DESC_TSS_BUSY_MASK >> 8;
2622 STAM_COUNTER_INC(&gStatREMTRChange);
2623 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
2624 }
2625
2626 /** @todo These values could still be out of sync! */
2627 pCtx->csHid.u64Base = pVM->rem.s.Env.segs[R_CS].base;
2628 pCtx->csHid.u32Limit = pVM->rem.s.Env.segs[R_CS].limit;
2629 /** @note QEmu saves the 2nd dword of the descriptor; we should store the attribute word only! */
2630 pCtx->csHid.Attr.u = (pVM->rem.s.Env.segs[R_CS].flags >> 8) & 0xFFFF;
2631
2632 pCtx->dsHid.u64Base = pVM->rem.s.Env.segs[R_DS].base;
2633 pCtx->dsHid.u32Limit = pVM->rem.s.Env.segs[R_DS].limit;
2634 pCtx->dsHid.Attr.u = (pVM->rem.s.Env.segs[R_DS].flags >> 8) & 0xFFFF;
2635
2636 pCtx->esHid.u64Base = pVM->rem.s.Env.segs[R_ES].base;
2637 pCtx->esHid.u32Limit = pVM->rem.s.Env.segs[R_ES].limit;
2638 pCtx->esHid.Attr.u = (pVM->rem.s.Env.segs[R_ES].flags >> 8) & 0xFFFF;
2639
2640 pCtx->fsHid.u64Base = pVM->rem.s.Env.segs[R_FS].base;
2641 pCtx->fsHid.u32Limit = pVM->rem.s.Env.segs[R_FS].limit;
2642 pCtx->fsHid.Attr.u = (pVM->rem.s.Env.segs[R_FS].flags >> 8) & 0xFFFF;
2643
2644 pCtx->gsHid.u64Base = pVM->rem.s.Env.segs[R_GS].base;
2645 pCtx->gsHid.u32Limit = pVM->rem.s.Env.segs[R_GS].limit;
2646 pCtx->gsHid.Attr.u = (pVM->rem.s.Env.segs[R_GS].flags >> 8) & 0xFFFF;
2647
2648 pCtx->ssHid.u64Base = pVM->rem.s.Env.segs[R_SS].base;
2649 pCtx->ssHid.u32Limit = pVM->rem.s.Env.segs[R_SS].limit;
2650 pCtx->ssHid.Attr.u = (pVM->rem.s.Env.segs[R_SS].flags >> 8) & 0xFFFF;
2651
2652 /* Sysenter MSR */
2653 pCtx->SysEnter.cs = pVM->rem.s.Env.sysenter_cs;
2654 pCtx->SysEnter.eip = pVM->rem.s.Env.sysenter_eip;
2655 pCtx->SysEnter.esp = pVM->rem.s.Env.sysenter_esp;
2656
2657 /* System MSRs. */
2658 pCtx->msrEFER = pVM->rem.s.Env.efer;
2659 pCtx->msrSTAR = pVM->rem.s.Env.star;
2660 pCtx->msrPAT = pVM->rem.s.Env.pat;
2661#ifdef TARGET_X86_64
2662 pCtx->msrLSTAR = pVM->rem.s.Env.lstar;
2663 pCtx->msrCSTAR = pVM->rem.s.Env.cstar;
2664 pCtx->msrSFMASK = pVM->rem.s.Env.fmask;
2665 pCtx->msrKERNELGSBASE = pVM->rem.s.Env.kernelgsbase;
2666#endif
2667
2668}
2669
2670
2671/**
2672 * Update the VMM state information if we're currently in REM.
2673 *
2674 * This method is used by the DBGF and PDMDevice when there is any uncertainty of whether
2675 * we're currently executing in REM and the VMM state is invalid. This method will of
2676 * course check that we're executing in REM before syncing any data over to the VMM.
2677 *
2678 * @param pVM The VM handle.
2679 * @param pVCpu The VMCPU handle.
2680 */
2681REMR3DECL(void) REMR3StateUpdate(PVM pVM, PVMCPU pVCpu)
2682{
2683 if (pVM->rem.s.fInREM)
2684 remR3StateUpdate(pVM, pVCpu);
2685}
2686
2687
2688#undef LOG_GROUP
2689#define LOG_GROUP LOG_GROUP_REM
2690
2691
2692/**
2693 * Notify the recompiler about Address Gate 20 state change.
2694 *
2695 * This notification is required since A20 gate changes are
2696 * initialized from a device driver and the VM might just as
2697 * well be in REM mode as in RAW mode.
2698 *
2699 * @param pVM VM handle.
2700 * @param pVCpu VMCPU handle.
2701 * @param fEnable True if the gate should be enabled.
2702 * False if the gate should be disabled.
2703 */
REMR3DECL(void) REMR3A20Set(PVM pVM, PVMCPU pVCpu, bool fEnable)
{
    LogFlow(("REMR3A20Set: fEnable=%d\n", fEnable));
    VM_ASSERT_EMT(pVM);

    /* Bracket the QEMU call with the ignore-all counter.  NOTE(review): this
       appears to suppress REM change notifications triggered by the A20 gate
       update so they aren't fed back to the VMM — confirm against the
       cIgnoreAll consumers elsewhere in this file. */
    ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
    cpu_x86_set_a20(&pVM->rem.s.Env, fEnable);
    ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
}
2713
2714
2715/**
2716 * Replays the handler notification changes
2717 * Called in response to VM_FF_REM_HANDLER_NOTIFY from the RAW execution loop.
2718 *
2719 * @param pVM VM handle.
2720 */
2721REMR3DECL(void) REMR3ReplayHandlerNotifications(PVM pVM)
2722{
2723 /*
2724 * Replay the flushes.
2725 */
2726 LogFlow(("REMR3ReplayHandlerNotifications:\n"));
2727 VM_ASSERT_EMT(pVM);
2728
2729 /** @todo this isn't ensuring correct replay order. */
2730 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_REM_HANDLER_NOTIFY_BIT))
2731 {
2732 PREMHANDLERNOTIFICATION pReqsRev;
2733 PREMHANDLERNOTIFICATION pReqs;
2734 uint32_t idxNext;
2735 uint32_t idxReqs;
2736
2737 /* Lockless purging of pending notifications. */
2738 idxReqs = ASMAtomicXchgU32(&pVM->rem.s.idxPendingList, UINT32_MAX);
2739 if (idxReqs == UINT32_MAX)
2740 return;
2741 Assert(idxReqs < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2742
2743 /*
2744 * Reverse the list to process it in FIFO order.
2745 */
2746 pReqsRev = &pVM->rem.s.aHandlerNotifications[idxReqs];
2747 pReqs = NULL;
2748 while (pReqsRev)
2749 {
2750 PREMHANDLERNOTIFICATION pCur = pReqsRev;
2751 idxNext = pReqsRev->idxNext;
2752 if (idxNext != UINT32_MAX)
2753 {
2754 Assert(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
2755 pReqsRev = &pVM->rem.s.aHandlerNotifications[idxNext];
2756 }
2757 else
2758 pReqsRev = NULL;
2759 pCur->idxNext = idxNext;
2760 pReqs = pCur;
2761 }
2762
2763 /*
2764 * Loop thru the list, reinserting the record into the free list as they are
2765 * processed to avoid having other EMTs running out of entries while we're flushing.
2766 */
2767 while (pReqs)
2768 {
2769 PREMHANDLERNOTIFICATION pCur = pReqs;
2770
2771 switch (pCur->enmKind)
2772 {
2773 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_REGISTER:
2774 remR3NotifyHandlerPhysicalRegister(pVM,
2775 pCur->u.PhysicalRegister.enmType,
2776 pCur->u.PhysicalRegister.GCPhys,
2777 pCur->u.PhysicalRegister.cb,
2778 pCur->u.PhysicalRegister.fHasHCHandler);
2779 break;
2780
2781 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_DEREGISTER:
2782 remR3NotifyHandlerPhysicalDeregister(pVM,
2783 pCur->u.PhysicalDeregister.enmType,
2784 pCur->u.PhysicalDeregister.GCPhys,
2785 pCur->u.PhysicalDeregister.cb,
2786 pCur->u.PhysicalDeregister.fHasHCHandler,
2787 pCur->u.PhysicalDeregister.fRestoreAsRAM);
2788 break;
2789
2790 case REMHANDLERNOTIFICATIONKIND_PHYSICAL_MODIFY:
2791 remR3NotifyHandlerPhysicalModify(pVM,
2792 pCur->u.PhysicalModify.enmType,
2793 pCur->u.PhysicalModify.GCPhysOld,
2794 pCur->u.PhysicalModify.GCPhysNew,
2795 pCur->u.PhysicalModify.cb,
2796 pCur->u.PhysicalModify.fHasHCHandler,
2797 pCur->u.PhysicalModify.fRestoreAsRAM);
2798 break;
2799
2800 default:
2801 AssertReleaseMsgFailed(("enmKind=%d\n", pCur->enmKind));
2802 break;
2803 }
2804
2805 /*
2806 * Advance pReqs.
2807 */
2808 idxNext = pCur->idxNext;
2809 if (idxNext != UINT32_MAX)
2810 {
2811 AssertMsg(idxNext < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications), ("idxNext=%d\n", idxNext));
2812 pReqs = &pVM->rem.s.aHandlerNotifications[idxNext];
2813 }
2814 else
2815 pReqs = NULL;
2816
2817 /*
2818 * Put the record back into the free list.
2819 */
2820 do
2821 {
2822 idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
2823 ASMAtomicWriteU32(&pCur->idxNext, idxNext);
2824 ASMCompilerBarrier();
2825 } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, pCur->idxSelf, idxNext));
2826 }
2827 }
2828}
2829
2830
2831/**
2832 * Notify REM about changed code page.
2833 *
2834 * @returns VBox status code.
2835 * @param pVM VM handle.
2836 * @param pVCpu VMCPU handle.
2837 * @param pvCodePage Code page address
2838 */
2839REMR3DECL(int) REMR3NotifyCodePageChanged(PVM pVM, PVMCPU pVCpu, RTGCPTR pvCodePage)
2840{
2841#ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
2842 int rc;
2843 RTGCPHYS PhysGC;
2844 uint64_t flags;
2845
2846 VM_ASSERT_EMT(pVM);
2847
2848 /*
2849 * Get the physical page address.
2850 */
2851 rc = PGMGstGetPage(pVM, pvCodePage, &flags, &PhysGC);
2852 if (rc == VINF_SUCCESS)
2853 {
2854 /*
2855 * Sync the required registers and flush the whole page.
2856 * (Easier to do the whole page than notifying it about each physical
2857 * byte that was changed.
2858 */
2859 pVM->rem.s.Env.cr[0] = pVM->rem.s.pCtx->cr0;
2860 pVM->rem.s.Env.cr[2] = pVM->rem.s.pCtx->cr2;
2861 pVM->rem.s.Env.cr[3] = pVM->rem.s.pCtx->cr3;
2862 pVM->rem.s.Env.cr[4] = pVM->rem.s.pCtx->cr4;
2863
2864 tb_invalidate_phys_page_range(PhysGC, PhysGC + PAGE_SIZE - 1, 0);
2865 }
2866#endif
2867 return VINF_SUCCESS;
2868}
2869
2870
2871/**
2872 * Notification about a successful MMR3PhysRegister() call.
2873 *
2874 * @param pVM VM handle.
2875 * @param GCPhys The physical address the RAM.
2876 * @param cb Size of the memory.
2877 * @param fFlags Flags of the REM_NOTIFY_PHYS_RAM_FLAGS_* defines.
2878 */
2879REMR3DECL(void) REMR3NotifyPhysRamRegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, unsigned fFlags)
2880{
2881 Log(("REMR3NotifyPhysRamRegister: GCPhys=%RGp cb=%RGp fFlags=%#x\n", GCPhys, cb, fFlags));
2882 VM_ASSERT_EMT(pVM);
2883
2884 /*
2885 * Validate input - we trust the caller.
2886 */
2887 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2888 Assert(cb);
2889 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2890 AssertMsg(fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_RAM || fFlags == REM_NOTIFY_PHYS_RAM_FLAGS_MMIO2, ("#x\n", fFlags));
2891
2892 /*
2893 * Base ram? Update GCPhysLastRam.
2894 */
2895 if (fFlags & REM_NOTIFY_PHYS_RAM_FLAGS_RAM)
2896 {
2897 if (GCPhys + (cb - 1) > pVM->rem.s.GCPhysLastRam)
2898 {
2899 AssertReleaseMsg(!pVM->rem.s.fGCPhysLastRamFixed, ("GCPhys=%RGp cb=%RGp\n", GCPhys, cb));
2900 pVM->rem.s.GCPhysLastRam = GCPhys + (cb - 1);
2901 }
2902 }
2903
2904 /*
2905 * Register the ram.
2906 */
2907 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2908
2909 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2910 cpu_register_physical_memory(GCPhys, cb, GCPhys);
2911 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2912
2913 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2914}
2915
2916
2917/**
2918 * Notification about a successful MMR3PhysRomRegister() call.
2919 *
2920 * @param pVM VM handle.
2921 * @param GCPhys The physical address of the ROM.
2922 * @param cb The size of the ROM.
2923 * @param pvCopy Pointer to the ROM copy.
2924 * @param fShadow Whether it's currently writable shadow ROM or normal readonly ROM.
2925 * This function will be called when ever the protection of the
2926 * shadow ROM changes (at reset and end of POST).
2927 */
2928REMR3DECL(void) REMR3NotifyPhysRomRegister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb, void *pvCopy, bool fShadow)
2929{
2930 Log(("REMR3NotifyPhysRomRegister: GCPhys=%RGp cb=%d fShadow=%RTbool\n", GCPhys, cb, fShadow));
2931 VM_ASSERT_EMT(pVM);
2932
2933 /*
2934 * Validate input - we trust the caller.
2935 */
2936 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2937 Assert(cb);
2938 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2939
2940 /*
2941 * Register the rom.
2942 */
2943 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2944
2945 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2946 cpu_register_physical_memory(GCPhys, cb, GCPhys | (fShadow ? 0 : IO_MEM_ROM));
2947 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2948
2949 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2950}
2951
2952
2953/**
2954 * Notification about a successful memory deregistration or reservation.
2955 *
2956 * @param pVM VM Handle.
2957 * @param GCPhys Start physical address.
2958 * @param cb The size of the range.
2959 */
2960REMR3DECL(void) REMR3NotifyPhysRamDeregister(PVM pVM, RTGCPHYS GCPhys, RTUINT cb)
2961{
2962 Log(("REMR3NotifyPhysRamDeregister: GCPhys=%RGp cb=%d\n", GCPhys, cb));
2963 VM_ASSERT_EMT(pVM);
2964
2965 /*
2966 * Validate input - we trust the caller.
2967 */
2968 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
2969 Assert(cb);
2970 Assert(RT_ALIGN_Z(cb, PAGE_SIZE) == cb);
2971
2972 /*
2973 * Unassigning the memory.
2974 */
2975 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
2976
2977 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
2978 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
2979 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
2980
2981 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
2982}
2983
2984
2985/**
2986 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
2987 *
2988 * @param pVM VM Handle.
2989 * @param enmType Handler type.
2990 * @param GCPhys Handler range address.
2991 * @param cb Size of the handler range.
2992 * @param fHasHCHandler Set if the handler has a HC callback function.
2993 *
2994 * @remark MMR3PhysRomRegister assumes that this function will not apply the
2995 * Handler memory type to memory which has no HC handler.
2996 */
2997static void remR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
2998{
2999 Log(("REMR3NotifyHandlerPhysicalRegister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%d\n",
3000 enmType, GCPhys, cb, fHasHCHandler));
3001
3002 VM_ASSERT_EMT(pVM);
3003 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3004 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3005
3006
3007 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3008
3009 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3010 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3011 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iMMIOMemType);
3012 else if (fHasHCHandler)
3013 cpu_register_physical_memory(GCPhys, cb, pVM->rem.s.iHandlerMemType);
3014 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3015
3016 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3017}
3018
3019/**
3020 * Notification about a successful PGMR3HandlerPhysicalRegister() call.
3021 *
3022 * @param pVM VM Handle.
3023 * @param enmType Handler type.
3024 * @param GCPhys Handler range address.
3025 * @param cb Size of the handler range.
3026 * @param fHasHCHandler Set if the handler has a HC callback function.
3027 *
3028 * @remark MMR3PhysRomRegister assumes that this function will not apply the
3029 * Handler memory type to memory which has no HC handler.
3030 */
3031REMR3DECL(void) REMR3NotifyHandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler)
3032{
3033 REMR3ReplayHandlerNotifications(pVM);
3034
3035 remR3NotifyHandlerPhysicalRegister(pVM, enmType, GCPhys, cb, fHasHCHandler);
3036}
3037
3038/**
3039 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3040 *
3041 * @param pVM VM Handle.
3042 * @param enmType Handler type.
3043 * @param GCPhys Handler range address.
3044 * @param cb Size of the handler range.
3045 * @param fHasHCHandler Set if the handler has a HC callback function.
3046 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3047 */
3048static void remR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3049{
3050 Log(("REMR3NotifyHandlerPhysicalDeregister: enmType=%d GCPhys=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool RAM=%08x\n",
3051 enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM, MMR3PhysGetRamSize(pVM)));
3052 VM_ASSERT_EMT(pVM);
3053
3054
3055 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3056
3057 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3058 /** @todo this isn't right, MMIO can (in theory) be restored as RAM. */
3059 if (enmType == PGMPHYSHANDLERTYPE_MMIO)
3060 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3061 else if (fHasHCHandler)
3062 {
3063 if (!fRestoreAsRAM)
3064 {
3065 Assert(GCPhys > MMR3PhysGetRamSize(pVM));
3066 cpu_register_physical_memory(GCPhys, cb, IO_MEM_UNASSIGNED);
3067 }
3068 else
3069 {
3070 Assert(RT_ALIGN_T(GCPhys, PAGE_SIZE, RTGCPHYS) == GCPhys);
3071 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3072 cpu_register_physical_memory(GCPhys, cb, GCPhys);
3073 }
3074 }
3075 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3076
3077 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3078}
3079
3080/**
3081 * Notification about a successful PGMR3HandlerPhysicalDeregister() operation.
3082 *
3083 * @param pVM VM Handle.
3084 * @param enmType Handler type.
3085 * @param GCPhys Handler range address.
3086 * @param cb Size of the handler range.
3087 * @param fHasHCHandler Set if the handler has a HC callback function.
3088 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3089 */
3090REMR3DECL(void) REMR3NotifyHandlerPhysicalDeregister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3091{
3092 REMR3ReplayHandlerNotifications(pVM);
3093 remR3NotifyHandlerPhysicalDeregister(pVM, enmType, GCPhys, cb, fHasHCHandler, fRestoreAsRAM);
3094}
3095
3096
3097/**
3098 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3099 *
3100 * @param pVM VM Handle.
3101 * @param enmType Handler type.
3102 * @param GCPhysOld Old handler range address.
3103 * @param GCPhysNew New handler range address.
3104 * @param cb Size of the handler range.
3105 * @param fHasHCHandler Set if the handler has a HC callback function.
3106 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3107 */
3108static void remR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3109{
3110 Log(("REMR3NotifyHandlerPhysicalModify: enmType=%d GCPhysOld=%RGp GCPhysNew=%RGp cb=%RGp fHasHCHandler=%RTbool fRestoreAsRAM=%RTbool\n",
3111 enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM));
3112 VM_ASSERT_EMT(pVM);
3113 AssertReleaseMsg(enmType != PGMPHYSHANDLERTYPE_MMIO, ("enmType=%d\n", enmType));
3114
3115 if (fHasHCHandler)
3116 {
3117 ASMAtomicIncU32(&pVM->rem.s.cIgnoreAll);
3118
3119 /*
3120 * Reset the old page.
3121 */
3122 PDMCritSectEnter(&pVM->rem.s.CritSectRegister, VERR_SEM_BUSY);
3123 if (!fRestoreAsRAM)
3124 cpu_register_physical_memory(GCPhysOld, cb, IO_MEM_UNASSIGNED);
3125 else
3126 {
3127 /* This is not perfect, but it'll do for PD monitoring... */
3128 Assert(cb == PAGE_SIZE);
3129 Assert(RT_ALIGN_T(GCPhysOld, PAGE_SIZE, RTGCPHYS) == GCPhysOld);
3130 cpu_register_physical_memory(GCPhysOld, cb, GCPhysOld);
3131 }
3132
3133 /*
3134 * Update the new page.
3135 */
3136 Assert(RT_ALIGN_T(GCPhysNew, PAGE_SIZE, RTGCPHYS) == GCPhysNew);
3137 Assert(RT_ALIGN_T(cb, PAGE_SIZE, RTGCPHYS) == cb);
3138 cpu_register_physical_memory(GCPhysNew, cb, pVM->rem.s.iHandlerMemType);
3139 PDMCritSectLeave(&pVM->rem.s.CritSectRegister);
3140
3141 ASMAtomicDecU32(&pVM->rem.s.cIgnoreAll);
3142 }
3143}
3144
3145/**
3146 * Notification about a successful PGMR3HandlerPhysicalModify() call.
3147 *
3148 * @param pVM VM Handle.
3149 * @param enmType Handler type.
3150 * @param GCPhysOld Old handler range address.
3151 * @param GCPhysNew New handler range address.
3152 * @param cb Size of the handler range.
3153 * @param fHasHCHandler Set if the handler has a HC callback function.
3154 * @param fRestoreAsRAM Whether the to restore it as normal RAM or as unassigned memory.
3155 */
3156REMR3DECL(void) REMR3NotifyHandlerPhysicalModify(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhysOld, RTGCPHYS GCPhysNew, RTGCPHYS cb, bool fHasHCHandler, bool fRestoreAsRAM)
3157{
3158 REMR3ReplayHandlerNotifications(pVM);
3159
3160 remR3NotifyHandlerPhysicalModify(pVM, enmType, GCPhysOld, GCPhysNew, cb, fHasHCHandler, fRestoreAsRAM);
3161}
3162
3163/**
3164 * Checks if we're handling access to this page or not.
3165 *
3166 * @returns true if we're trapping access.
3167 * @returns false if we aren't.
3168 * @param pVM The VM handle.
3169 * @param GCPhys The physical address.
3170 *
3171 * @remark This function will only work correctly in VBOX_STRICT builds!
3172 */
3173REMR3DECL(bool) REMR3IsPageAccessHandled(PVM pVM, RTGCPHYS GCPhys)
3174{
3175#ifdef VBOX_STRICT
3176 unsigned long off;
3177 REMR3ReplayHandlerNotifications(pVM);
3178
3179 off = get_phys_page_offset(GCPhys);
3180 return (off & PAGE_OFFSET_MASK) == pVM->rem.s.iHandlerMemType
3181 || (off & PAGE_OFFSET_MASK) == pVM->rem.s.iMMIOMemType
3182 || (off & PAGE_OFFSET_MASK) == IO_MEM_ROM;
3183#else
3184 return false;
3185#endif
3186}
3187
3188
3189/**
3190 * Deals with a rare case in get_phys_addr_code where the code
3191 * is being monitored.
3192 *
3193 * It could also be an MMIO page, in which case we will raise a fatal error.
3194 *
3195 * @returns The physical address corresponding to addr.
3196 * @param env The cpu environment.
3197 * @param addr The virtual address.
3198 * @param pTLBEntry The TLB entry.
3199 */
3200target_ulong remR3PhysGetPhysicalAddressCode(CPUState* env,
3201 target_ulong addr,
3202 CPUTLBEntry* pTLBEntry,
3203 target_phys_addr_t ioTLBEntry)
3204{
3205 PVM pVM = env->pVM;
3206
3207 if ((ioTLBEntry & ~TARGET_PAGE_MASK) == pVM->rem.s.iHandlerMemType)
3208 {
3209 /* If code memory is being monitored, appropriate IOTLB entry will have
3210 handler IO type, and addend will provide real physical address, no
3211 matter if we store VA in TLB or not, as handlers are always passed PA */
3212 target_ulong ret = (ioTLBEntry & TARGET_PAGE_MASK) + addr;
3213 return ret;
3214 }
3215 LogRel(("\nTrying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv! (iHandlerMemType=%#x iMMIOMemType=%#x IOTLB=%RGp)\n"
3216 "*** handlers\n",
3217 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType, (RTGCPHYS)ioTLBEntry));
3218 DBGFR3Info(pVM, "handlers", NULL, DBGFR3InfoLogRelHlp());
3219 LogRel(("*** mmio\n"));
3220 DBGFR3Info(pVM, "mmio", NULL, DBGFR3InfoLogRelHlp());
3221 LogRel(("*** phys\n"));
3222 DBGFR3Info(pVM, "phys", NULL, DBGFR3InfoLogRelHlp());
3223 cpu_abort(env, "Trying to execute code with memory type addr_code=%RGv addend=%RGp at %RGv. (iHandlerMemType=%#x iMMIOMemType=%#x)\n",
3224 (RTGCPTR)pTLBEntry->addr_code, (RTGCPHYS)pTLBEntry->addend, (RTGCPTR)addr, pVM->rem.s.iHandlerMemType, pVM->rem.s.iMMIOMemType);
3225 AssertFatalFailed();
3226}
3227
3228/**
3229 * Read guest RAM and ROM.
3230 *
3231 * @param SrcGCPhys The source address (guest physical).
3232 * @param pvDst The destination address.
3233 * @param cb Number of bytes
3234 */
3235void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb)
3236{
3237 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3238 VBOX_CHECK_ADDR(SrcGCPhys);
3239 PGMPhysRead(cpu_single_env->pVM, SrcGCPhys, pvDst, cb);
3240#ifdef VBOX_DEBUG_PHYS
3241 LogRel(("read(%d): %08x\n", cb, (uint32_t)SrcGCPhys));
3242#endif
3243 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3244}
3245
3246
3247/**
3248 * Read guest RAM and ROM, unsigned 8-bit.
3249 *
3250 * @param SrcGCPhys The source address (guest physical).
3251 */
3252RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys)
3253{
3254 uint8_t val;
3255 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3256 VBOX_CHECK_ADDR(SrcGCPhys);
3257 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3258 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3259#ifdef VBOX_DEBUG_PHYS
3260 LogRel(("readu8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3261#endif
3262 return val;
3263}
3264
3265
3266/**
3267 * Read guest RAM and ROM, signed 8-bit.
3268 *
3269 * @param SrcGCPhys The source address (guest physical).
3270 */
3271RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys)
3272{
3273 int8_t val;
3274 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3275 VBOX_CHECK_ADDR(SrcGCPhys);
3276 val = PGMR3PhysReadU8(cpu_single_env->pVM, SrcGCPhys);
3277 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3278#ifdef VBOX_DEBUG_PHYS
3279 LogRel(("reads8: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3280#endif
3281 return val;
3282}
3283
3284
3285/**
3286 * Read guest RAM and ROM, unsigned 16-bit.
3287 *
3288 * @param SrcGCPhys The source address (guest physical).
3289 */
3290RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys)
3291{
3292 uint16_t val;
3293 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3294 VBOX_CHECK_ADDR(SrcGCPhys);
3295 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3296 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3297#ifdef VBOX_DEBUG_PHYS
3298 LogRel(("readu16: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3299#endif
3300 return val;
3301}
3302
3303
3304/**
3305 * Read guest RAM and ROM, signed 16-bit.
3306 *
3307 * @param SrcGCPhys The source address (guest physical).
3308 */
3309RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys)
3310{
3311 int16_t val;
3312 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3313 VBOX_CHECK_ADDR(SrcGCPhys);
3314 val = PGMR3PhysReadU16(cpu_single_env->pVM, SrcGCPhys);
3315 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3316#ifdef VBOX_DEBUG_PHYS
3317 LogRel(("reads16: %x <- %08x\n", (uint16_t)val, (uint32_t)SrcGCPhys));
3318#endif
3319 return val;
3320}
3321
3322
3323/**
3324 * Read guest RAM and ROM, unsigned 32-bit.
3325 *
3326 * @param SrcGCPhys The source address (guest physical).
3327 */
3328RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys)
3329{
3330 uint32_t val;
3331 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3332 VBOX_CHECK_ADDR(SrcGCPhys);
3333 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3334 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3335#ifdef VBOX_DEBUG_PHYS
3336 LogRel(("readu32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3337#endif
3338 return val;
3339}
3340
3341
3342/**
3343 * Read guest RAM and ROM, signed 32-bit.
3344 *
3345 * @param SrcGCPhys The source address (guest physical).
3346 */
3347RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys)
3348{
3349 int32_t val;
3350 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3351 VBOX_CHECK_ADDR(SrcGCPhys);
3352 val = PGMR3PhysReadU32(cpu_single_env->pVM, SrcGCPhys);
3353 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3354#ifdef VBOX_DEBUG_PHYS
3355 LogRel(("reads32: %x <- %08x\n", val, (uint32_t)SrcGCPhys));
3356#endif
3357 return val;
3358}
3359
3360
3361/**
3362 * Read guest RAM and ROM, unsigned 64-bit.
3363 *
3364 * @param SrcGCPhys The source address (guest physical).
3365 */
3366uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys)
3367{
3368 uint64_t val;
3369 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3370 VBOX_CHECK_ADDR(SrcGCPhys);
3371 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3372 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3373#ifdef VBOX_DEBUG_PHYS
3374 LogRel(("readu64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3375#endif
3376 return val;
3377}
3378
3379
3380/**
3381 * Read guest RAM and ROM, signed 64-bit.
3382 *
3383 * @param SrcGCPhys The source address (guest physical).
3384 */
3385int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys)
3386{
3387 int64_t val;
3388 STAM_PROFILE_ADV_START(&gStatMemRead, a);
3389 VBOX_CHECK_ADDR(SrcGCPhys);
3390 val = PGMR3PhysReadU64(cpu_single_env->pVM, SrcGCPhys);
3391 STAM_PROFILE_ADV_STOP(&gStatMemRead, a);
3392#ifdef VBOX_DEBUG_PHYS
3393 LogRel(("reads64: %llx <- %08x\n", val, (uint32_t)SrcGCPhys));
3394#endif
3395 return val;
3396}
3397
3398
3399/**
3400 * Write guest RAM.
3401 *
3402 * @param DstGCPhys The destination address (guest physical).
3403 * @param pvSrc The source address.
3404 * @param cb Number of bytes to write
3405 */
3406void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb)
3407{
3408 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3409 VBOX_CHECK_ADDR(DstGCPhys);
3410 PGMPhysWrite(cpu_single_env->pVM, DstGCPhys, pvSrc, cb);
3411 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3412#ifdef VBOX_DEBUG_PHYS
3413 LogRel(("write(%d): %08x\n", cb, (uint32_t)DstGCPhys));
3414#endif
3415}
3416
3417
3418/**
3419 * Write guest RAM, unsigned 8-bit.
3420 *
3421 * @param DstGCPhys The destination address (guest physical).
3422 * @param val Value
3423 */
3424void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val)
3425{
3426 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3427 VBOX_CHECK_ADDR(DstGCPhys);
3428 PGMR3PhysWriteU8(cpu_single_env->pVM, DstGCPhys, val);
3429 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3430#ifdef VBOX_DEBUG_PHYS
3431 LogRel(("writeu8: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3432#endif
3433}
3434
3435
3436/**
3437 * Write guest RAM, unsigned 8-bit.
3438 *
3439 * @param DstGCPhys The destination address (guest physical).
3440 * @param val Value
3441 */
3442void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val)
3443{
3444 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3445 VBOX_CHECK_ADDR(DstGCPhys);
3446 PGMR3PhysWriteU16(cpu_single_env->pVM, DstGCPhys, val);
3447 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3448#ifdef VBOX_DEBUG_PHYS
3449 LogRel(("writeu16: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3450#endif
3451}
3452
3453
3454/**
3455 * Write guest RAM, unsigned 32-bit.
3456 *
3457 * @param DstGCPhys The destination address (guest physical).
3458 * @param val Value
3459 */
3460void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val)
3461{
3462 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3463 VBOX_CHECK_ADDR(DstGCPhys);
3464 PGMR3PhysWriteU32(cpu_single_env->pVM, DstGCPhys, val);
3465 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3466#ifdef VBOX_DEBUG_PHYS
3467 LogRel(("writeu32: %x -> %08x\n", val, (uint32_t)DstGCPhys));
3468#endif
3469}
3470
3471
3472/**
3473 * Write guest RAM, unsigned 64-bit.
3474 *
3475 * @param DstGCPhys The destination address (guest physical).
3476 * @param val Value
3477 */
3478void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val)
3479{
3480 STAM_PROFILE_ADV_START(&gStatMemWrite, a);
3481 VBOX_CHECK_ADDR(DstGCPhys);
3482 PGMR3PhysWriteU64(cpu_single_env->pVM, DstGCPhys, val);
3483 STAM_PROFILE_ADV_STOP(&gStatMemWrite, a);
3484#ifdef VBOX_DEBUG_PHYS
3485 LogRel(("writeu64: %llx -> %08x\n", val, (uint32_t)SrcGCPhys));
3486#endif
3487}
3488
3489#undef LOG_GROUP
3490#define LOG_GROUP LOG_GROUP_REM_MMIO
3491
3492/** Read MMIO memory. */
3493static uint32_t remR3MMIOReadU8(void *pvVM, target_phys_addr_t GCPhys)
3494{
3495 uint32_t u32 = 0;
3496 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 1);
3497 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3498 Log2(("remR3MMIOReadU8: GCPhys=%RGp -> %02x\n", GCPhys, u32));
3499 return u32;
3500}
3501
3502/** Read MMIO memory. */
3503static uint32_t remR3MMIOReadU16(void *pvVM, target_phys_addr_t GCPhys)
3504{
3505 uint32_t u32 = 0;
3506 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 2);
3507 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3508 Log2(("remR3MMIOReadU16: GCPhys=%RGp -> %04x\n", GCPhys, u32));
3509 return u32;
3510}
3511
3512/** Read MMIO memory. */
3513static uint32_t remR3MMIOReadU32(void *pvVM, target_phys_addr_t GCPhys)
3514{
3515 uint32_t u32 = 0;
3516 int rc = IOMMMIORead((PVM)pvVM, GCPhys, &u32, 4);
3517 AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
3518 Log2(("remR3MMIOReadU32: GCPhys=%RGp -> %08x\n", GCPhys, u32));
3519 return u32;
3520}
3521
/** Write to MMIO memory.
 *  Failures are only asserted; the QEMU callback signature is void, so the
 *  status cannot be propagated. */
static void remR3MMIOWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 1);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3530
/** Write a 16-bit word to MMIO memory.
 *  Failures are only asserted (void QEMU callback signature). */
static void remR3MMIOWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 2);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3539
/** Write a 32-bit dword to MMIO memory.
 *  Failures are only asserted (void QEMU callback signature). */
static void remR3MMIOWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    int rc;
    Log2(("remR3MMIOWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    rc = IOMMMIOWrite((PVM)pvVM, GCPhys, u32, 4);
    AssertMsg(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc)); NOREF(rc);
}
3548
3549
3550#undef LOG_GROUP
3551#define LOG_GROUP LOG_GROUP_REM_HANDLER
3552
3553/* !!!WARNING!!! This is extremely hackish right now, we assume it's only for LFB access! !!!WARNING!!! */
3554
3555static uint32_t remR3HandlerReadU8(void *pvVM, target_phys_addr_t GCPhys)
3556{
3557 uint8_t u8;
3558 Log2(("remR3HandlerReadU8: GCPhys=%RGp\n", GCPhys));
3559 PGMPhysRead((PVM)pvVM, GCPhys, &u8, sizeof(u8));
3560 return u8;
3561}
3562
3563static uint32_t remR3HandlerReadU16(void *pvVM, target_phys_addr_t GCPhys)
3564{
3565 uint16_t u16;
3566 Log2(("remR3HandlerReadU16: GCPhys=%RGp\n", GCPhys));
3567 PGMPhysRead((PVM)pvVM, GCPhys, &u16, sizeof(u16));
3568 return u16;
3569}
3570
3571static uint32_t remR3HandlerReadU32(void *pvVM, target_phys_addr_t GCPhys)
3572{
3573 uint32_t u32;
3574 Log2(("remR3HandlerReadU32: GCPhys=%RGp\n", GCPhys));
3575 PGMPhysRead((PVM)pvVM, GCPhys, &u32, sizeof(u32));
3576 return u32;
3577}
3578
/** Handler-memory byte write: writes the first byte of u32 via PGM.
 *  NOTE(review): this assumes a little-endian host so that the first byte of
 *  u32 is the least significant one -- confirm if ever ported. */
static void remR3HandlerWriteU8(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU8: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint8_t));
}
3584
/** Handler-memory 16-bit write: writes the first two bytes of u32 via PGM.
 *  NOTE(review): assumes little-endian host layout -- confirm if ever ported. */
static void remR3HandlerWriteU16(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU16: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint16_t));
}
3590
/** Handler-memory 32-bit write: writes all four bytes of u32 via PGM. */
static void remR3HandlerWriteU32(void *pvVM, target_phys_addr_t GCPhys, uint32_t u32)
{
    Log2(("remR3HandlerWriteU32: GCPhys=%RGp u32=%#x\n", GCPhys, u32));
    PGMPhysWrite((PVM)pvVM, GCPhys, &u32, sizeof(uint32_t));
}
3596
3597/* -+- disassembly -+- */
3598
3599#undef LOG_GROUP
3600#define LOG_GROUP LOG_GROUP_REM_DISAS
3601
3602
3603/**
3604 * Enables or disables singled stepped disassembly.
3605 *
3606 * @returns VBox status code.
3607 * @param pVM VM handle.
3608 * @param fEnable To enable set this flag, to disable clear it.
3609 */
3610static DECLCALLBACK(int) remR3DisasEnableStepping(PVM pVM, bool fEnable)
3611{
3612 LogFlow(("remR3DisasEnableStepping: fEnable=%d\n", fEnable));
3613 VM_ASSERT_EMT(pVM);
3614
3615 if (fEnable)
3616 pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP;
3617 else
3618 pVM->rem.s.Env.state &= ~CPU_EMULATE_SINGLE_STEP;
3619 return VINF_SUCCESS;
3620}
3621
3622
3623/**
3624 * Enables or disables singled stepped disassembly.
3625 *
3626 * @returns VBox status code.
3627 * @param pVM VM handle.
3628 * @param fEnable To enable set this flag, to disable clear it.
3629 */
3630REMR3DECL(int) REMR3DisasEnableStepping(PVM pVM, bool fEnable)
3631{
3632 PVMREQ pReq;
3633 int rc;
3634
3635 LogFlow(("REMR3DisasEnableStepping: fEnable=%d\n", fEnable));
3636 if (VM_IS_EMT(pVM))
3637 return remR3DisasEnableStepping(pVM, fEnable);
3638
3639 rc = VMR3ReqCall(pVM, VMCPUID_ANY, &pReq, RT_INDEFINITE_WAIT, (PFNRT)remR3DisasEnableStepping, 2, pVM, fEnable);
3640 AssertRC(rc);
3641 if (RT_SUCCESS(rc))
3642 rc = pReq->iStatus;
3643 VMR3ReqFree(pReq);
3644 return rc;
3645}
3646
3647
#if defined(VBOX_WITH_DEBUGGER) && !(defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64))
/**
 * External Debugger Command: .remstep [on|off|1|0]
 *
 * With no argument, prints the current state; otherwise converts the argument
 * to a boolean and toggles single-stepped disassembly accordingly.
 */
static DECLCALLBACK(int) remR3CmdDisasEnableStepping(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
{
    bool fEnable;
    int rc;

    /* print status */
    if (cArgs == 0)
        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "DisasStepping is %s\n",
                                  pVM->rem.s.Env.state & CPU_EMULATE_SINGLE_STEP ? "enabled" : "disabled");

    /* convert the argument and change the mode. */
    rc = pCmdHlp->pfnVarToBool(pCmdHlp, &paArgs[0], &fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "boolean conversion failed!\n");
    rc = REMR3DisasEnableStepping(pVM, fEnable);
    if (RT_FAILURE(rc))
        return pCmdHlp->pfnVBoxError(pCmdHlp, rc, "REMR3DisasEnableStepping failed!\n");
    return rc;
}
#endif
3672
3673
3674/**
3675 * Disassembles one instruction and prints it to the log.
3676 *
3677 * @returns Success indicator.
3678 * @param env Pointer to the recompiler CPU structure.
3679 * @param f32BitCode Indicates that whether or not the code should
3680 * be disassembled as 16 or 32 bit. If -1 the CS
3681 * selector will be inspected.
3682 * @param pszPrefix
3683 */
3684bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
3685{
3686 PVM pVM = env->pVM;
3687 const bool fLog = LogIsEnabled();
3688 const bool fLog2 = LogIs2Enabled();
3689 int rc = VINF_SUCCESS;
3690
3691 /*
3692 * Don't bother if there ain't any log output to do.
3693 */
3694 if (!fLog && !fLog2)
3695 return true;
3696
3697 /*
3698 * Update the state so DBGF reads the correct register values.
3699 */
3700 remR3StateUpdate(pVM, env->pVCpu);
3701
3702 /*
3703 * Log registers if requested.
3704 */
3705 if (!fLog2)
3706 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
3707
3708 /*
3709 * Disassemble to log.
3710 */
3711 if (fLog)
3712 rc = DBGFR3DisasInstrCurrentLogInternal(env->pVCpu, pszPrefix);
3713
3714 return RT_SUCCESS(rc);
3715}
3716
3717
3718/**
3719 * Disassemble recompiled code.
3720 *
3721 * @param phFileIgnored Ignored, logfile usually.
3722 * @param pvCode Pointer to the code block.
3723 * @param cb Size of the code block.
3724 */
3725void disas(FILE *phFile, void *pvCode, unsigned long cb)
3726{
3727#ifdef DEBUG_TMP_LOGGING
3728# define DISAS_PRINTF(x...) fprintf(phFile, x)
3729#else
3730# define DISAS_PRINTF(x...) RTLogPrintf(x)
3731 if (LogIs2Enabled())
3732#endif
3733 {
3734 unsigned off = 0;
3735 char szOutput[256];
3736 DISCPUSTATE Cpu;
3737
3738 memset(&Cpu, 0, sizeof(Cpu));
3739#ifdef RT_ARCH_X86
3740 Cpu.mode = CPUMODE_32BIT;
3741#else
3742 Cpu.mode = CPUMODE_64BIT;
3743#endif
3744
3745 DISAS_PRINTF("Recompiled Code: %p %#lx (%ld) bytes\n", pvCode, cb, cb);
3746 while (off < cb)
3747 {
3748 uint32_t cbInstr;
3749 if (RT_SUCCESS(DISInstr(&Cpu, (uintptr_t)pvCode + off, 0, &cbInstr, szOutput)))
3750 DISAS_PRINTF("%s", szOutput);
3751 else
3752 {
3753 DISAS_PRINTF("disas error\n");
3754 cbInstr = 1;
3755#ifdef RT_ARCH_AMD64 /** @todo remove when DISInstr starts supporing 64-bit code. */
3756 break;
3757#endif
3758 }
3759 off += cbInstr;
3760 }
3761 }
3762
3763#undef DISAS_PRINTF
3764}
3765
3766
3767/**
3768 * Disassemble guest code.
3769 *
3770 * @param phFileIgnored Ignored, logfile usually.
3771 * @param uCode The guest address of the code to disassemble. (flat?)
3772 * @param cb Number of bytes to disassemble.
3773 * @param fFlags Flags, probably something which tells if this is 16, 32 or 64 bit code.
3774 */
3775void target_disas(FILE *phFile, target_ulong uCode, target_ulong cb, int fFlags)
3776{
3777#ifdef DEBUG_TMP_LOGGING
3778# define DISAS_PRINTF(x...) fprintf(phFile, x)
3779#else
3780# define DISAS_PRINTF(x...) RTLogPrintf(x)
3781 if (LogIs2Enabled())
3782#endif
3783 {
3784 PVM pVM = cpu_single_env->pVM;
3785 PVMCPU pVCpu = cpu_single_env->pVCpu;
3786 RTSEL cs;
3787 RTGCUINTPTR eip;
3788
3789 Assert(pVCpu);
3790
3791 /*
3792 * Update the state so DBGF reads the correct register values (flags).
3793 */
3794 remR3StateUpdate(pVM, pVCpu);
3795
3796 /*
3797 * Do the disassembling.
3798 */
3799 DISAS_PRINTF("Guest Code: PC=%llx %llx bytes fFlags=%d\n", (uint64_t)uCode, (uint64_t)cb, fFlags);
3800 cs = cpu_single_env->segs[R_CS].selector;
3801 eip = uCode - cpu_single_env->segs[R_CS].base;
3802 for (;;)
3803 {
3804 char szBuf[256];
3805 uint32_t cbInstr;
3806 int rc = DBGFR3DisasInstrEx(pVM,
3807 pVCpu->idCpu,
3808 cs,
3809 eip,
3810 0,
3811 szBuf, sizeof(szBuf),
3812 &cbInstr);
3813 if (RT_SUCCESS(rc))
3814 DISAS_PRINTF("%llx %s\n", (uint64_t)uCode, szBuf);
3815 else
3816 {
3817 DISAS_PRINTF("%llx %04x:%llx: %s\n", (uint64_t)uCode, cs, (uint64_t)eip, szBuf);
3818 cbInstr = 1;
3819 }
3820
3821 /* next */
3822 if (cb <= cbInstr)
3823 break;
3824 cb -= cbInstr;
3825 uCode += cbInstr;
3826 eip += cbInstr;
3827 }
3828 }
3829#undef DISAS_PRINTF
3830}
3831
3832
3833/**
3834 * Looks up a guest symbol.
3835 *
3836 * @returns Pointer to symbol name. This is a static buffer.
3837 * @param orig_addr The address in question.
3838 */
3839const char *lookup_symbol(target_ulong orig_addr)
3840{
3841 RTGCINTPTR off = 0;
3842 DBGFSYMBOL Sym;
3843 PVM pVM = cpu_single_env->pVM;
3844 int rc = DBGFR3SymbolByAddr(pVM, orig_addr, &off, &Sym);
3845 if (RT_SUCCESS(rc))
3846 {
3847 static char szSym[sizeof(Sym.szName) + 48];
3848 if (!off)
3849 RTStrPrintf(szSym, sizeof(szSym), "%s\n", Sym.szName);
3850 else if (off > 0)
3851 RTStrPrintf(szSym, sizeof(szSym), "%s+%x\n", Sym.szName, off);
3852 else
3853 RTStrPrintf(szSym, sizeof(szSym), "%s-%x\n", Sym.szName, -off);
3854 return szSym;
3855 }
3856 return "<N/A>";
3857}
3858
3859
3860#undef LOG_GROUP
3861#define LOG_GROUP LOG_GROUP_REM
3862
3863
3864/* -+- FF notifications -+- */
3865
3866
/**
 * Notification about a pending interrupt.
 *
 * Records the vector so cpu_get_pic_interrupt() can return it without
 * calling PDMGetInterrupt (which would already have dequeued it).
 *
 * @param pVM VM Handle.
 * @param pVCpu VMCPU Handle. (Currently unused; the pending interrupt is
 *              stored per-VM. NOTE(review): looks SMP-unsafe — confirm only
 *              one EMT ever records a pending IRQ.)
 * @param u8Interrupt Interrupt
 * @thread The emulation thread.
 */
REMR3DECL(void) REMR3NotifyPendingInterrupt(PVM pVM, PVMCPU pVCpu, uint8_t u8Interrupt)
{
    /* Only one interrupt may be pending at any given time. */
    Assert(pVM->rem.s.u32PendingInterrupt == REM_NO_PENDING_IRQ);
    pVM->rem.s.u32PendingInterrupt = u8Interrupt;
}
3880
/**
 * Notification about a pending interrupt.
 *
 * @returns Pending interrupt or REM_NO_PENDING_IRQ
 * @param pVM VM Handle.
 * @param pVCpu VMCPU Handle. (Unused; the pending interrupt is per-VM state.)
 * @thread The emulation thread.
 */
REMR3DECL(uint32_t) REMR3QueryPendingInterrupt(PVM pVM, PVMCPU pVCpu)
{
    return pVM->rem.s.u32PendingInterrupt;
}
3893
3894/**
3895 * Notification about the interrupt FF being set.
3896 *
3897 * @param pVM VM Handle.
3898 * @param pVCpu VMCPU Handle.
3899 * @thread The emulation thread.
3900 */
3901REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
3902{
3903 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
3904 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled"));
3905 if (pVM->rem.s.fInREM)
3906 {
3907 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3908 CPU_INTERRUPT_EXTERNAL_HARD);
3909 }
3910}
3911
3912
3913/**
3914 * Notification about the interrupt FF being set.
3915 *
3916 * @param pVM VM Handle.
3917 * @param pVCpu VMCPU Handle.
3918 * @thread Any.
3919 */
3920REMR3DECL(void) REMR3NotifyInterruptClear(PVM pVM, PVMCPU pVCpu)
3921{
3922 LogFlow(("REMR3NotifyInterruptClear:\n"));
3923 if (pVM->rem.s.fInREM)
3924 cpu_reset_interrupt(cpu_single_env, CPU_INTERRUPT_HARD);
3925}
3926
3927
3928/**
3929 * Notification about pending timer(s).
3930 *
3931 * @param pVM VM Handle.
3932 * @param pVCpuDst The target cpu for this notification.
3933 * TM will not broadcast pending timer events, but use
3934 * a decidated EMT for them. So, only interrupt REM
3935 * execution if the given CPU is executing in REM.
3936 * @thread Any.
3937 */
3938REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst)
3939{
3940#ifndef DEBUG_bird
3941 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM));
3942#endif
3943 if (pVM->rem.s.fInREM)
3944 {
3945 if (pVM->rem.s.Env.pVCpu == pVCpuDst)
3946 {
3947 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: setting\n"));
3948 ASMAtomicOrS32((int32_t volatile *)&pVM->rem.s.Env.interrupt_request,
3949 CPU_INTERRUPT_EXTERNAL_TIMER);
3950 }
3951 else
3952 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: pVCpu:%p != pVCpuDst:%p\n", pVM->rem.s.Env.pVCpu, pVCpuDst));
3953 }
3954 else
3955 LogIt(LOG_INSTANCE, RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst)));
3956}
3957
3958
3959/**
3960 * Notification about pending DMA transfers.
3961 *
3962 * @param pVM VM Handle.
3963 * @thread Any.
3964 */
3965REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM)
3966{
3967 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM));
3968 if (pVM->rem.s.fInREM)
3969 {
3970 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3971 CPU_INTERRUPT_EXTERNAL_DMA);
3972 }
3973}
3974
3975
3976/**
3977 * Notification about pending timer(s).
3978 *
3979 * @param pVM VM Handle.
3980 * @thread Any.
3981 */
3982REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM)
3983{
3984 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM));
3985 if (pVM->rem.s.fInREM)
3986 {
3987 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
3988 CPU_INTERRUPT_EXTERNAL_EXIT);
3989 }
3990}
3991
3992
3993/**
3994 * Notification about pending FF set by an external thread.
3995 *
3996 * @param pVM VM handle.
3997 * @thread Any.
3998 */
3999REMR3DECL(void) REMR3NotifyFF(PVM pVM)
4000{
4001 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM));
4002 if (pVM->rem.s.fInREM)
4003 {
4004 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request,
4005 CPU_INTERRUPT_EXTERNAL_EXIT);
4006 }
4007}
4008
4009
4010#ifdef VBOX_WITH_STATISTICS
4011void remR3ProfileStart(int statcode)
4012{
4013 STAMPROFILEADV *pStat;
4014 switch(statcode)
4015 {
4016 case STATS_EMULATE_SINGLE_INSTR:
4017 pStat = &gStatExecuteSingleInstr;
4018 break;
4019 case STATS_QEMU_COMPILATION:
4020 pStat = &gStatCompilationQEmu;
4021 break;
4022 case STATS_QEMU_RUN_EMULATED_CODE:
4023 pStat = &gStatRunCodeQEmu;
4024 break;
4025 case STATS_QEMU_TOTAL:
4026 pStat = &gStatTotalTimeQEmu;
4027 break;
4028 case STATS_QEMU_RUN_TIMERS:
4029 pStat = &gStatTimers;
4030 break;
4031 case STATS_TLB_LOOKUP:
4032 pStat= &gStatTBLookup;
4033 break;
4034 case STATS_IRQ_HANDLING:
4035 pStat= &gStatIRQ;
4036 break;
4037 case STATS_RAW_CHECK:
4038 pStat = &gStatRawCheck;
4039 break;
4040
4041 default:
4042 AssertMsgFailed(("unknown stat %d\n", statcode));
4043 return;
4044 }
4045 STAM_PROFILE_ADV_START(pStat, a);
4046}
4047
4048
4049void remR3ProfileStop(int statcode)
4050{
4051 STAMPROFILEADV *pStat;
4052 switch(statcode)
4053 {
4054 case STATS_EMULATE_SINGLE_INSTR:
4055 pStat = &gStatExecuteSingleInstr;
4056 break;
4057 case STATS_QEMU_COMPILATION:
4058 pStat = &gStatCompilationQEmu;
4059 break;
4060 case STATS_QEMU_RUN_EMULATED_CODE:
4061 pStat = &gStatRunCodeQEmu;
4062 break;
4063 case STATS_QEMU_TOTAL:
4064 pStat = &gStatTotalTimeQEmu;
4065 break;
4066 case STATS_QEMU_RUN_TIMERS:
4067 pStat = &gStatTimers;
4068 break;
4069 case STATS_TLB_LOOKUP:
4070 pStat= &gStatTBLookup;
4071 break;
4072 case STATS_IRQ_HANDLING:
4073 pStat= &gStatIRQ;
4074 break;
4075 case STATS_RAW_CHECK:
4076 pStat = &gStatRawCheck;
4077 break;
4078 default:
4079 AssertMsgFailed(("unknown stat %d\n", statcode));
4080 return;
4081 }
4082 STAM_PROFILE_ADV_STOP(pStat, a);
4083}
4084#endif
4085
/**
 * Raise an RC, force rem exit.
 *
 * Stores the status code and interrupts the recompiler core with
 * CPU_INTERRUPT_RC so it bails out of the inner execution loop.
 *
 * @param pVM VM handle.
 * @param rc The rc.
 */
void remR3RaiseRC(PVM pVM, int rc)
{
    Log(("remR3RaiseRC: rc=%Rrc\n", rc));
    Assert(pVM->rem.s.fInREM);
    VM_ASSERT_EMT(pVM);
    /* Store the status first so it is in place when the interrupt is seen. */
    pVM->rem.s.rc = rc;
    cpu_interrupt(&pVM->rem.s.Env, CPU_INTERRUPT_RC);
}
4100
4101
4102/* -+- timers -+- */
4103
/** qemu callback: reads the virtual CPU's TSC via TM. */
uint64_t cpu_get_tsc(CPUX86State *env)
{
    STAM_COUNTER_INC(&gStatCpuGetTSC);
    return TMCpuTickGet(env->pVCpu);
}
4109
4110
4111/* -+- interrupts -+- */
4112
/** qemu callback: asserts ISA IRQ 13 (legacy FPU error / FERR# line); the status code is only logged. */
void cpu_set_ferr(CPUX86State *env)
{
    int rc = PDMIsaSetIrq(env->pVM, 13, 1);
    LogFlow(("cpu_set_ferr: rc=%d\n", rc)); NOREF(rc);
}
4118
/**
 * qemu callback: fetches the next hardware interrupt vector for the guest.
 *
 * Prefers the interrupt recorded by REMR3NotifyPendingInterrupt over asking
 * PDM (see the kludge note below).
 *
 * @returns The interrupt vector, or -1 if none could be obtained.
 * @param env The recompiler CPU state.
 */
int cpu_get_pic_interrupt(CPUState *env)
{
    uint8_t u8Interrupt;
    int rc;

    /* When we fail to forward interrupts directly in raw mode, we fall back to the recompiler.
     * In that case we can't call PDMGetInterrupt anymore, because it has already cleared the interrupt
     * with the (a)pic.
     */
    /** @note We assume we will go directly to the recompiler to handle the pending interrupt! */
    /** @todo r=bird: In the long run we should just do the interrupt handling in EM/CPUM/TRPM/somewhere and
     * if we cannot execute the interrupt handler in raw-mode just reschedule to REM. Once that is done we
     * remove this kludge. */
    if (env->pVM->rem.s.u32PendingInterrupt != REM_NO_PENDING_IRQ)
    {
        /* Consume the recorded interrupt and clear the slot. */
        rc = VINF_SUCCESS;
        Assert(env->pVM->rem.s.u32PendingInterrupt <= 255);
        u8Interrupt = env->pVM->rem.s.u32PendingInterrupt;
        env->pVM->rem.s.u32PendingInterrupt = REM_NO_PENDING_IRQ;
    }
    else
        rc = PDMGetInterrupt(env->pVCpu, &u8Interrupt);

    LogFlow(("cpu_get_pic_interrupt: u8Interrupt=%d rc=%Rrc\n", u8Interrupt, rc));
    if (RT_SUCCESS(rc))
    {
        /* More interrupts may be pending; keep the hard interrupt request raised. */
        if (VMCPU_FF_ISPENDING(env->pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
            env->interrupt_request |= CPU_INTERRUPT_HARD;
        return u8Interrupt;
    }
    return -1;
}
4151
4152
4153/* -+- local apic -+- */
4154
/** qemu callback: sets the guest APIC base MSR via PDM; the status code is only logged. */
void cpu_set_apic_base(CPUX86State *env, uint64_t val)
{
    int rc = PDMApicSetBase(env->pVM, val);
    LogFlow(("cpu_set_apic_base: val=%#llx rc=%Rrc\n", val, rc)); NOREF(rc);
}
4160
4161uint64_t cpu_get_apic_base(CPUX86State *env)
4162{
4163 uint64_t u64;
4164 int rc = PDMApicGetBase(env->pVM, &u64);
4165 if (RT_SUCCESS(rc))
4166 {
4167 LogFlow(("cpu_get_apic_base: returns %#llx \n", u64));
4168 return u64;
4169 }
4170 LogFlow(("cpu_get_apic_base: returns 0 (rc=%Rrc)\n", rc));
4171 return 0;
4172}
4173
/** qemu callback: sets the task priority register via PDM; the status code is only logged. */
void cpu_set_apic_tpr(CPUX86State *env, uint8_t val)
{
    int rc = PDMApicSetTPR(env->pVCpu, val << 4); /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
    LogFlow(("cpu_set_apic_tpr: val=%#x rc=%Rrc\n", val, rc)); NOREF(rc);
}
4179
4180uint8_t cpu_get_apic_tpr(CPUX86State *env)
4181{
4182 uint8_t u8;
4183 int rc = PDMApicGetTPR(env->pVCpu, &u8, NULL);
4184 if (RT_SUCCESS(rc))
4185 {
4186 LogFlow(("cpu_get_apic_tpr: returns %#x\n", u8));
4187 return u8 >> 4; /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
4188 }
4189 LogFlow(("cpu_get_apic_tpr: returns 0 (rc=%Rrc)\n", rc));
4190 return 0;
4191}
4192
4193
4194uint64_t cpu_apic_rdmsr(CPUX86State *env, uint32_t reg)
4195{
4196 uint64_t value;
4197 int rc = PDMApicReadMSR(env->pVM, 0/* cpu */, reg, &value);
4198 if (RT_SUCCESS(rc))
4199 {
4200 LogFlow(("cpu_apic_rdms returns %#x\n", value));
4201 return value;
4202 }
4203 /** @todo: exception ? */
4204 LogFlow(("cpu_apic_rdms returns 0 (rc=%Rrc)\n", rc));
4205 return value;
4206}
4207
/** qemu callback: writes an APIC MSR via PDM; errors are only logged.
 *  NOTE(review): hard-coded APIC id 0 — confirm SMP handling. */
void cpu_apic_wrmsr(CPUX86State *env, uint32_t reg, uint64_t value)
{
    int rc = PDMApicWriteMSR(env->pVM, 0 /* cpu */, reg, value);
    /** @todo: exception if error ? */
    LogFlow(("cpu_apic_wrmsr: rc=%Rrc\n", rc)); NOREF(rc);
}
4214
/** qemu callback: reads a guest MSR via CPUM. */
uint64_t cpu_rdmsr(CPUX86State *env, uint32_t msr)
{
    Assert(env->pVCpu);
    return CPUMGetGuestMsr(env->pVCpu, msr);
}
4220
/** qemu callback: writes a guest MSR via CPUM. */
void cpu_wrmsr(CPUX86State *env, uint32_t msr, uint64_t val)
{
    Assert(env->pVCpu);
    CPUMSetGuestMsr(env->pVCpu, msr, val);
}
4226
4227/* -+- I/O Ports -+- */
4228
4229#undef LOG_GROUP
4230#define LOG_GROUP LOG_GROUP_REM_IOPORT
4231
4232void cpu_outb(CPUState *env, int addr, int val)
4233{
4234 int rc;
4235
4236 if (addr != 0x80 && addr != 0x70 && addr != 0x61)
4237 Log2(("cpu_outb: addr=%#06x val=%#x\n", addr, val));
4238
4239 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 1);
4240 if (RT_LIKELY(rc == VINF_SUCCESS))
4241 return;
4242 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4243 {
4244 Log(("cpu_outb: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4245 remR3RaiseRC(env->pVM, rc);
4246 return;
4247 }
4248 remAbort(rc, __FUNCTION__);
4249}
4250
4251void cpu_outw(CPUState *env, int addr, int val)
4252{
4253 //Log2(("cpu_outw: addr=%#06x val=%#x\n", addr, val));
4254 int rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 2);
4255 if (RT_LIKELY(rc == VINF_SUCCESS))
4256 return;
4257 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4258 {
4259 Log(("cpu_outw: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4260 remR3RaiseRC(env->pVM, rc);
4261 return;
4262 }
4263 remAbort(rc, __FUNCTION__);
4264}
4265
4266void cpu_outl(CPUState *env, int addr, int val)
4267{
4268 int rc;
4269 Log2(("cpu_outl: addr=%#06x val=%#x\n", addr, val));
4270 rc = IOMIOPortWrite(env->pVM, (RTIOPORT)addr, val, 4);
4271 if (RT_LIKELY(rc == VINF_SUCCESS))
4272 return;
4273 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4274 {
4275 Log(("cpu_outl: addr=%#06x val=%#x -> %Rrc\n", addr, val, rc));
4276 remR3RaiseRC(env->pVM, rc);
4277 return;
4278 }
4279 remAbort(rc, __FUNCTION__);
4280}
4281
4282int cpu_inb(CPUState *env, int addr)
4283{
4284 uint32_t u32 = 0;
4285 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 1);
4286 if (RT_LIKELY(rc == VINF_SUCCESS))
4287 {
4288 if (/*addr != 0x61 && */addr != 0x71)
4289 Log2(("cpu_inb: addr=%#06x -> %#x\n", addr, u32));
4290 return (int)u32;
4291 }
4292 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4293 {
4294 Log(("cpu_inb: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4295 remR3RaiseRC(env->pVM, rc);
4296 return (int)u32;
4297 }
4298 remAbort(rc, __FUNCTION__);
4299 return 0xff;
4300}
4301
4302int cpu_inw(CPUState *env, int addr)
4303{
4304 uint32_t u32 = 0;
4305 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 2);
4306 if (RT_LIKELY(rc == VINF_SUCCESS))
4307 {
4308 Log2(("cpu_inw: addr=%#06x -> %#x\n", addr, u32));
4309 return (int)u32;
4310 }
4311 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4312 {
4313 Log(("cpu_inw: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4314 remR3RaiseRC(env->pVM, rc);
4315 return (int)u32;
4316 }
4317 remAbort(rc, __FUNCTION__);
4318 return 0xffff;
4319}
4320
4321int cpu_inl(CPUState *env, int addr)
4322{
4323 uint32_t u32 = 0;
4324 int rc = IOMIOPortRead(env->pVM, (RTIOPORT)addr, &u32, 4);
4325 if (RT_LIKELY(rc == VINF_SUCCESS))
4326 {
4327//if (addr==0x01f0 && u32 == 0x6b6d)
4328// loglevel = ~0;
4329 Log2(("cpu_inl: addr=%#06x -> %#x\n", addr, u32));
4330 return (int)u32;
4331 }
4332 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
4333 {
4334 Log(("cpu_inl: addr=%#06x -> %#x rc=%Rrc\n", addr, u32, rc));
4335 remR3RaiseRC(env->pVM, rc);
4336 return (int)u32;
4337 }
4338 remAbort(rc, __FUNCTION__);
4339 return 0xffffffff;
4340}
4341
4342#undef LOG_GROUP
4343#define LOG_GROUP LOG_GROUP_REM
4344
4345
4346/* -+- helpers and misc other interfaces -+- */
4347
/**
 * Perform the CPUID instruction.
 *
 * Thin forwarder to CPUMGetGuestCpuId so callers get the guest-visible
 * (possibly masked) CPUID leaves rather than the host's.
 *
 * ASMCpuId cannot be invoked from some source files where this is used because of global
 * register allocations.
 *
 * @param env Pointer to the recompiler CPU structure.
 * @param uOperator CPUID operation (eax).
 * @param pvEAX Where to store eax.
 * @param pvEBX Where to store ebx.
 * @param pvECX Where to store ecx.
 * @param pvEDX Where to store edx.
 */
void remR3CpuId(CPUState *env, unsigned uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
    CPUMGetGuestCpuId(env->pVCpu, uOperator, (uint32_t *)pvEAX, (uint32_t *)pvEBX, (uint32_t *)pvECX, (uint32_t *)pvEDX);
}
4365
4366
#if 0 /* not used */
/**
 * Interface for qemu hardware to report back fatal errors.
 *
 * (Compiled out; kept for reference. Logs the message, syncs the REM state
 * back if inside REM, then raises a fatal error via EM.)
 */
void hw_error(const char *pszFormat, ...)
{
    /*
     * Bitch about it.
     */
    /** @todo Add support for nested arg lists in the LogPrintfV routine! I've code for
     * this in my Odin32 tree at home! */
    va_list args;
    va_start(args, pszFormat);
    RTLogPrintf("fatal error in virtual hardware:");
    RTLogPrintfV(pszFormat, args);
    va_end(args);
    AssertReleaseMsgFailed(("fatal error in virtual hardware: %s\n", pszFormat));

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    PVM pVM = cpu_single_env->pVM;
    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM);
    EMR3FatalError(pVM, VERR_REM_VIRTUAL_HARDWARE_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
#endif
4396
/**
 * Interface for the qemu cpu to report unhandled situation
 * raising a fatal VM error.
 *
 * Formats the message, logs it, syncs the REM state back to VBox if we are
 * inside REM, then raises VERR_REM_VIRTUAL_CPU_ERROR via EM. Does not return.
 *
 * @param env The recompiler CPU state (unused; cpu_single_env is used instead).
 * @param pszFormat printf-style message format.
 */
void cpu_abort(CPUState *env, const char *pszFormat, ...)
{
    va_list va;
    PVM pVM;
    PVMCPU pVCpu;
    char szMsg[256];

    /*
     * Bitch about it.
     */
    RTLogFlags(NULL, "nodisabled nobuffered");
    RTLogFlush(NULL);

    va_start(va, pszFormat);
#if defined(RT_OS_WINDOWS) && ARCH_BITS == 64
    /* It's a bit complicated when mixing MSC and GCC on AMD64. This is a bit ugly, but it works. */
    /* NOTE(review): assumes each '%' consumes exactly one pointer-sized argument —
       holds for the simple formats used by the qemu callers, but is not general. */
    unsigned cArgs = 0;
    uintptr_t auArgs[6] = {0,0,0,0,0,0};
    const char *psz = strchr(pszFormat, '%');
    while (psz && cArgs < 6)
    {
        auArgs[cArgs++] = va_arg(va, uintptr_t);
        psz = strchr(psz + 1, '%');
    }
    switch (cArgs)
    {
        case 1: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0]); break;
        case 2: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1]); break;
        case 3: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2]); break;
        case 4: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3]); break;
        case 5: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4]); break;
        case 6: RTStrPrintf(szMsg, sizeof(szMsg), pszFormat, auArgs[0], auArgs[1], auArgs[2], auArgs[3], auArgs[4], auArgs[5]); break;
        default:
        case 0: RTStrPrintf(szMsg, sizeof(szMsg), "%s", pszFormat); break;
    }
#else
    RTStrPrintfV(szMsg, sizeof(szMsg), pszFormat, va);
#endif
    va_end(va);

    RTLogPrintf("fatal error in recompiler cpu: %s\n", szMsg);
    RTLogRelPrintf("fatal error in recompiler cpu: %s\n", szMsg);

    /*
     * If we're in REM context we'll sync back the state before 'jumping' to
     * the EMs failure handling.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);
    EMR3FatalError(pVCpu, VERR_REM_VIRTUAL_CPU_ERROR);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4457
4458
/**
 * Aborts the VM.
 *
 * Logs the failure, syncs the REM state back to VBox if inside REM, then
 * raises a fatal error via EM. Does not return.
 *
 * @param rc VBox error code.
 * @param pszTip Hint about why/when this happend.
 */
void remAbort(int rc, const char *pszTip)
{
    PVM pVM;
    PVMCPU pVCpu;

    /*
     * Bitch about it.
     */
    RTLogPrintf("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip);
    AssertReleaseMsgFailed(("internal REM fatal error: rc=%Rrc %s\n", rc, pszTip));

    /*
     * Jump back to where we entered the recompiler.
     */
    pVM = cpu_single_env->pVM;
    pVCpu = cpu_single_env->pVCpu;
    Assert(pVCpu);

    if (pVM->rem.s.fInREM)
        REMR3StateBack(pVM, pVCpu);

    EMR3FatalError(pVCpu, rc);
    AssertMsgFailed(("EMR3FatalError returned!\n"));
}
4489
4490
/**
 * Dumps a linux system call.
 *
 * Logs the syscall name (looked up in a static 32-bit x86 Linux syscall
 * table by the eax value) together with the registers carrying its
 * arguments. Unknown numbers are logged as such.
 *
 * @param pVCpu VMCPU handle.
 */
void remR3DumpLnxSyscall(PVMCPU pVCpu)
{
    /* i386 Linux syscall numbers -> kernel handler names. */
    static const char *apsz[] =
    {
        "sys_restart_syscall",  /* 0 - old "setup()" system call, used for restarting */
        "sys_exit",
        "sys_fork",
        "sys_read",
        "sys_write",
        "sys_open",             /* 5 */
        "sys_close",
        "sys_waitpid",
        "sys_creat",
        "sys_link",
        "sys_unlink",           /* 10 */
        "sys_execve",
        "sys_chdir",
        "sys_time",
        "sys_mknod",
        "sys_chmod",            /* 15 */
        "sys_lchown16",
        "sys_ni_syscall",       /* old break syscall holder */
        "sys_stat",
        "sys_lseek",
        "sys_getpid",           /* 20 */
        "sys_mount",
        "sys_oldumount",
        "sys_setuid16",
        "sys_getuid16",
        "sys_stime",            /* 25 */
        "sys_ptrace",
        "sys_alarm",
        "sys_fstat",
        "sys_pause",
        "sys_utime",            /* 30 */
        "sys_ni_syscall",       /* old stty syscall holder */
        "sys_ni_syscall",       /* old gtty syscall holder */
        "sys_access",
        "sys_nice",
        "sys_ni_syscall",       /* 35 - old ftime syscall holder */
        "sys_sync",
        "sys_kill",
        "sys_rename",
        "sys_mkdir",
        "sys_rmdir",            /* 40 */
        "sys_dup",
        "sys_pipe",
        "sys_times",
        "sys_ni_syscall",       /* old prof syscall holder */
        "sys_brk",              /* 45 */
        "sys_setgid16",
        "sys_getgid16",
        "sys_signal",
        "sys_geteuid16",
        "sys_getegid16",        /* 50 */
        "sys_acct",
        "sys_umount",           /* recycled never used phys() */
        "sys_ni_syscall",       /* old lock syscall holder */
        "sys_ioctl",
        "sys_fcntl",            /* 55 */
        "sys_ni_syscall",       /* old mpx syscall holder */
        "sys_setpgid",
        "sys_ni_syscall",       /* old ulimit syscall holder */
        "sys_olduname",
        "sys_umask",            /* 60 */
        "sys_chroot",
        "sys_ustat",
        "sys_dup2",
        "sys_getppid",
        "sys_getpgrp",          /* 65 */
        "sys_setsid",
        "sys_sigaction",
        "sys_sgetmask",
        "sys_ssetmask",
        "sys_setreuid16",       /* 70 */
        "sys_setregid16",
        "sys_sigsuspend",
        "sys_sigpending",
        "sys_sethostname",
        "sys_setrlimit",        /* 75 */
        "sys_old_getrlimit",
        "sys_getrusage",
        "sys_gettimeofday",
        "sys_settimeofday",
        "sys_getgroups16",      /* 80 */
        "sys_setgroups16",
        "old_select",
        "sys_symlink",
        "sys_lstat",
        "sys_readlink",         /* 85 */
        "sys_uselib",
        "sys_swapon",
        "sys_reboot",
        "old_readdir",
        "old_mmap",             /* 90 */
        "sys_munmap",
        "sys_truncate",
        "sys_ftruncate",
        "sys_fchmod",
        "sys_fchown16",         /* 95 */
        "sys_getpriority",
        "sys_setpriority",
        "sys_ni_syscall",       /* old profil syscall holder */
        "sys_statfs",
        "sys_fstatfs",          /* 100 */
        "sys_ioperm",
        "sys_socketcall",
        "sys_syslog",
        "sys_setitimer",
        "sys_getitimer",        /* 105 */
        "sys_newstat",
        "sys_newlstat",
        "sys_newfstat",
        "sys_uname",
        "sys_iopl",             /* 110 */
        "sys_vhangup",
        "sys_ni_syscall",       /* old "idle" system call */
        "sys_vm86old",
        "sys_wait4",
        "sys_swapoff",          /* 115 */
        "sys_sysinfo",
        "sys_ipc",
        "sys_fsync",
        "sys_sigreturn",
        "sys_clone",            /* 120 */
        "sys_setdomainname",
        "sys_newuname",
        "sys_modify_ldt",
        "sys_adjtimex",
        "sys_mprotect",         /* 125 */
        "sys_sigprocmask",
        "sys_ni_syscall",       /* old "create_module" */
        "sys_init_module",
        "sys_delete_module",
        "sys_ni_syscall",       /* 130: old "get_kernel_syms" */
        "sys_quotactl",
        "sys_getpgid",
        "sys_fchdir",
        "sys_bdflush",
        "sys_sysfs",            /* 135 */
        "sys_personality",
        "sys_ni_syscall",       /* reserved for afs_syscall */
        "sys_setfsuid16",
        "sys_setfsgid16",
        "sys_llseek",           /* 140 */
        "sys_getdents",
        "sys_select",
        "sys_flock",
        "sys_msync",
        "sys_readv",            /* 145 */
        "sys_writev",
        "sys_getsid",
        "sys_fdatasync",
        "sys_sysctl",
        "sys_mlock",            /* 150 */
        "sys_munlock",
        "sys_mlockall",
        "sys_munlockall",
        "sys_sched_setparam",
        "sys_sched_getparam",   /* 155 */
        "sys_sched_setscheduler",
        "sys_sched_getscheduler",
        "sys_sched_yield",
        "sys_sched_get_priority_max",
        "sys_sched_get_priority_min",  /* 160 */
        "sys_sched_rr_get_interval",
        "sys_nanosleep",
        "sys_mremap",
        "sys_setresuid16",
        "sys_getresuid16",      /* 165 */
        "sys_vm86",
        "sys_ni_syscall",       /* Old sys_query_module */
        "sys_poll",
        "sys_nfsservctl",
        "sys_setresgid16",      /* 170 */
        "sys_getresgid16",
        "sys_prctl",
        "sys_rt_sigreturn",
        "sys_rt_sigaction",
        "sys_rt_sigprocmask",   /* 175 */
        "sys_rt_sigpending",
        "sys_rt_sigtimedwait",
        "sys_rt_sigqueueinfo",
        "sys_rt_sigsuspend",
        "sys_pread64",          /* 180 */
        "sys_pwrite64",
        "sys_chown16",
        "sys_getcwd",
        "sys_capget",
        "sys_capset",           /* 185 */
        "sys_sigaltstack",
        "sys_sendfile",
        "sys_ni_syscall",       /* reserved for streams1 */
        "sys_ni_syscall",       /* reserved for streams2 */
        "sys_vfork",            /* 190 */
        "sys_getrlimit",
        "sys_mmap2",
        "sys_truncate64",
        "sys_ftruncate64",
        "sys_stat64",           /* 195 */
        "sys_lstat64",
        "sys_fstat64",
        "sys_lchown",
        "sys_getuid",
        "sys_getgid",           /* 200 */
        "sys_geteuid",
        "sys_getegid",
        "sys_setreuid",
        "sys_setregid",
        "sys_getgroups",        /* 205 */
        "sys_setgroups",
        "sys_fchown",
        "sys_setresuid",
        "sys_getresuid",
        "sys_setresgid",        /* 210 */
        "sys_getresgid",
        "sys_chown",
        "sys_setuid",
        "sys_setgid",
        "sys_setfsuid",         /* 215 */
        "sys_setfsgid",
        "sys_pivot_root",
        "sys_mincore",
        "sys_madvise",
        "sys_getdents64",       /* 220 */
        "sys_fcntl64",
        "sys_ni_syscall",       /* reserved for TUX */
        "sys_ni_syscall",
        "sys_gettid",
        "sys_readahead",        /* 225 */
        "sys_setxattr",
        "sys_lsetxattr",
        "sys_fsetxattr",
        "sys_getxattr",
        "sys_lgetxattr",        /* 230 */
        "sys_fgetxattr",
        "sys_listxattr",
        "sys_llistxattr",
        "sys_flistxattr",
        "sys_removexattr",      /* 235 */
        "sys_lremovexattr",
        "sys_fremovexattr",
        "sys_tkill",
        "sys_sendfile64",
        "sys_futex",            /* 240 */
        "sys_sched_setaffinity",
        "sys_sched_getaffinity",
        "sys_set_thread_area",
        "sys_get_thread_area",
        "sys_io_setup",         /* 245 */
        "sys_io_destroy",
        "sys_io_getevents",
        "sys_io_submit",
        "sys_io_cancel",
        "sys_fadvise64",        /* 250 */
        "sys_ni_syscall",
        "sys_exit_group",
        "sys_lookup_dcookie",
        "sys_epoll_create",
        "sys_epoll_ctl",        /* 255 */
        "sys_epoll_wait",
        "sys_remap_file_pages",
        "sys_set_tid_address",
        "sys_timer_create",
        "sys_timer_settime",    /* 260 */
        "sys_timer_gettime",
        "sys_timer_getoverrun",
        "sys_timer_delete",
        "sys_clock_settime",
        "sys_clock_gettime",    /* 265 */
        "sys_clock_getres",
        "sys_clock_nanosleep",
        "sys_statfs64",
        "sys_fstatfs64",
        "sys_tgkill",           /* 270 */
        "sys_utimes",
        "sys_fadvise64_64",
        "sys_ni_syscall"        /* sys_vserver */
    };

    uint32_t uEAX = CPUMGetGuestEAX(pVCpu);
    /* NOTE(review): the switch only has a default case — presumably a hook
       point so individual syscalls can get dedicated dumping later. */
    switch (uEAX)
    {
        default:
            if (uEAX < RT_ELEMENTS(apsz))
                Log(("REM: linux syscall %3d: %s (eip=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x ebp=%08x)\n",
                     uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), CPUMGetGuestEBX(pVCpu), CPUMGetGuestECX(pVCpu),
                     CPUMGetGuestEDX(pVCpu), CPUMGetGuestESI(pVCpu), CPUMGetGuestEDI(pVCpu), CPUMGetGuestEBP(pVCpu)));
            else
                Log(("eip=%08x: linux syscall %d (#%x) unknown\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX));
            break;

    }
}
4789
4790
4791/**
4792 * Dumps an OpenBSD system call.
4793 * @param pVCpu VMCPU handle.
4794 */
4795void remR3DumpOBsdSyscall(PVMCPU pVCpu)
4796{
4797 static const char *apsz[] =
4798 {
4799 "SYS_syscall", //0
4800 "SYS_exit", //1
4801 "SYS_fork", //2
4802 "SYS_read", //3
4803 "SYS_write", //4
4804 "SYS_open", //5
4805 "SYS_close", //6
4806 "SYS_wait4", //7
4807 "SYS_8",
4808 "SYS_link", //9
4809 "SYS_unlink", //10
4810 "SYS_11",
4811 "SYS_chdir", //12
4812 "SYS_fchdir", //13
4813 "SYS_mknod", //14
4814 "SYS_chmod", //15
4815 "SYS_chown", //16
4816 "SYS_break", //17
4817 "SYS_18",
4818 "SYS_19",
4819 "SYS_getpid", //20
4820 "SYS_mount", //21
4821 "SYS_unmount", //22
4822 "SYS_setuid", //23
4823 "SYS_getuid", //24
4824 "SYS_geteuid", //25
4825 "SYS_ptrace", //26
4826 "SYS_recvmsg", //27
4827 "SYS_sendmsg", //28
4828 "SYS_recvfrom", //29
4829 "SYS_accept", //30
4830 "SYS_getpeername", //31
4831 "SYS_getsockname", //32
4832 "SYS_access", //33
4833 "SYS_chflags", //34
4834 "SYS_fchflags", //35
4835 "SYS_sync", //36
4836 "SYS_kill", //37
4837 "SYS_38",
4838 "SYS_getppid", //39
4839 "SYS_40",
4840 "SYS_dup", //41
4841 "SYS_opipe", //42
4842 "SYS_getegid", //43
4843 "SYS_profil", //44
4844 "SYS_ktrace", //45
4845 "SYS_sigaction", //46
4846 "SYS_getgid", //47
4847 "SYS_sigprocmask", //48
4848 "SYS_getlogin", //49
4849 "SYS_setlogin", //50
4850 "SYS_acct", //51
4851 "SYS_sigpending", //52
4852 "SYS_osigaltstack", //53
4853 "SYS_ioctl", //54
4854 "SYS_reboot", //55
4855 "SYS_revoke", //56
4856 "SYS_symlink", //57
4857 "SYS_readlink", //58
4858 "SYS_execve", //59
4859 "SYS_umask", //60
4860 "SYS_chroot", //61
4861 "SYS_62",
4862 "SYS_63",
4863 "SYS_64",
4864 "SYS_65",
4865 "SYS_vfork", //66
4866 "SYS_67",
4867 "SYS_68",
4868 "SYS_sbrk", //69
4869 "SYS_sstk", //70
4870 "SYS_61",
4871 "SYS_vadvise", //72
4872 "SYS_munmap", //73
4873 "SYS_mprotect", //74
4874 "SYS_madvise", //75
4875 "SYS_76",
4876 "SYS_77",
4877 "SYS_mincore", //78
4878 "SYS_getgroups", //79
4879 "SYS_setgroups", //80
4880 "SYS_getpgrp", //81
4881 "SYS_setpgid", //82
4882 "SYS_setitimer", //83
4883 "SYS_84",
4884 "SYS_85",
4885 "SYS_getitimer", //86
4886 "SYS_87",
4887 "SYS_88",
4888 "SYS_89",
4889 "SYS_dup2", //90
4890 "SYS_91",
4891 "SYS_fcntl", //92
4892 "SYS_select", //93
4893 "SYS_94",
4894 "SYS_fsync", //95
4895 "SYS_setpriority", //96
4896 "SYS_socket", //97
4897 "SYS_connect", //98
4898 "SYS_99",
4899 "SYS_getpriority", //100
4900 "SYS_101",
4901 "SYS_102",
4902 "SYS_sigreturn", //103
4903 "SYS_bind", //104
4904 "SYS_setsockopt", //105
4905 "SYS_listen", //106
4906 "SYS_107",
4907 "SYS_108",
4908 "SYS_109",
4909 "SYS_110",
4910 "SYS_sigsuspend", //111
4911 "SYS_112",
4912 "SYS_113",
4913 "SYS_114",
4914 "SYS_115",
4915 "SYS_gettimeofday", //116
4916 "SYS_getrusage", //117
4917 "SYS_getsockopt", //118
4918 "SYS_119",
4919 "SYS_readv", //120
4920 "SYS_writev", //121
4921 "SYS_settimeofday", //122
4922 "SYS_fchown", //123
4923 "SYS_fchmod", //124
4924 "SYS_125",
4925 "SYS_setreuid", //126
4926 "SYS_setregid", //127
4927 "SYS_rename", //128
4928 "SYS_129",
4929 "SYS_130",
4930 "SYS_flock", //131
4931 "SYS_mkfifo", //132
4932 "SYS_sendto", //133
4933 "SYS_shutdown", //134
4934 "SYS_socketpair", //135
4935 "SYS_mkdir", //136
4936 "SYS_rmdir", //137
4937 "SYS_utimes", //138
4938 "SYS_139",
4939 "SYS_adjtime", //140
4940 "SYS_141",
4941 "SYS_142",
4942 "SYS_143",
4943 "SYS_144",
4944 "SYS_145",
4945 "SYS_146",
4946 "SYS_setsid", //147
4947 "SYS_quotactl", //148
4948 "SYS_149",
4949 "SYS_150",
4950 "SYS_151",
4951 "SYS_152",
4952 "SYS_153",
4953 "SYS_154",
4954 "SYS_nfssvc", //155
4955 "SYS_156",
4956 "SYS_157",
4957 "SYS_158",
4958 "SYS_159",
4959 "SYS_160",
4960 "SYS_getfh", //161
4961 "SYS_162",
4962 "SYS_163",
4963 "SYS_164",
4964 "SYS_sysarch", //165
4965 "SYS_166",
4966 "SYS_167",
4967 "SYS_168",
4968 "SYS_169",
4969 "SYS_170",
4970 "SYS_171",
4971 "SYS_172",
4972 "SYS_pread", //173
4973 "SYS_pwrite", //174
4974 "SYS_175",
4975 "SYS_176",
4976 "SYS_177",
4977 "SYS_178",
4978 "SYS_179",
4979 "SYS_180",
4980 "SYS_setgid", //181
4981 "SYS_setegid", //182
4982 "SYS_seteuid", //183
4983 "SYS_lfs_bmapv", //184
4984 "SYS_lfs_markv", //185
4985 "SYS_lfs_segclean", //186
4986 "SYS_lfs_segwait", //187
4987 "SYS_188",
4988 "SYS_189",
4989 "SYS_190",
4990 "SYS_pathconf", //191
4991 "SYS_fpathconf", //192
4992 "SYS_swapctl", //193
4993 "SYS_getrlimit", //194
4994 "SYS_setrlimit", //195
4995 "SYS_getdirentries", //196
4996 "SYS_mmap", //197
4997 "SYS___syscall", //198
4998 "SYS_lseek", //199
4999 "SYS_truncate", //200
5000 "SYS_ftruncate", //201
5001 "SYS___sysctl", //202
5002 "SYS_mlock", //203
5003 "SYS_munlock", //204
5004 "SYS_205",
5005 "SYS_futimes", //206
5006 "SYS_getpgid", //207
5007 "SYS_xfspioctl", //208
5008 "SYS_209",
5009 "SYS_210",
5010 "SYS_211",
5011 "SYS_212",
5012 "SYS_213",
5013 "SYS_214",
5014 "SYS_215",
5015 "SYS_216",
5016 "SYS_217",
5017 "SYS_218",
5018 "SYS_219",
5019 "SYS_220",
5020 "SYS_semget", //221
5021 "SYS_222",
5022 "SYS_223",
5023 "SYS_224",
5024 "SYS_msgget", //225
5025 "SYS_msgsnd", //226
5026 "SYS_msgrcv", //227
5027 "SYS_shmat", //228
5028 "SYS_229",
5029 "SYS_shmdt", //230
5030 "SYS_231",
5031 "SYS_clock_gettime", //232
5032 "SYS_clock_settime", //233
5033 "SYS_clock_getres", //234
5034 "SYS_235",
5035 "SYS_236",
5036 "SYS_237",
5037 "SYS_238",
5038 "SYS_239",
5039 "SYS_nanosleep", //240
5040 "SYS_241",
5041 "SYS_242",
5042 "SYS_243",
5043 "SYS_244",
5044 "SYS_245",
5045 "SYS_246",
5046 "SYS_247",
5047 "SYS_248",
5048 "SYS_249",
5049 "SYS_minherit", //250
5050 "SYS_rfork", //251
5051 "SYS_poll", //252
5052 "SYS_issetugid", //253
5053 "SYS_lchown", //254
5054 "SYS_getsid", //255
5055 "SYS_msync", //256
5056 "SYS_257",
5057 "SYS_258",
5058 "SYS_259",
5059 "SYS_getfsstat", //260
5060 "SYS_statfs", //261
5061 "SYS_fstatfs", //262
5062 "SYS_pipe", //263
5063 "SYS_fhopen", //264
5064 "SYS_265",
5065 "SYS_fhstatfs", //266
5066 "SYS_preadv", //267
5067 "SYS_pwritev", //268
5068 "SYS_kqueue", //269
5069 "SYS_kevent", //270
5070 "SYS_mlockall", //271
5071 "SYS_munlockall", //272
5072 "SYS_getpeereid", //273
5073 "SYS_274",
5074 "SYS_275",
5075 "SYS_276",
5076 "SYS_277",
5077 "SYS_278",
5078 "SYS_279",
5079 "SYS_280",
5080 "SYS_getresuid", //281
5081 "SYS_setresuid", //282
5082 "SYS_getresgid", //283
5083 "SYS_setresgid", //284
5084 "SYS_285",
5085 "SYS_mquery", //286
5086 "SYS_closefrom", //287
5087 "SYS_sigaltstack", //288
5088 "SYS_shmget", //289
5089 "SYS_semop", //290
5090 "SYS_stat", //291
5091 "SYS_fstat", //292
5092 "SYS_lstat", //293
5093 "SYS_fhstat", //294
5094 "SYS___semctl", //295
5095 "SYS_shmctl", //296
5096 "SYS_msgctl", //297
5097 "SYS_MAXSYSCALL", //298
5098 //299
5099 //300
5100 };
5101 uint32_t uEAX;
5102 if (!LogIsEnabled())
5103 return;
5104 uEAX = CPUMGetGuestEAX(pVCpu);
5105 switch (uEAX)
5106 {
5107 default:
5108 if (uEAX < RT_ELEMENTS(apsz))
5109 {
5110 uint32_t au32Args[8] = {0};
5111 PGMPhysSimpleReadGCPtr(pVCpu, au32Args, CPUMGetGuestESP(pVCpu), sizeof(au32Args));
5112 RTLogPrintf("REM: OpenBSD syscall %3d: %s (eip=%08x %08x %08x %08x %08x %08x %08x %08x %08x)\n",
5113 uEAX, apsz[uEAX], CPUMGetGuestEIP(pVCpu), au32Args[0], au32Args[1], au32Args[2], au32Args[3],
5114 au32Args[4], au32Args[5], au32Args[6], au32Args[7]);
5115 }
5116 else
5117 RTLogPrintf("eip=%08x: OpenBSD syscall %d (#%x) unknown!!\n", CPUMGetGuestEIP(pVCpu), uEAX, uEAX);
5118 break;
5119 }
5120}
5121
5122
5123#if defined(IPRT_NO_CRT) && defined(RT_OS_WINDOWS) && defined(RT_ARCH_X86)
5124/**
5125 * The Dll main entry point (stub).
5126 */
5127bool __stdcall _DllMainCRTStartup(void *hModule, uint32_t dwReason, void *pvReserved)
5128{
5129 return true;
5130}
5131
/**
 * Minimal memcpy replacement for the no-CRT x86 Windows build.
 *
 * Copies @a size bytes from @a src to @a dst one byte at a time; per the
 * standard memcpy contract the regions must not overlap.
 *
 * @returns dst.
 * @param   dst     Where to copy to.
 * @param   src     Where to copy from.
 * @param   size    Number of bytes to copy.
 */
void *memcpy(void *dst, const void *src, size_t size)
{
    uint8_t       *pbDst = (uint8_t *)dst;
    /* Keep the source pointer const-qualified; the original assigned the
       'const void *' parameter to a plain 'uint8_t *', discarding the
       qualifier (a constraint violation that draws a compiler diagnostic). */
    const uint8_t *pbSrc = (const uint8_t *)src;
    while (size-- > 0)
        *pbDst++ = *pbSrc++;
    return dst;
}
5139
5140#endif
5141
/**
 * QEMU core callback, presumably invoked when the CPU's System Management
 * Mode state changes (NOTE(review): inferred from the name — confirm against
 * the QEMU sources).  The VBox recompiler glue has nothing to update here,
 * so this is a deliberate no-op stub that only satisfies the linker.
 *
 * @param   env     The CPU state.
 */
void cpu_smm_update(CPUState *env)
{
    /* Intentionally empty. */
}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette