VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@62290

Last change on this file since 62290 was 62290, checked in by vboxsync, 9 years ago

IEM: a few more lines of code tlb code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 566.8 KB
1/* $Id: IEMAll.cpp 62290 2016-07-16 13:34:27Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted exeuction manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84//#define IEM_WITH_CODE_TLB - work in progress
85
86
87/*********************************************************************************************************************************
88* Header Files *
89*********************************************************************************************************************************/
90#define LOG_GROUP LOG_GROUP_IEM
91#define VMCPU_INCL_CPUM_GST_CTX
92#include <VBox/vmm/iem.h>
93#include <VBox/vmm/cpum.h>
94#include <VBox/vmm/pdm.h>
95#include <VBox/vmm/pgm.h>
96#include <internal/pgm.h>
97#include <VBox/vmm/iom.h>
98#include <VBox/vmm/em.h>
99#include <VBox/vmm/hm.h>
100#include <VBox/vmm/tm.h>
101#include <VBox/vmm/dbgf.h>
102#include <VBox/vmm/dbgftrace.h>
103#ifdef VBOX_WITH_RAW_MODE_NOT_R0
104# include <VBox/vmm/patm.h>
105# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
106# include <VBox/vmm/csam.h>
107# endif
108#endif
109#include "IEMInternal.h"
110#ifdef IEM_VERIFICATION_MODE_FULL
111# include <VBox/vmm/rem.h>
112# include <VBox/vmm/mm.h>
113#endif
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211
212/*********************************************************************************************************************************
213* Defined Constants And Macros *
214*********************************************************************************************************************************/
215/** @def IEM_WITH_SETJMP
216 * Enables alternative status code handling using setjmps.
217 *
218 * This adds a bit of expense via the setjmp() call since it saves all the
219 * non-volatile registers. However, it eliminates return code checks and allows
220 * for more optimal return value passing (return regs instead of stack buffer).
221 */
222#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
223# define IEM_WITH_SETJMP
224#endif
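/* Note: when IEM_WITH_SETJMP is active, the raise helpers suffixed 'Jmp'
   (e.g. iemRaisePageFaultJmp declared further down) do not return a
   VBOXSTRICTRC; they longjmp to pVCpu->iem.s.pJmpBuf with the status code,
   which is what saves the per-call return code checks mentioned above. */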
225
226/** Temporary hack to disable the double execution. Will be removed in favor
227 * of a dedicated execution mode in EM. */
228//#define IEM_VERIFICATION_MODE_NO_REM
229
230/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
231 * due to GCC lacking knowledge about the value range of a switch. */
232#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
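/* Illustrative use (hypothetical switch, not taken from this file): placed as
   the default case of a switch that is known to be exhaustive, e.g.
       switch (pVCpu->iem.s.enmEffOpSize)
       {
           case IEMMODE_16BIT: cbValue = 2; break;
           case IEMMODE_32BIT: cbValue = 4; break;
           case IEMMODE_64BIT: cbValue = 8; break;
           IEM_NOT_REACHED_DEFAULT_CASE_RET();
       }
   so the 'may be used uninitialized' warning on cbValue goes away. */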
233
234/**
235 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
236 * occasion.
237 */
238#ifdef LOG_ENABLED
239# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
240 do { \
241 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
242 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
243 } while (0)
244#else
245# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
246 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
247#endif
248
249/**
250 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
251 * occasion using the supplied logger statement.
252 *
253 * @param a_LoggerArgs What to log on failure.
254 */
255#ifdef LOG_ENABLED
256# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
257 do { \
258 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
259 /*LogFunc(a_LoggerArgs);*/ \
260 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
261 } while (0)
262#else
263# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
264 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
265#endif
266
267/**
268 * Call an opcode decoder function.
269 *
270 * We're using macros for this so that adding and removing parameters can be
271 * done as we please. See FNIEMOP_DEF.
272 */
273#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
274
275/**
276 * Call a common opcode decoder function taking one extra argument.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF_1.
280 */
281#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
282
283/**
284 * Call a common opcode decoder function taking two extra arguments.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_2.
288 */
289#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
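/* Illustrative pairing of the DEF/CALL macros (iemOp_Example is a hypothetical
   handler name, not one defined in this file):
       FNIEMOP_DEF(iemOp_Example)
       {
           ...
           return VINF_SUCCESS;
       }
   and, from a decoder loop, dispatch via the one-byte table declared below:
       return FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
   Keeping both definition and call sites behind macros lets the implicit
   pVCpu parameter and the calling convention be changed in one place. */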
290
291/**
292 * Check if we're currently executing in real or virtual 8086 mode.
293 *
294 * @returns @c true if it is, @c false if not.
295 * @param a_pVCpu The IEM state of the current CPU.
296 */
297#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
298
299/**
300 * Check if we're currently executing in virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
304 */
305#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in long mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in real mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
325 * @returns PCCPUMFEATURES
326 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
327 */
328#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
329
330/**
331 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
332 * @returns PCCPUMFEATURES
333 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
334 */
335#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
336
337/**
338 * Evaluates to true if we're presenting an Intel CPU to the guest.
339 */
340#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
341
342/**
343 * Evaluates to true if we're presenting an AMD CPU to the guest.
344 */
345#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
346
347/**
348 * Check if the address is canonical.
349 */
350#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
351
352/** @def IEM_USE_UNALIGNED_DATA_ACCESS
353 * Use unaligned accesses instead of elaborate byte assembly. */
354#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
355# define IEM_USE_UNALIGNED_DATA_ACCESS
356#endif
357
358
359/*********************************************************************************************************************************
360* Global Variables *
361*********************************************************************************************************************************/
362extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
363
364
365/** Function table for the ADD instruction. */
366IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
367{
368 iemAImpl_add_u8, iemAImpl_add_u8_locked,
369 iemAImpl_add_u16, iemAImpl_add_u16_locked,
370 iemAImpl_add_u32, iemAImpl_add_u32_locked,
371 iemAImpl_add_u64, iemAImpl_add_u64_locked
372};
373
374/** Function table for the ADC instruction. */
375IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
376{
377 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
378 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
379 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
380 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
381};
382
383/** Function table for the SUB instruction. */
384IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
385{
386 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
387 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
388 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
389 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
390};
391
392/** Function table for the SBB instruction. */
393IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
394{
395 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
396 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
397 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
398 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
399};
400
401/** Function table for the OR instruction. */
402IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
403{
404 iemAImpl_or_u8, iemAImpl_or_u8_locked,
405 iemAImpl_or_u16, iemAImpl_or_u16_locked,
406 iemAImpl_or_u32, iemAImpl_or_u32_locked,
407 iemAImpl_or_u64, iemAImpl_or_u64_locked
408};
409
410/** Function table for the XOR instruction. */
411IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
412{
413 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
414 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
415 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
416 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
417};
418
419/** Function table for the AND instruction. */
420IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
421{
422 iemAImpl_and_u8, iemAImpl_and_u8_locked,
423 iemAImpl_and_u16, iemAImpl_and_u16_locked,
424 iemAImpl_and_u32, iemAImpl_and_u32_locked,
425 iemAImpl_and_u64, iemAImpl_and_u64_locked
426};
427
428/** Function table for the CMP instruction.
429 * @remarks Making operand order ASSUMPTIONS.
430 */
431IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
432{
433 iemAImpl_cmp_u8, NULL,
434 iemAImpl_cmp_u16, NULL,
435 iemAImpl_cmp_u32, NULL,
436 iemAImpl_cmp_u64, NULL
437};
438
439/** Function table for the TEST instruction.
440 * @remarks Making operand order ASSUMPTIONS.
441 */
442IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
443{
444 iemAImpl_test_u8, NULL,
445 iemAImpl_test_u16, NULL,
446 iemAImpl_test_u32, NULL,
447 iemAImpl_test_u64, NULL
448};
449
450/** Function table for the BT instruction. */
451IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
452{
453 NULL, NULL,
454 iemAImpl_bt_u16, NULL,
455 iemAImpl_bt_u32, NULL,
456 iemAImpl_bt_u64, NULL
457};
458
459/** Function table for the BTC instruction. */
460IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
461{
462 NULL, NULL,
463 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
464 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
465 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
466};
467
468/** Function table for the BTR instruction. */
469IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
470{
471 NULL, NULL,
472 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
473 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
474 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
475};
476
477/** Function table for the BTS instruction. */
478IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
479{
480 NULL, NULL,
481 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
482 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
483 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
484};
485
486/** Function table for the BSF instruction. */
487IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
488{
489 NULL, NULL,
490 iemAImpl_bsf_u16, NULL,
491 iemAImpl_bsf_u32, NULL,
492 iemAImpl_bsf_u64, NULL
493};
494
495/** Function table for the BSR instruction. */
496IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
497{
498 NULL, NULL,
499 iemAImpl_bsr_u16, NULL,
500 iemAImpl_bsr_u32, NULL,
501 iemAImpl_bsr_u64, NULL
502};
503
504/** Function table for the IMUL instruction. */
505IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
506{
507 NULL, NULL,
508 iemAImpl_imul_two_u16, NULL,
509 iemAImpl_imul_two_u32, NULL,
510 iemAImpl_imul_two_u64, NULL
511};
512
513/** Group 1 /r lookup table. */
514IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
515{
516 &g_iemAImpl_add,
517 &g_iemAImpl_or,
518 &g_iemAImpl_adc,
519 &g_iemAImpl_sbb,
520 &g_iemAImpl_and,
521 &g_iemAImpl_sub,
522 &g_iemAImpl_xor,
523 &g_iemAImpl_cmp
524};
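/* The order above follows the ModR/M reg field (/r) encoding of the group 1
   opcodes (0x80..0x83): /0=ADD, /1=OR, /2=ADC, /3=SBB, /4=AND, /5=SUB,
   /6=XOR, /7=CMP. */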
525
526/** Function table for the INC instruction. */
527IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
528{
529 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
530 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
531 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
532 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
533};
534
535/** Function table for the DEC instruction. */
536IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
537{
538 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
539 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
540 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
541 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
542};
543
544/** Function table for the NEG instruction. */
545IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
546{
547 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
548 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
549 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
550 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
551};
552
553/** Function table for the NOT instruction. */
554IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
555{
556 iemAImpl_not_u8, iemAImpl_not_u8_locked,
557 iemAImpl_not_u16, iemAImpl_not_u16_locked,
558 iemAImpl_not_u32, iemAImpl_not_u32_locked,
559 iemAImpl_not_u64, iemAImpl_not_u64_locked
560};
561
562
563/** Function table for the ROL instruction. */
564IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
565{
566 iemAImpl_rol_u8,
567 iemAImpl_rol_u16,
568 iemAImpl_rol_u32,
569 iemAImpl_rol_u64
570};
571
572/** Function table for the ROR instruction. */
573IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
574{
575 iemAImpl_ror_u8,
576 iemAImpl_ror_u16,
577 iemAImpl_ror_u32,
578 iemAImpl_ror_u64
579};
580
581/** Function table for the RCL instruction. */
582IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
583{
584 iemAImpl_rcl_u8,
585 iemAImpl_rcl_u16,
586 iemAImpl_rcl_u32,
587 iemAImpl_rcl_u64
588};
589
590/** Function table for the RCR instruction. */
591IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
592{
593 iemAImpl_rcr_u8,
594 iemAImpl_rcr_u16,
595 iemAImpl_rcr_u32,
596 iemAImpl_rcr_u64
597};
598
599/** Function table for the SHL instruction. */
600IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
601{
602 iemAImpl_shl_u8,
603 iemAImpl_shl_u16,
604 iemAImpl_shl_u32,
605 iemAImpl_shl_u64
606};
607
608/** Function table for the SHR instruction. */
609IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
610{
611 iemAImpl_shr_u8,
612 iemAImpl_shr_u16,
613 iemAImpl_shr_u32,
614 iemAImpl_shr_u64
615};
616
617/** Function table for the SAR instruction. */
618IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
619{
620 iemAImpl_sar_u8,
621 iemAImpl_sar_u16,
622 iemAImpl_sar_u32,
623 iemAImpl_sar_u64
624};
625
626
627/** Function table for the MUL instruction. */
628IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
629{
630 iemAImpl_mul_u8,
631 iemAImpl_mul_u16,
632 iemAImpl_mul_u32,
633 iemAImpl_mul_u64
634};
635
636/** Function table for the IMUL instruction working implicitly on rAX. */
637IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
638{
639 iemAImpl_imul_u8,
640 iemAImpl_imul_u16,
641 iemAImpl_imul_u32,
642 iemAImpl_imul_u64
643};
644
645/** Function table for the DIV instruction. */
646IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
647{
648 iemAImpl_div_u8,
649 iemAImpl_div_u16,
650 iemAImpl_div_u32,
651 iemAImpl_div_u64
652};
653
654/** Function table for the IDIV instruction. */
655IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
656{
657 iemAImpl_idiv_u8,
658 iemAImpl_idiv_u16,
659 iemAImpl_idiv_u32,
660 iemAImpl_idiv_u64
661};
662
663/** Function table for the SHLD instruction */
664IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
665{
666 iemAImpl_shld_u16,
667 iemAImpl_shld_u32,
668 iemAImpl_shld_u64,
669};
670
671/** Function table for the SHRD instruction */
672IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
673{
674 iemAImpl_shrd_u16,
675 iemAImpl_shrd_u32,
676 iemAImpl_shrd_u64,
677};
678
679
680/** Function table for the PUNPCKLBW instruction */
681IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
682/** Function table for the PUNPCKLWD instruction */
683IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
684/** Function table for the PUNPCKLDQ instruction */
685IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
686/** Function table for the PUNPCKLQDQ instruction */
687IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
688
689/** Function table for the PUNPCKHBW instruction */
690IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
691/** Function table for the PUNPCKHWD instruction */
692IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
693/** Function table for the PUNPCKHDQ instruction */
694IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
695/** Function table for the PUNPCKHQDQ instruction */
696IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
697
698/** Function table for the PXOR instruction */
699IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
700/** Function table for the PCMPEQB instruction */
701IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
702/** Function table for the PCMPEQW instruction */
703IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
704/** Function table for the PCMPEQD instruction */
705IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
706
707
708#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
709/** What IEM just wrote. */
710uint8_t g_abIemWrote[256];
711/** How much IEM just wrote. */
712size_t g_cbIemWrote;
713#endif
714
715
716/*********************************************************************************************************************************
717* Internal Functions *
718*********************************************************************************************************************************/
719IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
720IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
721IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
722IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
723/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
724IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
725IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
726IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
727IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
728IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
729IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
730IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
731IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
732IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
733IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
734IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
735IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
736#ifdef IEM_WITH_SETJMP
737DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
738DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
739DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
740DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
741DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
742#endif
743
744IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
745IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
746IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
747IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
748IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
749IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
750IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
751IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
752IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
753IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
754IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
755IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
756IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
757IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
758IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
759IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
760
761#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
762IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
763#endif
764IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
765IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
766
767
768
769/**
770 * Sets the pass up status.
771 *
772 * @returns VINF_SUCCESS.
773 * @param pVCpu The cross context virtual CPU structure of the
774 * calling thread.
775 * @param rcPassUp The pass up status. Must be informational.
776 * VINF_SUCCESS is not allowed.
777 */
778IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
779{
780 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
781
782 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
783 if (rcOldPassUp == VINF_SUCCESS)
784 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
785 /* If both are EM scheduling codes, use EM priority rules. */
786 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
787 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
788 {
789 if (rcPassUp < rcOldPassUp)
790 {
791 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
792 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
793 }
794 else
795 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
796 }
797 /* Override EM scheduling with specific status code. */
798 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
799 {
800 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
801 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
802 }
803 /* Don't override specific status code, first come first served. */
804 else
805 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
806 return VINF_SUCCESS;
807}
808
809
810/**
811 * Calculates the CPU mode.
812 *
813 * This is mainly for updating IEMCPU::enmCpuMode.
814 *
815 * @returns CPU mode.
816 * @param pCtx The register context for the CPU.
817 */
818DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
819{
820 if (CPUMIsGuestIn64BitCodeEx(pCtx))
821 return IEMMODE_64BIT;
822 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
823 return IEMMODE_32BIT;
824 return IEMMODE_16BIT;
825}
826
827
828/**
829 * Initializes the execution state.
830 *
831 * @param pVCpu The cross context virtual CPU structure of the
832 * calling thread.
833 * @param fBypassHandlers Whether to bypass access handlers.
834 *
835 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
836 * side-effects in strict builds.
837 */
838DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
839{
840 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
841
842 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
843
844#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
845 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
846 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
847 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
848 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
849 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
850 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
851 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
852 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
853#endif
854
855#ifdef VBOX_WITH_RAW_MODE_NOT_R0
856 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
857#endif
858 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
859 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
860#ifdef VBOX_STRICT
861 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xc0fe;
862 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xc0fe;
863 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xc0fe;
864 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xc0fe;
865 pVCpu->iem.s.fPrefixes = (IEMMODE)0xfeedbeef;
866 pVCpu->iem.s.uRexReg = 127;
867 pVCpu->iem.s.uRexB = 127;
868 pVCpu->iem.s.uRexIndex = 127;
869 pVCpu->iem.s.iEffSeg = 127;
870 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
871# ifdef IEM_WITH_CODE_TLB
872 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
873 pVCpu->iem.s.pbInstrBuf = NULL;
874 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
875 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
876 pVCpu->iem.s.offCurInstrStart = UINT16_MAX;
877 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
878# else
879 pVCpu->iem.s.offOpcode = 127;
880 pVCpu->iem.s.cbOpcode = 127;
881# endif
882#endif
883
884 pVCpu->iem.s.cActiveMappings = 0;
885 pVCpu->iem.s.iNextMapping = 0;
886 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
887 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
888#ifdef VBOX_WITH_RAW_MODE_NOT_R0
889 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
890 && pCtx->cs.u64Base == 0
891 && pCtx->cs.u32Limit == UINT32_MAX
892 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
893 if (!pVCpu->iem.s.fInPatchCode)
894 CPUMRawLeave(pVCpu, VINF_SUCCESS);
895#endif
896
897#ifdef IEM_VERIFICATION_MODE_FULL
898 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
899 pVCpu->iem.s.fNoRem = true;
900#endif
901}
902
903
904/**
905 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
906 *
907 * @param pVCpu The cross context virtual CPU structure of the
908 * calling thread.
909 */
910DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
911{
912 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
913#ifdef IEM_VERIFICATION_MODE_FULL
914 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
915#endif
916#ifdef VBOX_STRICT
917# ifdef IEM_WITH_CODE_TLB
918# else
919 pVCpu->iem.s.cbOpcode = 0;
920# endif
921#else
922 NOREF(pVCpu);
923#endif
924}
925
926
927/**
928 * Initializes the decoder state.
929 *
930 * iemReInitDecoder is mostly a copy of this function.
931 *
932 * @param pVCpu The cross context virtual CPU structure of the
933 * calling thread.
934 * @param fBypassHandlers Whether to bypass access handlers.
935 */
936DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
937{
938 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
939
940 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
941
942#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
944 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
945 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
946 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
947 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
948 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
949 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
950 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
951#endif
952
953#ifdef VBOX_WITH_RAW_MODE_NOT_R0
954 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
955#endif
956 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
957#ifdef IEM_VERIFICATION_MODE_FULL
958 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
959 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
960#endif
961 IEMMODE enmMode = iemCalcCpuMode(pCtx);
962 pVCpu->iem.s.enmCpuMode = enmMode;
963 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
964 pVCpu->iem.s.enmEffAddrMode = enmMode;
965 if (enmMode != IEMMODE_64BIT)
966 {
967 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
968 pVCpu->iem.s.enmEffOpSize = enmMode;
969 }
970 else
971 {
972 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
973 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
974 }
975 pVCpu->iem.s.fPrefixes = 0;
976 pVCpu->iem.s.uRexReg = 0;
977 pVCpu->iem.s.uRexB = 0;
978 pVCpu->iem.s.uRexIndex = 0;
979 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
980#ifdef IEM_WITH_CODE_TLB
981 pVCpu->iem.s.pbInstrBuf = NULL;
982 pVCpu->iem.s.offInstrNextByte = 0;
983 pVCpu->iem.s.offCurInstrStart = 0;
984# ifdef VBOX_STRICT
985 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
986 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
987 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
988# endif
989#else
990 pVCpu->iem.s.offOpcode = 0;
991 pVCpu->iem.s.cbOpcode = 0;
992#endif
993 pVCpu->iem.s.cActiveMappings = 0;
994 pVCpu->iem.s.iNextMapping = 0;
995 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
996 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
997#ifdef VBOX_WITH_RAW_MODE_NOT_R0
998 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
999 && pCtx->cs.u64Base == 0
1000 && pCtx->cs.u32Limit == UINT32_MAX
1001 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1002 if (!pVCpu->iem.s.fInPatchCode)
1003 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1004#endif
1005
1006#ifdef DBGFTRACE_ENABLED
1007 switch (enmMode)
1008 {
1009 case IEMMODE_64BIT:
1010 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1011 break;
1012 case IEMMODE_32BIT:
1013 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1014 break;
1015 case IEMMODE_16BIT:
1016 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1017 break;
1018 }
1019#endif
1020}
1021
1022
1023/**
1024 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1025 *
1026 * This is mostly a copy of iemInitDecoder.
1027 *
1028 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1029 */
1030DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1031{
1032 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1033
1034 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1035
1036#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1037 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1038 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1039 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1040 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1041 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1042 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1043 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1044 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1045#endif
1046
1047 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1048#ifdef IEM_VERIFICATION_MODE_FULL
1049 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1050 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1051#endif
1052 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1053 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1054 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1055 pVCpu->iem.s.enmEffAddrMode = enmMode;
1056 if (enmMode != IEMMODE_64BIT)
1057 {
1058 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1059 pVCpu->iem.s.enmEffOpSize = enmMode;
1060 }
1061 else
1062 {
1063 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1064 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1065 }
1066 pVCpu->iem.s.fPrefixes = 0;
1067 pVCpu->iem.s.uRexReg = 0;
1068 pVCpu->iem.s.uRexB = 0;
1069 pVCpu->iem.s.uRexIndex = 0;
1070 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1071#ifdef IEM_WITH_CODE_TLB
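    /* If the previously mapped instruction buffer still covers the new RIP we
       keep it and only move the read window; the '+ 15' below caps the window
       at the maximum x86 instruction length past the new instruction start. */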
1072 if (pVCpu->iem.s.pbInstrBuf)
1073 {
1074 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1075 - pVCpu->iem.s.uInstrBufPc;
1076 if (off < pVCpu->iem.s.cbInstrBufTotal)
1077 {
1078 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1079 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1080 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1081 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1082 else
1083 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1084 }
1085 else
1086 {
1087 pVCpu->iem.s.pbInstrBuf = NULL;
1088 pVCpu->iem.s.offInstrNextByte = 0;
1089 pVCpu->iem.s.offCurInstrStart = 0;
1090 }
1091 }
1092 else
1093 {
1094 pVCpu->iem.s.offInstrNextByte = 0;
1095 pVCpu->iem.s.offCurInstrStart = 0;
1096 }
1097#else
1098 pVCpu->iem.s.cbOpcode = 0;
1099 pVCpu->iem.s.offOpcode = 0;
1100#endif
1101 Assert(pVCpu->iem.s.cActiveMappings == 0);
1102 pVCpu->iem.s.iNextMapping = 0;
1103 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1104 Assert(pVCpu->iem.s.fBypassHandlers == false);
1105#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1106 if (!pVCpu->iem.s.fInPatchCode)
1107 { /* likely */ }
1108 else
1109 {
1110 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1111 && pCtx->cs.u64Base == 0
1112 && pCtx->cs.u32Limit == UINT32_MAX
1113 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1114 if (!pVCpu->iem.s.fInPatchCode)
1115 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1116 }
1117#endif
1118
1119#ifdef DBGFTRACE_ENABLED
1120 switch (enmMode)
1121 {
1122 case IEMMODE_64BIT:
1123 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1124 break;
1125 case IEMMODE_32BIT:
1126 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1127 break;
1128 case IEMMODE_16BIT:
1129 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1130 break;
1131 }
1132#endif
1133}
1134
1135
1136
1137/**
1138 * Prefetches the opcodes when execution is first started.
1139 *
1140 * @returns Strict VBox status code.
1141 * @param pVCpu The cross context virtual CPU structure of the
1142 * calling thread.
1143 * @param fBypassHandlers Whether to bypass access handlers.
1144 */
1145IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1146{
1147#ifdef IEM_VERIFICATION_MODE_FULL
1148 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1149#endif
1150 iemInitDecoder(pVCpu, fBypassHandlers);
1151
1152#ifdef IEM_WITH_CODE_TLB
1153 /** @todo Do ITLB lookup here. */
1154
1155#else /* !IEM_WITH_CODE_TLB */
1156
1157 /*
1158 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1159 *
1160 * First translate CS:rIP to a physical address.
1161 */
1162 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1163 uint32_t cbToTryRead;
1164 RTGCPTR GCPtrPC;
1165 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1166 {
1167 cbToTryRead = PAGE_SIZE;
1168 GCPtrPC = pCtx->rip;
1169 if (!IEM_IS_CANONICAL(GCPtrPC))
1170 return iemRaiseGeneralProtectionFault0(pVCpu);
1171 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1172 }
1173 else
1174 {
1175 uint32_t GCPtrPC32 = pCtx->eip;
1176 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1177 if (GCPtrPC32 > pCtx->cs.u32Limit)
1178 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1179 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1180 if (!cbToTryRead) /* overflowed */
1181 {
1182 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1183 cbToTryRead = UINT32_MAX;
1184 }
1185 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1186 Assert(GCPtrPC <= UINT32_MAX);
1187 }
1188
1189# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1190 /* Allow interpretation of patch manager code blocks since they can for
1191 instance throw #PFs for perfectly good reasons. */
1192 if (pVCpu->iem.s.fInPatchCode)
1193 {
1194 size_t cbRead = 0;
1195 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1196 AssertRCReturn(rc, rc);
1197 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1198 return VINF_SUCCESS;
1199 }
1200# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1201
1202 RTGCPHYS GCPhys;
1203 uint64_t fFlags;
1204 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1205 if (RT_FAILURE(rc))
1206 {
1207 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1208 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1209 }
1210 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1211 {
1212 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1213 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1214 }
1215 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1216 {
1217 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1218 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1219 }
1220 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1221 /** @todo Check reserved bits and such stuff. PGM is better at doing
1222 * that, so do it when implementing the guest virtual address
1223 * TLB... */
1224
1225# ifdef IEM_VERIFICATION_MODE_FULL
1226 /*
1227 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1228 * instruction.
1229 */
1230 /** @todo optimize this differently by not using PGMPhysRead. */
1231 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1232 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1233 if ( offPrevOpcodes < cbOldOpcodes
1234 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1235 {
1236 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1237 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1238 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1239 pVCpu->iem.s.cbOpcode = cbNew;
1240 return VINF_SUCCESS;
1241 }
1242# endif
1243
1244 /*
1245 * Read the bytes at this address.
1246 */
1247 PVM pVM = pVCpu->CTX_SUFF(pVM);
1248# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1249 size_t cbActual;
1250 if ( PATMIsEnabled(pVM)
1251 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1252 {
1253 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1254 Assert(cbActual > 0);
1255 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1256 }
1257 else
1258# endif
1259 {
1260 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1261 if (cbToTryRead > cbLeftOnPage)
1262 cbToTryRead = cbLeftOnPage;
1263 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1264 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1265
1266 if (!pVCpu->iem.s.fBypassHandlers)
1267 {
1268 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1269 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1270 { /* likely */ }
1271 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1272 {
1273 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1274 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1275 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1276 }
1277 else
1278 {
1279 Log((RT_SUCCESS(rcStrict)
1280 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1281 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1282 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1283 return rcStrict;
1284 }
1285 }
1286 else
1287 {
1288 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1289 if (RT_SUCCESS(rc))
1290 { /* likely */ }
1291 else
1292 {
1293 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1294 GCPtrPC, GCPhys, cbToTryRead, rc));
1295 return rc;
1296 }
1297 }
1298 pVCpu->iem.s.cbOpcode = cbToTryRead;
1299 }
1300#endif /* !IEM_WITH_CODE_TLB */
1301 return VINF_SUCCESS;
1302}
1303
1304
1305/**
1306 * Invalidates the IEM TLBs.
1307 *
1308 * This is called internally as well as by PGM when moving GC mappings.
1309 *
1310 *
1311 * @param pVCpu The cross context virtual CPU structure of the calling
1312 * thread.
1313 * @param fVmm Set when PGM calls us with a remapping.
1314 */
1315void IEMInvalidTLBs(PVMCPU pVCpu, bool fVmm)
1316{
1317#ifdef IEM_WITH_CODE_TLB
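    /* The entries are invalidated lazily: the revision is folded into every
       tag when it is set (see the code TLB lookup in iemOpcodeFetchBytesJmp),
       so bumping it makes all existing tags mismatch.  Only when the revision
       counter wraps back to zero do we need to walk and clear the tags. */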
1318 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1319 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1320 { /* very likely */ }
1321 else
1322 {
1323 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1324 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1325 while (i-- > 0)
1326 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1327 }
1328#endif
1329
1330#ifdef IEM_WITH_DATA_TLB
1331 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1332 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1333 { /* very likely */ }
1334 else
1335 {
1336 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1337 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1338 while (i-- > 0)
1339 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1340 }
1341#endif
1342 NOREF(pVCpu); NOREF(fVmm);
1343}
1344
1345
1346/**
1347 * Invalidates the host physical aspects of the IEM TLBs.
1348 *
1349 * This is called internally as well as by PGM when moving GC mappings.
1350 *
1351 * @param pVCpu The cross context virtual CPU structure of the calling
1352 * thread.
1353 * @param uTlbPhysRev The revision of the phys stuff.
1354 * @param fFullFlush Whether we're doing a full flush or not.
1355 */
1356void IEMInvalidTLBsHostPhys(PVMCPU pVCpu, uint64_t uTlbPhysRev, bool fFullFlush)
1357{
1358#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1359 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1360
1361 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1362 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1363
1364 if (!fFullFlush)
1365 { /* very likely */ }
1366 else
1367 {
1368 unsigned i;
1369# ifdef IEM_WITH_CODE_TLB
1370 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1371 while (i-- > 0)
1372 {
1373 pVCpu->iem.s.CodeTlb.aEntries[i].pMappingR3 = NULL;
1374 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1375 }
1376# endif
1377# ifdef IEM_WITH_DATA_TLB
1378 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1379 while (i-- > 0)
1380 {
1381 pVCpu->iem.s.DataTlb.aEntries[i].pMappingR3 = NULL;
1382 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1383 }
1384# endif
1385 }
1386#endif
1387 NOREF(pVCpu); NOREF(fFullFlush);
1388}
1389
1390
1391#ifdef IEM_WITH_CODE_TLB
1392
1393/**
1394 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
1395 * failure and jumps.
1396 *
1397 * We end up here for a number of reasons:
1398 * - pbInstrBuf isn't yet initialized.
1399 * - Advancing beyond the buffer boundary (e.g. cross page).
1400 * - Advancing beyond the CS segment limit.
1401 * - Fetching from non-mappable page (e.g. MMIO).
1402 *
1403 * @param pVCpu The cross context virtual CPU structure of the
1404 * calling thread.
1405 * @param pvDst Where to return the bytes.
1406 * @param cbDst Number of bytes to read.
1407 *
1408 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1409 */
1410IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1411{
1412 Assert(cbDst <= 8);
1413 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1414
1415 /*
1416 * We might have a partial buffer match, deal with that first to make the
1417 * rest simpler. This is the first part of the cross page/buffer case.
1418 */
1419 if (pVCpu->iem.s.pbInstrBuf != NULL)
1420 {
1421 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1422 {
1423 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1424 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1425 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1426
1427 cbDst -= cbCopy;
1428 pvDst = (uint8_t *)pvDst + cbCopy;
1429 offBuf += cbCopy;
1430 pVCpu->iem.s.offInstrNextByte += offBuf;
1431 }
1432 }
1433
1434 /*
1435 * Check segment limit, figuring how much we're allowed to access at this point.
1436 */
1437 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1438 RTGCPTR GCPtrFirst;
1439 uint32_t cbMaxRead;
1440 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1441 {
1442 GCPtrFirst = pCtx->rip + (offBuf - pVCpu->iem.s.offCurInstrStart);
1443 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1444 { /* likely */ }
1445 else
1446 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1447 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1448 }
1449 else
1450 {
1451 GCPtrFirst = pCtx->eip + (offBuf - pVCpu->iem.s.offCurInstrStart);
1452 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1453 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1454 { /* likely */ }
1455 else
1456 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1457 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1458 if (cbMaxRead != 0)
1459 { /* likely */ }
1460 else
1461 {
1462 /* Overflowed because address is 0 and limit is max. */
1463 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1464 cbMaxRead = X86_PAGE_SIZE;
1465 }
1466 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1467 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1468 if (cbMaxRead2 < cbMaxRead)
1469 cbMaxRead = cbMaxRead2;
1470 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1471 }
1472
1473 /*
1474 * Get the TLB entry for this piece of code.
1475 */
1476 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1477 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1478 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
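    /* Direct mapped, 256 entry TLB: the low 8 bits of the tag pick the entry,
       the full tag (page number bits plus revision) decides hit or miss. */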
1479 if (pTlbe->uTag == uTag)
1480 {
1481 /* likely when executing lots of code, otherwise unlikely */
1482# ifdef VBOX_WITH_STATISTICS
1483 pVCpu->iem.s.CodeTlb.cTlbHits++;
1484# endif
1485 }
1486 else
1487 {
1488 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1489 pVCpu->iem.s.CodeTlb.cTlbMissesTag++;
1490# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1491 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1492 {
1493 pTlbe->uTag = uTag;
1494 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1495 | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1496 pTlbe->GCPhys = NIL_RTGCPHYS;
1497 pTlbe->pMappingR3 = NULL;
1498 }
1499 else
1500# endif
1501 {
1502 RTGCPHYS GCPhys;
1503 uint64_t fFlags;
1504 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1505 if (RT_FAILURE(rc))
1506 {
1507 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1508 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1509 }
1510
1511 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1512 pTlbe->uTag = uTag;
1513 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1514 pTlbe->GCPhys = GCPhys;
1515 pTlbe->pMappingR3 = NULL;
1516 }
1517 }
1518
1519 /*
1520 * Check TLB page table level access flags.
1521 */
1522 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1523 {
1524 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1525 {
1526 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1527 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1528 }
1529 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1530 {
1531 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1532 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1533 }
1534 }
1535
1536# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1537 /*
1538 * Allow interpretation of patch manager code blocks since they can for
1539 * instance throw #PFs for perfectly good reasons.
1540 */
1541 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1542 { /* no unlikely */ }
1543 else
1544 {
1545 /** @todo Could optimize this a little in ring-3 if we liked. */
1546 size_t cbRead = 0;
1547 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1548 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1549 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1550 return;
1551 }
1552# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1553
1554 /*
1555 * Look up the physical page info if necessary.
1556 */
1557 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1558 { /* not necessary */ }
1559 else
1560 {
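        /** @todo Not yet implemented: re-query the physical page info and update
         *        the IEMTLBE_F_PHYS_REV portion of fFlagsAndPhysRev (and the
         *        pMappingR3 pointer) for this entry. */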
1561 }
1562
1563
1564# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1565 /*
1566 * Try do a direct read using the pMappingR3 pointer.
1567 */
1568 if (!(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ)))
1569 {
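        /** @todo Not yet implemented: copy up to cbMaxRead bytes (or cbDst if
         *        smaller) straight from pTlbe->pMappingR3 into pvDst and advance
         *        the read position. */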
1570
1571 }
1572# endif
1573
1574
1575# if 0
1576 /*
1577 * Read the bytes at this address.
1578 *
1579 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1580 * and since PATM should only patch the start of an instruction there
1581 * should be no need to check again here.
1582 */
1583 if (!pVCpu->iem.s.fBypassHandlers)
1584 {
1585 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1586 cbToTryRead, PGMACCESSORIGIN_IEM);
1587 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1588 { /* likely */ }
1589 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1590 {
1591 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1592 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1593 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1594 }
1595 else
1596 {
1597 Log((RT_SUCCESS(rcStrict)
1598 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1599 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1600 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1601 return rcStrict;
1602 }
1603 }
1604 else
1605 {
1606 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1607 if (RT_SUCCESS(rc))
1608 { /* likely */ }
1609 else
1610 {
1611 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1612 return rc;
1613 }
1614 }
1615 pVCpu->iem.s.cbOpcode += cbToTryRead;
1616 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1617# endif
1618}
1619
1620#else
1621
1622/**
1623 * Try fetch at least @a cbMin bytes more opcodes, raise the appropriate
1624 * exception if it fails.
1625 *
1626 * @returns Strict VBox status code.
1627 * @param pVCpu The cross context virtual CPU structure of the
1628 * calling thread.
1629 * @param cbMin The minimum number of bytes relative offOpcode
1630 * that must be read.
1631 */
1632IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1633{
1634 /*
1635 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1636 *
1637 * First translate CS:rIP to a physical address.
1638 */
1639 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1640 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1641 uint32_t cbToTryRead;
1642 RTGCPTR GCPtrNext;
1643 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1644 {
1645 cbToTryRead = PAGE_SIZE;
1646 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1647 if (!IEM_IS_CANONICAL(GCPtrNext))
1648 return iemRaiseGeneralProtectionFault0(pVCpu);
1649 }
1650 else
1651 {
1652 uint32_t GCPtrNext32 = pCtx->eip;
1653 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1654 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1655 if (GCPtrNext32 > pCtx->cs.u32Limit)
1656 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1657 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1658 if (!cbToTryRead) /* overflowed */
1659 {
1660 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1661 cbToTryRead = UINT32_MAX;
1662 /** @todo check out wrapping around the code segment. */
1663 }
1664 if (cbToTryRead < cbMin - cbLeft)
1665 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1666 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1667 }
1668
1669 /* Only read up to the end of the page, and make sure we don't read more
1670 than the opcode buffer can hold. */
1671 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1672 if (cbToTryRead > cbLeftOnPage)
1673 cbToTryRead = cbLeftOnPage;
1674 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1675 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1676/** @todo r=bird: Convert assertion into undefined opcode exception? */
1677 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1678
1679# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1680 /* Allow interpretation of patch manager code blocks since they can for
1681 instance throw #PFs for perfectly good reasons. */
1682 if (pVCpu->iem.s.fInPatchCode)
1683 {
1684 size_t cbRead = 0;
1685 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1686 AssertRCReturn(rc, rc);
1687 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1688 return VINF_SUCCESS;
1689 }
1690# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1691
1692 RTGCPHYS GCPhys;
1693 uint64_t fFlags;
1694 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1695 if (RT_FAILURE(rc))
1696 {
1697 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1698 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1699 }
1700 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1701 {
1702 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1703 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1704 }
1705 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1706 {
1707 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1708 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1709 }
1710 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1711 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1712 /** @todo Check reserved bits and such stuff. PGM is better at doing
1713 * that, so do it when implementing the guest virtual address
1714 * TLB... */
1715
1716 /*
1717 * Read the bytes at this address.
1718 *
1719 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1720 * and since PATM should only patch the start of an instruction there
1721 * should be no need to check again here.
1722 */
1723 if (!pVCpu->iem.s.fBypassHandlers)
1724 {
1725 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1726 cbToTryRead, PGMACCESSORIGIN_IEM);
1727 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1728 { /* likely */ }
1729 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1730 {
1731 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1732 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1733 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1734 }
1735 else
1736 {
1737 Log((RT_SUCCESS(rcStrict)
1738 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1739 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1740 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1741 return rcStrict;
1742 }
1743 }
1744 else
1745 {
1746 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1747 if (RT_SUCCESS(rc))
1748 { /* likely */ }
1749 else
1750 {
1751 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1752 return rc;
1753 }
1754 }
1755 pVCpu->iem.s.cbOpcode += cbToTryRead;
1756 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1757
1758 return VINF_SUCCESS;
1759}
1760
1761#endif /* !IEM_WITH_CODE_TLB */
1762#ifndef IEM_WITH_SETJMP
1763
1764/**
1765 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1766 *
1767 * @returns Strict VBox status code.
1768 * @param pVCpu The cross context virtual CPU structure of the
1769 * calling thread.
1770 * @param pb Where to return the opcode byte.
1771 */
1772DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
1773{
1774 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1775 if (rcStrict == VINF_SUCCESS)
1776 {
1777 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1778 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1779 pVCpu->iem.s.offOpcode = offOpcode + 1;
1780 }
1781 else
1782 *pb = 0;
1783 return rcStrict;
1784}
1785
1786
1787/**
1788 * Fetches the next opcode byte.
1789 *
1790 * @returns Strict VBox status code.
1791 * @param pVCpu The cross context virtual CPU structure of the
1792 * calling thread.
1793 * @param pu8 Where to return the opcode byte.
1794 */
1795DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
1796{
1797 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
1798 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1799 {
1800 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1801 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
1802 return VINF_SUCCESS;
1803 }
1804 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
1805}
1806
1807#else /* IEM_WITH_SETJMP */
1808
1809/**
1810 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1811 *
1812 * @returns The opcode byte.
1813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1814 */
1815DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
1816{
1817# ifdef IEM_WITH_CODE_TLB
1818 uint8_t u8;
1819 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1820 return u8;
1821# else
1822 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1823 if (rcStrict == VINF_SUCCESS)
1824 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1825 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1826# endif
1827}
1828
1829
1830/**
1831 * Fetches the next opcode byte, longjmp on error.
1832 *
1833 * @returns The opcode byte.
1834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1835 */
1836DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
1837{
1838# ifdef IEM_WITH_CODE_TLB
1839 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
1840 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
1841 if (RT_LIKELY( pbBuf != NULL
1842 && offBuf < pVCpu->iem.s.cbInstrBuf))
1843 {
1844 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
1845 return pbBuf[offBuf];
1846 }
1847# else
1848 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
1849 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
1850 {
1851 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
1852 return pVCpu->iem.s.abOpcode[offOpcode];
1853 }
1854# endif
1855 return iemOpcodeGetNextU8SlowJmp(pVCpu);
1856}
1857
1858#endif /* IEM_WITH_SETJMP */
1859
1860/**
1861 * Fetches the next opcode byte, returns automatically on failure.
1862 *
1863 * @param a_pu8 Where to return the opcode byte.
1864 * @remark Implicitly references pVCpu.
1865 */
1866#ifndef IEM_WITH_SETJMP
1867# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
1868 do \
1869 { \
1870 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
1871 if (rcStrict2 == VINF_SUCCESS) \
1872 { /* likely */ } \
1873 else \
1874 return rcStrict2; \
1875 } while (0)
1876#else
1877# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
1878#endif /* IEM_WITH_SETJMP */
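
/*
 * Usage sketch (illustrative only, not a decoder routine from this file): the
 * macro hides the two error models, so the same decoder source compiles both
 * with and without IEM_WITH_SETJMP:
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_U8(&bRm);    (returns or longjmps on fetch failure)
 *      ... decode the ModR/M byte held in bRm ...
 */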
1879
1880
1881#ifndef IEM_WITH_SETJMP
1882/**
1883 * Fetches the next signed byte from the opcode stream.
1884 *
1885 * @returns Strict VBox status code.
1886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1887 * @param pi8 Where to return the signed byte.
1888 */
1889DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
1890{
1891 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
1892}
1893#endif /* !IEM_WITH_SETJMP */
1894
1895
1896/**
1897 * Fetches the next signed byte from the opcode stream, returning automatically
1898 * on failure.
1899 *
1900 * @param a_pi8 Where to return the signed byte.
1901 * @remark Implicitly references pVCpu.
1902 */
1903#ifndef IEM_WITH_SETJMP
1904# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
1905 do \
1906 { \
1907 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
1908 if (rcStrict2 != VINF_SUCCESS) \
1909 return rcStrict2; \
1910 } while (0)
1911#else /* IEM_WITH_SETJMP */
1912# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
1913
1914#endif /* IEM_WITH_SETJMP */
1915
1916#ifndef IEM_WITH_SETJMP
1917
1918/**
1919 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1920 *
1921 * @returns Strict VBox status code.
1922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1923 * @param pu16 Where to return the opcode word.
1924 */
1925DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
1926{
1927 uint8_t u8;
1928 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1929 if (rcStrict == VINF_SUCCESS)
1930 *pu16 = (int8_t)u8;
1931 return rcStrict;
1932}
1933
1934
1935/**
1936 * Fetches the next signed byte from the opcode stream, extending it to
1937 * unsigned 16-bit.
1938 *
1939 * @returns Strict VBox status code.
1940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1941 * @param pu16 Where to return the unsigned word.
1942 */
1943DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
1944{
1945 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
1946 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
1947 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
1948
1949 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
1950 pVCpu->iem.s.offOpcode = offOpcode + 1;
1951 return VINF_SUCCESS;
1952}
1953
1954#endif /* !IEM_WITH_SETJMP */
1955
1956/**
1957 * Fetches the next signed byte from the opcode stream, sign-extending it to
1958 * a word, returning automatically on failure.
1959 *
1960 * @param a_pu16 Where to return the word.
1961 * @remark Implicitly references pVCpu.
1962 */
1963#ifndef IEM_WITH_SETJMP
1964# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
1965 do \
1966 { \
1967 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
1968 if (rcStrict2 != VINF_SUCCESS) \
1969 return rcStrict2; \
1970 } while (0)
1971#else
1972# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
1973#endif
1974
1975#ifndef IEM_WITH_SETJMP
1976
1977/**
1978 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1979 *
1980 * @returns Strict VBox status code.
1981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1982 * @param pu32 Where to return the opcode dword.
1983 */
1984DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
1985{
1986 uint8_t u8;
1987 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1988 if (rcStrict == VINF_SUCCESS)
1989 *pu32 = (int8_t)u8;
1990 return rcStrict;
1991}
1992
1993
1994/**
1995 * Fetches the next signed byte from the opcode stream, extending it to
1996 * unsigned 32-bit.
1997 *
1998 * @returns Strict VBox status code.
1999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2000 * @param pu32 Where to return the unsigned dword.
2001 */
2002DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2003{
2004 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2005 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2006 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2007
2008 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2009 pVCpu->iem.s.offOpcode = offOpcode + 1;
2010 return VINF_SUCCESS;
2011}
2012
2013#endif /* !IEM_WITH_SETJMP */
2014
2015/**
2016 * Fetches the next signed byte from the opcode stream, sign-extending it to
2017 * a double word, returning automatically on failure.
2018 *
2019 * @param a_pu32 Where to return the double word.
2020 * @remark Implicitly references pVCpu.
2021 */
2022#ifndef IEM_WITH_SETJMP
2023 # define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2024 do \
2025 { \
2026 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2027 if (rcStrict2 != VINF_SUCCESS) \
2028 return rcStrict2; \
2029 } while (0)
2030#else
2031# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2032#endif
2033
2034#ifndef IEM_WITH_SETJMP
2035
2036/**
2037 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2038 *
2039 * @returns Strict VBox status code.
2040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2041 * @param pu64 Where to return the opcode qword.
2042 */
2043DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2044{
2045 uint8_t u8;
2046 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2047 if (rcStrict == VINF_SUCCESS)
2048 *pu64 = (int8_t)u8;
2049 return rcStrict;
2050}
2051
2052
2053/**
2054 * Fetches the next signed byte from the opcode stream, extending it to
2055 * unsigned 64-bit.
2056 *
2057 * @returns Strict VBox status code.
2058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2059 * @param pu64 Where to return the unsigned qword.
2060 */
2061DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2062{
2063 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2064 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2065 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2066
2067 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2068 pVCpu->iem.s.offOpcode = offOpcode + 1;
2069 return VINF_SUCCESS;
2070}
2071
2072#endif /* !IEM_WITH_SETJMP */
2073
2074
2075/**
2076 * Fetches the next signed byte from the opcode stream, sign-extending it to
2077 * a quad word, returning automatically on failure.
2078 *
2079 * @param a_pu64 Where to return the quad word.
2080 * @remark Implicitly references pVCpu.
2081 */
2082#ifndef IEM_WITH_SETJMP
2083# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2084 do \
2085 { \
2086 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2087 if (rcStrict2 != VINF_SUCCESS) \
2088 return rcStrict2; \
2089 } while (0)
2090#else
2091# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2092#endif
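
/*
 * Illustrative note: the S8_SX_UXX family above is what a decoder uses for
 * 8-bit displacements and immediates that take part in wider arithmetic.  For
 * example, a disp8 byte of 0xf0 (-16) becomes 0xfff0, 0xfffffff0 or
 * 0xfffffffffffffff0 depending on which of the three macros is used.
 */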
2093
2094
2095#ifndef IEM_WITH_SETJMP
2096
2097/**
2098 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2099 *
2100 * @returns Strict VBox status code.
2101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2102 * @param pu16 Where to return the opcode word.
2103 */
2104DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2105{
2106 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2107 if (rcStrict == VINF_SUCCESS)
2108 {
2109 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2110# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2111 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2112# else
2113 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2114# endif
2115 pVCpu->iem.s.offOpcode = offOpcode + 2;
2116 }
2117 else
2118 *pu16 = 0;
2119 return rcStrict;
2120}
2121
2122
2123/**
2124 * Fetches the next opcode word.
2125 *
2126 * @returns Strict VBox status code.
2127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2128 * @param pu16 Where to return the opcode word.
2129 */
2130DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2131{
2132 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2133 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2134 {
2135 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2136# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2137 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2138# else
2139 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2140# endif
2141 return VINF_SUCCESS;
2142 }
2143 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2144}
2145
2146#else /* IEM_WITH_SETJMP */
2147
2148/**
2149 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2150 *
2151 * @returns The opcode word.
2152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2153 */
2154DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2155{
2156# ifdef IEM_WITH_CODE_TLB
2157 uint16_t u16;
2158 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2159 return u16;
2160# else
2161 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2162 if (rcStrict == VINF_SUCCESS)
2163 {
2164 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2165 pVCpu->iem.s.offOpcode += 2;
2166# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2167 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2168# else
2169 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2170# endif
2171 }
2172 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2173# endif
2174}
2175
2176
2177/**
2178 * Fetches the next opcode word, longjmp on error.
2179 *
2180 * @returns The opcode word.
2181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2182 */
2183DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2184{
2185# ifdef IEM_WITH_CODE_TLB
2186 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2187 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2188 if (RT_LIKELY( pbBuf != NULL
2189 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2190 {
2191 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2192# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2193 return *(uint16_t const *)&pbBuf[offBuf];
2194# else
2195 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2196# endif
2197 }
2198# else
2199 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2200 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2201 {
2202 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2203# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2204 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2205# else
2206 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2207# endif
2208 }
2209# endif
2210 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2211}
2212
2213#endif /* IEM_WITH_SETJMP */
2214
2215
2216/**
2217 * Fetches the next opcode word, returns automatically on failure.
2218 *
2219 * @param a_pu16 Where to return the opcode word.
2220 * @remark Implicitly references pVCpu.
2221 */
2222#ifndef IEM_WITH_SETJMP
2223# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2224 do \
2225 { \
2226 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2227 if (rcStrict2 != VINF_SUCCESS) \
2228 return rcStrict2; \
2229 } while (0)
2230#else
2231# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2232#endif
2233
2234#ifndef IEM_WITH_SETJMP
2235
2236/**
2237 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2238 *
2239 * @returns Strict VBox status code.
2240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2241 * @param pu32 Where to return the opcode double word.
2242 */
2243DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2244{
2245 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2246 if (rcStrict == VINF_SUCCESS)
2247 {
2248 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2249 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2250 pVCpu->iem.s.offOpcode = offOpcode + 2;
2251 }
2252 else
2253 *pu32 = 0;
2254 return rcStrict;
2255}
2256
2257
2258/**
2259 * Fetches the next opcode word, zero extending it to a double word.
2260 *
2261 * @returns Strict VBox status code.
2262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2263 * @param pu32 Where to return the opcode double word.
2264 */
2265DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2266{
2267 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2268 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2269 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2270
2271 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2272 pVCpu->iem.s.offOpcode = offOpcode + 2;
2273 return VINF_SUCCESS;
2274}
2275
2276#endif /* !IEM_WITH_SETJMP */
2277
2278
2279/**
2280 * Fetches the next opcode word and zero extends it to a double word, returns
2281 * automatically on failure.
2282 *
2283 * @param a_pu32 Where to return the opcode double word.
2284 * @remark Implicitly references pVCpu.
2285 */
2286#ifndef IEM_WITH_SETJMP
2287# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2288 do \
2289 { \
2290 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2291 if (rcStrict2 != VINF_SUCCESS) \
2292 return rcStrict2; \
2293 } while (0)
2294#else
2295# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2296#endif
2297
2298#ifndef IEM_WITH_SETJMP
2299
2300/**
2301 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2302 *
2303 * @returns Strict VBox status code.
2304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2305 * @param pu64 Where to return the opcode quad word.
2306 */
2307DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2308{
2309 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2310 if (rcStrict == VINF_SUCCESS)
2311 {
2312 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2313 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2314 pVCpu->iem.s.offOpcode = offOpcode + 2;
2315 }
2316 else
2317 *pu64 = 0;
2318 return rcStrict;
2319}
2320
2321
2322/**
2323 * Fetches the next opcode word, zero extending it to a quad word.
2324 *
2325 * @returns Strict VBox status code.
2326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2327 * @param pu64 Where to return the opcode quad word.
2328 */
2329DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2330{
2331 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2332 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2333 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2334
2335 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2336 pVCpu->iem.s.offOpcode = offOpcode + 2;
2337 return VINF_SUCCESS;
2338}
2339
2340#endif /* !IEM_WITH_SETJMP */
2341
2342/**
2343 * Fetches the next opcode word and zero extends it to a quad word, returns
2344 * automatically on failure.
2345 *
2346 * @param a_pu64 Where to return the opcode quad word.
2347 * @remark Implicitly references pVCpu.
2348 */
2349#ifndef IEM_WITH_SETJMP
2350# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2351 do \
2352 { \
2353 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2354 if (rcStrict2 != VINF_SUCCESS) \
2355 return rcStrict2; \
2356 } while (0)
2357#else
2358# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2359#endif
2360
2361
2362#ifndef IEM_WITH_SETJMP
2363/**
2364 * Fetches the next signed word from the opcode stream.
2365 *
2366 * @returns Strict VBox status code.
2367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2368 * @param pi16 Where to return the signed word.
2369 */
2370DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2371{
2372 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2373}
2374#endif /* !IEM_WITH_SETJMP */
2375
2376
2377/**
2378 * Fetches the next signed word from the opcode stream, returning automatically
2379 * on failure.
2380 *
2381 * @param a_pi16 Where to return the signed word.
2382 * @remark Implicitly references pVCpu.
2383 */
2384#ifndef IEM_WITH_SETJMP
2385# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2386 do \
2387 { \
2388 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2389 if (rcStrict2 != VINF_SUCCESS) \
2390 return rcStrict2; \
2391 } while (0)
2392#else
2393# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2394#endif
2395
2396#ifndef IEM_WITH_SETJMP
2397
2398/**
2399 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2400 *
2401 * @returns Strict VBox status code.
2402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2403 * @param pu32 Where to return the opcode dword.
2404 */
2405DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2406{
2407 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2408 if (rcStrict == VINF_SUCCESS)
2409 {
2410 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2411# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2412 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2413# else
2414 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2415 pVCpu->iem.s.abOpcode[offOpcode + 1],
2416 pVCpu->iem.s.abOpcode[offOpcode + 2],
2417 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2418# endif
2419 pVCpu->iem.s.offOpcode = offOpcode + 4;
2420 }
2421 else
2422 *pu32 = 0;
2423 return rcStrict;
2424}
2425
2426
2427/**
2428 * Fetches the next opcode dword.
2429 *
2430 * @returns Strict VBox status code.
2431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2432 * @param pu32 Where to return the opcode double word.
2433 */
2434DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2435{
2436 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2437 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2438 {
2439 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2440# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2441 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2442# else
2443 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2444 pVCpu->iem.s.abOpcode[offOpcode + 1],
2445 pVCpu->iem.s.abOpcode[offOpcode + 2],
2446 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2447# endif
2448 return VINF_SUCCESS;
2449 }
2450 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2451}
2452
2453 #else /* IEM_WITH_SETJMP */
2454
2455/**
2456 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2457 *
2458 * @returns The opcode dword.
2459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2460 */
2461DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2462{
2463# ifdef IEM_WITH_CODE_TLB
2464 uint32_t u32;
2465 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2466 return u32;
2467# else
2468 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2469 if (rcStrict == VINF_SUCCESS)
2470 {
2471 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2472 pVCpu->iem.s.offOpcode = offOpcode + 4;
2473# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2474 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2475# else
2476 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2477 pVCpu->iem.s.abOpcode[offOpcode + 1],
2478 pVCpu->iem.s.abOpcode[offOpcode + 2],
2479 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2480# endif
2481 }
2482 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2483# endif
2484}
2485
2486
2487/**
2488 * Fetches the next opcode dword, longjmp on error.
2489 *
2490 * @returns The opcode dword.
2491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2492 */
2493DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2494{
2495# ifdef IEM_WITH_CODE_TLB
2496 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2497 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2498 if (RT_LIKELY( pbBuf != NULL
2499 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2500 {
2501 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2502# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2503 return *(uint32_t const *)&pbBuf[offBuf];
2504# else
2505 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2506 pbBuf[offBuf + 1],
2507 pbBuf[offBuf + 2],
2508 pbBuf[offBuf + 3]);
2509# endif
2510 }
2511# else
2512 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2513 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2514 {
2515 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2516# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2517 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2518# else
2519 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2520 pVCpu->iem.s.abOpcode[offOpcode + 1],
2521 pVCpu->iem.s.abOpcode[offOpcode + 2],
2522 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2523# endif
2524 }
2525# endif
2526 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2527}
2528
2529 #endif /* IEM_WITH_SETJMP */
2530
2531
2532/**
2533 * Fetches the next opcode dword, returns automatically on failure.
2534 *
2535 * @param a_pu32 Where to return the opcode dword.
2536 * @remark Implicitly references pVCpu.
2537 */
2538#ifndef IEM_WITH_SETJMP
2539# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2540 do \
2541 { \
2542 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2543 if (rcStrict2 != VINF_SUCCESS) \
2544 return rcStrict2; \
2545 } while (0)
2546#else
2547# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2548#endif
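
/*
 * Illustrative note: which of the fixed-width fetch macros a decoder picks
 * follows the effective operand size.  The imm32 form of an instruction would
 * use IEM_OPCODE_GET_NEXT_U32, while its 16-bit form would use
 * IEM_OPCODE_GET_NEXT_U16 (hypothetical decoder snippet, not from this file).
 */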
2549
2550#ifndef IEM_WITH_SETJMP
2551
2552/**
2553 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2554 *
2555 * @returns Strict VBox status code.
2556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2557 * @param pu64 Where to return the opcode quad word.
2558 */
2559DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2560{
2561 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2562 if (rcStrict == VINF_SUCCESS)
2563 {
2564 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2565 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2566 pVCpu->iem.s.abOpcode[offOpcode + 1],
2567 pVCpu->iem.s.abOpcode[offOpcode + 2],
2568 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2569 pVCpu->iem.s.offOpcode = offOpcode + 4;
2570 }
2571 else
2572 *pu64 = 0;
2573 return rcStrict;
2574}
2575
2576
2577/**
2578 * Fetches the next opcode dword, zero extending it to a quad word.
2579 *
2580 * @returns Strict VBox status code.
2581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2582 * @param pu64 Where to return the opcode quad word.
2583 */
2584DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2585{
2586 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2587 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2588 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2589
2590 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2591 pVCpu->iem.s.abOpcode[offOpcode + 1],
2592 pVCpu->iem.s.abOpcode[offOpcode + 2],
2593 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2594 pVCpu->iem.s.offOpcode = offOpcode + 4;
2595 return VINF_SUCCESS;
2596}
2597
2598#endif /* !IEM_WITH_SETJMP */
2599
2600
2601/**
2602 * Fetches the next opcode dword and zero extends it to a quad word, returns
2603 * automatically on failure.
2604 *
2605 * @param a_pu64 Where to return the opcode quad word.
2606 * @remark Implicitly references pVCpu.
2607 */
2608#ifndef IEM_WITH_SETJMP
2609# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2610 do \
2611 { \
2612 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2613 if (rcStrict2 != VINF_SUCCESS) \
2614 return rcStrict2; \
2615 } while (0)
2616#else
2617# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2618#endif
2619
2620
2621#ifndef IEM_WITH_SETJMP
2622/**
2623 * Fetches the next signed double word from the opcode stream.
2624 *
2625 * @returns Strict VBox status code.
2626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2627 * @param pi32 Where to return the signed double word.
2628 */
2629DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2630{
2631 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2632}
2633#endif
2634
2635/**
2636 * Fetches the next signed double word from the opcode stream, returning
2637 * automatically on failure.
2638 *
2639 * @param a_pi32 Where to return the signed double word.
2640 * @remark Implicitly references pVCpu.
2641 */
2642#ifndef IEM_WITH_SETJMP
2643# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2644 do \
2645 { \
2646 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2647 if (rcStrict2 != VINF_SUCCESS) \
2648 return rcStrict2; \
2649 } while (0)
2650#else
2651# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2652#endif
2653
2654#ifndef IEM_WITH_SETJMP
2655
2656/**
2657 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2658 *
2659 * @returns Strict VBox status code.
2660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2661 * @param pu64 Where to return the opcode qword.
2662 */
2663DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2664{
2665 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2666 if (rcStrict == VINF_SUCCESS)
2667 {
2668 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2669 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2670 pVCpu->iem.s.abOpcode[offOpcode + 1],
2671 pVCpu->iem.s.abOpcode[offOpcode + 2],
2672 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2673 pVCpu->iem.s.offOpcode = offOpcode + 4;
2674 }
2675 else
2676 *pu64 = 0;
2677 return rcStrict;
2678}
2679
2680
2681/**
2682 * Fetches the next opcode dword, sign extending it into a quad word.
2683 *
2684 * @returns Strict VBox status code.
2685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2686 * @param pu64 Where to return the opcode quad word.
2687 */
2688DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2689{
2690 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2691 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2692 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2693
2694 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2695 pVCpu->iem.s.abOpcode[offOpcode + 1],
2696 pVCpu->iem.s.abOpcode[offOpcode + 2],
2697 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2698 *pu64 = i32;
2699 pVCpu->iem.s.offOpcode = offOpcode + 4;
2700 return VINF_SUCCESS;
2701}
2702
2703#endif /* !IEM_WITH_SETJMP */
2704
2705
2706/**
2707 * Fetches the next opcode double word and sign extends it to a quad word,
2708 * returns automatically on failure.
2709 *
2710 * @param a_pu64 Where to return the opcode quad word.
2711 * @remark Implicitly references pVCpu.
2712 */
2713#ifndef IEM_WITH_SETJMP
2714# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2715 do \
2716 { \
2717 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2718 if (rcStrict2 != VINF_SUCCESS) \
2719 return rcStrict2; \
2720 } while (0)
2721#else
2722# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2723#endif
2724
2725#ifndef IEM_WITH_SETJMP
2726
2727/**
2728 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
2729 *
2730 * @returns Strict VBox status code.
2731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2732 * @param pu64 Where to return the opcode qword.
2733 */
2734DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2735{
2736 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2737 if (rcStrict == VINF_SUCCESS)
2738 {
2739 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2740# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2741 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2742# else
2743 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2744 pVCpu->iem.s.abOpcode[offOpcode + 1],
2745 pVCpu->iem.s.abOpcode[offOpcode + 2],
2746 pVCpu->iem.s.abOpcode[offOpcode + 3],
2747 pVCpu->iem.s.abOpcode[offOpcode + 4],
2748 pVCpu->iem.s.abOpcode[offOpcode + 5],
2749 pVCpu->iem.s.abOpcode[offOpcode + 6],
2750 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2751# endif
2752 pVCpu->iem.s.offOpcode = offOpcode + 8;
2753 }
2754 else
2755 *pu64 = 0;
2756 return rcStrict;
2757}
2758
2759
2760/**
2761 * Fetches the next opcode qword.
2762 *
2763 * @returns Strict VBox status code.
2764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2765 * @param pu64 Where to return the opcode qword.
2766 */
2767DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
2768{
2769 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2770 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2771 {
2772# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2773 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2774# else
2775 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2776 pVCpu->iem.s.abOpcode[offOpcode + 1],
2777 pVCpu->iem.s.abOpcode[offOpcode + 2],
2778 pVCpu->iem.s.abOpcode[offOpcode + 3],
2779 pVCpu->iem.s.abOpcode[offOpcode + 4],
2780 pVCpu->iem.s.abOpcode[offOpcode + 5],
2781 pVCpu->iem.s.abOpcode[offOpcode + 6],
2782 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2783# endif
2784 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2785 return VINF_SUCCESS;
2786 }
2787 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
2788}
2789
2790#else /* IEM_WITH_SETJMP */
2791
2792/**
2793 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
2794 *
2795 * @returns The opcode qword.
2796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2797 */
2798DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
2799{
2800# ifdef IEM_WITH_CODE_TLB
2801 uint64_t u64;
2802 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
2803 return u64;
2804# else
2805 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
2806 if (rcStrict == VINF_SUCCESS)
2807 {
2808 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2809 pVCpu->iem.s.offOpcode = offOpcode + 8;
2810# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2811 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2812# else
2813 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2814 pVCpu->iem.s.abOpcode[offOpcode + 1],
2815 pVCpu->iem.s.abOpcode[offOpcode + 2],
2816 pVCpu->iem.s.abOpcode[offOpcode + 3],
2817 pVCpu->iem.s.abOpcode[offOpcode + 4],
2818 pVCpu->iem.s.abOpcode[offOpcode + 5],
2819 pVCpu->iem.s.abOpcode[offOpcode + 6],
2820 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2821# endif
2822 }
2823 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2824# endif
2825}
2826
2827
2828/**
2829 * Fetches the next opcode qword, longjmp on error.
2830 *
2831 * @returns The opcode qword.
2832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2833 */
2834DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
2835{
2836# ifdef IEM_WITH_CODE_TLB
2837 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2838 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2839 if (RT_LIKELY( pbBuf != NULL
2840 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
2841 {
2842 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
2843# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2844 return *(uint64_t const *)&pbBuf[offBuf];
2845# else
2846 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
2847 pbBuf[offBuf + 1],
2848 pbBuf[offBuf + 2],
2849 pbBuf[offBuf + 3],
2850 pbBuf[offBuf + 4],
2851 pbBuf[offBuf + 5],
2852 pbBuf[offBuf + 6],
2853 pbBuf[offBuf + 7]);
2854# endif
2855 }
2856# else
2857 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2858 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
2859 {
2860 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
2861# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2862 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2863# else
2864 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2865 pVCpu->iem.s.abOpcode[offOpcode + 1],
2866 pVCpu->iem.s.abOpcode[offOpcode + 2],
2867 pVCpu->iem.s.abOpcode[offOpcode + 3],
2868 pVCpu->iem.s.abOpcode[offOpcode + 4],
2869 pVCpu->iem.s.abOpcode[offOpcode + 5],
2870 pVCpu->iem.s.abOpcode[offOpcode + 6],
2871 pVCpu->iem.s.abOpcode[offOpcode + 7]);
2872# endif
2873 }
2874# endif
2875 return iemOpcodeGetNextU64SlowJmp(pVCpu);
2876}
2877
2878#endif /* IEM_WITH_SETJMP */
2879
2880/**
2881 * Fetches the next opcode quad word, returns automatically on failure.
2882 *
2883 * @param a_pu64 Where to return the opcode quad word.
2884 * @remark Implicitly references pVCpu.
2885 */
2886#ifndef IEM_WITH_SETJMP
2887# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
2888 do \
2889 { \
2890 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
2891 if (rcStrict2 != VINF_SUCCESS) \
2892 return rcStrict2; \
2893 } while (0)
2894#else
2895# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
2896#endif
2897
2898
2899/** @name Misc Worker Functions.
2900 * @{
2901 */
2902
2903
2904/**
2905 * Validates a new SS segment.
2906 *
2907 * @returns VBox strict status code.
2908 * @param pVCpu The cross context virtual CPU structure of the
2909 * calling thread.
2910 * @param pCtx The CPU context.
2911 * @param NewSS The new SS selector.
2912 * @param uCpl The CPL to load the stack for.
2913 * @param pDesc Where to return the descriptor.
2914 */
2915IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
2916{
2917 NOREF(pCtx);
2918
2919 /* Null selectors are not allowed (we're not called for dispatching
2920 interrupts with SS=0 in long mode). */
2921 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
2922 {
2923 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
2924 return iemRaiseTaskSwitchFault0(pVCpu);
2925 }
2926
2927 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
2928 if ((NewSS & X86_SEL_RPL) != uCpl)
2929 {
2930 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
2931 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2932 }
2933
2934 /*
2935 * Read the descriptor.
2936 */
2937 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
2938 if (rcStrict != VINF_SUCCESS)
2939 return rcStrict;
2940
2941 /*
2942 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
2943 */
2944 if (!pDesc->Legacy.Gen.u1DescType)
2945 {
2946 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2947 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2948 }
2949
2950 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2951 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2952 {
2953 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
2954 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2955 }
2956 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
2957 {
2958 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
2959 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
2960 }
2961
2962 /* Is it there? */
2963 /** @todo testcase: Is this checked before the canonical / limit check below? */
2964 if (!pDesc->Legacy.Gen.u1Present)
2965 {
2966 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
2967 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
2968 }
2969
2970 return VINF_SUCCESS;
2971}
2972
2973
2974/**
2975 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
2976 * not.
2977 *
2978 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2979 * @param a_pCtx The CPU context.
2980 */
2981#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2982# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
2983 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
2984 ? (a_pCtx)->eflags.u \
2985 : CPUMRawGetEFlags(a_pVCpu) )
2986#else
2987# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
2988 ( (a_pCtx)->eflags.u )
2989#endif
2990
2991/**
2992 * Updates the EFLAGS in the correct manner wrt. PATM.
2993 *
2994 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2995 * @param a_pCtx The CPU context.
2996 * @param a_fEfl The new EFLAGS.
2997 */
2998#ifdef VBOX_WITH_RAW_MODE_NOT_R0
2999# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3000 do { \
3001 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3002 (a_pCtx)->eflags.u = (a_fEfl); \
3003 else \
3004 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3005 } while (0)
3006#else
3007# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3008 do { \
3009 (a_pCtx)->eflags.u = (a_fEfl); \
3010 } while (0)
3011#endif
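
/*
 * Illustrative read-modify-write sketch: the getter and setter above are meant
 * to be used as a pair so any PATM-shadowed bits stay consistent in raw mode,
 * e.g.:
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */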
3012
3013
3014/** @} */
3015
3016/** @name Raising Exceptions.
3017 *
3018 * @{
3019 */
3020
3021/** @name IEM_XCPT_FLAGS_XXX - flags for iemRaiseXcptOrInt.
3022 * @{ */
3023/** CPU exception. */
3024#define IEM_XCPT_FLAGS_T_CPU_XCPT RT_BIT_32(0)
3025/** External interrupt (from PIC, APIC, whatever). */
3026#define IEM_XCPT_FLAGS_T_EXT_INT RT_BIT_32(1)
3027/** Software interrupt (int or into, not bound).
3028 * Returns to the following instruction */
3029#define IEM_XCPT_FLAGS_T_SOFT_INT RT_BIT_32(2)
3030/** Takes an error code. */
3031#define IEM_XCPT_FLAGS_ERR RT_BIT_32(3)
3032/** Takes a CR2. */
3033#define IEM_XCPT_FLAGS_CR2 RT_BIT_32(4)
3034/** Generated by the breakpoint instruction. */
3035#define IEM_XCPT_FLAGS_BP_INSTR RT_BIT_32(5)
3036/** Generated by a DRx instruction breakpoint and RF should be cleared. */
3037#define IEM_XCPT_FLAGS_DRx_INSTR_BP RT_BIT_32(6)
3038/** @} */
3039
3040
3041/**
3042 * Loads the specified stack far pointer from the TSS.
3043 *
3044 * @returns VBox strict status code.
3045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3046 * @param pCtx The CPU context.
3047 * @param uCpl The CPL to load the stack for.
3048 * @param pSelSS Where to return the new stack segment.
3049 * @param puEsp Where to return the new stack pointer.
3050 */
3051IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3052 PRTSEL pSelSS, uint32_t *puEsp)
3053{
3054 VBOXSTRICTRC rcStrict;
3055 Assert(uCpl < 4);
3056
3057 switch (pCtx->tr.Attr.n.u4Type)
3058 {
3059 /*
3060 * 16-bit TSS (X86TSS16).
3061 */
3062 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed();
3063 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3064 {
3065 uint32_t off = uCpl * 4 + 2;
3066 if (off + 4 <= pCtx->tr.u32Limit)
3067 {
3068 /** @todo check actual access pattern here. */
3069 uint32_t u32Tmp = 0; /* gcc maybe... */
3070 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3071 if (rcStrict == VINF_SUCCESS)
3072 {
3073 *puEsp = RT_LOWORD(u32Tmp);
3074 *pSelSS = RT_HIWORD(u32Tmp);
3075 return VINF_SUCCESS;
3076 }
3077 }
3078 else
3079 {
3080 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3081 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3082 }
3083 break;
3084 }
3085
3086 /*
3087 * 32-bit TSS (X86TSS32).
3088 */
3089 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed();
3090 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3091 {
3092 uint32_t off = uCpl * 8 + 4;
3093 if (off + 7 <= pCtx->tr.u32Limit)
3094 {
3095/** @todo check actual access pattern here. */
3096 uint64_t u64Tmp;
3097 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3098 if (rcStrict == VINF_SUCCESS)
3099 {
3100 *puEsp = u64Tmp & UINT32_MAX;
3101 *pSelSS = (RTSEL)(u64Tmp >> 32);
3102 return VINF_SUCCESS;
3103 }
3104 }
3105 else
3106 {
3107 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3108 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3109 }
3110 break;
3111 }
3112
3113 default:
3114 AssertFailed();
3115 rcStrict = VERR_IEM_IPE_4;
3116 break;
3117 }
3118
3119 *puEsp = 0; /* make gcc happy */
3120 *pSelSS = 0; /* make gcc happy */
3121 return rcStrict;
3122}
3123
3124
3125/**
3126 * Loads the specified stack pointer from the 64-bit TSS.
3127 *
3128 * @returns VBox strict status code.
3129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3130 * @param pCtx The CPU context.
3131 * @param uCpl The CPL to load the stack for.
3132 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3133 * @param puRsp Where to return the new stack pointer.
3134 */
3135IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3136{
3137 Assert(uCpl < 4);
3138 Assert(uIst < 8);
3139 *puRsp = 0; /* make gcc happy */
3140
3141 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3142
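    /* RSP0..RSP2 and IST1..IST7 are consecutive 64-bit fields in X86TSS64, so
       the slot offset is a simple multiply-add; e.g. uIst=0, uCpl=2 yields
       RT_OFFSETOF(X86TSS64, rsp2). */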
3143 uint32_t off;
3144 if (uIst)
3145 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3146 else
3147 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3148 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3149 {
3150 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3151 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3152 }
3153
3154 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3155}
3156
3157
3158/**
3159 * Adjust the CPU state according to the exception being raised.
3160 *
3161 * @param pCtx The CPU context.
3162 * @param u8Vector The exception that has been raised.
3163 */
3164DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3165{
3166 switch (u8Vector)
3167 {
3168 case X86_XCPT_DB:
3169 pCtx->dr[7] &= ~X86_DR7_GD;
3170 break;
3171 /** @todo Read the AMD and Intel exception reference... */
3172 }
3173}
3174
3175
3176/**
3177 * Implements exceptions and interrupts for real mode.
3178 *
3179 * @returns VBox strict status code.
3180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3181 * @param pCtx The CPU context.
3182 * @param cbInstr The number of bytes to offset rIP by in the return
3183 * address.
3184 * @param u8Vector The interrupt / exception vector number.
3185 * @param fFlags The flags.
3186 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3187 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3188 */
3189IEM_STATIC VBOXSTRICTRC
3190iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3191 PCPUMCTX pCtx,
3192 uint8_t cbInstr,
3193 uint8_t u8Vector,
3194 uint32_t fFlags,
3195 uint16_t uErr,
3196 uint64_t uCr2)
3197{
3198 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3199 NOREF(uErr); NOREF(uCr2);
3200
3201 /*
3202 * Read the IDT entry.
3203 */
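    /* Each real-mode IVT entry is a 4-byte far pointer (offset:segment), hence
       the 4 * u8Vector scaling in the bounds check and the fetch below. */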
3204 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3205 {
3206 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3207 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3208 }
3209 RTFAR16 Idte;
3210 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3211 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3212 return rcStrict;
3213
3214 /*
3215 * Push the stack frame.
3216 */
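    /* The 3-word real-mode frame is, from the lowest address up: IP, CS, FLAGS.  That is
       why 6 bytes are reserved below and pu16Frame[0..2] are filled in that order. */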
3217 uint16_t *pu16Frame;
3218 uint64_t uNewRsp;
3219 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3220 if (rcStrict != VINF_SUCCESS)
3221 return rcStrict;
3222
3223 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3224#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3225 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3226 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3227 fEfl |= UINT16_C(0xf000);
3228#endif
3229 pu16Frame[2] = (uint16_t)fEfl;
3230 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3231 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3232 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3233 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3234 return rcStrict;
3235
3236 /*
3237 * Load the vector address into cs:ip and make exception specific state
3238 * adjustments.
3239 */
3240 pCtx->cs.Sel = Idte.sel;
3241 pCtx->cs.ValidSel = Idte.sel;
3242 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3243 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3244 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3245 pCtx->rip = Idte.off;
3246 fEfl &= ~X86_EFL_IF;
3247 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3248
3249 /** @todo do we actually do this in real mode? */
3250 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3251 iemRaiseXcptAdjustState(pCtx, u8Vector);
3252
3253 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3254}
3255
3256
3257/**
3258 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3259 *
3260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3261 * @param pSReg Pointer to the segment register.
3262 */
3263IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3264{
3265 pSReg->Sel = 0;
3266 pSReg->ValidSel = 0;
3267 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3268 {
3269        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3270 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3271 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3272 }
3273 else
3274 {
3275 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3276 /** @todo check this on AMD-V */
3277 pSReg->u64Base = 0;
3278 pSReg->u32Limit = 0;
3279 }
3280}
3281
3282
3283/**
3284 * Loads a segment selector during a task switch in V8086 mode.
3285 *
3286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3287 * @param pSReg Pointer to the segment register.
3288 * @param uSel The selector value to load.
3289 */
3290IEM_STATIC void iemHlpLoadSelectorInV86Mode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3291{
3292 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3293 pSReg->Sel = uSel;
3294 pSReg->ValidSel = uSel;
3295 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3296 pSReg->u64Base = uSel << 4;
3297 pSReg->u32Limit = 0xffff;
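    /* 0xf3 = present, DPL=3, non-system (S=1), type 3: read/write data, accessed. */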
3298 pSReg->Attr.u = 0xf3;
3299}
3300
3301
3302/**
3303 * Loads a NULL data selector into a selector register, both the hidden and
3304 * visible parts, in protected mode.
3305 *
3306 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3307 * @param pSReg Pointer to the segment register.
3308 * @param uRpl The RPL.
3309 */
3310IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3311{
3312 /** @todo Testcase: write a testcase checking what happends when loading a NULL
3313 * data selector in protected mode. */
3314 pSReg->Sel = uRpl;
3315 pSReg->ValidSel = uRpl;
3316 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3317 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3318 {
3319 /* VT-x (Intel 3960x) observed doing something like this. */
3320 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3321 pSReg->u32Limit = UINT32_MAX;
3322 pSReg->u64Base = 0;
3323 }
3324 else
3325 {
3326 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3327 pSReg->u32Limit = 0;
3328 pSReg->u64Base = 0;
3329 }
3330}
3331
3332
3333/**
3334 * Loads a segment selector during a task switch in protected mode.
3335 *
3336 * In this task switch scenario, we would throw \#TS exceptions rather than
3337 * \#GPs.
3338 *
3339 * @returns VBox strict status code.
3340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3341 * @param pSReg Pointer to the segment register.
3342 * @param uSel The new selector value.
3343 *
3344 * @remarks This does _not_ handle CS or SS.
3345 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3346 */
3347IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3348{
3349 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3350
3351 /* Null data selector. */
3352 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3353 {
3354 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3355 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3356 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3357 return VINF_SUCCESS;
3358 }
3359
3360 /* Fetch the descriptor. */
3361 IEMSELDESC Desc;
3362 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3363 if (rcStrict != VINF_SUCCESS)
3364 {
3365 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3366 VBOXSTRICTRC_VAL(rcStrict)));
3367 return rcStrict;
3368 }
3369
3370 /* Must be a data segment or readable code segment. */
3371 if ( !Desc.Legacy.Gen.u1DescType
3372 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3373 {
3374 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3375 Desc.Legacy.Gen.u4Type));
3376 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3377 }
3378
3379 /* Check privileges for data segments and non-conforming code segments. */
3380 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3381 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3382 {
3383 /* The RPL and the new CPL must be less than or equal to the DPL. */
3384 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3385 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3386 {
3387 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3388 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3389 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3390 }
3391 }
3392
3393 /* Is it there? */
3394 if (!Desc.Legacy.Gen.u1Present)
3395 {
3396 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3397 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3398 }
3399
3400 /* The base and limit. */
3401 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3402 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3403
3404 /*
3405 * Ok, everything checked out fine. Now set the accessed bit before
3406 * committing the result into the registers.
3407 */
3408 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3409 {
3410 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3411 if (rcStrict != VINF_SUCCESS)
3412 return rcStrict;
3413 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3414 }
3415
3416 /* Commit */
3417 pSReg->Sel = uSel;
3418 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3419 pSReg->u32Limit = cbLimit;
3420 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3421 pSReg->ValidSel = uSel;
3422 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3423 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3424 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3425
3426 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3427 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3428 return VINF_SUCCESS;
3429}
3430
3431
3432/**
3433 * Performs a task switch.
3434 *
3435 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3436 * caller is responsible for performing the necessary checks (like DPL, TSS
3437 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3438 * reference for JMP, CALL, IRET.
3439 *
3440 * If the task switch is due to a software interrupt or hardware exception,
3441 * the caller is responsible for validating the TSS selector and descriptor. See
3442 * Intel Instruction reference for INT n.
3443 *
3444 * @returns VBox strict status code.
3445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3446 * @param pCtx The CPU context.
3447 * @param enmTaskSwitch What caused this task switch.
3448 * @param uNextEip The EIP effective after the task switch.
3449 * @param fFlags The flags.
3450 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3451 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3452 * @param SelTSS The TSS selector of the new task.
3453 * @param pNewDescTSS Pointer to the new TSS descriptor.
3454 */
3455IEM_STATIC VBOXSTRICTRC
3456iemTaskSwitch(PVMCPU pVCpu,
3457 PCPUMCTX pCtx,
3458 IEMTASKSWITCH enmTaskSwitch,
3459 uint32_t uNextEip,
3460 uint32_t fFlags,
3461 uint16_t uErr,
3462 uint64_t uCr2,
3463 RTSEL SelTSS,
3464 PIEMSELDESC pNewDescTSS)
3465{
3466 Assert(!IEM_IS_REAL_MODE(pVCpu));
3467 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3468
3469 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3470 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3471 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3472 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3473 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3474
3475 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3476 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3477
3478 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3479 fIsNewTSS386, pCtx->eip, uNextEip));
3480
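    /*
     * Rough outline of what follows (cf. Intel spec. 7.3 "Task Switching"):
     *   1. Limit checks on the new and the current TSS.
     *   2. Clear the old busy bit / NT flag for JMP and IRET switches.
     *   3. Save the outgoing dynamic state into the current TSS.
     *   4. Read the incoming state from the new TSS, set its busy bit (unless IRET).
     *   5. Load TR, set CR0.TS, switch CR3 and LDTR.
     *   6. Load SS/CS/data segments, push any error code, check EIP against the CS limit.
     */
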
3481 /* Update CR2 in case it's a page-fault. */
3482 /** @todo This should probably be done much earlier in IEM/PGM. See
3483 * @bugref{5653#c49}. */
3484 if (fFlags & IEM_XCPT_FLAGS_CR2)
3485 pCtx->cr2 = uCr2;
3486
3487 /*
3488 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3489 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3490 */
3491 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3492 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3493 if (uNewTSSLimit < uNewTSSLimitMin)
3494 {
3495 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3496 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3497 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3498 }
3499
3500 /*
3501 * Check the current TSS limit. The last written byte to the current TSS during the
3502 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3503 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3504 *
3505     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3506 * end up with smaller than "legal" TSS limits.
3507 */
3508 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3509 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3510 if (uCurTSSLimit < uCurTSSLimitMin)
3511 {
3512 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3513 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3514 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3515 }
3516
3517 /*
3518 * Verify that the new TSS can be accessed and map it. Map only the required contents
3519 * and not the entire TSS.
3520 */
3521 void *pvNewTSS;
3522 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3523 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3524 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3525 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3526 * not perform correct translation if this happens. See Intel spec. 7.2.1
3527 * "Task-State Segment" */
3528 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3529 if (rcStrict != VINF_SUCCESS)
3530 {
3531 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3532 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3533 return rcStrict;
3534 }
3535
3536 /*
3537 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3538 */
3539 uint32_t u32EFlags = pCtx->eflags.u32;
3540 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3541 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3542 {
3543 PX86DESC pDescCurTSS;
3544 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3545 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3546 if (rcStrict != VINF_SUCCESS)
3547 {
3548 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3549 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3550 return rcStrict;
3551 }
3552
3553 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3554 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3555 if (rcStrict != VINF_SUCCESS)
3556 {
3557 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3558 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3559 return rcStrict;
3560 }
3561
3562 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3563 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3564 {
3565 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3566 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3567 u32EFlags &= ~X86_EFL_NT;
3568 }
3569 }
3570
3571 /*
3572 * Save the CPU state into the current TSS.
3573 */
3574 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3575 if (GCPtrNewTSS == GCPtrCurTSS)
3576 {
3577 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
3578 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
3579 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
3580 }
3581 if (fIsNewTSS386)
3582 {
3583 /*
3584 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
3585 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3586 */
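        /* Only the dynamic fields (EIP thru the segment selectors) are written back on a
           task switch; static fields such as CR3, the LDT selector and the I/O map base are
           left untouched, which is why the mapping below covers just [eip, selLdt). */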
3587 void *pvCurTSS32;
3588 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
3589 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
3590 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
3591 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3592 if (rcStrict != VINF_SUCCESS)
3593 {
3594 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3595 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3596 return rcStrict;
3597 }
3598
3599        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. TSS offsets [offCurTSS..offCurTSS + cbCurTSS). */
3600 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
3601 pCurTSS32->eip = uNextEip;
3602 pCurTSS32->eflags = u32EFlags;
3603 pCurTSS32->eax = pCtx->eax;
3604 pCurTSS32->ecx = pCtx->ecx;
3605 pCurTSS32->edx = pCtx->edx;
3606 pCurTSS32->ebx = pCtx->ebx;
3607 pCurTSS32->esp = pCtx->esp;
3608 pCurTSS32->ebp = pCtx->ebp;
3609 pCurTSS32->esi = pCtx->esi;
3610 pCurTSS32->edi = pCtx->edi;
3611 pCurTSS32->es = pCtx->es.Sel;
3612 pCurTSS32->cs = pCtx->cs.Sel;
3613 pCurTSS32->ss = pCtx->ss.Sel;
3614 pCurTSS32->ds = pCtx->ds.Sel;
3615 pCurTSS32->fs = pCtx->fs.Sel;
3616 pCurTSS32->gs = pCtx->gs.Sel;
3617
3618 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
3619 if (rcStrict != VINF_SUCCESS)
3620 {
3621 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3622 VBOXSTRICTRC_VAL(rcStrict)));
3623 return rcStrict;
3624 }
3625 }
3626 else
3627 {
3628 /*
3629 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
3630 */
3631 void *pvCurTSS16;
3632 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
3633 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
3634 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
3635 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
3636 if (rcStrict != VINF_SUCCESS)
3637 {
3638 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
3639 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
3640 return rcStrict;
3641 }
3642
3643        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. TSS offsets [offCurTSS..offCurTSS + cbCurTSS). */
3644 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
3645 pCurTSS16->ip = uNextEip;
3646 pCurTSS16->flags = u32EFlags;
3647 pCurTSS16->ax = pCtx->ax;
3648 pCurTSS16->cx = pCtx->cx;
3649 pCurTSS16->dx = pCtx->dx;
3650 pCurTSS16->bx = pCtx->bx;
3651 pCurTSS16->sp = pCtx->sp;
3652 pCurTSS16->bp = pCtx->bp;
3653 pCurTSS16->si = pCtx->si;
3654 pCurTSS16->di = pCtx->di;
3655 pCurTSS16->es = pCtx->es.Sel;
3656 pCurTSS16->cs = pCtx->cs.Sel;
3657 pCurTSS16->ss = pCtx->ss.Sel;
3658 pCurTSS16->ds = pCtx->ds.Sel;
3659
3660 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
3661 if (rcStrict != VINF_SUCCESS)
3662 {
3663 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
3664 VBOXSTRICTRC_VAL(rcStrict)));
3665 return rcStrict;
3666 }
3667 }
3668
3669 /*
3670 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
3671 */
3672 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3673 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3674 {
3675 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
3676 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
3677 pNewTSS->selPrev = pCtx->tr.Sel;
3678 }
3679
3680 /*
3681 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
3682 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
3683 */
3684 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
3685 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
3686 bool fNewDebugTrap;
3687 if (fIsNewTSS386)
3688 {
3689 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
3690 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
3691 uNewEip = pNewTSS32->eip;
3692 uNewEflags = pNewTSS32->eflags;
3693 uNewEax = pNewTSS32->eax;
3694 uNewEcx = pNewTSS32->ecx;
3695 uNewEdx = pNewTSS32->edx;
3696 uNewEbx = pNewTSS32->ebx;
3697 uNewEsp = pNewTSS32->esp;
3698 uNewEbp = pNewTSS32->ebp;
3699 uNewEsi = pNewTSS32->esi;
3700 uNewEdi = pNewTSS32->edi;
3701 uNewES = pNewTSS32->es;
3702 uNewCS = pNewTSS32->cs;
3703 uNewSS = pNewTSS32->ss;
3704 uNewDS = pNewTSS32->ds;
3705 uNewFS = pNewTSS32->fs;
3706 uNewGS = pNewTSS32->gs;
3707 uNewLdt = pNewTSS32->selLdt;
3708 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
3709 }
3710 else
3711 {
3712 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
3713 uNewCr3 = 0;
3714 uNewEip = pNewTSS16->ip;
3715 uNewEflags = pNewTSS16->flags;
3716 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
3717 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
3718 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
3719 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
3720 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
3721 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
3722 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
3723 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
3724 uNewES = pNewTSS16->es;
3725 uNewCS = pNewTSS16->cs;
3726 uNewSS = pNewTSS16->ss;
3727 uNewDS = pNewTSS16->ds;
3728 uNewFS = 0;
3729 uNewGS = 0;
3730 uNewLdt = pNewTSS16->selLdt;
3731 fNewDebugTrap = false;
3732 }
3733
3734 if (GCPtrNewTSS == GCPtrCurTSS)
3735 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
3736 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
3737
3738 /*
3739 * We're done accessing the new TSS.
3740 */
3741 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
3742 if (rcStrict != VINF_SUCCESS)
3743 {
3744 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
3745 return rcStrict;
3746 }
3747
3748 /*
3749 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
3750 */
3751 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
3752 {
3753 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
3754 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3755 if (rcStrict != VINF_SUCCESS)
3756 {
3757 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3758 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3759 return rcStrict;
3760 }
3761
3762 /* Check that the descriptor indicates the new TSS is available (not busy). */
3763 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3764 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
3765 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
3766
3767 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3768 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
3769 if (rcStrict != VINF_SUCCESS)
3770 {
3771 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3772 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3773 return rcStrict;
3774 }
3775 }
3776
3777 /*
3778     * From this point on, we're technically in the new task.  Exceptions raised from here
3779     * on are deferred until the task switch completes and are delivered before the first instruction of the new task executes.
3780 */
3781 pCtx->tr.Sel = SelTSS;
3782 pCtx->tr.ValidSel = SelTSS;
3783 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3784 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
3785 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
3786 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
3787 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
3788
3789 /* Set the busy bit in TR. */
3790 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3791 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
3792 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
3793 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
3794 {
3795 uNewEflags |= X86_EFL_NT;
3796 }
3797
3798 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
3799 pCtx->cr0 |= X86_CR0_TS;
3800 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
3801
3802 pCtx->eip = uNewEip;
3803 pCtx->eax = uNewEax;
3804 pCtx->ecx = uNewEcx;
3805 pCtx->edx = uNewEdx;
3806 pCtx->ebx = uNewEbx;
3807 pCtx->esp = uNewEsp;
3808 pCtx->ebp = uNewEbp;
3809 pCtx->esi = uNewEsi;
3810 pCtx->edi = uNewEdi;
3811
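    /* Sanitize the flags image loaded from the TSS: keep only the implemented bits and
       force the reserved always-one bit (bit 1). */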
3812 uNewEflags &= X86_EFL_LIVE_MASK;
3813 uNewEflags |= X86_EFL_RA1_MASK;
3814 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
3815
3816 /*
3817 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
3818 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
3819 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
3820 */
3821 pCtx->es.Sel = uNewES;
3822 pCtx->es.Attr.u &= ~X86DESCATTR_P;
3823
3824 pCtx->cs.Sel = uNewCS;
3825 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
3826
3827 pCtx->ss.Sel = uNewSS;
3828 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
3829
3830 pCtx->ds.Sel = uNewDS;
3831 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
3832
3833 pCtx->fs.Sel = uNewFS;
3834 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
3835
3836 pCtx->gs.Sel = uNewGS;
3837 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
3838 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3839
3840 pCtx->ldtr.Sel = uNewLdt;
3841 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
3842 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
3843 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
3844
3845 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3846 {
3847 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
3848 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
3849 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
3850 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
3851 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
3852 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
3853 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
3854 }
3855
3856 /*
3857 * Switch CR3 for the new task.
3858 */
3859 if ( fIsNewTSS386
3860 && (pCtx->cr0 & X86_CR0_PG))
3861 {
3862 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
3863 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3864 {
3865 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
3866 AssertRCSuccessReturn(rc, rc);
3867 }
3868 else
3869 pCtx->cr3 = uNewCr3;
3870
3871 /* Inform PGM. */
3872 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
3873 {
3874 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3875 AssertRCReturn(rc, rc);
3876 /* ignore informational status codes */
3877 }
3878 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
3879 }
3880
3881 /*
3882 * Switch LDTR for the new task.
3883 */
3884 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
3885 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
3886 else
3887 {
3888 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
3889
3890 IEMSELDESC DescNewLdt;
3891 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
3892 if (rcStrict != VINF_SUCCESS)
3893 {
3894 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
3895 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
3896 return rcStrict;
3897 }
3898 if ( !DescNewLdt.Legacy.Gen.u1Present
3899 || DescNewLdt.Legacy.Gen.u1DescType
3900 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
3901 {
3902 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
3903 uNewLdt, DescNewLdt.Legacy.u));
3904 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
3905 }
3906
3907 pCtx->ldtr.ValidSel = uNewLdt;
3908 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3909 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
3910 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
3911 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
3912 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3913 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
3914 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
3915 }
3916
3917 IEMSELDESC DescSS;
3918 if (IEM_IS_V86_MODE(pVCpu))
3919 {
3920 pVCpu->iem.s.uCpl = 3;
3921 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->es, uNewES);
3922 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->cs, uNewCS);
3923 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ss, uNewSS);
3924 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->ds, uNewDS);
3925 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->fs, uNewFS);
3926 iemHlpLoadSelectorInV86Mode(pVCpu, &pCtx->gs, uNewGS);
3927 }
3928 else
3929 {
3930 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
3931
3932 /*
3933 * Load the stack segment for the new task.
3934 */
3935 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
3936 {
3937 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
3938 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3939 }
3940
3941 /* Fetch the descriptor. */
3942 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
3943 if (rcStrict != VINF_SUCCESS)
3944 {
3945 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
3946 VBOXSTRICTRC_VAL(rcStrict)));
3947 return rcStrict;
3948 }
3949
3950 /* SS must be a data segment and writable. */
3951 if ( !DescSS.Legacy.Gen.u1DescType
3952 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3953 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
3954 {
3955 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
3956 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
3957 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3958 }
3959
3960 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
3961 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
3962 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
3963 {
3964 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
3965 uNewCpl));
3966 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3967 }
3968
3969 /* Is it there? */
3970 if (!DescSS.Legacy.Gen.u1Present)
3971 {
3972 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
3973 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
3974 }
3975
3976 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
3977 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
3978
3979 /* Set the accessed bit before committing the result into SS. */
3980 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3981 {
3982 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
3983 if (rcStrict != VINF_SUCCESS)
3984 return rcStrict;
3985 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3986 }
3987
3988 /* Commit SS. */
3989 pCtx->ss.Sel = uNewSS;
3990 pCtx->ss.ValidSel = uNewSS;
3991 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3992 pCtx->ss.u32Limit = cbLimit;
3993 pCtx->ss.u64Base = u64Base;
3994 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
3995 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
3996
3997 /* CPL has changed, update IEM before loading rest of segments. */
3998 pVCpu->iem.s.uCpl = uNewCpl;
3999
4000 /*
4001 * Load the data segments for the new task.
4002 */
4003 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4004 if (rcStrict != VINF_SUCCESS)
4005 return rcStrict;
4006 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4007 if (rcStrict != VINF_SUCCESS)
4008 return rcStrict;
4009 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4010 if (rcStrict != VINF_SUCCESS)
4011 return rcStrict;
4012 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4013 if (rcStrict != VINF_SUCCESS)
4014 return rcStrict;
4015
4016 /*
4017 * Load the code segment for the new task.
4018 */
4019 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4020 {
4021 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4022 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4023 }
4024
4025 /* Fetch the descriptor. */
4026 IEMSELDESC DescCS;
4027 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4028 if (rcStrict != VINF_SUCCESS)
4029 {
4030 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4031 return rcStrict;
4032 }
4033
4034 /* CS must be a code segment. */
4035 if ( !DescCS.Legacy.Gen.u1DescType
4036 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4037 {
4038 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4039 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4040 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4041 }
4042
4043 /* For conforming CS, DPL must be less than or equal to the RPL. */
4044 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4045 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4046 {
4047            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4048 DescCS.Legacy.Gen.u2Dpl));
4049 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4050 }
4051
4052 /* For non-conforming CS, DPL must match RPL. */
4053 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4054 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4055 {
4056            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4057 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4058 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4059 }
4060
4061 /* Is it there? */
4062 if (!DescCS.Legacy.Gen.u1Present)
4063 {
4064 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4065 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4066 }
4067
4068 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4069 u64Base = X86DESC_BASE(&DescCS.Legacy);
4070
4071 /* Set the accessed bit before committing the result into CS. */
4072 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4073 {
4074 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4075 if (rcStrict != VINF_SUCCESS)
4076 return rcStrict;
4077 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4078 }
4079
4080 /* Commit CS. */
4081 pCtx->cs.Sel = uNewCS;
4082 pCtx->cs.ValidSel = uNewCS;
4083 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4084 pCtx->cs.u32Limit = cbLimit;
4085 pCtx->cs.u64Base = u64Base;
4086 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4087 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4088 }
4089
4090 /** @todo Debug trap. */
4091 if (fIsNewTSS386 && fNewDebugTrap)
4092 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4093
4094 /*
4095 * Construct the error code masks based on what caused this task switch.
4096 * See Intel Instruction reference for INT.
4097 */
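    /* uExt becomes the EXT bit (bit 0) of any error code pushed while dispatching in the
       new task: set for hardware interrupts/exceptions, clear for software INT n. */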
4098 uint16_t uExt;
4099 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4100 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4101 {
4102 uExt = 1;
4103 }
4104 else
4105 uExt = 0;
4106
4107 /*
4108 * Push any error code on to the new stack.
4109 */
4110 if (fFlags & IEM_XCPT_FLAGS_ERR)
4111 {
4112 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4113 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4114 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4115
4116 /* Check that there is sufficient space on the stack. */
4117 /** @todo Factor out segment limit checking for normal/expand down segments
4118 * into a separate function. */
4119 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4120 {
4121 if ( pCtx->esp - 1 > cbLimitSS
4122 || pCtx->esp < cbStackFrame)
4123 {
4124 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4125 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
4126 cbStackFrame));
4127 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4128 }
4129 }
4130 else
4131 {
4132            if (   pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4133 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4134 {
4135 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n", pCtx->ss.Sel, pCtx->esp,
4136 cbStackFrame));
4137 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4138 }
4139 }
4140
4141
4142 if (fIsNewTSS386)
4143 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4144 else
4145 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4146 if (rcStrict != VINF_SUCCESS)
4147 {
4148 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n", fIsNewTSS386 ? "32" : "16",
4149 VBOXSTRICTRC_VAL(rcStrict)));
4150 return rcStrict;
4151 }
4152 }
4153
4154 /* Check the new EIP against the new CS limit. */
4155 if (pCtx->eip > pCtx->cs.u32Limit)
4156 {
4157        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4158 pCtx->eip, pCtx->cs.u32Limit));
4159 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4160 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4161 }
4162
4163 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4164 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4165}
4166
4167
4168/**
4169 * Implements exceptions and interrupts for protected mode.
4170 *
4171 * @returns VBox strict status code.
4172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4173 * @param pCtx The CPU context.
4174 * @param cbInstr The number of bytes to offset rIP by in the return
4175 * address.
4176 * @param u8Vector The interrupt / exception vector number.
4177 * @param fFlags The flags.
4178 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4179 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4180 */
4181IEM_STATIC VBOXSTRICTRC
4182iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4183 PCPUMCTX pCtx,
4184 uint8_t cbInstr,
4185 uint8_t u8Vector,
4186 uint32_t fFlags,
4187 uint16_t uErr,
4188 uint64_t uCr2)
4189{
4190 /*
4191 * Read the IDT entry.
4192 */
4193 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4194 {
4195 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4196 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4197 }
4198 X86DESC Idte;
4199 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4200 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4201 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4202 return rcStrict;
4203 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4204 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4205 Idte.Gate.u4ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4206
4207 /*
4208 * Check the descriptor type, DPL and such.
4209 * ASSUMES this is done in the same order as described for call-gate calls.
4210 */
4211 if (Idte.Gate.u1DescType)
4212 {
4213 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4214 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4215 }
4216 bool fTaskGate = false;
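    /* Note: f32BitGate doubles as a shift count when sizing the stack frames further
       down, which is why it is a uint8_t rather than a bool. */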
4217 uint8_t f32BitGate = true;
4218 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4219 switch (Idte.Gate.u4Type)
4220 {
4221 case X86_SEL_TYPE_SYS_UNDEFINED:
4222 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4223 case X86_SEL_TYPE_SYS_LDT:
4224 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4225 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4226 case X86_SEL_TYPE_SYS_UNDEFINED2:
4227 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4228 case X86_SEL_TYPE_SYS_UNDEFINED3:
4229 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4230 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4231 case X86_SEL_TYPE_SYS_UNDEFINED4:
4232 {
4233 /** @todo check what actually happens when the type is wrong...
4234 * esp. call gates. */
4235 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4236 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4237 }
4238
4239 case X86_SEL_TYPE_SYS_286_INT_GATE:
4240 f32BitGate = false;
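            /* fall thru */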
4241 case X86_SEL_TYPE_SYS_386_INT_GATE:
4242 fEflToClear |= X86_EFL_IF;
4243 break;
4244
4245 case X86_SEL_TYPE_SYS_TASK_GATE:
4246 fTaskGate = true;
4247#ifndef IEM_IMPLEMENTS_TASKSWITCH
4248 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4249#endif
4250 break;
4251
4252 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4253 f32BitGate = false;
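            /* fall thru */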
4254 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4255 break;
4256
4257 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4258 }
4259
4260 /* Check DPL against CPL if applicable. */
4261 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4262 {
4263 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4264 {
4265 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4266 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4267 }
4268 }
4269
4270 /* Is it there? */
4271 if (!Idte.Gate.u1Present)
4272 {
4273 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4274 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4275 }
4276
4277 /* Is it a task-gate? */
4278 if (fTaskGate)
4279 {
4280 /*
4281 * Construct the error code masks based on what caused this task switch.
4282 * See Intel Instruction reference for INT.
4283 */
4284 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4285 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4286 RTSEL SelTSS = Idte.Gate.u16Sel;
4287
4288 /*
4289 * Fetch the TSS descriptor in the GDT.
4290 */
4291 IEMSELDESC DescTSS;
4292 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4293 if (rcStrict != VINF_SUCCESS)
4294 {
4295 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4296 VBOXSTRICTRC_VAL(rcStrict)));
4297 return rcStrict;
4298 }
4299
4300 /* The TSS descriptor must be a system segment and be available (not busy). */
4301 if ( DescTSS.Legacy.Gen.u1DescType
4302 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4303 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4304 {
4305 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4306 u8Vector, SelTSS, DescTSS.Legacy.au64));
4307 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4308 }
4309
4310 /* The TSS must be present. */
4311 if (!DescTSS.Legacy.Gen.u1Present)
4312 {
4313 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4314 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4315 }
4316
4317 /* Do the actual task switch. */
4318 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4319 }
4320
4321 /* A null CS is bad. */
4322 RTSEL NewCS = Idte.Gate.u16Sel;
4323 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4324 {
4325 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4326 return iemRaiseGeneralProtectionFault0(pVCpu);
4327 }
4328
4329 /* Fetch the descriptor for the new CS. */
4330 IEMSELDESC DescCS;
4331 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4332 if (rcStrict != VINF_SUCCESS)
4333 {
4334 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4335 return rcStrict;
4336 }
4337
4338 /* Must be a code segment. */
4339 if (!DescCS.Legacy.Gen.u1DescType)
4340 {
4341 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4342 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4343 }
4344 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4345 {
4346 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4347 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4348 }
4349
4350 /* Don't allow lowering the privilege level. */
4351 /** @todo Does the lowering of privileges apply to software interrupts
4352 * only? This has bearings on the more-privileged or
4353 * same-privilege stack behavior further down. A testcase would
4354 * be nice. */
4355 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4356 {
4357 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4358 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4359 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4360 }
4361
4362 /* Make sure the selector is present. */
4363 if (!DescCS.Legacy.Gen.u1Present)
4364 {
4365 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4366 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4367 }
4368
4369 /* Check the new EIP against the new CS limit. */
4370 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4371 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4372 ? Idte.Gate.u16OffsetLow
4373 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4374 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4375 if (uNewEip > cbLimitCS)
4376 {
4377 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4378 u8Vector, uNewEip, cbLimitCS, NewCS));
4379 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4380 }
4381
4382 /* Calc the flag image to push. */
4383 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4384 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4385 fEfl &= ~X86_EFL_RF;
4386 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4387 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4388
4389 /* From V8086 mode only go to CPL 0. */
4390 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4391 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4392 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4393 {
4394 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4395 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4396 }
4397
4398 /*
4399 * If the privilege level changes, we need to get a new stack from the TSS.
4400 * This in turns means validating the new SS and ESP...
4401 */
4402 if (uNewCpl != pVCpu->iem.s.uCpl)
4403 {
4404 RTSEL NewSS;
4405 uint32_t uNewEsp;
4406 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4407 if (rcStrict != VINF_SUCCESS)
4408 return rcStrict;
4409
4410 IEMSELDESC DescSS;
4411 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4412 if (rcStrict != VINF_SUCCESS)
4413 return rcStrict;
4414
4415 /* Check that there is sufficient space for the stack frame. */
4416 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4417 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4418 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4419 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
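        /* Frame size breakdown: the base frame is EIP, CS, EFLAGS, old ESP and old SS
           (5 entries, plus one for an error code); coming out of V8086 mode, ES, DS, FS
           and GS are pushed as well (4 more).  The shift by f32BitGate scales a 16-bit
           gate's word entries up to the 32-bit gate's dword entries. */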
4420
4421 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4422 {
4423 if ( uNewEsp - 1 > cbLimitSS
4424 || uNewEsp < cbStackFrame)
4425 {
4426 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4427 u8Vector, NewSS, uNewEsp, cbStackFrame));
4428 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4429 }
4430 }
4431 else
4432 {
4433                if (   uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4434 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4435 {
4436 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4437 u8Vector, NewSS, uNewEsp, cbStackFrame));
4438 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4439 }
4440 }
4441
4442 /*
4443 * Start making changes.
4444 */
4445
4446 /* Set the new CPL so that stack accesses use it. */
4447 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4448 pVCpu->iem.s.uCpl = uNewCpl;
4449
4450 /* Create the stack frame. */
4451 RTPTRUNION uStackFrame;
4452 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4453 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4454 if (rcStrict != VINF_SUCCESS)
4455 return rcStrict;
4456 void * const pvStackFrame = uStackFrame.pv;
4457 if (f32BitGate)
4458 {
4459 if (fFlags & IEM_XCPT_FLAGS_ERR)
4460 *uStackFrame.pu32++ = uErr;
4461 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4462 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4463 uStackFrame.pu32[2] = fEfl;
4464 uStackFrame.pu32[3] = pCtx->esp;
4465 uStackFrame.pu32[4] = pCtx->ss.Sel;
4466 if (fEfl & X86_EFL_VM)
4467 {
4468 uStackFrame.pu32[1] = pCtx->cs.Sel;
4469 uStackFrame.pu32[5] = pCtx->es.Sel;
4470 uStackFrame.pu32[6] = pCtx->ds.Sel;
4471 uStackFrame.pu32[7] = pCtx->fs.Sel;
4472 uStackFrame.pu32[8] = pCtx->gs.Sel;
4473 }
4474 }
4475 else
4476 {
4477 if (fFlags & IEM_XCPT_FLAGS_ERR)
4478 *uStackFrame.pu16++ = uErr;
4479 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4480 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4481 uStackFrame.pu16[2] = fEfl;
4482 uStackFrame.pu16[3] = pCtx->sp;
4483 uStackFrame.pu16[4] = pCtx->ss.Sel;
4484 if (fEfl & X86_EFL_VM)
4485 {
4486 uStackFrame.pu16[1] = pCtx->cs.Sel;
4487 uStackFrame.pu16[5] = pCtx->es.Sel;
4488 uStackFrame.pu16[6] = pCtx->ds.Sel;
4489 uStackFrame.pu16[7] = pCtx->fs.Sel;
4490 uStackFrame.pu16[8] = pCtx->gs.Sel;
4491 }
4492 }
4493 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4494 if (rcStrict != VINF_SUCCESS)
4495 return rcStrict;
4496
4497 /* Mark the selectors 'accessed' (hope this is the correct time). */
4498            /** @todo testcase: exactly _when_ are the accessed bits set - before or
4499 * after pushing the stack frame? (Write protect the gdt + stack to
4500 * find out.) */
4501 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4502 {
4503 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4504 if (rcStrict != VINF_SUCCESS)
4505 return rcStrict;
4506 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4507 }
4508
4509 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4510 {
4511 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4512 if (rcStrict != VINF_SUCCESS)
4513 return rcStrict;
4514 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4515 }
4516
4517 /*
4518         * Start committing the register changes (joins with the DPL=CPL branch).
4519 */
4520 pCtx->ss.Sel = NewSS;
4521 pCtx->ss.ValidSel = NewSS;
4522 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4523 pCtx->ss.u32Limit = cbLimitSS;
4524 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4525 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4526 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4527 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4528 * SP is loaded).
4529 * Need to check the other combinations too:
4530 * - 16-bit TSS, 32-bit handler
4531 * - 32-bit TSS, 16-bit handler */
4532 if (!pCtx->ss.Attr.n.u1DefBig)
4533 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4534 else
4535 pCtx->rsp = uNewEsp - cbStackFrame;
4536
4537 if (fEfl & X86_EFL_VM)
4538 {
4539 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4540 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4541 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4542 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4543 }
4544 }
4545 /*
4546 * Same privilege, no stack change and smaller stack frame.
4547 */
4548 else
4549 {
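        /* No stack switch here, so only EIP, CS and EFLAGS (plus an optional error code)
           go onto the frame: 6 or 8 bytes through a 16-bit gate, 12 or 16 through a 32-bit one. */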
4550 uint64_t uNewRsp;
4551 RTPTRUNION uStackFrame;
4552 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
4553 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4554 if (rcStrict != VINF_SUCCESS)
4555 return rcStrict;
4556 void * const pvStackFrame = uStackFrame.pv;
4557
4558 if (f32BitGate)
4559 {
4560 if (fFlags & IEM_XCPT_FLAGS_ERR)
4561 *uStackFrame.pu32++ = uErr;
4562 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4563 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4564 uStackFrame.pu32[2] = fEfl;
4565 }
4566 else
4567 {
4568 if (fFlags & IEM_XCPT_FLAGS_ERR)
4569 *uStackFrame.pu16++ = uErr;
4570 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
4571 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
4572 uStackFrame.pu16[2] = fEfl;
4573 }
4574            rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use iemMemStackPushCommitSpecial here; RSP is updated below */
4575 if (rcStrict != VINF_SUCCESS)
4576 return rcStrict;
4577
4578 /* Mark the CS selector as 'accessed'. */
4579 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4580 {
4581 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4582 if (rcStrict != VINF_SUCCESS)
4583 return rcStrict;
4584 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4585 }
4586
4587 /*
4588 * Start committing the register changes (joins with the other branch).
4589 */
4590 pCtx->rsp = uNewRsp;
4591 }
4592
4593 /* ... register committing continues. */
4594 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4595 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4596 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4597 pCtx->cs.u32Limit = cbLimitCS;
4598 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4599 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4600
4601 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
4602 fEfl &= ~fEflToClear;
4603 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4604
4605 if (fFlags & IEM_XCPT_FLAGS_CR2)
4606 pCtx->cr2 = uCr2;
4607
4608 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4609 iemRaiseXcptAdjustState(pCtx, u8Vector);
4610
4611 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4612}
4613
4614
4615/**
4616 * Implements exceptions and interrupts for long mode.
4617 *
4618 * @returns VBox strict status code.
4619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4620 * @param pCtx The CPU context.
4621 * @param cbInstr The number of bytes to offset rIP by in the return
4622 * address.
4623 * @param u8Vector The interrupt / exception vector number.
4624 * @param fFlags The flags.
4625 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4626 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4627 */
4628IEM_STATIC VBOXSTRICTRC
4629iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
4630 PCPUMCTX pCtx,
4631 uint8_t cbInstr,
4632 uint8_t u8Vector,
4633 uint32_t fFlags,
4634 uint16_t uErr,
4635 uint64_t uCr2)
4636{
4637 /*
4638 * Read the IDT entry.
4639 */
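    /* Long mode gate descriptors are 16 bytes, hence the shift by 4 and the two
       8-byte system fetches below. */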
4640 uint16_t offIdt = (uint16_t)u8Vector << 4;
4641 if (pCtx->idtr.cbIdt < offIdt + 7)
4642 {
4643 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4644 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4645 }
4646 X86DESC64 Idte;
4647 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
4648 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
4649 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
4650 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4651 return rcStrict;
4652 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
4653 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4654 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4655
4656 /*
4657 * Check the descriptor type, DPL and such.
4658 * ASSUMES this is done in the same order as described for call-gate calls.
4659 */
4660 if (Idte.Gate.u1DescType)
4661 {
4662 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4663 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4664 }
4665 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4666 switch (Idte.Gate.u4Type)
4667 {
4668 case AMD64_SEL_TYPE_SYS_INT_GATE:
4669 fEflToClear |= X86_EFL_IF;
4670 break;
4671 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
4672 break;
4673
4674 default:
4675 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4676 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4677 }
4678
4679 /* Check DPL against CPL if applicable. */
4680 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4681 {
4682 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4683 {
4684 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4685 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4686 }
4687 }
4688
4689 /* Is it there? */
4690 if (!Idte.Gate.u1Present)
4691 {
4692 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
4693 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4694 }
4695
4696 /* A null CS is bad. */
4697 RTSEL NewCS = Idte.Gate.u16Sel;
4698 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4699 {
4700 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4701 return iemRaiseGeneralProtectionFault0(pVCpu);
4702 }
4703
4704 /* Fetch the descriptor for the new CS. */
4705 IEMSELDESC DescCS;
4706 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
4707 if (rcStrict != VINF_SUCCESS)
4708 {
4709 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4710 return rcStrict;
4711 }
4712
4713 /* Must be a 64-bit code segment. */
4714 if (!DescCS.Long.Gen.u1DescType)
4715 {
4716 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4717 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4718 }
4719 if ( !DescCS.Long.Gen.u1Long
4720 || DescCS.Long.Gen.u1DefBig
4721 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
4722 {
4723 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
4724 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
4725 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4726 }
4727
4728 /* Don't allow lowering the privilege level. For non-conforming CS
4729 selectors, the CS.DPL sets the privilege level the trap/interrupt
4730 handler runs at. For conforming CS selectors, the CPL remains
4731 unchanged, but the CS.DPL must be <= CPL. */
4732 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
4733 * when CPU in Ring-0. Result \#GP? */
4734 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4735 {
4736 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4737 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4738 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4739 }
4740
4741
4742 /* Make sure the selector is present. */
4743 if (!DescCS.Legacy.Gen.u1Present)
4744 {
4745 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4746 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4747 }
4748
4749 /* Check that the new RIP is canonical. */
4750 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
4751 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
4752 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
4753 if (!IEM_IS_CANONICAL(uNewRip))
4754 {
4755 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
4756 return iemRaiseGeneralProtectionFault0(pVCpu);
4757 }
4758
4759 /*
4760 * If the privilege level changes or if the IST isn't zero, we need to get
4761 * a new stack from the TSS.
4762 */
4763 uint64_t uNewRsp;
4764 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4765 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4766 if ( uNewCpl != pVCpu->iem.s.uCpl
4767 || Idte.Gate.u3IST != 0)
4768 {
4769 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
4770 if (rcStrict != VINF_SUCCESS)
4771 return rcStrict;
4772 }
4773 else
4774 uNewRsp = pCtx->rsp;
4775 uNewRsp &= ~(uint64_t)0xf;
4776
4777 /*
4778 * Calc the flag image to push.
4779 */
4780 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4781 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4782 fEfl &= ~X86_EFL_RF;
4783 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4784 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4785
4786 /*
4787 * Start making changes.
4788 */
4789 /* Set the new CPL so that stack accesses use it. */
4790 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4791 pVCpu->iem.s.uCpl = uNewCpl;
4792
4793 /* Create the stack frame. */
4794 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
4795 RTPTRUNION uStackFrame;
4796 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4797 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4798 if (rcStrict != VINF_SUCCESS)
4799 return rcStrict;
4800 void * const pvStackFrame = uStackFrame.pv;
4801
4802 if (fFlags & IEM_XCPT_FLAGS_ERR)
4803 *uStackFrame.pu64++ = uErr;
4804 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
4805 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
4806 uStackFrame.pu64[2] = fEfl;
4807 uStackFrame.pu64[3] = pCtx->rsp;
4808 uStackFrame.pu64[4] = pCtx->ss.Sel;
4809 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4810 if (rcStrict != VINF_SUCCESS)
4811 return rcStrict;
4812
4813    /* Mark the CS selector 'accessed' (hope this is the correct time). */
4814    /** @todo testcase: exactly _when_ are the accessed bits set - before or
4815 * after pushing the stack frame? (Write protect the gdt + stack to
4816 * find out.) */
4817 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4818 {
4819 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4820 if (rcStrict != VINF_SUCCESS)
4821 return rcStrict;
4822 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4823 }
4824
4825 /*
4826     * Start committing the register changes.
4827 */
4828 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
4829 * hidden registers when interrupting 32-bit or 16-bit code! */
4830 if (uNewCpl != uOldCpl)
4831 {
4832 pCtx->ss.Sel = 0 | uNewCpl;
4833 pCtx->ss.ValidSel = 0 | uNewCpl;
4834 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4835 pCtx->ss.u32Limit = UINT32_MAX;
4836 pCtx->ss.u64Base = 0;
4837 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
4838 }
4839 pCtx->rsp = uNewRsp - cbStackFrame;
4840 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4841 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
4842 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4843 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
4844 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
4845 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4846 pCtx->rip = uNewRip;
4847
4848 fEfl &= ~fEflToClear;
4849 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
4850
4851 if (fFlags & IEM_XCPT_FLAGS_CR2)
4852 pCtx->cr2 = uCr2;
4853
4854 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
4855 iemRaiseXcptAdjustState(pCtx, u8Vector);
4856
4857 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4858}
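
/*
 * Illustrative sketch (not built): the 64-bit stack frame written by the code
 * above, lowest address first (i.e. starting at uNewRsp - cbStackFrame).  The
 * struct and field names are made up purely for illustration.
 */
#if 0
typedef struct IEMXCPTFRAME64EXAMPLE
{
    /* uint64_t uErrCd; */      /* Present only when IEM_XCPT_FLAGS_ERR is set. */
    uint64_t    uRip;           /* Return RIP; advanced by cbInstr for software ints. */
    uint64_t    uCs;            /* Old CS selector with the old CPL as RPL. */
    uint64_t    uRFlags;        /* RFLAGS image (RF cleared/set as per the code above). */
    uint64_t    uRsp;           /* Old RSP. */
    uint64_t    uSs;            /* Old SS selector. */
} IEMXCPTFRAME64EXAMPLE;
#endif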
4859
4860
4861/**
4862 * Implements exceptions and interrupts.
4863 *
4864 * All exceptions and interrupts go through this function!
4865 *
4866 * @returns VBox strict status code.
4867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4868 * @param cbInstr The number of bytes to offset rIP by in the return
4869 * address.
4870 * @param u8Vector The interrupt / exception vector number.
4871 * @param fFlags The flags.
4872 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4873 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4874 */
4875DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
4876iemRaiseXcptOrInt(PVMCPU pVCpu,
4877 uint8_t cbInstr,
4878 uint8_t u8Vector,
4879 uint32_t fFlags,
4880 uint16_t uErr,
4881 uint64_t uCr2)
4882{
4883 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
4884#ifdef IN_RING0
4885 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
4886 AssertRCReturn(rc, rc);
4887#endif
4888
4889#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
4890 /*
4891 * Flush prefetch buffer
4892 */
4893 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
4894#endif
4895
4896 /*
4897 * Perform the V8086 IOPL check and upgrade the fault without nesting.
4898 */
4899 if ( pCtx->eflags.Bits.u1VM
4900 && pCtx->eflags.Bits.u2IOPL != 3
4901 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
4902 && (pCtx->cr0 & X86_CR0_PE) )
4903 {
4904 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
4905 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
4906 u8Vector = X86_XCPT_GP;
4907 uErr = 0;
4908 }
4909#ifdef DBGFTRACE_ENABLED
4910 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
4911 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
4912 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
4913#endif
4914
4915 /*
4916 * Do recursion accounting.
4917 */
4918 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
4919 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
4920 if (pVCpu->iem.s.cXcptRecursions == 0)
4921 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
4922 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
4923 else
4924 {
4925 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
4926 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt, pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
4927
4928        /** @todo double and triple faults. */
4929 if (pVCpu->iem.s.cXcptRecursions >= 3)
4930 {
4931#ifdef DEBUG_bird
4932 AssertFailed();
4933#endif
4934 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
4935 }
4936
4937 /** @todo set X86_TRAP_ERR_EXTERNAL when appropriate.
4938 if (fPrevXcpt & IEM_XCPT_FLAGS_T_EXT_INT)
4939 {
4940 ....
4941 } */
4942 }
4943 pVCpu->iem.s.cXcptRecursions++;
4944 pVCpu->iem.s.uCurXcpt = u8Vector;
4945 pVCpu->iem.s.fCurXcpt = fFlags;
4946
4947 /*
4948 * Extensive logging.
4949 */
4950#if defined(LOG_ENABLED) && defined(IN_RING3)
4951 if (LogIs3Enabled())
4952 {
4953 PVM pVM = pVCpu->CTX_SUFF(pVM);
4954 char szRegs[4096];
4955 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4956 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4957 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4958 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4959 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4960 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4961 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4962 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4963 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4964 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4965 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4966 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4967 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4968 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4969 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4970 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4971 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4972 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4973 " efer=%016VR{efer}\n"
4974 " pat=%016VR{pat}\n"
4975 " sf_mask=%016VR{sf_mask}\n"
4976 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4977 " lstar=%016VR{lstar}\n"
4978 " star=%016VR{star} cstar=%016VR{cstar}\n"
4979 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4980 );
4981
4982 char szInstr[256];
4983 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4984 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4985 szInstr, sizeof(szInstr), NULL);
4986 Log3(("%s%s\n", szRegs, szInstr));
4987 }
4988#endif /* LOG_ENABLED */
4989
4990 /*
4991 * Call the mode specific worker function.
4992 */
4993 VBOXSTRICTRC rcStrict;
4994 if (!(pCtx->cr0 & X86_CR0_PE))
4995 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4996 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
4997 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
4998 else
4999 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5000
5001 /* Flush the prefetch buffer. */
5002#ifdef IEM_WITH_CODE_TLB
5003 pVCpu->iem.s.pbInstrBuf = NULL;
5004#else
5005 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5006#endif
5007
5008 /*
5009 * Unwind.
5010 */
5011 pVCpu->iem.s.cXcptRecursions--;
5012 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5013 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5014 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5015 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5016 return rcStrict;
5017}
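
/*
 * Illustrative sketch (not built): how a caller might feed iemRaiseXcptOrInt.
 * The vector 0x21 and the wrapper name are made up for illustration; real
 * callers use the iemRaiseXxx helpers below.
 */
#if 0
static VBOXSTRICTRC iemExampleRaiseGpVsSoftInt(PVMCPU pVCpu, uint8_t cbInstr, bool fSoftInt)
{
    if (!fSoftInt)
        /* CPU exception with an error code, i.e. #GP(0): cbInstr is not used. */
        return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP,
                                 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0 /*uErr*/, 0 /*uCr2*/);
    /* Software interrupt (INT n): the pushed return address is rIP + cbInstr. */
    return iemRaiseXcptOrInt(pVCpu, cbInstr, 0x21, IEM_XCPT_FLAGS_T_SOFT_INT, 0, 0);
}
#endif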
5018
5019#ifdef IEM_WITH_SETJMP
5020/**
5021 * See iemRaiseXcptOrInt. Will not return.
5022 */
5023IEM_STATIC DECL_NO_RETURN(void)
5024iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5025 uint8_t cbInstr,
5026 uint8_t u8Vector,
5027 uint32_t fFlags,
5028 uint16_t uErr,
5029 uint64_t uCr2)
5030{
5031 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5032 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5033}
5034#endif
5035
5036
5037/** \#DE - 00. */
5038DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5039{
5040 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5041}
5042
5043
5044/** \#DB - 01.
5045 * @note This automatically clears DR7.GD. */
5046DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5047{
5048 /** @todo set/clear RF. */
5049 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5050 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5051}
5052
5053
5054/** \#UD - 06. */
5055DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5056{
5057 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5058}
5059
5060
5061/** \#NM - 07. */
5062DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5063{
5064 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5065}
5066
5067
5068/** \#TS(err) - 0a. */
5069DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5070{
5071 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5072}
5073
5074
5075/** \#TS(tr) - 0a. */
5076DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5077{
5078 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5079 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5080}
5081
5082
5083/** \#TS(0) - 0a. */
5084DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5085{
5086 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5087 0, 0);
5088}
5089
5090
5091/** \#TS(sel) - 0a. */
5092DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5093{
5094 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5095 uSel & X86_SEL_MASK_OFF_RPL, 0);
5096}
5097
5098
5099/** \#NP(err) - 0b. */
5100DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5101{
5102 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5103}
5104
5105
5106/** \#NP(seg) - 0b. */
5107DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySegReg(PVMCPU pVCpu, uint32_t iSegReg)
5108{
5109 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5110 iemSRegFetchU16(pVCpu, iSegReg) & ~X86_SEL_RPL, 0);
5111}
5112
5113
5114/** \#NP(sel) - 0b. */
5115DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5116{
5117 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5118 uSel & ~X86_SEL_RPL, 0);
5119}
5120
5121
5122/** \#SS(sel) - 0c. */
5123DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5124{
5125 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5126 uSel & ~X86_SEL_RPL, 0);
5127}
5128
5129
5130/** \#SS(err) - 0c. */
5131DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5132{
5133 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5134}
5135
5136
5137/** \#GP(n) - 0d. */
5138DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5139{
5140 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5141}
5142
5143
5144/** \#GP(0) - 0d. */
5145DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5146{
5147 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5148}
5149
5150#ifdef IEM_WITH_SETJMP
5151/** \#GP(0) - 0d. */
5152DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5153{
5154 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5155}
5156#endif
5157
5158
5159/** \#GP(sel) - 0d. */
5160DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5161{
5162 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5163 Sel & ~X86_SEL_RPL, 0);
5164}
5165
5166
5167/** \#GP(0) - 0d. */
5168DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5169{
5170 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5171}
5172
5173
5174/** \#GP(sel) - 0d. */
5175DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5176{
5177 NOREF(iSegReg); NOREF(fAccess);
5178 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5179 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5180}
5181
5182#ifdef IEM_WITH_SETJMP
5183/** \#GP(sel) - 0d, longjmp. */
5184DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5185{
5186 NOREF(iSegReg); NOREF(fAccess);
5187 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5188 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5189}
5190#endif
5191
5192/** \#GP(sel) - 0d. */
5193DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5194{
5195 NOREF(Sel);
5196 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5197}
5198
5199#ifdef IEM_WITH_SETJMP
5200/** \#GP(sel) - 0d, longjmp. */
5201DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5202{
5203 NOREF(Sel);
5204 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5205}
5206#endif
5207
5208
5209/** \#GP(sel) - 0d. */
5210DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5211{
5212 NOREF(iSegReg); NOREF(fAccess);
5213 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5214}
5215
5216#ifdef IEM_WITH_SETJMP
5217/** \#GP(sel) - 0d, longjmp. */
5218DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5219 uint32_t fAccess)
5220{
5221 NOREF(iSegReg); NOREF(fAccess);
5222 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5223}
5224#endif
5225
5226
5227/** \#PF(n) - 0e. */
5228DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5229{
5230 uint16_t uErr;
5231 switch (rc)
5232 {
5233 case VERR_PAGE_NOT_PRESENT:
5234 case VERR_PAGE_TABLE_NOT_PRESENT:
5235 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5236 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5237 uErr = 0;
5238 break;
5239
5240 default:
5241 AssertMsgFailed(("%Rrc\n", rc));
5242 case VERR_ACCESS_DENIED:
5243 uErr = X86_TRAP_PF_P;
5244 break;
5245
5246 /** @todo reserved */
5247 }
5248
5249 if (pVCpu->iem.s.uCpl == 3)
5250 uErr |= X86_TRAP_PF_US;
5251
5252 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5253 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5254 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5255 uErr |= X86_TRAP_PF_ID;
5256
5257#if 0 /* This is so much nonsense, really. Why was it done like that? */
5258 /* Note! RW access callers reporting a WRITE protection fault, will clear
5259 the READ flag before calling. So, read-modify-write accesses (RW)
5260 can safely be reported as READ faults. */
5261 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5262 uErr |= X86_TRAP_PF_RW;
5263#else
5264 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5265 {
5266 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5267 uErr |= X86_TRAP_PF_RW;
5268 }
5269#endif
5270
5271 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5272 uErr, GCPtrWhere);
5273}
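
/*
 * Illustrative sketch (not built): raising #PF for a write that hit a present
 * but not writable page.  With CPL=3 the code above composes the error code as
 * X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW.  The function name and the
 * bare IEM_ACCESS_TYPE_WRITE access value are illustration-only assumptions.
 */
#if 0
static VBOXSTRICTRC iemExampleRaiseWritePf(PVMCPU pVCpu, RTGCPTR GCPtrMem)
{
    return iemRaisePageFault(pVCpu, GCPtrMem, IEM_ACCESS_TYPE_WRITE, VERR_ACCESS_DENIED);
}
#endif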
5274
5275#ifdef IEM_WITH_SETJMP
5276/** \#PF(n) - 0e, longjmp. */
5277IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5278{
5279 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5280}
5281#endif
5282
5283
5284/** \#MF(0) - 10. */
5285DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5286{
5287 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5288}
5289
5290
5291/** \#AC(0) - 11. */
5292DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5293{
5294 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5295}
5296
5297
5298/**
5299 * Macro for calling iemCImplRaiseDivideError().
5300 *
5301 * This enables us to add/remove arguments and force different levels of
5302 * inlining as we wish.
5303 *
5304 * @return Strict VBox status code.
5305 */
5306#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5307IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5308{
5309 NOREF(cbInstr);
5310 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5311}
5312
5313
5314/**
5315 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5316 *
5317 * This enables us to add/remove arguments and force different levels of
5318 * inlining as we wish.
5319 *
5320 * @return Strict VBox status code.
5321 */
5322#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5323IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5324{
5325 NOREF(cbInstr);
5326 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5327}
5328
5329
5330/**
5331 * Macro for calling iemCImplRaiseInvalidOpcode().
5332 *
5333 * This enables us to add/remove arguments and force different levels of
5334 * inlining as we wish.
5335 *
5336 * @return Strict VBox status code.
5337 */
5338#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5339IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5340{
5341 NOREF(cbInstr);
5342 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5343}
5344
5345
5346/** @} */
5347
5348
5349/*
5350 *
5351 * Helper routines.
5352 * Helper routines.
5353 * Helper routines.
5354 *
5355 */
5356
5357/**
5358 * Recalculates the effective operand size.
5359 *
5360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5361 */
5362IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5363{
5364 switch (pVCpu->iem.s.enmCpuMode)
5365 {
5366 case IEMMODE_16BIT:
5367 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5368 break;
5369 case IEMMODE_32BIT:
5370 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5371 break;
5372 case IEMMODE_64BIT:
5373 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5374 {
5375 case 0:
5376 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5377 break;
5378 case IEM_OP_PRF_SIZE_OP:
5379 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5380 break;
5381 case IEM_OP_PRF_SIZE_REX_W:
5382 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5383 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5384 break;
5385 }
5386 break;
5387 default:
5388 AssertFailed();
5389 }
5390}
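
/*
 * Illustrative sketch (not built): the 64-bit operand size rules above in
 * action, assuming pVCpu->iem.s.enmCpuMode is IEMMODE_64BIT.  Overwriting
 * fPrefixes like this is for illustration only.
 */
#if 0
static void iemExampleEffOpSize64(PVMCPU pVCpu)
{
    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_SIZE_OP;                          /* 0x66 alone -> 16-bit. */
    iemRecalEffOpSize(pVCpu);
    Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);

    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_SIZE_REX_W;  /* REX.W wins -> 64-bit. */
    iemRecalEffOpSize(pVCpu);
    Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_64BIT);
}
#endif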
5391
5392
5393/**
5394 * Sets the default operand size to 64-bit and recalculates the effective
5395 * operand size.
5396 *
5397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5398 */
5399IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5400{
5401 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5402 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
5403 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5404 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5405 else
5406 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5407}
5408
5409
5410/*
5411 *
5412 * Common opcode decoders.
5413 * Common opcode decoders.
5414 * Common opcode decoders.
5415 *
5416 */
5417//#include <iprt/mem.h>
5418
5419/**
5420 * Used to add extra details about a stub case.
5421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5422 */
5423IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5424{
5425#if defined(LOG_ENABLED) && defined(IN_RING3)
5426 PVM pVM = pVCpu->CTX_SUFF(pVM);
5427 char szRegs[4096];
5428 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5429 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5430 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5431 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5432 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5433 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5434 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5435 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5436 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5437 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5438 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5439 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5440 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5441 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5442 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5443 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5444 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5445 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5446 " efer=%016VR{efer}\n"
5447 " pat=%016VR{pat}\n"
5448 " sf_mask=%016VR{sf_mask}\n"
5449 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5450 " lstar=%016VR{lstar}\n"
5451 " star=%016VR{star} cstar=%016VR{cstar}\n"
5452 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5453 );
5454
5455 char szInstr[256];
5456 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5457 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5458 szInstr, sizeof(szInstr), NULL);
5459
5460 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5461#else
5462    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
5463#endif
5464}
5465
5466/**
5467 * Complains about a stub.
5468 *
5469 * Providing two versions of this macro, one for daily use and one for use when
5470 * working on IEM.
5471 */
5472#if 0
5473# define IEMOP_BITCH_ABOUT_STUB() \
5474 do { \
5475 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
5476 iemOpStubMsg2(pVCpu); \
5477 RTAssertPanic(); \
5478 } while (0)
5479#else
5480# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
5481#endif
5482
5483/** Stubs an opcode. */
5484#define FNIEMOP_STUB(a_Name) \
5485 FNIEMOP_DEF(a_Name) \
5486 { \
5487 IEMOP_BITCH_ABOUT_STUB(); \
5488 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5489 } \
5490 typedef int ignore_semicolon
5491
5492/** Stubs an opcode. */
5493#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
5494 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5495 { \
5496 IEMOP_BITCH_ABOUT_STUB(); \
5497 NOREF(a_Name0); \
5498 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
5499 } \
5500 typedef int ignore_semicolon
5501
5502/** Stubs an opcode which currently should raise \#UD. */
5503#define FNIEMOP_UD_STUB(a_Name) \
5504 FNIEMOP_DEF(a_Name) \
5505 { \
5506 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5507 return IEMOP_RAISE_INVALID_OPCODE(); \
5508 } \
5509 typedef int ignore_semicolon
5510
5511/** Stubs an opcode which currently should raise \#UD. */
5512#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
5513 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
5514 { \
5515 NOREF(a_Name0); \
5516 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
5517 return IEMOP_RAISE_INVALID_OPCODE(); \
5518 } \
5519 typedef int ignore_semicolon
5520
5521
5522
5523/** @name Register Access.
5524 * @{
5525 */
5526
5527/**
5528 * Gets a reference (pointer) to the specified hidden segment register.
5529 *
5530 * @returns Hidden register reference.
5531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5532 * @param iSegReg The segment register.
5533 */
5534IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
5535{
5536 Assert(iSegReg < X86_SREG_COUNT);
5537 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5538 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
5539
5540#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5541 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
5542 { /* likely */ }
5543 else
5544 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5545#else
5546 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5547#endif
5548 return pSReg;
5549}
5550
5551
5552/**
5553 * Ensures that the given hidden segment register is up to date.
5554 *
5555 * @returns Hidden register reference.
5556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5557 * @param pSReg The segment register.
5558 */
5559IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
5560{
5561#ifdef VBOX_WITH_RAW_MODE_NOT_R0
5562 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
5563 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
5564#else
5565 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
5566 NOREF(pVCpu);
5567#endif
5568 return pSReg;
5569}
5570
5571
5572/**
5573 * Gets a reference (pointer) to the specified segment register (the selector
5574 * value).
5575 *
5576 * @returns Pointer to the selector variable.
5577 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5578 * @param iSegReg The segment register.
5579 */
5580DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
5581{
5582 Assert(iSegReg < X86_SREG_COUNT);
5583 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5584 return &pCtx->aSRegs[iSegReg].Sel;
5585}
5586
5587
5588/**
5589 * Fetches the selector value of a segment register.
5590 *
5591 * @returns The selector value.
5592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5593 * @param iSegReg The segment register.
5594 */
5595DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
5596{
5597 Assert(iSegReg < X86_SREG_COUNT);
5598 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
5599}
5600
5601
5602/**
5603 * Gets a reference (pointer) to the specified general purpose register.
5604 *
5605 * @returns Register reference.
5606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5607 * @param iReg The general purpose register.
5608 */
5609DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
5610{
5611 Assert(iReg < 16);
5612 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5613 return &pCtx->aGRegs[iReg];
5614}
5615
5616
5617/**
5618 * Gets a reference (pointer) to the specified 8-bit general purpose register.
5619 *
5620 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
5621 *
5622 * @returns Register reference.
5623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5624 * @param iReg The register.
5625 */
5626DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
5627{
5628 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5629 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
5630 {
5631 Assert(iReg < 16);
5632 return &pCtx->aGRegs[iReg].u8;
5633 }
5634 /* high 8-bit register. */
5635 Assert(iReg < 8);
5636 return &pCtx->aGRegs[iReg & 3].bHi;
5637}
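
/*
 * Illustrative sketch (not built): the AH/CH/DH/BH quirk handled above.
 * Without a REX prefix register index 4 resolves to AH (high byte of RAX);
 * with any REX prefix it resolves to SPL.  Function name is made up.
 */
#if 0
static void iemExampleHighByteRegs(PVMCPU pVCpu)
{
    pVCpu->iem.s.fPrefixes = 0;                  /* No REX prefix. */
    uint8_t *pbAh  = iemGRegRefU8(pVCpu, 4);     /* -> &aGRegs[0].bHi, i.e. AH. */

    pVCpu->iem.s.fPrefixes = IEM_OP_PRF_REX;     /* Any REX prefix. */
    uint8_t *pbSpl = iemGRegRefU8(pVCpu, 4);     /* -> &aGRegs[4].u8, i.e. SPL. */
    NOREF(pbAh); NOREF(pbSpl);
}
#endif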
5638
5639
5640/**
5641 * Gets a reference (pointer) to the specified 16-bit general purpose register.
5642 *
5643 * @returns Register reference.
5644 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5645 * @param iReg The register.
5646 */
5647DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
5648{
5649 Assert(iReg < 16);
5650 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5651 return &pCtx->aGRegs[iReg].u16;
5652}
5653
5654
5655/**
5656 * Gets a reference (pointer) to the specified 32-bit general purpose register.
5657 *
5658 * @returns Register reference.
5659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5660 * @param iReg The register.
5661 */
5662DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
5663{
5664 Assert(iReg < 16);
5665 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5666 return &pCtx->aGRegs[iReg].u32;
5667}
5668
5669
5670/**
5671 * Gets a reference (pointer) to the specified 64-bit general purpose register.
5672 *
5673 * @returns Register reference.
5674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5675 * @param iReg The register.
5676 */
5677DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
5678{
5679    Assert(iReg < 16);
5680 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5681 return &pCtx->aGRegs[iReg].u64;
5682}
5683
5684
5685/**
5686 * Fetches the value of an 8-bit general purpose register.
5687 *
5688 * @returns The register value.
5689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5690 * @param iReg The register.
5691 */
5692DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
5693{
5694 return *iemGRegRefU8(pVCpu, iReg);
5695}
5696
5697
5698/**
5699 * Fetches the value of a 16-bit general purpose register.
5700 *
5701 * @returns The register value.
5702 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5703 * @param iReg The register.
5704 */
5705DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
5706{
5707 Assert(iReg < 16);
5708 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
5709}
5710
5711
5712/**
5713 * Fetches the value of a 32-bit general purpose register.
5714 *
5715 * @returns The register value.
5716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5717 * @param iReg The register.
5718 */
5719DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
5720{
5721 Assert(iReg < 16);
5722 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
5723}
5724
5725
5726/**
5727 * Fetches the value of a 64-bit general purpose register.
5728 *
5729 * @returns The register value.
5730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5731 * @param iReg The register.
5732 */
5733DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
5734{
5735 Assert(iReg < 16);
5736 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
5737}
5738
5739
5740/**
5741 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
5742 *
5743 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5744 * segment limit.
5745 *
5746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5747 * @param offNextInstr The offset of the next instruction.
5748 */
5749IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
5750{
5751 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5752 switch (pVCpu->iem.s.enmEffOpSize)
5753 {
5754 case IEMMODE_16BIT:
5755 {
5756 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5757 if ( uNewIp > pCtx->cs.u32Limit
5758 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5759 return iemRaiseGeneralProtectionFault0(pVCpu);
5760 pCtx->rip = uNewIp;
5761 break;
5762 }
5763
5764 case IEMMODE_32BIT:
5765 {
5766 Assert(pCtx->rip <= UINT32_MAX);
5767 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5768
5769 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5770 if (uNewEip > pCtx->cs.u32Limit)
5771 return iemRaiseGeneralProtectionFault0(pVCpu);
5772 pCtx->rip = uNewEip;
5773 break;
5774 }
5775
5776 case IEMMODE_64BIT:
5777 {
5778 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5779
5780 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5781 if (!IEM_IS_CANONICAL(uNewRip))
5782 return iemRaiseGeneralProtectionFault0(pVCpu);
5783 pCtx->rip = uNewRip;
5784 break;
5785 }
5786
5787 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5788 }
5789
5790 pCtx->eflags.Bits.u1RF = 0;
5791
5792#ifndef IEM_WITH_CODE_TLB
5793 /* Flush the prefetch buffer. */
5794 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5795#endif
5796
5797 return VINF_SUCCESS;
5798}
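
/*
 * Illustrative sketch (not built): the 16-bit case above wraps IP.  A 2-byte
 * JMP rel8 at IP=0xFFFD with displacement +4 yields (0xFFFD + 4 + 2) & 0xFFFF
 * = 0x0003, which is then checked against CS.u32Limit before being committed.
 * The displacement value is made up for illustration.
 */
#if 0
static VBOXSTRICTRC iemExampleShortJump(PVMCPU pVCpu)
{
    return iemRegRipRelativeJumpS8(pVCpu, 4 /* rel8 displacement */);
}
#endif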
5799
5800
5801/**
5802 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
5803 *
5804 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5805 * segment limit.
5806 *
5807 * @returns Strict VBox status code.
5808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5809 * @param offNextInstr The offset of the next instruction.
5810 */
5811IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
5812{
5813 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5814 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
5815
5816 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5817 if ( uNewIp > pCtx->cs.u32Limit
5818 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5819 return iemRaiseGeneralProtectionFault0(pVCpu);
5820 /** @todo Test 16-bit jump in 64-bit mode. possible? */
5821 pCtx->rip = uNewIp;
5822 pCtx->eflags.Bits.u1RF = 0;
5823
5824#ifndef IEM_WITH_CODE_TLB
5825 /* Flush the prefetch buffer. */
5826 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5827#endif
5828
5829 return VINF_SUCCESS;
5830}
5831
5832
5833/**
5834 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
5835 *
5836 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5837 * segment limit.
5838 *
5839 * @returns Strict VBox status code.
5840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5841 * @param offNextInstr The offset of the next instruction.
5842 */
5843IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
5844{
5845 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5846 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
5847
5848 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
5849 {
5850 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5851
5852 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5853 if (uNewEip > pCtx->cs.u32Limit)
5854 return iemRaiseGeneralProtectionFault0(pVCpu);
5855 pCtx->rip = uNewEip;
5856 }
5857 else
5858 {
5859 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5860
5861 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
5862 if (!IEM_IS_CANONICAL(uNewRip))
5863 return iemRaiseGeneralProtectionFault0(pVCpu);
5864 pCtx->rip = uNewRip;
5865 }
5866 pCtx->eflags.Bits.u1RF = 0;
5867
5868#ifndef IEM_WITH_CODE_TLB
5869 /* Flush the prefetch buffer. */
5870 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5871#endif
5872
5873 return VINF_SUCCESS;
5874}
5875
5876
5877/**
5878 * Performs a near jump to the specified address.
5879 *
5880 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
5881 * segment limit.
5882 *
5883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5884 * @param uNewRip The new RIP value.
5885 */
5886IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
5887{
5888 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5889 switch (pVCpu->iem.s.enmEffOpSize)
5890 {
5891 case IEMMODE_16BIT:
5892 {
5893 Assert(uNewRip <= UINT16_MAX);
5894 if ( uNewRip > pCtx->cs.u32Limit
5895 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
5896 return iemRaiseGeneralProtectionFault0(pVCpu);
5897 /** @todo Test 16-bit jump in 64-bit mode. */
5898 pCtx->rip = uNewRip;
5899 break;
5900 }
5901
5902 case IEMMODE_32BIT:
5903 {
5904 Assert(uNewRip <= UINT32_MAX);
5905 Assert(pCtx->rip <= UINT32_MAX);
5906 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
5907
5908 if (uNewRip > pCtx->cs.u32Limit)
5909 return iemRaiseGeneralProtectionFault0(pVCpu);
5910 pCtx->rip = uNewRip;
5911 break;
5912 }
5913
5914 case IEMMODE_64BIT:
5915 {
5916 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5917
5918 if (!IEM_IS_CANONICAL(uNewRip))
5919 return iemRaiseGeneralProtectionFault0(pVCpu);
5920 pCtx->rip = uNewRip;
5921 break;
5922 }
5923
5924 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5925 }
5926
5927 pCtx->eflags.Bits.u1RF = 0;
5928
5929#ifndef IEM_WITH_CODE_TLB
5930 /* Flush the prefetch buffer. */
5931 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5932#endif
5933
5934 return VINF_SUCCESS;
5935}
5936
5937
5938/**
5939 * Get the address of the top of the stack.
5940 *
5941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5942 * @param pCtx The CPU context from which SP/ESP/RSP should be
5943 * read.
5944 */
5945DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
5946{
5947 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
5948 return pCtx->rsp;
5949 if (pCtx->ss.Attr.n.u1DefBig)
5950 return pCtx->esp;
5951 return pCtx->sp;
5952}
5953
5954
5955/**
5956 * Updates the RIP/EIP/IP to point to the next instruction.
5957 *
5958 * This function leaves the EFLAGS.RF flag alone.
5959 *
5960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5961 * @param cbInstr The number of bytes to add.
5962 */
5963IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
5964{
5965 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5966 switch (pVCpu->iem.s.enmCpuMode)
5967 {
5968 case IEMMODE_16BIT:
5969 Assert(pCtx->rip <= UINT16_MAX);
5970 pCtx->eip += cbInstr;
5971 pCtx->eip &= UINT32_C(0xffff);
5972 break;
5973
5974 case IEMMODE_32BIT:
5975 pCtx->eip += cbInstr;
5976 Assert(pCtx->rip <= UINT32_MAX);
5977 break;
5978
5979 case IEMMODE_64BIT:
5980 pCtx->rip += cbInstr;
5981 break;
5982 default: AssertFailed();
5983 }
5984}
5985
5986
5987#if 0
5988/**
5989 * Updates the RIP/EIP/IP to point to the next instruction.
5990 *
5991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5992 */
5993IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
5994{
5995 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
5996}
5997#endif
5998
5999
6000
6001/**
6002 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6003 *
6004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6005 * @param cbInstr The number of bytes to add.
6006 */
6007IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6008{
6009 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6010
6011 pCtx->eflags.Bits.u1RF = 0;
6012
6013 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6014#if ARCH_BITS >= 64
6015 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6016 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6017 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6018#else
6019 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6020 pCtx->rip += cbInstr;
6021 else
6022 {
6023 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6024 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6025 }
6026#endif
6027}
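
/*
 * Illustrative sketch (not built): the mask-table trick above in isolation.
 * Indexing with the CPU mode truncates the advanced RIP to 16/32/64 bits,
 * e.g. (0xffff + 2) & 0xffff = 1 in 16-bit mode.  Function name is made up.
 */
#if 0
static uint64_t iemExampleAdvanceRip(uint64_t uRip, uint8_t cbInstr, IEMMODE enmCpuMode)
{
    static uint64_t const s_aMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
    return (uRip + cbInstr) & s_aMasks[(unsigned)enmCpuMode];
}
#endif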
6028
6029
6030/**
6031 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6032 *
6033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6034 */
6035IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6036{
6037 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6038}
6039
6040
6041/**
6042 * Adds to the stack pointer.
6043 *
6044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6045 * @param pCtx The CPU context in which SP/ESP/RSP should be
6046 * updated.
6047 * @param cbToAdd The number of bytes to add (8-bit!).
6048 */
6049DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6050{
6051 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6052 pCtx->rsp += cbToAdd;
6053 else if (pCtx->ss.Attr.n.u1DefBig)
6054 pCtx->esp += cbToAdd;
6055 else
6056 pCtx->sp += cbToAdd;
6057}
6058
6059
6060/**
6061 * Subtracts from the stack pointer.
6062 *
6063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6064 * @param pCtx The CPU context in which SP/ESP/RSP should be
6065 * updated.
6066 * @param cbToSub The number of bytes to subtract (8-bit!).
6067 */
6068DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6069{
6070 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6071 pCtx->rsp -= cbToSub;
6072 else if (pCtx->ss.Attr.n.u1DefBig)
6073 pCtx->esp -= cbToSub;
6074 else
6075 pCtx->sp -= cbToSub;
6076}
6077
6078
6079/**
6080 * Adds to the temporary stack pointer.
6081 *
6082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6083 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6084 * @param cbToAdd The number of bytes to add (16-bit).
6085 * @param pCtx Where to get the current stack mode.
6086 */
6087DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6088{
6089 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6090 pTmpRsp->u += cbToAdd;
6091 else if (pCtx->ss.Attr.n.u1DefBig)
6092 pTmpRsp->DWords.dw0 += cbToAdd;
6093 else
6094 pTmpRsp->Words.w0 += cbToAdd;
6095}
6096
6097
6098/**
6099 * Subtracts from the temporary stack pointer.
6100 *
6101 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6102 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6103 * @param cbToSub The number of bytes to subtract.
6104 * @param pCtx Where to get the current stack mode.
6105 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6106 * expecting that.
6107 */
6108DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6109{
6110 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6111 pTmpRsp->u -= cbToSub;
6112 else if (pCtx->ss.Attr.n.u1DefBig)
6113 pTmpRsp->DWords.dw0 -= cbToSub;
6114 else
6115 pTmpRsp->Words.w0 -= cbToSub;
6116}
6117
6118
6119/**
6120 * Calculates the effective stack address for a push of the specified size as
6121 * well as the new RSP value (upper bits may be masked).
6122 *
6123 * @returns Effective stack address for the push.
6124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6125 * @param pCtx Where to get the current stack mode.
6126 * @param cbItem The size of the stack item to push.
6127 * @param puNewRsp Where to return the new RSP value.
6128 */
6129DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6130{
6131 RTUINT64U uTmpRsp;
6132 RTGCPTR GCPtrTop;
6133 uTmpRsp.u = pCtx->rsp;
6134
6135 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6136 GCPtrTop = uTmpRsp.u -= cbItem;
6137 else if (pCtx->ss.Attr.n.u1DefBig)
6138 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6139 else
6140 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6141 *puNewRsp = uTmpRsp.u;
6142 return GCPtrTop;
6143}
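
/*
 * Illustrative sketch (not built): pushing a 32-bit item on a 32-bit stack
 * (SS.B=1) with ESP=0x1000 returns GCPtrTop=0x0FFC, and *puNewRsp has only
 * its low 32 bits decremented.  The caller writes the item at GCPtrTop and
 * only then commits *puNewRsp to pCtx->rsp.  Function name is made up.
 */
#if 0
static RTGCPTR iemExamplePushAddr(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t *puNewRsp)
{
    return iemRegGetRspForPush(pVCpu, pCtx, sizeof(uint32_t), puNewRsp);
}
#endif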
6144
6145
6146/**
6147 * Gets the current stack pointer and calculates the value after a pop of the
6148 * specified size.
6149 *
6150 * @returns Current stack pointer.
6151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6152 * @param pCtx Where to get the current stack mode.
6153 * @param cbItem The size of the stack item to pop.
6154 * @param puNewRsp Where to return the new RSP value.
6155 */
6156DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6157{
6158 RTUINT64U uTmpRsp;
6159 RTGCPTR GCPtrTop;
6160 uTmpRsp.u = pCtx->rsp;
6161
6162 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6163 {
6164 GCPtrTop = uTmpRsp.u;
6165 uTmpRsp.u += cbItem;
6166 }
6167 else if (pCtx->ss.Attr.n.u1DefBig)
6168 {
6169 GCPtrTop = uTmpRsp.DWords.dw0;
6170 uTmpRsp.DWords.dw0 += cbItem;
6171 }
6172 else
6173 {
6174 GCPtrTop = uTmpRsp.Words.w0;
6175 uTmpRsp.Words.w0 += cbItem;
6176 }
6177 *puNewRsp = uTmpRsp.u;
6178 return GCPtrTop;
6179}
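
/*
 * Illustrative sketch (not built): the pop variant returns the current top of
 * the stack and only advances the value in *puNewRsp, e.g. ESP=0x0FFC popping
 * a 32-bit item gives GCPtrTop=0x0FFC and a new ESP of 0x1000 once the caller
 * commits it.  Function name is made up.
 */
#if 0
static RTGCPTR iemExamplePopAddr(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t *puNewRsp)
{
    return iemRegGetRspForPop(pVCpu, pCtx, sizeof(uint32_t), puNewRsp);
}
#endif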
6180
6181
6182/**
6183 * Calculates the effective stack address for a push of the specified size as
6184 * well as the new temporary RSP value (upper bits may be masked).
6185 *
6186 * @returns Effective stack address for the push.
6187 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6188 * @param pCtx Where to get the current stack mode.
6189 * @param pTmpRsp The temporary stack pointer. This is updated.
6190 * @param cbItem The size of the stack item to push.
6191 */
6192DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6193{
6194 RTGCPTR GCPtrTop;
6195
6196 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6197 GCPtrTop = pTmpRsp->u -= cbItem;
6198 else if (pCtx->ss.Attr.n.u1DefBig)
6199 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6200 else
6201 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6202 return GCPtrTop;
6203}
6204
6205
6206/**
6207 * Gets the effective stack address for a pop of the specified size and
6208 * calculates and updates the temporary RSP.
6209 *
6210 * @returns Current stack pointer.
6211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6212 * @param pCtx Where to get the current stack mode.
6213 * @param pTmpRsp The temporary stack pointer. This is updated.
6214 * @param cbItem The size of the stack item to pop.
6215 */
6216DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6217{
6218 RTGCPTR GCPtrTop;
6219 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6220 {
6221 GCPtrTop = pTmpRsp->u;
6222 pTmpRsp->u += cbItem;
6223 }
6224 else if (pCtx->ss.Attr.n.u1DefBig)
6225 {
6226 GCPtrTop = pTmpRsp->DWords.dw0;
6227 pTmpRsp->DWords.dw0 += cbItem;
6228 }
6229 else
6230 {
6231 GCPtrTop = pTmpRsp->Words.w0;
6232 pTmpRsp->Words.w0 += cbItem;
6233 }
6234 return GCPtrTop;
6235}
6236
6237/** @} */
6238
6239
6240/** @name FPU access and helpers.
6241 *
6242 * @{
6243 */
6244
6245
6246/**
6247 * Hook for preparing to use the host FPU.
6248 *
6249 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6250 *
6251 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6252 */
6253DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6254{
6255#ifdef IN_RING3
6256 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6257#else
6258 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6259#endif
6260}
6261
6262
6263/**
6264 * Hook for preparing to use the host FPU for SSE.
6265 *
6266 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6267 *
6268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6269 */
6270DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6271{
6272 iemFpuPrepareUsage(pVCpu);
6273}
6274
6275
6276/**
6277 * Hook for actualizing the guest FPU state before the interpreter reads it.
6278 *
6279 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6280 *
6281 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6282 */
6283DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6284{
6285#ifdef IN_RING3
6286 NOREF(pVCpu);
6287#else
6288 CPUMRZFpuStateActualizeForRead(pVCpu);
6289#endif
6290}
6291
6292
6293/**
6294 * Hook for actualizing the guest FPU state before the interpreter changes it.
6295 *
6296 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6297 *
6298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6299 */
6300DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6301{
6302#ifdef IN_RING3
6303 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6304#else
6305 CPUMRZFpuStateActualizeForChange(pVCpu);
6306#endif
6307}
6308
6309
6310/**
6311 * Hook for actualizing the guest XMM0..15 register state for read only.
6312 *
6313 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6314 *
6315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6316 */
6317DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6318{
6319#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6320 NOREF(pVCpu);
6321#else
6322 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6323#endif
6324}
6325
6326
6327/**
6328 * Hook for actualizing the guest XMM0..15 register state for read+write.
6329 *
6330 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6331 *
6332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6333 */
6334DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6335{
6336#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6337 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6338#else
6339 CPUMRZFpuStateActualizeForChange(pVCpu);
6340#endif
6341}
6342
6343
6344/**
6345 * Stores a QNaN value into an FPU register.
6346 *
6347 * @param pReg Pointer to the register.
6348 */
6349DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6350{
6351 pReg->au32[0] = UINT32_C(0x00000000);
6352 pReg->au32[1] = UINT32_C(0xc0000000);
6353 pReg->au16[4] = UINT16_C(0xffff);
6354}
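
/*
 * Illustrative sketch (not part of IEM): the helper above writes the 80-bit
 * "real indefinite" QNaN, i.e. sign = 1, exponent all ones, mantissa
 * 0xC000000000000000.  The standalone function below builds the same encoding
 * with made-up names and only <stdint.h> types.
 */
#if 0 /* illustrative sketch only */
typedef struct SKETCHFLOAT80
{
    uint64_t uMantissa;         /* bits 0..63, including the explicit integer bit (bit 63) */
    uint16_t uSignAndExponent;  /* bit 15 = sign, bits 0..14 = exponent */
} SKETCHFLOAT80;

/** Builds the x87 "real indefinite" QNaN used for masked invalid-operation responses. */
static SKETCHFLOAT80 sketchRealIndefinite(void)
{
    SKETCHFLOAT80 Val;
    Val.uMantissa        = UINT64_C(0xc000000000000000);    /* integer bit + quiet bit set, rest zero */
    Val.uSignAndExponent = UINT16_C(0xffff);                /* negative sign, exponent all ones */
    return Val;
}
#endif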
6355
6356
6357/**
6358 * Updates the FOP, FPU.CS and FPUIP registers.
6359 *
6360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6361 * @param pCtx The CPU context.
6362 * @param pFpuCtx The FPU context.
6363 */
6364DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6365{
6366 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6367 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6368    /** @todo x87.CS and FPUIP need to be kept separately. */
6369 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6370 {
6371        /** @todo Testcase: we're making assumptions here about how FPUIP and FPUDP
6372         *        are handled in real mode, based on the fnsave and fnstenv images. */
6373 pFpuCtx->CS = 0;
6374 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6375 }
6376 else
6377 {
6378 pFpuCtx->CS = pCtx->cs.Sel;
6379 pFpuCtx->FPUIP = pCtx->rip;
6380 }
6381}
6382
6383
6384/**
6385 * Updates the x87.DS and FPUDP registers.
6386 *
6387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6388 * @param pCtx The CPU context.
6389 * @param pFpuCtx The FPU context.
6390 * @param iEffSeg The effective segment register.
6391 * @param GCPtrEff The effective address relative to @a iEffSeg.
6392 */
6393DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6394{
6395 RTSEL sel;
6396 switch (iEffSeg)
6397 {
6398 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
6399 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
6400 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
6401 case X86_SREG_ES: sel = pCtx->es.Sel; break;
6402 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
6403 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
6404 default:
6405 AssertMsgFailed(("%d\n", iEffSeg));
6406 sel = pCtx->ds.Sel;
6407 }
6408    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
6409 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6410 {
6411 pFpuCtx->DS = 0;
6412 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
6413 }
6414 else
6415 {
6416 pFpuCtx->DS = sel;
6417 pFpuCtx->FPUDP = GCPtrEff;
6418 }
6419}
6420
6421
6422/**
6423 * Rotates the stack registers in the push direction.
6424 *
6425 * @param pFpuCtx The FPU context.
6426 * @remarks This is a complete waste of time, but fxsave stores the registers in
6427 * stack order.
6428 */
6429DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
6430{
6431 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
6432 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
6433 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
6434 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
6435 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
6436 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
6437 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
6438 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
6439 pFpuCtx->aRegs[0].r80 = r80Tmp;
6440}
6441
6442
6443/**
6444 * Rotates the stack registers in the pop direction.
6445 *
6446 * @param pFpuCtx The FPU context.
6447 * @remarks This is a complete waste of time, but fxsave stores the registers in
6448 * stack order.
6449 */
6450DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
6451{
6452 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
6453 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
6454 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
6455 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
6456 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
6457 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
6458 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
6459 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
6460 pFpuCtx->aRegs[7].r80 = r80Tmp;
6461}
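
/*
 * Illustrative sketch (not part of IEM): the rotations above are needed
 * because aRegs[] is kept in stack order (aRegs[0] is always ST(0)), matching
 * the fxsave image.  An equivalent view is to keep the registers in fixed
 * physical slots and map ST(i) through TOP instead; a push then only
 * decrements TOP modulo 8 and no data moves.  Names below are made up.
 */
#if 0 /* illustrative sketch only */
typedef struct SKETCHX87STACK
{
    long double aPhysRegs[8];   /* fixed physical registers R0..R7 */
    uint8_t     iTop;           /* FSW.TOP: physical index of ST(0), 0..7 */
} SKETCHX87STACK;

/** Maps the stack-relative register ST(iSt) to its physical slot. */
static uint8_t sketchStToPhys(SKETCHX87STACK const *pStack, uint8_t iSt)
{
    return (uint8_t)((pStack->iTop + iSt) & 7);
}

/** Pushes a value: TOP is decremented modulo 8 and the new ST(0) is written.
 *  This is what the rotation above emulates for the stack-ordered layout. */
static void sketchPush(SKETCHX87STACK *pStack, long double lrdValue)
{
    pStack->iTop = (uint8_t)((pStack->iTop + 7) & 7);       /* -1 modulo 8 */
    pStack->aPhysRegs[pStack->iTop] = lrdValue;
}
#endif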
6462
6463
6464/**
6465 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
6466 * exception prevents it.
6467 *
6468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6469 * @param pResult The FPU operation result to push.
6470 * @param pFpuCtx The FPU context.
6471 */
6472IEM_STATIC void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
6473{
6474 /* Update FSW and bail if there are pending exceptions afterwards. */
6475 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6476 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6477 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6478 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6479 {
6480 pFpuCtx->FSW = fFsw;
6481 return;
6482 }
6483
6484 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6485 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6486 {
6487 /* All is fine, push the actual value. */
6488 pFpuCtx->FTW |= RT_BIT(iNewTop);
6489 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
6490 }
6491 else if (pFpuCtx->FCW & X86_FCW_IM)
6492 {
6493 /* Masked stack overflow, push QNaN. */
6494 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6495 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6496 }
6497 else
6498 {
6499 /* Raise stack overflow, don't push anything. */
6500 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6501 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6502 return;
6503 }
6504
6505 fFsw &= ~X86_FSW_TOP_MASK;
6506 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6507 pFpuCtx->FSW = fFsw;
6508
6509 iemFpuRotateStackPush(pFpuCtx);
6510}
6511
6512
6513/**
6514 * Stores a result in a FPU register and updates the FSW and FTW.
6515 *
6516 * @param pFpuCtx The FPU context.
6517 * @param pResult The result to store.
6518 * @param iStReg Which FPU register to store it in.
6519 */
6520IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
6521{
6522 Assert(iStReg < 8);
6523 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6524 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6525 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
6526 pFpuCtx->FTW |= RT_BIT(iReg);
6527 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
6528}
6529
6530
6531/**
6532 * Only updates the FPU status word (FSW) with the result of the current
6533 * instruction.
6534 *
6535 * @param pFpuCtx The FPU context.
6536 * @param u16FSW The FSW output of the current instruction.
6537 */
6538IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
6539{
6540 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6541 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
6542}
6543
6544
6545/**
6546 * Pops one item off the FPU stack if no pending exception prevents it.
6547 *
6548 * @param pFpuCtx The FPU context.
6549 */
6550IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
6551{
6552 /* Check pending exceptions. */
6553 uint16_t uFSW = pFpuCtx->FSW;
6554 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6555 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6556 return;
6557
6558    /* Pop the stack: TOP = (TOP + 1) & 7.  Adding 9 below is the same as +1 modulo 8 under the TOP mask. */
6559 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
6560 uFSW &= ~X86_FSW_TOP_MASK;
6561 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6562 pFpuCtx->FSW = uFSW;
6563
6564 /* Mark the previous ST0 as empty. */
6565 iOldTop >>= X86_FSW_TOP_SHIFT;
6566 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
6567
6568 /* Rotate the registers. */
6569 iemFpuRotateStackPop(pFpuCtx);
6570}
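
/*
 * Illustrative sketch (not part of IEM): popping increments TOP modulo 8.
 * The helper above does this in place in the FSW by adding 9 << 11 and
 * masking; only bits 13:11 survive the mask, so +9 and +1 are equivalent.
 * The constants below mirror the architectural FSW.TOP layout.
 */
#if 0 /* illustrative sketch only */
#define SKETCH_FSW_TOP_SHIFT    11
#define SKETCH_FSW_TOP_MASK     UINT16_C(0x3800)            /* bits 13:11 */

/** Returns the FSW with TOP incremented by one, i.e. after a pop. */
static uint16_t sketchFswPopTop(uint16_t uFsw)
{
    uint16_t uTop = uFsw & SKETCH_FSW_TOP_MASK;
    /* (top + 9) & 7 == (top + 1) & 7 for every top value, so +1 suffices here. */
    uTop = (uint16_t)((uTop + (UINT16_C(1) << SKETCH_FSW_TOP_SHIFT)) & SKETCH_FSW_TOP_MASK);
    return (uint16_t)((uFsw & ~SKETCH_FSW_TOP_MASK) | uTop);
}
#endif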
6571
6572
6573/**
6574 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
6575 *
6576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6577 * @param pResult The FPU operation result to push.
6578 */
6579IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
6580{
6581 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6582 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6583 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6584 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6585}
6586
6587
6588/**
6589 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
6590 * and sets FPUDP and FPUDS.
6591 *
6592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6593 * @param pResult The FPU operation result to push.
6594 * @param iEffSeg The effective segment register.
6595 * @param GCPtrEff The effective address relative to @a iEffSeg.
6596 */
6597IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6598{
6599 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6600 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6601 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6602 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6603 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
6604}
6605
6606
6607/**
6608 * Replaces ST0 with the first value and pushes the second onto the FPU
6609 * stack, unless a pending exception prevents it.
6610 *
6611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6612 * @param pResult The FPU operation result to store and push.
6613 */
6614IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
6615{
6616 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6617 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6618 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6619
6620 /* Update FSW and bail if there are pending exceptions afterwards. */
6621 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
6622 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
6623 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
6624 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
6625 {
6626 pFpuCtx->FSW = fFsw;
6627 return;
6628 }
6629
6630 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
6631 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
6632 {
6633 /* All is fine, push the actual value. */
6634 pFpuCtx->FTW |= RT_BIT(iNewTop);
6635 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
6636 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
6637 }
6638 else if (pFpuCtx->FCW & X86_FCW_IM)
6639 {
6640 /* Masked stack overflow, push QNaN. */
6641 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
6642 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
6643 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6644 }
6645 else
6646 {
6647 /* Raise stack overflow, don't push anything. */
6648 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
6649 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
6650 return;
6651 }
6652
6653 fFsw &= ~X86_FSW_TOP_MASK;
6654 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
6655 pFpuCtx->FSW = fFsw;
6656
6657 iemFpuRotateStackPush(pFpuCtx);
6658}
6659
6660
6661/**
6662 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6663 * FOP.
6664 *
6665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6666 * @param pResult The result to store.
6667 * @param iStReg Which FPU register to store it in.
6668 */
6669IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6670{
6671 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6672 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6673 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6674 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6675}
6676
6677
6678/**
6679 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
6680 * FOP, and then pops the stack.
6681 *
6682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6683 * @param pResult The result to store.
6684 * @param iStReg Which FPU register to store it in.
6685 */
6686IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
6687{
6688 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6689 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6690 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6691 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6692 iemFpuMaybePopOne(pFpuCtx);
6693}
6694
6695
6696/**
6697 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6698 * FPUDP, and FPUDS.
6699 *
6700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6701 * @param pResult The result to store.
6702 * @param iStReg Which FPU register to store it in.
6703 * @param iEffSeg The effective memory operand selector register.
6704 * @param GCPtrEff The effective memory operand offset.
6705 */
6706IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
6707 uint8_t iEffSeg, RTGCPTR GCPtrEff)
6708{
6709 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6710 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6711 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6712 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6713 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6714}
6715
6716
6717/**
6718 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
6719 * FPUDP, and FPUDS, and then pops the stack.
6720 *
6721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6722 * @param pResult The result to store.
6723 * @param iStReg Which FPU register to store it in.
6724 * @param iEffSeg The effective memory operand selector register.
6725 * @param GCPtrEff The effective memory operand offset.
6726 */
6727IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
6728 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6729{
6730 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6731 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6732 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6733 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6734 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
6735 iemFpuMaybePopOne(pFpuCtx);
6736}
6737
6738
6739/**
6740 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
6741 *
6742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6743 */
6744IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
6745{
6746 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6747 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6748 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6749}
6750
6751
6752/**
6753 * Marks the specified stack register as free (for FFREE).
6754 *
6755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6756 * @param iStReg The register to free.
6757 */
6758IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
6759{
6760 Assert(iStReg < 8);
6761 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6762 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6763 pFpuCtx->FTW &= ~RT_BIT(iReg);
6764}
6765
6766
6767/**
6768 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
6769 *
6770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6771 */
6772IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
6773{
6774 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6775 uint16_t uFsw = pFpuCtx->FSW;
6776 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6777 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6778 uFsw &= ~X86_FSW_TOP_MASK;
6779 uFsw |= uTop;
6780 pFpuCtx->FSW = uFsw;
6781}
6782
6783
6784/**
6785 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
6786 *
6787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6788 */
6789IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
6790{
6791 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
6792 uint16_t uFsw = pFpuCtx->FSW;
6793 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
6794 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
6795 uFsw &= ~X86_FSW_TOP_MASK;
6796 uFsw |= uTop;
6797 pFpuCtx->FSW = uFsw;
6798}
6799
6800
6801/**
6802 * Updates the FSW, FOP, FPUIP, and FPUCS.
6803 *
6804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6805 * @param u16FSW The FSW from the current instruction.
6806 */
6807IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
6808{
6809 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6810 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6811 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6812 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6813}
6814
6815
6816/**
6817 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
6818 *
6819 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6820 * @param u16FSW The FSW from the current instruction.
6821 */
6822IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
6823{
6824 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6825 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6826 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6827 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6828 iemFpuMaybePopOne(pFpuCtx);
6829}
6830
6831
6832/**
6833 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
6834 *
6835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6836 * @param u16FSW The FSW from the current instruction.
6837 * @param iEffSeg The effective memory operand selector register.
6838 * @param GCPtrEff The effective memory operand offset.
6839 */
6840IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6841{
6842 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6843 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6844 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6845 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6846 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6847}
6848
6849
6850/**
6851 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
6852 *
6853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6854 * @param u16FSW The FSW from the current instruction.
6855 */
6856IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
6857{
6858 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6859 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6860 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6861 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6862 iemFpuMaybePopOne(pFpuCtx);
6863 iemFpuMaybePopOne(pFpuCtx);
6864}
6865
6866
6867/**
6868 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
6869 *
6870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6871 * @param u16FSW The FSW from the current instruction.
6872 * @param iEffSeg The effective memory operand selector register.
6873 * @param GCPtrEff The effective memory operand offset.
6874 */
6875IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6876{
6877 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6878 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6879 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6880 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6881 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
6882 iemFpuMaybePopOne(pFpuCtx);
6883}
6884
6885
6886/**
6887 * Worker routine for raising an FPU stack underflow exception.
6888 *
6889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6890 * @param pFpuCtx The FPU context.
6891 * @param iStReg The stack register being accessed.
6892 */
6893IEM_STATIC void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
6894{
6895 Assert(iStReg < 8 || iStReg == UINT8_MAX);
6896 if (pFpuCtx->FCW & X86_FCW_IM)
6897 {
6898 /* Masked underflow. */
6899 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6900 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6901 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
6902 if (iStReg != UINT8_MAX)
6903 {
6904 pFpuCtx->FTW |= RT_BIT(iReg);
6905 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
6906 }
6907 }
6908 else
6909 {
6910 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6911 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
6912 }
6913}
6914
6915
6916/**
6917 * Raises a FPU stack underflow exception.
6918 *
6919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6920 * @param iStReg The destination register that should be loaded
6921 * with QNaN if \#IS is not masked. Specify
6922 * UINT8_MAX if none (like for fcom).
6923 */
6924DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
6925{
6926 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6927 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6928 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6929 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6930}
6931
6932
6933DECL_NO_INLINE(IEM_STATIC, void)
6934iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6935{
6936 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6937 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6938 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6939 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6940 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6941}
6942
6943
6944DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
6945{
6946 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6947 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6948 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6949 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6950 iemFpuMaybePopOne(pFpuCtx);
6951}
6952
6953
6954DECL_NO_INLINE(IEM_STATIC, void)
6955iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
6956{
6957 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6958 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6959 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
6960 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6961 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
6962 iemFpuMaybePopOne(pFpuCtx);
6963}
6964
6965
6966DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
6967{
6968 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6969 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6970 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6971 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
6972 iemFpuMaybePopOne(pFpuCtx);
6973 iemFpuMaybePopOne(pFpuCtx);
6974}
6975
6976
6977DECL_NO_INLINE(IEM_STATIC, void)
6978iemFpuStackPushUnderflow(PVMCPU pVCpu)
6979{
6980 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6981 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
6982 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
6983
6984 if (pFpuCtx->FCW & X86_FCW_IM)
6985 {
6986        /* Masked underflow - push QNaN. */
6987 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
6988 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
6989 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
6990 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
6991 pFpuCtx->FTW |= RT_BIT(iNewTop);
6992 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
6993 iemFpuRotateStackPush(pFpuCtx);
6994 }
6995 else
6996 {
6997 /* Exception pending - don't change TOP or the register stack. */
6998 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
6999 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7000 }
7001}
7002
7003
7004DECL_NO_INLINE(IEM_STATIC, void)
7005iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7006{
7007 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7008 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7009 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7010
7011 if (pFpuCtx->FCW & X86_FCW_IM)
7012 {
7013        /* Masked underflow - push QNaN. */
7014 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7015 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7016 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7017 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7018 pFpuCtx->FTW |= RT_BIT(iNewTop);
7019 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7020 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7021 iemFpuRotateStackPush(pFpuCtx);
7022 }
7023 else
7024 {
7025 /* Exception pending - don't change TOP or the register stack. */
7026 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7027 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7028 }
7029}
7030
7031
7032/**
7033 * Worker routine for raising an FPU stack overflow exception on a push.
7034 *
7035 * @param pFpuCtx The FPU context.
7036 */
7037IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7038{
7039 if (pFpuCtx->FCW & X86_FCW_IM)
7040 {
7041 /* Masked overflow. */
7042 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7043 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7044 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7045 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7046 pFpuCtx->FTW |= RT_BIT(iNewTop);
7047 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7048 iemFpuRotateStackPush(pFpuCtx);
7049 }
7050 else
7051 {
7052 /* Exception pending - don't change TOP or the register stack. */
7053 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7054 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7055 }
7056}
7057
7058
7059/**
7060 * Raises a FPU stack overflow exception on a push.
7061 *
7062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7063 */
7064DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7065{
7066 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7067 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7068 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7069 iemFpuStackPushOverflowOnly(pFpuCtx);
7070}
7071
7072
7073/**
7074 * Raises a FPU stack overflow exception on a push with a memory operand.
7075 *
7076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7077 * @param iEffSeg The effective memory operand selector register.
7078 * @param GCPtrEff The effective memory operand offset.
7079 */
7080DECL_NO_INLINE(IEM_STATIC, void)
7081iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7082{
7083 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7084 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7085 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7086 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7087 iemFpuStackPushOverflowOnly(pFpuCtx);
7088}
7089
7090
7091IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7092{
7093 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7094 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7095 if (pFpuCtx->FTW & RT_BIT(iReg))
7096 return VINF_SUCCESS;
7097 return VERR_NOT_FOUND;
7098}
7099
7100
7101IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7102{
7103 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7104 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7105 if (pFpuCtx->FTW & RT_BIT(iReg))
7106 {
7107 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7108 return VINF_SUCCESS;
7109 }
7110 return VERR_NOT_FOUND;
7111}
7112
7113
7114IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7115 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7116{
7117 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7118 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7119 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7120 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7121 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7122 {
7123 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7124 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7125 return VINF_SUCCESS;
7126 }
7127 return VERR_NOT_FOUND;
7128}
7129
7130
7131IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7132{
7133 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7134 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7135 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7136 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7137 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7138 {
7139 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7140 return VINF_SUCCESS;
7141 }
7142 return VERR_NOT_FOUND;
7143}
7144
7145
7146/**
7147 * Updates the FPU exception status after FCW is changed.
7148 *
7149 * @param pFpuCtx The FPU context.
7150 */
7151IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7152{
7153 uint16_t u16Fsw = pFpuCtx->FSW;
7154 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7155 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7156 else
7157 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7158 pFpuCtx->FSW = u16Fsw;
7159}
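
/*
 * Illustrative sketch (not part of IEM): FSW.ES (with B mirroring it)
 * summarizes whether any exception flag is set that is not masked in FCW.
 * The standalone restatement below only considers the six classic exception
 * flags IE..PE (bits 0..5); the helper above uses the VBox X86_FSW_XCPT_MASK
 * constant, which may cover additional bits.
 */
#if 0 /* illustrative sketch only */
#define SKETCH_FSW_XCPT_MASK    UINT16_C(0x003f)    /* IE, DE, ZE, OE, UE, PE */
#define SKETCH_FSW_ES           UINT16_C(0x0080)
#define SKETCH_FSW_B            UINT16_C(0x8000)

/** Recomputes ES and B after FCW (or FSW) has changed. */
static uint16_t sketchRecalcEs(uint16_t uFsw, uint16_t uFcw)
{
    uint16_t const fUnmasked = (uint16_t)((uFsw & SKETCH_FSW_XCPT_MASK) & ~(uFcw & SKETCH_FSW_XCPT_MASK));
    if (fUnmasked)
        return (uint16_t)(uFsw | SKETCH_FSW_ES | SKETCH_FSW_B);
    return (uint16_t)(uFsw & ~(SKETCH_FSW_ES | SKETCH_FSW_B));
}
#endif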
7160
7161
7162/**
7163 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7164 *
7165 * @returns The full FTW.
7166 * @param pFpuCtx The FPU context.
7167 */
7168IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7169{
7170 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7171 uint16_t u16Ftw = 0;
7172 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7173 for (unsigned iSt = 0; iSt < 8; iSt++)
7174 {
7175 unsigned const iReg = (iSt + iTop) & 7;
7176 if (!(u8Ftw & RT_BIT(iReg)))
7177 u16Ftw |= 3 << (iReg * 2); /* empty */
7178 else
7179 {
7180 uint16_t uTag;
7181 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7182 if (pr80Reg->s.uExponent == 0x7fff)
7183 uTag = 2; /* Exponent is all 1's => Special. */
7184 else if (pr80Reg->s.uExponent == 0x0000)
7185 {
7186 if (pr80Reg->s.u64Mantissa == 0x0000)
7187 uTag = 1; /* All bits are zero => Zero. */
7188 else
7189 uTag = 2; /* Must be special. */
7190 }
7191 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7192 uTag = 0; /* Valid. */
7193 else
7194 uTag = 2; /* Must be special. */
7195
7196 u16Ftw |= uTag << (iReg * 2); /* empty */
7197 }
7198 }
7199
7200 return u16Ftw;
7201}
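
/*
 * Illustrative sketch (not part of IEM): the full tag word uses two bits per
 * register: 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal,
 * unnormal), 11 = empty.  The standalone classifier below restates the
 * per-register decision made above given the raw exponent, mantissa and an
 * in-use flag; the name is made up.
 */
#if 0 /* illustrative sketch only */
/** Returns the 2-bit x87 tag for one register. */
static uint16_t sketchCalcTag(int fInUse, uint16_t uExponent, uint64_t u64Mantissa)
{
    if (!fInUse)
        return 3;                                   /* empty */
    if (uExponent == 0x7fff)
        return 2;                                   /* exponent all ones => special */
    if (uExponent == 0)
        return u64Mantissa == 0 ? 1 : 2;            /* zero, or denormal => special */
    if (u64Mantissa & UINT64_C(0x8000000000000000))
        return 0;                                   /* J bit set => valid */
    return 2;                                       /* unnormal => special */
}
#endif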
7202
7203
7204/**
7205 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7206 *
7207 * @returns The compressed FTW.
7208 * @param u16FullFtw The full FTW to convert.
7209 */
7210IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7211{
7212 uint8_t u8Ftw = 0;
7213 for (unsigned i = 0; i < 8; i++)
7214 {
7215 if ((u16FullFtw & 3) != 3 /*empty*/)
7216 u8Ftw |= RT_BIT(i);
7217 u16FullFtw >>= 2;
7218 }
7219
7220 return u8Ftw;
7221}
7222
7223/** @} */
7224
7225
7226/** @name Memory access.
7227 *
7228 * @{
7229 */
7230
7231
7232/**
7233 * Updates the IEMCPU::cbWritten counter if applicable.
7234 *
7235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7236 * @param fAccess The access being accounted for.
7237 * @param cbMem The access size.
7238 */
7239DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7240{
7241 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7242 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7243 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7244}
7245
7246
7247/**
7248 * Checks if the given segment can be written to, raise the appropriate
7249 * exception if not.
7250 *
7251 * @returns VBox strict status code.
7252 *
7253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7254 * @param pHid Pointer to the hidden register.
7255 * @param iSegReg The register number.
7256 * @param pu64BaseAddr Where to return the base address to use for the
7257 * segment. (In 64-bit code it may differ from the
7258 * base in the hidden segment.)
7259 */
7260IEM_STATIC VBOXSTRICTRC
7261iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7262{
7263 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7264 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7265 else
7266 {
7267 if (!pHid->Attr.n.u1Present)
7268 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7269
7270 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7271 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7272 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7273 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7274 *pu64BaseAddr = pHid->u64Base;
7275 }
7276 return VINF_SUCCESS;
7277}
7278
7279
7280/**
7281 * Checks if the given segment can be read from, raise the appropriate
7282 * exception if not.
7283 *
7284 * @returns VBox strict status code.
7285 *
7286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7287 * @param pHid Pointer to the hidden register.
7288 * @param iSegReg The register number.
7289 * @param pu64BaseAddr Where to return the base address to use for the
7290 * segment. (In 64-bit code it may differ from the
7291 * base in the hidden segment.)
7292 */
7293IEM_STATIC VBOXSTRICTRC
7294iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7295{
7296 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7297 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7298 else
7299 {
7300 if (!pHid->Attr.n.u1Present)
7301 return iemRaiseSelectorNotPresentBySegReg(pVCpu, iSegReg);
7302
7303 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7304 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7305 *pu64BaseAddr = pHid->u64Base;
7306 }
7307 return VINF_SUCCESS;
7308}
7309
7310
7311/**
7312 * Applies the segment limit, base and attributes.
7313 *
7314 * This may raise a \#GP or \#SS.
7315 *
7316 * @returns VBox strict status code.
7317 *
7318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7319 * @param fAccess The kind of access which is being performed.
7320 * @param iSegReg The index of the segment register to apply.
7321 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7322 * TSS, ++).
7323 * @param cbMem The access size.
7324 * @param pGCPtrMem Pointer to the guest memory address to apply
7325 * segmentation to. Input and output parameter.
7326 */
7327IEM_STATIC VBOXSTRICTRC
7328iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7329{
7330 if (iSegReg == UINT8_MAX)
7331 return VINF_SUCCESS;
7332
7333 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7334 switch (pVCpu->iem.s.enmCpuMode)
7335 {
7336 case IEMMODE_16BIT:
7337 case IEMMODE_32BIT:
7338 {
7339 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7340 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7341
7342 if ( pSel->Attr.n.u1Present
7343 && !pSel->Attr.n.u1Unusable)
7344 {
7345 Assert(pSel->Attr.n.u1DescType);
7346 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7347 {
7348 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7349 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7350 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7351
7352 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7353 {
7354 /** @todo CPL check. */
7355 }
7356
7357 /*
7358 * There are two kinds of data selectors, normal and expand down.
7359 */
7360 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7361 {
7362 if ( GCPtrFirst32 > pSel->u32Limit
7363 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7364 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7365 }
7366 else
7367 {
7368 /*
7369 * The upper boundary is defined by the B bit, not the G bit!
7370 */
7371 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7372 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7373 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7374 }
7375 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7376 }
7377 else
7378 {
7379
7380 /*
7381                 * A code selector can usually be used to read through; writing is
7382 * only permitted in real and V8086 mode.
7383 */
7384 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7385 || ( (fAccess & IEM_ACCESS_TYPE_READ)
7386 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
7387 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
7388 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7389
7390 if ( GCPtrFirst32 > pSel->u32Limit
7391 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7392 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7393
7394 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7395 {
7396 /** @todo CPL check. */
7397 }
7398
7399 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7400 }
7401 }
7402 else
7403 return iemRaiseGeneralProtectionFault0(pVCpu);
7404 return VINF_SUCCESS;
7405 }
7406
7407 case IEMMODE_64BIT:
7408 {
7409 RTGCPTR GCPtrMem = *pGCPtrMem;
7410 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
7411 *pGCPtrMem = GCPtrMem + pSel->u64Base;
7412
7413 Assert(cbMem >= 1);
7414 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
7415 return VINF_SUCCESS;
7416 return iemRaiseGeneralProtectionFault0(pVCpu);
7417 }
7418
7419 default:
7420 AssertFailedReturn(VERR_IEM_IPE_7);
7421 }
7422}
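
/*
 * Illustrative sketch (not part of IEM): for expand-down data segments the
 * valid offsets are those above the limit, i.e. limit+1 up to 0xffff, or up
 * to 0xffffffff when the B bit is set; normal segments allow 0 up to the
 * limit.  The standalone check below restates the two range tests performed
 * above; the function and parameter names are made up.
 */
#if 0 /* illustrative sketch only */
/** Checks whether [offFirst, offLast] lies inside a 16/32-bit data segment. */
static bool sketchSegLimitOk(uint32_t offFirst, uint32_t offLast,
                             uint32_t uLimit, bool fExpandDown, bool fBigBit)
{
    if (!fExpandDown)
        return offFirst <= uLimit && offLast <= uLimit;

    /* Expand down: the upper boundary comes from the B bit, not from G. */
    uint32_t const uUpper = fBigBit ? UINT32_MAX : UINT32_C(0xffff);
    if (offFirst < uLimit + 1 || offLast > uUpper)
        return false;
    return true;
}
#endif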
7423
7424
7425/**
7426 * Translates a virtual address to a physical address and checks if we
7427 * can access the page as specified.
7428 *
7429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7430 * @param GCPtrMem The virtual address.
7431 * @param fAccess The intended access.
7432 * @param pGCPhysMem Where to return the physical address.
7433 */
7434IEM_STATIC VBOXSTRICTRC
7435iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
7436{
7437 /** @todo Need a different PGM interface here. We're currently using
7438 * generic / REM interfaces. this won't cut it for R0 & RC. */
7439 RTGCPHYS GCPhys;
7440 uint64_t fFlags;
7441 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
7442 if (RT_FAILURE(rc))
7443 {
7444 /** @todo Check unassigned memory in unpaged mode. */
7445 /** @todo Reserved bits in page tables. Requires new PGM interface. */
7446 *pGCPhysMem = NIL_RTGCPHYS;
7447 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
7448 }
7449
7450 /* If the page is writable and does not have the no-exec bit set, all
7451 access is allowed. Otherwise we'll have to check more carefully... */
7452 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
7453 {
7454 /* Write to read only memory? */
7455 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7456 && !(fFlags & X86_PTE_RW)
7457 && ( pVCpu->iem.s.uCpl != 0
7458 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
7459 {
7460 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
7461 *pGCPhysMem = NIL_RTGCPHYS;
7462 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
7463 }
7464
7465 /* Kernel memory accessed by userland? */
7466 if ( !(fFlags & X86_PTE_US)
7467 && pVCpu->iem.s.uCpl == 3
7468 && !(fAccess & IEM_ACCESS_WHAT_SYS))
7469 {
7470 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
7471 *pGCPhysMem = NIL_RTGCPHYS;
7472 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
7473 }
7474
7475 /* Executing non-executable memory? */
7476 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
7477 && (fFlags & X86_PTE_PAE_NX)
7478 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
7479 {
7480 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
7481 *pGCPhysMem = NIL_RTGCPHYS;
7482 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
7483 VERR_ACCESS_DENIED);
7484 }
7485 }
7486
7487 /*
7488 * Set the dirty / access flags.
7489     * ASSUMES this is set when the address is translated rather than on commit...
7490 */
7491 /** @todo testcase: check when A and D bits are actually set by the CPU. */
7492 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
7493 if ((fFlags & fAccessedDirty) != fAccessedDirty)
7494 {
7495 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
7496 AssertRC(rc2);
7497 }
7498
7499 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
7500 *pGCPhysMem = GCPhys;
7501 return VINF_SUCCESS;
7502}
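
/*
 * Illustrative sketch (not part of IEM): the checks above deny (1) writes to
 * read-only pages unless CPL is 0 and CR0.WP is clear, (2) CPL 3 accesses to
 * supervisor pages for non-system accesses, and (3) instruction fetches from
 * no-execute pages when EFER.NXE is set.  The standalone predicate below
 * restates that logic; names are made up and the constants follow the
 * architectural PTE bits (RW = bit 1, US = bit 2, NX = bit 63).
 */
#if 0 /* illustrative sketch only */
#define SKETCH_PTE_RW   UINT64_C(0x0000000000000002)
#define SKETCH_PTE_US   UINT64_C(0x0000000000000004)
#define SKETCH_PTE_NX   UINT64_C(0x8000000000000000)

/** Returns true if the access is permitted by the page-level protection. */
static bool sketchPageAccessOk(uint64_t fPteFlags, bool fWrite, bool fExec, bool fSysAccess,
                               uint8_t uCpl, bool fCr0Wp, bool fEferNxe)
{
    if (fWrite && !(fPteFlags & SKETCH_PTE_RW) && (uCpl != 0 || fCr0Wp))
        return false;   /* write to a read-only page */
    if (!(fPteFlags & SKETCH_PTE_US) && uCpl == 3 && !fSysAccess)
        return false;   /* user-mode access to a supervisor page */
    if (fExec && (fPteFlags & SKETCH_PTE_NX) && fEferNxe)
        return false;   /* instruction fetch from a no-execute page */
    return true;
}
#endif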
7503
7504
7505
7506/**
7507 * Maps a physical page.
7508 *
7509 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
7510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7511 * @param GCPhysMem The physical address.
7512 * @param fAccess The intended access.
7513 * @param ppvMem Where to return the mapping address.
7514 * @param pLock The PGM lock.
7515 */
7516IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
7517{
7518#ifdef IEM_VERIFICATION_MODE_FULL
7519 /* Force the alternative path so we can ignore writes. */
7520 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
7521 {
7522 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7523 {
7524 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
7525 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7526 if (RT_FAILURE(rc2))
7527 pVCpu->iem.s.fProblematicMemory = true;
7528 }
7529 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7530 }
7531#endif
7532#ifdef IEM_LOG_MEMORY_WRITES
7533 if (fAccess & IEM_ACCESS_TYPE_WRITE)
7534 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7535#endif
7536#ifdef IEM_VERIFICATION_MODE_MINIMAL
7537 return VERR_PGM_PHYS_TLB_CATCH_ALL;
7538#endif
7539
7540    /** @todo This API may require some improvement later. A private deal with PGM
7541     *        regarding locking and unlocking needs to be struck. A couple of TLBs
7542 * living in PGM, but with publicly accessible inlined access methods
7543 * could perhaps be an even better solution. */
7544 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
7545 GCPhysMem,
7546 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
7547 pVCpu->iem.s.fBypassHandlers,
7548 ppvMem,
7549 pLock);
7550 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
7551 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
7552
7553#ifdef IEM_VERIFICATION_MODE_FULL
7554 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7555 pVCpu->iem.s.fProblematicMemory = true;
7556#endif
7557 return rc;
7558}
7559
7560
7561/**
7562 * Unmap a page previously mapped by iemMemPageMap.
7563 *
7564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7565 * @param GCPhysMem The physical address.
7566 * @param fAccess The intended access.
7567 * @param pvMem What iemMemPageMap returned.
7568 * @param pLock The PGM lock.
7569 */
7570DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
7571{
7572 NOREF(pVCpu);
7573 NOREF(GCPhysMem);
7574 NOREF(fAccess);
7575 NOREF(pvMem);
7576 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
7577}
7578
7579
7580/**
7581 * Looks up a memory mapping entry.
7582 *
7583 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
7584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7585 * @param pvMem The memory address.
7586 * @param fAccess The access flags to match.
7587 */
7588DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
7589{
7590 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
7591 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
7592 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
7593 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7594 return 0;
7595 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
7596 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7597 return 1;
7598 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
7599 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
7600 return 2;
7601 return VERR_NOT_FOUND;
7602}
7603
7604
7605/**
7606 * Finds a free memmap entry when iNextMapping cannot be used directly.
7607 *
7608 * @returns Memory mapping index, 1024 on failure.
7609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7610 */
7611IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
7612{
7613 /*
7614 * The easy case.
7615 */
7616 if (pVCpu->iem.s.cActiveMappings == 0)
7617 {
7618 pVCpu->iem.s.iNextMapping = 1;
7619 return 0;
7620 }
7621
7622 /* There should be enough mappings for all instructions. */
7623 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
7624
7625 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
7626 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
7627 return i;
7628
7629 AssertFailedReturn(1024);
7630}
7631
7632
7633/**
7634 * Commits a bounce buffer that needs writing back and unmaps it.
7635 *
7636 * @returns Strict VBox status code.
7637 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7638 * @param iMemMap The index of the buffer to commit.
7639 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
7640 * Always false in ring-3, obviously.
7641 */
7642IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
7643{
7644 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
7645 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
7646#ifdef IN_RING3
7647 Assert(!fPostponeFail);
7648#endif
7649
7650 /*
7651 * Do the writing.
7652 */
7653#ifndef IEM_VERIFICATION_MODE_MINIMAL
7654 PVM pVM = pVCpu->CTX_SUFF(pVM);
7655 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
7656 && !IEM_VERIFICATION_ENABLED(pVCpu))
7657 {
7658 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7659 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7660 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7661 if (!pVCpu->iem.s.fBypassHandlers)
7662 {
7663 /*
7664 * Carefully and efficiently dealing with access handler return
7665             * codes makes this a little bloated.
7666 */
7667 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
7668 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7669 pbBuf,
7670 cbFirst,
7671 PGMACCESSORIGIN_IEM);
7672 if (rcStrict == VINF_SUCCESS)
7673 {
7674 if (cbSecond)
7675 {
7676 rcStrict = PGMPhysWrite(pVM,
7677 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7678 pbBuf + cbFirst,
7679 cbSecond,
7680 PGMACCESSORIGIN_IEM);
7681 if (rcStrict == VINF_SUCCESS)
7682 { /* nothing */ }
7683 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7684 {
7685 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
7686 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7687 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7688 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7689 }
7690# ifndef IN_RING3
7691 else if (fPostponeFail)
7692 {
7693 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7694 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7695 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7696 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7697 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7698 return iemSetPassUpStatus(pVCpu, rcStrict);
7699 }
7700# endif
7701 else
7702 {
7703 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7704 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7705 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7706 return rcStrict;
7707 }
7708 }
7709 }
7710 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7711 {
7712 if (!cbSecond)
7713 {
7714 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
7715 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7716 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7717 }
7718 else
7719 {
7720 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
7721 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7722 pbBuf + cbFirst,
7723 cbSecond,
7724 PGMACCESSORIGIN_IEM);
7725 if (rcStrict2 == VINF_SUCCESS)
7726 {
7727 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
7728 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7729 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7730 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7731 }
7732 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7733 {
7734 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
7735 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7736 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7737 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7738 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7739 }
7740# ifndef IN_RING3
7741 else if (fPostponeFail)
7742 {
7743 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7744 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7745 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7746 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
7747 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7748 return iemSetPassUpStatus(pVCpu, rcStrict);
7749 }
7750# endif
7751 else
7752 {
7753 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7754 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7755 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
7756 return rcStrict2;
7757 }
7758 }
7759 }
7760# ifndef IN_RING3
7761 else if (fPostponeFail)
7762 {
7763 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
7764 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7765 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7766 if (!cbSecond)
7767 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
7768 else
7769 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
7770 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
7771 return iemSetPassUpStatus(pVCpu, rcStrict);
7772 }
7773# endif
7774 else
7775 {
7776 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7777 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
7778 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7779 return rcStrict;
7780 }
7781 }
7782 else
7783 {
7784 /*
7785 * No access handlers, much simpler.
7786 */
7787 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
7788 if (RT_SUCCESS(rc))
7789 {
7790 if (cbSecond)
7791 {
7792 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
7793 if (RT_SUCCESS(rc))
7794 { /* likely */ }
7795 else
7796 {
7797 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
7798 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
7799 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
7800 return rc;
7801 }
7802 }
7803 }
7804 else
7805 {
7806 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
7807 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
7808 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
7809 return rc;
7810 }
7811 }
7812 }
7813#endif
7814
7815#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7816 /*
7817 * Record the write(s).
7818 */
7819 if (!pVCpu->iem.s.fNoRem)
7820 {
7821 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7822 if (pEvtRec)
7823 {
7824 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7825 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
7826 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
7827 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
7828 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
7829 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7830 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7831 }
7832 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7833 {
7834 pEvtRec = iemVerifyAllocRecord(pVCpu);
7835 if (pEvtRec)
7836 {
7837 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
7838 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
7839 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7840 memcpy(pEvtRec->u.RamWrite.ab,
7841 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
7842 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
7843 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
7844 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
7845 }
7846 }
7847 }
7848#endif
7849#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
7850 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
7851 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
7852 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
7853 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
7854 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
7855 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
7856
7857 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
7858 g_cbIemWrote = cbWrote;
7859 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
7860#endif
7861
7862 /*
7863 * Free the mapping entry.
7864 */
7865 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
7866 Assert(pVCpu->iem.s.cActiveMappings != 0);
7867 pVCpu->iem.s.cActiveMappings--;
7868 return VINF_SUCCESS;
7869}
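
/*
 * Illustrative sketch (not part of IEM): the cross-page bounce buffering
 * below splits an access at the 4 KiB page boundary; the first chunk runs
 * from the start address to the end of its page, the second chunk is the
 * remainder on the following page.  The standalone helper below restates
 * that split; names are made up.
 */
#if 0 /* illustrative sketch only */
#define SKETCH_PAGE_SIZE        UINT32_C(0x1000)
#define SKETCH_PAGE_OFFSET_MASK UINT32_C(0x0fff)

/** Splits an access [GCPtr, GCPtr + cbMem) that crosses one page boundary. */
static void sketchSplitCrossPage(uint64_t GCPtr, uint32_t cbMem, uint32_t *pcbFirst, uint32_t *pcbSecond)
{
    uint32_t const cbFirst = SKETCH_PAGE_SIZE - ((uint32_t)GCPtr & SKETCH_PAGE_OFFSET_MASK);
    *pcbFirst  = cbFirst;
    *pcbSecond = cbMem - cbFirst;   /* assumes the access really crosses into exactly one more page */
}
#endif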
7870
7871
7872/**
7873 * iemMemMap worker that deals with a request crossing pages.
7874 */
7875IEM_STATIC VBOXSTRICTRC
7876iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
7877{
7878 /*
7879 * Do the address translations.
7880 */
7881 RTGCPHYS GCPhysFirst;
7882 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
7883 if (rcStrict != VINF_SUCCESS)
7884 return rcStrict;
7885
7886 RTGCPHYS GCPhysSecond;
7887 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
7888 fAccess, &GCPhysSecond);
7889 if (rcStrict != VINF_SUCCESS)
7890 return rcStrict;
7891 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
7892
7893 PVM pVM = pVCpu->CTX_SUFF(pVM);
7894#ifdef IEM_VERIFICATION_MODE_FULL
7895 /*
7896 * Detect problematic memory when verifying so we can select
7897 * the right execution engine. (TLB: Redo this.)
7898 */
7899 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
7900 {
7901 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7902 if (RT_SUCCESS(rc2))
7903 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
7904 if (RT_FAILURE(rc2))
7905 pVCpu->iem.s.fProblematicMemory = true;
7906 }
7907#endif
7908
7909
7910 /*
7911 * Read in the current memory content if it's a read, execute or partial
7912 * write access.
7913 */
7914 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
7915 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
7916 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
7917
7918 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
7919 {
7920 if (!pVCpu->iem.s.fBypassHandlers)
7921 {
7922 /*
7923 * Must carefully deal with access handler status codes here,
7924             * which makes the code a bit bloated.
7925 */
7926 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
7927 if (rcStrict == VINF_SUCCESS)
7928 {
7929 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
7930 if (rcStrict == VINF_SUCCESS)
7931 { /*likely */ }
7932 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7933 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7934 else
7935 {
7936 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
7937 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
7938 return rcStrict;
7939 }
7940 }
7941 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
7942 {
7943 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
7944 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
7945 {
7946 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
7947 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
7948 }
7949 else
7950 {
7951 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
7952 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
7953 return rcStrict2;
7954 }
7955 }
7956 else
7957 {
7958 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
7959 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
7960 return rcStrict;
7961 }
7962 }
7963 else
7964 {
7965 /*
7966 * No informational status codes here, much more straightforward.
7967 */
7968 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
7969 if (RT_SUCCESS(rc))
7970 {
7971 Assert(rc == VINF_SUCCESS);
7972 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
7973 if (RT_SUCCESS(rc))
7974 Assert(rc == VINF_SUCCESS);
7975 else
7976 {
7977 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
7978 return rc;
7979 }
7980 }
7981 else
7982 {
7983 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
7984 return rc;
7985 }
7986 }
7987
7988#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
7989 if ( !pVCpu->iem.s.fNoRem
7990 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
7991 {
7992 /*
7993 * Record the reads.
7994 */
7995 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
7996 if (pEvtRec)
7997 {
7998 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
7999 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8000 pEvtRec->u.RamRead.cb = cbFirstPage;
8001 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8002 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8003 }
8004 pEvtRec = iemVerifyAllocRecord(pVCpu);
8005 if (pEvtRec)
8006 {
8007 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8008 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8009 pEvtRec->u.RamRead.cb = cbSecondPage;
8010 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8011 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8012 }
8013 }
8014#endif
8015 }
8016#ifdef VBOX_STRICT
8017 else
8018 memset(pbBuf, 0xcc, cbMem);
8019 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8020 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8021#endif
8022
8023 /*
8024 * Commit the bounce buffer entry.
8025 */
8026 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8027 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8028 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8029 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8030 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8031 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8032 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8033 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8034 pVCpu->iem.s.cActiveMappings++;
8035
8036 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8037 *ppvMem = pbBuf;
8038 return VINF_SUCCESS;
8039}
8040
8041
8042/**
8043 * iemMemMap worker that deals with iemMemPageMap failures.
8044 */
8045IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8046 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8047{
8048 /*
8049 * Filter out conditions we can handle and the ones which shouldn't happen.
8050 */
8051 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8052 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8053 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8054 {
8055 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8056 return rcMap;
8057 }
8058 pVCpu->iem.s.cPotentialExits++;
8059
8060 /*
8061 * Read in the current memory content if it's a read, execute or partial
8062 * write access.
8063 */
8064 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8065 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8066 {
8067 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8068 memset(pbBuf, 0xff, cbMem);
8069 else
8070 {
8071 int rc;
8072 if (!pVCpu->iem.s.fBypassHandlers)
8073 {
8074 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8075 if (rcStrict == VINF_SUCCESS)
8076 { /* nothing */ }
8077 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8078 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8079 else
8080 {
8081 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8082 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8083 return rcStrict;
8084 }
8085 }
8086 else
8087 {
8088 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8089 if (RT_SUCCESS(rc))
8090 { /* likely */ }
8091 else
8092 {
8093 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8094 GCPhysFirst, rc));
8095 return rc;
8096 }
8097 }
8098 }
8099
8100#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8101 if ( !pVCpu->iem.s.fNoRem
8102 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8103 {
8104 /*
8105 * Record the read.
8106 */
8107 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8108 if (pEvtRec)
8109 {
8110 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8111 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8112 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8113 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8114 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8115 }
8116 }
8117#endif
8118 }
8119#ifdef VBOX_STRICT
8120 else
8121 memset(pbBuf, 0xcc, cbMem);
8122#endif
8123#ifdef VBOX_STRICT
8124 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8125 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8126#endif
8127
8128 /*
8129 * Commit the bounce buffer entry.
8130 */
8131 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8132 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8133 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8134 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8135 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8136 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8137 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8138 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8139 pVCpu->iem.s.cActiveMappings++;
8140
8141 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8142 *ppvMem = pbBuf;
8143 return VINF_SUCCESS;
8144}
8145
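/*
 * Illustrative note (not from the original source): when rcMap is
 * VERR_PGM_PHYS_TLB_UNASSIGNED, the read part of the access is satisfied from the
 * bounce buffer pre-filled with 0xff above, so e.g. a 4-byte read from an
 * unassigned physical range returns 0xffffffff to the guest.
 */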
8146
8147
8148/**
8149 * Maps the specified guest memory for the given kind of access.
8150 *
8151 * This may be using bounce buffering of the memory if it's crossing a page
8152 * boundary or if there is an access handler installed for any of it. Because
8153 * of lock prefix guarantees, we're in for some extra clutter when this
8154 * happens.
8155 *
8156 * This may raise a \#GP, \#SS, \#PF or \#AC.
8157 *
8158 * @returns VBox strict status code.
8159 *
8160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8161 * @param ppvMem Where to return the pointer to the mapped
8162 * memory.
8163 * @param cbMem The number of bytes to map. This is usually 1,
8164 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8165 * string operations it can be up to a page.
8166 * @param iSegReg The index of the segment register to use for
8167 * this access. The base and limits are checked.
8168 * Use UINT8_MAX to indicate that no segmentation
8169 * is required (for IDT, GDT and LDT accesses).
8170 * @param GCPtrMem The address of the guest memory.
8171 * @param fAccess How the memory is being accessed. The
8172 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8173 * how to map the memory, while the
8174 * IEM_ACCESS_WHAT_XXX bit is used when raising
8175 * exceptions.
8176 */
8177IEM_STATIC VBOXSTRICTRC
8178iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8179{
8180 /*
8181 * Check the input and figure out which mapping entry to use.
8182 */
8183 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8184 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8185 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8186
8187 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8188 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8189 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8190 {
8191 iMemMap = iemMemMapFindFree(pVCpu);
8192 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8193 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8194 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8195 pVCpu->iem.s.aMemMappings[2].fAccess),
8196 VERR_IEM_IPE_9);
8197 }
8198
8199 /*
8200 * Map the memory, checking that we can actually access it. If something
8201 * slightly complicated happens, fall back on bounce buffering.
8202 */
8203 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8204 if (rcStrict != VINF_SUCCESS)
8205 return rcStrict;
8206
8207 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8208 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8209
8210 RTGCPHYS GCPhysFirst;
8211 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8212 if (rcStrict != VINF_SUCCESS)
8213 return rcStrict;
8214
8215 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8216 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8217 if (fAccess & IEM_ACCESS_TYPE_READ)
8218 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8219
8220 void *pvMem;
8221 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8222 if (rcStrict != VINF_SUCCESS)
8223 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8224
8225 /*
8226 * Fill in the mapping table entry.
8227 */
8228 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8229 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8230 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8231 pVCpu->iem.s.cActiveMappings++;
8232
8233 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8234 *ppvMem = pvMem;
8235 return VINF_SUCCESS;
8236}
8237
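/*
 * Illustrative usage sketch (not part of the original file): the typical caller
 * pairs iemMemMap with iemMemCommitAndUnmap below, just like the data store
 * helpers further down.  The segment register and variable names here are
 * assumptions made for the example.
 *
 *     uint16_t *pu16Dst;
 *     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst),
 *                                       X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *     if (rcStrict == VINF_SUCCESS)
 *     {
 *         *pu16Dst = u16Value;
 *         rcStrict = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 *     }
 */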
8238
8239/**
8240 * Commits the guest memory if bounce buffered and unmaps it.
8241 *
8242 * @returns Strict VBox status code.
8243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8244 * @param pvMem The mapping.
8245 * @param fAccess The kind of access.
8246 */
8247IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8248{
8249 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8250 AssertReturn(iMemMap >= 0, iMemMap);
8251
8252 /* If it's bounce buffered, we may need to write back the buffer. */
8253 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8254 {
8255 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8256 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8257 }
8258 /* Otherwise unlock it. */
8259 else
8260 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8261
8262 /* Free the entry. */
8263 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8264 Assert(pVCpu->iem.s.cActiveMappings != 0);
8265 pVCpu->iem.s.cActiveMappings--;
8266 return VINF_SUCCESS;
8267}
8268
8269#ifdef IEM_WITH_SETJMP
8270
8271/**
8272 * Maps the specified guest memory for the given kind of access, longjmp on
8273 * error.
8274 *
8275 * This may be using bounce buffering of the memory if it's crossing a page
8276 * boundary or if there is an access handler installed for any of it. Because
8277 * of lock prefix guarantees, we're in for some extra clutter when this
8278 * happens.
8279 *
8280 * This may raise a \#GP, \#SS, \#PF or \#AC.
8281 *
8282 * @returns Pointer to the mapped memory.
8283 *
8284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8285 * @param cbMem The number of bytes to map. This is usually 1,
8286 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8287 * string operations it can be up to a page.
8288 * @param iSegReg The index of the segment register to use for
8289 * this access. The base and limits are checked.
8290 * Use UINT8_MAX to indicate that no segmentation
8291 * is required (for IDT, GDT and LDT accesses).
8292 * @param GCPtrMem The address of the guest memory.
8293 * @param fAccess How the memory is being accessed. The
8294 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8295 * how to map the memory, while the
8296 * IEM_ACCESS_WHAT_XXX bit is used when raising
8297 * exceptions.
8298 */
8299IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8300{
8301 /*
8302 * Check the input and figure out which mapping entry to use.
8303 */
8304 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8305 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8306 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8307
8308 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8309 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8310 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8311 {
8312 iMemMap = iemMemMapFindFree(pVCpu);
8313 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8314 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8315 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8316 pVCpu->iem.s.aMemMappings[2].fAccess),
8317 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8318 }
8319
8320 /*
8321 * Map the memory, checking that we can actually access it. If something
8322 * slightly complicated happens, fall back on bounce buffering.
8323 */
8324 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8325 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8326 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8327
8328 /* Crossing a page boundary? */
8329 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8330 { /* No (likely). */ }
8331 else
8332 {
8333 void *pvMem;
8334 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8335 if (rcStrict == VINF_SUCCESS)
8336 return pvMem;
8337 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8338 }
8339
8340 RTGCPHYS GCPhysFirst;
8341 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8342 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8343 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8344
8345 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8346 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8347 if (fAccess & IEM_ACCESS_TYPE_READ)
8348 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8349
8350 void *pvMem;
8351 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8352 if (rcStrict == VINF_SUCCESS)
8353 { /* likely */ }
8354 else
8355 {
8356 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8357 if (rcStrict == VINF_SUCCESS)
8358 return pvMem;
8359 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8360 }
8361
8362 /*
8363 * Fill in the mapping table entry.
8364 */
8365 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8366 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8367 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8368 pVCpu->iem.s.cActiveMappings++;
8369
8370 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8371 return pvMem;
8372}
8373
8374
8375/**
8376 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8377 *
8378 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8379 * @param pvMem The mapping.
8380 * @param fAccess The kind of access.
8381 */
8382IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8383{
8384 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8385 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8386
8387 /* If it's bounce buffered, we may need to write back the buffer. */
8388 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8389 {
8390 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8391 {
8392 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8393 if (rcStrict == VINF_SUCCESS)
8394 return;
8395 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8396 }
8397 }
8398 /* Otherwise unlock it. */
8399 else
8400 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8401
8402 /* Free the entry. */
8403 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8404 Assert(pVCpu->iem.s.cActiveMappings != 0);
8405 pVCpu->iem.s.cActiveMappings--;
8406}
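/*
 * Illustrative sketch (not from the original source): with the setjmp variants no
 * status plumbing is needed in the caller, since any failure longjmps out through
 * pVCpu->iem.s.CTX_SUFF(pJmpBuf):
 *
 *     uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg,
 *                                                  GCPtrMem, IEM_ACCESS_DATA_W);
 *     *pu16Dst = u16Value;
 *     iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
 */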
8407
8408#endif
8409
8410#ifndef IN_RING3
8411/**
8412 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8413 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
8414 *
8415 * Allows the instruction to be completed and retired, while the IEM user will
8416 * return to ring-3 immediately afterwards and do the postponed writes there.
8417 *
8418 * @returns VBox status code (no strict statuses). Caller must check
8419 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8421 * @param pvMem The mapping.
8422 * @param fAccess The kind of access.
8423 */
8424IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8425{
8426 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8427 AssertReturn(iMemMap >= 0, iMemMap);
8428
8429 /* If it's bounce buffered, we may need to write back the buffer. */
8430 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8431 {
8432 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8433 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8434 }
8435 /* Otherwise unlock it. */
8436 else
8437 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8438
8439 /* Free the entry. */
8440 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8441 Assert(pVCpu->iem.s.cActiveMappings != 0);
8442 pVCpu->iem.s.cActiveMappings--;
8443 return VINF_SUCCESS;
8444}
8445#endif
8446
8447
8448/**
8449 * Rolls back mappings, releasing page locks and such.
8450 *
8451 * The caller shall only call this after checking cActiveMappings.
8452 *
8454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8455 */
8456IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8457{
8458 Assert(pVCpu->iem.s.cActiveMappings > 0);
8459
8460 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8461 while (iMemMap-- > 0)
8462 {
8463 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8464 if (fAccess != IEM_ACCESS_INVALID)
8465 {
8466 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8467 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8468 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8469 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8470 Assert(pVCpu->iem.s.cActiveMappings > 0);
8471 pVCpu->iem.s.cActiveMappings--;
8472 }
8473 }
8474}
8475
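/*
 * Illustrative sketch (an assumption about the caller, not from this file): the
 * instruction execution loop is expected to invoke the rollback only when an
 * instruction failed while mappings were still active, e.g.:
 *
 *     if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *         iemMemRollback(pVCpu);
 */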
8476
8477/**
8478 * Fetches a data byte.
8479 *
8480 * @returns Strict VBox status code.
8481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8482 * @param pu8Dst Where to return the byte.
8483 * @param iSegReg The index of the segment register to use for
8484 * this access. The base and limits are checked.
8485 * @param GCPtrMem The address of the guest memory.
8486 */
8487IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8488{
8489 /* The lazy approach for now... */
8490 uint8_t const *pu8Src;
8491 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8492 if (rc == VINF_SUCCESS)
8493 {
8494 *pu8Dst = *pu8Src;
8495 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8496 }
8497 return rc;
8498}
8499
8500
8501#ifdef IEM_WITH_SETJMP
8502/**
8503 * Fetches a data byte, longjmp on error.
8504 *
8505 * @returns The byte.
8506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8507 * @param iSegReg The index of the segment register to use for
8508 * this access. The base and limits are checked.
8509 * @param GCPtrMem The address of the guest memory.
8510 */
8511DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8512{
8513 /* The lazy approach for now... */
8514 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8515 uint8_t const bRet = *pu8Src;
8516 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
8517 return bRet;
8518}
8519#endif /* IEM_WITH_SETJMP */
8520
8521
8522/**
8523 * Fetches a data word.
8524 *
8525 * @returns Strict VBox status code.
8526 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8527 * @param pu16Dst Where to return the word.
8528 * @param iSegReg The index of the segment register to use for
8529 * this access. The base and limits are checked.
8530 * @param GCPtrMem The address of the guest memory.
8531 */
8532IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8533{
8534 /* The lazy approach for now... */
8535 uint16_t const *pu16Src;
8536 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8537 if (rc == VINF_SUCCESS)
8538 {
8539 *pu16Dst = *pu16Src;
8540 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8541 }
8542 return rc;
8543}
8544
8545
8546#ifdef IEM_WITH_SETJMP
8547/**
8548 * Fetches a data word, longjmp on error.
8549 *
8550 * @returns The word
8551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8552 * @param iSegReg The index of the segment register to use for
8553 * this access. The base and limits are checked.
8554 * @param GCPtrMem The address of the guest memory.
8555 */
8556DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8557{
8558 /* The lazy approach for now... */
8559 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8560 uint16_t const u16Ret = *pu16Src;
8561 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
8562 return u16Ret;
8563}
8564#endif
8565
8566
8567/**
8568 * Fetches a data dword.
8569 *
8570 * @returns Strict VBox status code.
8571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8572 * @param pu32Dst Where to return the dword.
8573 * @param iSegReg The index of the segment register to use for
8574 * this access. The base and limits are checked.
8575 * @param GCPtrMem The address of the guest memory.
8576 */
8577IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8578{
8579 /* The lazy approach for now... */
8580 uint32_t const *pu32Src;
8581 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8582 if (rc == VINF_SUCCESS)
8583 {
8584 *pu32Dst = *pu32Src;
8585 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8586 }
8587 return rc;
8588}
8589
8590
8591#ifdef IEM_WITH_SETJMP
8592
8593IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8594{
8595 Assert(cbMem >= 1);
8596 Assert(iSegReg < X86_SREG_COUNT);
8597
8598 /*
8599 * 64-bit mode is simpler.
8600 */
8601 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8602 {
8603 if (iSegReg >= X86_SREG_FS)
8604 {
8605 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8606 GCPtrMem += pSel->u64Base;
8607 }
8608
8609 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8610 return GCPtrMem;
8611 }
8612 /*
8613 * 16-bit and 32-bit segmentation.
8614 */
8615 else
8616 {
8617 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8618 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8619 == X86DESCATTR_P /* data, expand up */
8620 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
8621 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
8622 {
8623 /* expand up */
8624 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
8625 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
8626 && GCPtrLast32 >= (uint32_t)GCPtrMem))
8627 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8628 }
8629 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
8630 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
8631 {
8632 /* expand down */
8633 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8634 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8635 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8636 && GCPtrLast32 > (uint32_t)GCPtrMem))
8637 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8638 }
8639 else
8640 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8641 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8642 }
8643 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8644}
8645
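/*
 * Illustrative example (standard x86 segmentation, not from the source): for an
 * expand-down data segment with u32Limit = 0x0fff and a 16-bit default size, the
 * valid offsets are 0x1000..0xffff, which is what the expand-down branch above
 * (and in the write variant below) accepts; an expand-up segment with the same
 * limit accepts offsets 0x0000..0x0fff instead.
 */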
8646
8647IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
8648{
8649 Assert(cbMem >= 1);
8650 Assert(iSegReg < X86_SREG_COUNT);
8651
8652 /*
8653 * 64-bit mode is simpler.
8654 */
8655 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8656 {
8657 if (iSegReg >= X86_SREG_FS)
8658 {
8659 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8660 GCPtrMem += pSel->u64Base;
8661 }
8662
8663 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8664 return GCPtrMem;
8665 }
8666 /*
8667 * 16-bit and 32-bit segmentation.
8668 */
8669 else
8670 {
8671 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8672 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
8673 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
8674 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
8675 {
8676 /* expand up */
8677 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
8678 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
8679 && GCPtrLast32 >= (uint32_t)GCPtrMem))
8680 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8681 }
8682 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
8683 {
8684 /* expand down */
8685 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
8686 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
8687 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
8688 && GCPtrLast32 > (uint32_t)GCPtrMem))
8689 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
8690 }
8691 else
8692 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8693 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8694 }
8695 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
8696}
8697
8698
8699/**
8700 * Fetches a data dword, longjmp on error, fallback/safe version.
8701 *
8702 * @returns The dword
8703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8704 * @param iSegReg The index of the segment register to use for
8705 * this access. The base and limits are checked.
8706 * @param GCPtrMem The address of the guest memory.
8707 */
8708IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8709{
8710 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8711 uint32_t const u32Ret = *pu32Src;
8712 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8713 return u32Ret;
8714}
8715
8716
8717/**
8718 * Fetches a data dword, longjmp on error.
8719 *
8720 * @returns The dword
8721 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8722 * @param iSegReg The index of the segment register to use for
8723 * this access. The base and limits are checked.
8724 * @param GCPtrMem The address of the guest memory.
8725 */
8726DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8727{
8728# ifdef IEM_WITH_DATA_TLB
8729 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
8730 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
8731 {
8732 /// @todo more later.
8733 }
8734
8735 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
8736# else
8737 /* The lazy approach. */
8738 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8739 uint32_t const u32Ret = *pu32Src;
8740 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
8741 return u32Ret;
8742# endif
8743}
8744#endif
8745
8746
8747#ifdef SOME_UNUSED_FUNCTION
8748/**
8749 * Fetches a data dword and sign extends it to a qword.
8750 *
8751 * @returns Strict VBox status code.
8752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8753 * @param pu64Dst Where to return the sign extended value.
8754 * @param iSegReg The index of the segment register to use for
8755 * this access. The base and limits are checked.
8756 * @param GCPtrMem The address of the guest memory.
8757 */
8758IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8759{
8760 /* The lazy approach for now... */
8761 int32_t const *pi32Src;
8762 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8763 if (rc == VINF_SUCCESS)
8764 {
8765 *pu64Dst = *pi32Src;
8766 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
8767 }
8768#ifdef __GNUC__ /* warning: GCC may be a royal pain */
8769 else
8770 *pu64Dst = 0;
8771#endif
8772 return rc;
8773}
8774#endif
8775
8776
8777/**
8778 * Fetches a data qword.
8779 *
8780 * @returns Strict VBox status code.
8781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8782 * @param pu64Dst Where to return the qword.
8783 * @param iSegReg The index of the segment register to use for
8784 * this access. The base and limits are checked.
8785 * @param GCPtrMem The address of the guest memory.
8786 */
8787IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8788{
8789 /* The lazy approach for now... */
8790 uint64_t const *pu64Src;
8791 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8792 if (rc == VINF_SUCCESS)
8793 {
8794 *pu64Dst = *pu64Src;
8795 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8796 }
8797 return rc;
8798}
8799
8800
8801#ifdef IEM_WITH_SETJMP
8802/**
8803 * Fetches a data qword, longjmp on error.
8804 *
8805 * @returns The qword.
8806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8807 * @param iSegReg The index of the segment register to use for
8808 * this access. The base and limits are checked.
8809 * @param GCPtrMem The address of the guest memory.
8810 */
8811DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8812{
8813 /* The lazy approach for now... */
8814 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8815 uint64_t const u64Ret = *pu64Src;
8816 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8817 return u64Ret;
8818}
8819#endif
8820
8821
8822/**
8823 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
8824 *
8825 * @returns Strict VBox status code.
8826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8827 * @param pu64Dst Where to return the qword.
8828 * @param iSegReg The index of the segment register to use for
8829 * this access. The base and limits are checked.
8830 * @param GCPtrMem The address of the guest memory.
8831 */
8832IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8833{
8834 /* The lazy approach for now... */
8835 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8836 if (RT_UNLIKELY(GCPtrMem & 15))
8837 return iemRaiseGeneralProtectionFault0(pVCpu);
8838
8839 uint64_t const *pu64Src;
8840 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8841 if (rc == VINF_SUCCESS)
8842 {
8843 *pu64Dst = *pu64Src;
8844 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8845 }
8846 return rc;
8847}
8848
8849
8850#ifdef IEM_WITH_SETJMP
8851/**
8852 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
8853 *
8854 * @returns The qword.
8855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8856 * @param iSegReg The index of the segment register to use for
8857 * this access. The base and limits are checked.
8858 * @param GCPtrMem The address of the guest memory.
8859 */
8860DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
8861{
8862 /* The lazy approach for now... */
8863 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8864 if (RT_LIKELY(!(GCPtrMem & 15)))
8865 {
8866 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8867 uint64_t const u64Ret = *pu64Src;
8868 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
8869 return u64Ret;
8870 }
8871
8872 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
8873 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
8874}
8875#endif
8876
8877
8878/**
8879 * Fetches a data tword.
8880 *
8881 * @returns Strict VBox status code.
8882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8883 * @param pr80Dst Where to return the tword.
8884 * @param iSegReg The index of the segment register to use for
8885 * this access. The base and limits are checked.
8886 * @param GCPtrMem The address of the guest memory.
8887 */
8888IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8889{
8890 /* The lazy approach for now... */
8891 PCRTFLOAT80U pr80Src;
8892 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8893 if (rc == VINF_SUCCESS)
8894 {
8895 *pr80Dst = *pr80Src;
8896 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
8897 }
8898 return rc;
8899}
8900
8901
8902#ifdef IEM_WITH_SETJMP
8903/**
8904 * Fetches a data tword, longjmp on error.
8905 *
8906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8907 * @param pr80Dst Where to return the tword.
8908 * @param iSegReg The index of the segment register to use for
8909 * this access. The base and limits are checked.
8910 * @param GCPtrMem The address of the guest memory.
8911 */
8912DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8913{
8914 /* The lazy approach for now... */
8915 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8916 *pr80Dst = *pr80Src;
8917 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
8918}
8919#endif
8920
8921
8922/**
8923 * Fetches a data dqword (double qword), generally SSE related.
8924 *
8925 * @returns Strict VBox status code.
8926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8927 * @param pu128Dst Where to return the dqword.
8928 * @param iSegReg The index of the segment register to use for
8929 * this access. The base and limits are checked.
8930 * @param GCPtrMem The address of the guest memory.
8931 */
8932IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8933{
8934 /* The lazy approach for now... */
8935 uint128_t const *pu128Src;
8936 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8937 if (rc == VINF_SUCCESS)
8938 {
8939 *pu128Dst = *pu128Src;
8940 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8941 }
8942 return rc;
8943}
8944
8945
8946#ifdef IEM_WITH_SETJMP
8947/**
8948 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
8949 *
8950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8951 * @param pu128Dst Where to return the dqword.
8952 * @param iSegReg The index of the segment register to use for
8953 * this access. The base and limits are checked.
8954 * @param GCPtrMem The address of the guest memory.
8955 */
8956IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8957{
8958 /* The lazy approach for now... */
8959 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8960 *pu128Dst = *pu128Src;
8961 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8962}
8963#endif
8964
8965
8966/**
8967 * Fetches a data dqword (double qword) at an aligned address, generally SSE
8968 * related.
8969 *
8970 * Raises \#GP(0) if not aligned.
8971 *
8972 * @returns Strict VBox status code.
8973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8974 * @param pu128Dst Where to return the dqword.
8975 * @param iSegReg The index of the segment register to use for
8976 * this access. The base and limits are checked.
8977 * @param GCPtrMem The address of the guest memory.
8978 */
8979IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
8980{
8981 /* The lazy approach for now... */
8982 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
8983 if ( (GCPtrMem & 15)
8984 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
8985 return iemRaiseGeneralProtectionFault0(pVCpu);
8986
8987 uint128_t const *pu128Src;
8988 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
8989 if (rc == VINF_SUCCESS)
8990 {
8991 *pu128Dst = *pu128Src;
8992 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
8993 }
8994 return rc;
8995}
8996
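/*
 * Illustrative example (values assumed, not from the source): GCPtrMem = 0x1008
 * gives (GCPtrMem & 15) = 8, so the aligned SSE fetch above raises #GP(0) unless
 * the MXCSR misalignment mask bit is set; GCPtrMem = 0x1010 passes the check.
 */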
8997
8998#ifdef IEM_WITH_SETJMP
8999/**
9000 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9001 * related, longjmp on error.
9002 *
9003 * Raises \#GP(0) if not aligned.
9004 *
9005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9006 * @param pu128Dst Where to return the dqword.
9007 * @param iSegReg The index of the segment register to use for
9008 * this access. The base and limits are checked.
9009 * @param GCPtrMem The address of the guest memory.
9010 */
9011DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, uint128_t *pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9012{
9013 /* The lazy approach for now... */
9014 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9015 if ( (GCPtrMem & 15) == 0
9016 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9017 {
9018 uint128_t const *pu128Src = (uint128_t const *)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
9019 IEM_ACCESS_DATA_R);
9020 *pu128Dst = *pu128Src;
9021 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9022 return;
9023 }
9024
9025 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9026 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9027}
9028#endif
9029
9030
9031
9032/**
9033 * Fetches a descriptor register (lgdt, lidt).
9034 *
9035 * @returns Strict VBox status code.
9036 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9037 * @param pcbLimit Where to return the limit.
9038 * @param pGCPtrBase Where to return the base.
9039 * @param iSegReg The index of the segment register to use for
9040 * this access. The base and limits are checked.
9041 * @param GCPtrMem The address of the guest memory.
9042 * @param enmOpSize The effective operand size.
9043 */
9044IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9045 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9046{
9047 /*
9048 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9049 * little special:
9050 * - The two reads are done separately.
9051 * - The operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9052 * - We suspect the 386 actually commits the limit before the base in
9053 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9054 * don't try to emulate this eccentric behavior, because it's not well
9055 * enough understood and rather hard to trigger.
9056 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9057 */
9058 VBOXSTRICTRC rcStrict;
9059 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9060 {
9061 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9062 if (rcStrict == VINF_SUCCESS)
9063 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9064 }
9065 else
9066 {
9067 uint32_t uTmp;
9068 if (enmOpSize == IEMMODE_32BIT)
9069 {
9070 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9071 {
9072 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9073 if (rcStrict == VINF_SUCCESS)
9074 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9075 }
9076 else
9077 {
9078 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9079 if (rcStrict == VINF_SUCCESS)
9080 {
9081 *pcbLimit = (uint16_t)uTmp;
9082 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9083 }
9084 }
9085 if (rcStrict == VINF_SUCCESS)
9086 *pGCPtrBase = uTmp;
9087 }
9088 else
9089 {
9090 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9091 if (rcStrict == VINF_SUCCESS)
9092 {
9093 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9094 if (rcStrict == VINF_SUCCESS)
9095 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9096 }
9097 }
9098 }
9099 return rcStrict;
9100}
9101
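/*
 * Illustrative example (x86 architectural layout, matching the code above rather
 * than quoting it): a 32-bit LGDT/LIDT memory operand is read as two independent
 * accesses:
 *
 *     GCPtrMem + 0 : 16-bit limit
 *     GCPtrMem + 2 : 32-bit base (only the low 24 bits are kept for 16-bit opsize)
 *
 * In 64-bit mode the base read at GCPtrMem + 2 is 64 bits wide instead.
 */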
9102
9103
9104/**
9105 * Stores a data byte.
9106 *
9107 * @returns Strict VBox status code.
9108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9109 * @param iSegReg The index of the segment register to use for
9110 * this access. The base and limits are checked.
9111 * @param GCPtrMem The address of the guest memory.
9112 * @param u8Value The value to store.
9113 */
9114IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9115{
9116 /* The lazy approach for now... */
9117 uint8_t *pu8Dst;
9118 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9119 if (rc == VINF_SUCCESS)
9120 {
9121 *pu8Dst = u8Value;
9122 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9123 }
9124 return rc;
9125}
9126
9127
9128#ifdef IEM_WITH_SETJMP
9129/**
9130 * Stores a data byte, longjmp on error.
9131 *
9132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9133 * @param iSegReg The index of the segment register to use for
9134 * this access. The base and limits are checked.
9135 * @param GCPtrMem The address of the guest memory.
9136 * @param u8Value The value to store.
9137 */
9138IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9139{
9140 /* The lazy approach for now... */
9141 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9142 *pu8Dst = u8Value;
9143 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9144}
9145#endif
9146
9147
9148/**
9149 * Stores a data word.
9150 *
9151 * @returns Strict VBox status code.
9152 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9153 * @param iSegReg The index of the segment register to use for
9154 * this access. The base and limits are checked.
9155 * @param GCPtrMem The address of the guest memory.
9156 * @param u16Value The value to store.
9157 */
9158IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9159{
9160 /* The lazy approach for now... */
9161 uint16_t *pu16Dst;
9162 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9163 if (rc == VINF_SUCCESS)
9164 {
9165 *pu16Dst = u16Value;
9166 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9167 }
9168 return rc;
9169}
9170
9171
9172#ifdef IEM_WITH_SETJMP
9173/**
9174 * Stores a data word, longjmp on error.
9175 *
9176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9177 * @param iSegReg The index of the segment register to use for
9178 * this access. The base and limits are checked.
9179 * @param GCPtrMem The address of the guest memory.
9180 * @param u16Value The value to store.
9181 */
9182IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9183{
9184 /* The lazy approach for now... */
9185 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9186 *pu16Dst = u16Value;
9187 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9188}
9189#endif
9190
9191
9192/**
9193 * Stores a data dword.
9194 *
9195 * @returns Strict VBox status code.
9196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9197 * @param iSegReg The index of the segment register to use for
9198 * this access. The base and limits are checked.
9199 * @param GCPtrMem The address of the guest memory.
9200 * @param u32Value The value to store.
9201 */
9202IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9203{
9204 /* The lazy approach for now... */
9205 uint32_t *pu32Dst;
9206 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9207 if (rc == VINF_SUCCESS)
9208 {
9209 *pu32Dst = u32Value;
9210 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9211 }
9212 return rc;
9213}
9214
9215
9216#ifdef IEM_WITH_SETJMP
9217/**
9218 * Stores a data dword, longjmp on error.
9219 *
9221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9222 * @param iSegReg The index of the segment register to use for
9223 * this access. The base and limits are checked.
9224 * @param GCPtrMem The address of the guest memory.
9225 * @param u32Value The value to store.
9226 */
9227IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9228{
9229 /* The lazy approach for now... */
9230 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9231 *pu32Dst = u32Value;
9232 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9233}
9234#endif
9235
9236
9237/**
9238 * Stores a data qword.
9239 *
9240 * @returns Strict VBox status code.
9241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9242 * @param iSegReg The index of the segment register to use for
9243 * this access. The base and limits are checked.
9244 * @param GCPtrMem The address of the guest memory.
9245 * @param u64Value The value to store.
9246 */
9247IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9248{
9249 /* The lazy approach for now... */
9250 uint64_t *pu64Dst;
9251 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9252 if (rc == VINF_SUCCESS)
9253 {
9254 *pu64Dst = u64Value;
9255 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9256 }
9257 return rc;
9258}
9259
9260
9261#ifdef IEM_WITH_SETJMP
9262/**
9263 * Stores a data qword, longjmp on error.
9264 *
9265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9266 * @param iSegReg The index of the segment register to use for
9267 * this access. The base and limits are checked.
9268 * @param GCPtrMem The address of the guest memory.
9269 * @param u64Value The value to store.
9270 */
9271IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9272{
9273 /* The lazy approach for now... */
9274 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9275 *pu64Dst = u64Value;
9276 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9277}
9278#endif
9279
9280
9281/**
9282 * Stores a data dqword.
9283 *
9284 * @returns Strict VBox status code.
9285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9286 * @param iSegReg The index of the segment register to use for
9287 * this access. The base and limits are checked.
9288 * @param GCPtrMem The address of the guest memory.
9289 * @param u128Value The value to store.
9290 */
9291IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9292{
9293 /* The lazy approach for now... */
9294 uint128_t *pu128Dst;
9295 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9296 if (rc == VINF_SUCCESS)
9297 {
9298 *pu128Dst = u128Value;
9299 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9300 }
9301 return rc;
9302}
9303
9304
9305#ifdef IEM_WITH_SETJMP
9306/**
9307 * Stores a data dqword, longjmp on error.
9308 *
9309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9310 * @param iSegReg The index of the segment register to use for
9311 * this access. The base and limits are checked.
9312 * @param GCPtrMem The address of the guest memory.
9313 * @param u128Value The value to store.
9314 */
9315IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9316{
9317 /* The lazy approach for now... */
9318 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9319 *pu128Dst = u128Value;
9320 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9321}
9322#endif
9323
9324
9325/**
9326 * Stores a data dqword, SSE aligned.
9327 *
9328 * @returns Strict VBox status code.
9329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9330 * @param iSegReg The index of the segment register to use for
9331 * this access. The base and limits are checked.
9332 * @param GCPtrMem The address of the guest memory.
9333 * @param u128Value The value to store.
9334 */
9335IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9336{
9337 /* The lazy approach for now... */
9338 if ( (GCPtrMem & 15)
9339 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9340 return iemRaiseGeneralProtectionFault0(pVCpu);
9341
9342 uint128_t *pu128Dst;
9343 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9344 if (rc == VINF_SUCCESS)
9345 {
9346 *pu128Dst = u128Value;
9347 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9348 }
9349 return rc;
9350}
9351
9352
9353#ifdef IEM_WITH_SETJMP
9354/**
9355 * Stores a data dqword, SSE aligned, longjmp on error.
9356 *
9358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9359 * @param iSegReg The index of the segment register to use for
9360 * this access. The base and limits are checked.
9361 * @param GCPtrMem The address of the guest memory.
9362 * @param u128Value The value to store.
9363 */
9364DECL_NO_INLINE(IEM_STATIC, void)
9365iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint128_t u128Value)
9366{
9367 /* The lazy approach for now... */
9368 if ( (GCPtrMem & 15) == 0
9369 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXSCR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9370 {
9371 uint128_t *pu128Dst = (uint128_t *)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9372 *pu128Dst = u128Value;
9373 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9374 return;
9375 }
9376
9377 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9378 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9379}
9380#endif
9381
9382
9383/**
9384 * Stores a descriptor register (sgdt, sidt).
9385 *
9386 * @returns Strict VBox status code.
9387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9388 * @param cbLimit The limit.
9389 * @param GCPtrBase The base address.
9390 * @param iSegReg The index of the segment register to use for
9391 * this access. The base and limits are checked.
9392 * @param GCPtrMem The address of the guest memory.
9393 */
9394IEM_STATIC VBOXSTRICTRC
9395iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
9396{
9397 /*
9398 * The SIDT and SGDT instructions actually store the data using two
9399 * independent writes. The instructions do not respond to opsize prefixes.
9400 */
9401 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
9402 if (rcStrict == VINF_SUCCESS)
9403 {
9404 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
9405 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
9406 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
9407 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
9408 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
9409 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
9410 else
9411 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
9412 }
9413 return rcStrict;
9414}
9415
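/*
 * Illustrative example (derived from the code above, values assumed): storing a
 * descriptor register with GCPtrBase = 0x00123456 in 16-bit mode on a 286-level
 * target writes the dword 0xff123456 at GCPtrMem + 2, i.e. the unused top byte of
 * the 24-bit base reads back as 0xff.
 */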
9416
9417/**
9418 * Pushes a word onto the stack.
9419 *
9420 * @returns Strict VBox status code.
9421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9422 * @param u16Value The value to push.
9423 */
9424IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
9425{
9426 /* Decrement the stack pointer. */
9427 uint64_t uNewRsp;
9428 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9429 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
9430
9431 /* Write the word the lazy way. */
9432 uint16_t *pu16Dst;
9433 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9434 if (rc == VINF_SUCCESS)
9435 {
9436 *pu16Dst = u16Value;
9437 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9438 }
9439
9440 /* Commit the new RSP value unless an access handler made trouble. */
9441 if (rc == VINF_SUCCESS)
9442 pCtx->rsp = uNewRsp;
9443
9444 return rc;
9445}
9446
9447
9448/**
9449 * Pushes a dword onto the stack.
9450 *
9451 * @returns Strict VBox status code.
9452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9453 * @param u32Value The value to push.
9454 */
9455IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
9456{
9457 /* Decrement the stack pointer. */
9458 uint64_t uNewRsp;
9459 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9460 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9461
9462 /* Write the dword the lazy way. */
9463 uint32_t *pu32Dst;
9464 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9465 if (rc == VINF_SUCCESS)
9466 {
9467 *pu32Dst = u32Value;
9468 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9469 }
9470
9471 /* Commit the new RSP value unless an access handler made trouble. */
9472 if (rc == VINF_SUCCESS)
9473 pCtx->rsp = uNewRsp;
9474
9475 return rc;
9476}
9477
9478
9479/**
9480 * Pushes a dword segment register value onto the stack.
9481 *
9482 * @returns Strict VBox status code.
9483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9484 * @param u32Value The value to push.
9485 */
9486IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
9487{
9488 /* Decrement the stack pointer. */
9489 uint64_t uNewRsp;
9490 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9491 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
9492
9493 VBOXSTRICTRC rc;
9494 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
9495 {
9496 /* The recompiler writes a full dword. */
9497 uint32_t *pu32Dst;
9498 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9499 if (rc == VINF_SUCCESS)
9500 {
9501 *pu32Dst = u32Value;
9502 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9503 }
9504 }
9505 else
9506 {
9507 /* The Intel docs talk about zero extending the selector register
9508 value. My actual Intel CPU here might be zero extending the value,
9509 but it still only writes the lower word... */
9510 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
9511 * happens when crossing a page boundary: is the high word checked
9512 * for write accessibility or not? Probably it is. What about segment limits?
9513 * It appears this behavior is also shared with trap error codes.
9514 *
9515 * Docs indicate the behavior maybe changed with the Pentium or Pentium Pro.
9516 * Check ancient hardware to pin down when it actually changed. */
9517 uint16_t *pu16Dst;
9518 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
9519 if (rc == VINF_SUCCESS)
9520 {
9521 *pu16Dst = (uint16_t)u32Value;
9522 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
9523 }
9524 }
9525
9526 /* Commit the new RSP value unless an access handler made trouble. */
9527 if (rc == VINF_SUCCESS)
9528 pCtx->rsp = uNewRsp;
9529
9530 return rc;
9531}
9532
9533
9534/**
9535 * Pushes a qword onto the stack.
9536 *
9537 * @returns Strict VBox status code.
9538 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9539 * @param u64Value The value to push.
9540 */
9541IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
9542{
9543 /* Decrement the stack pointer. */
9544 uint64_t uNewRsp;
9545 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9546 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
9547
9548 /* Write the qword the lazy way. */
9549 uint64_t *pu64Dst;
9550 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9551 if (rc == VINF_SUCCESS)
9552 {
9553 *pu64Dst = u64Value;
9554 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9555 }
9556
9557 /* Commit the new RSP value unless an access handler made trouble. */
9558 if (rc == VINF_SUCCESS)
9559 pCtx->rsp = uNewRsp;
9560
9561 return rc;
9562}
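
/*
 * All the push helpers above follow the same pattern: compute the new RSP,
 * map the stack slot, write, commit the mapping, and only then commit RSP.
 * A faulting push therefore leaves RSP untouched. Minimal caller sketch
 * (the helper name is hypothetical):
 *
 *     IEM_STATIC VBOXSTRICTRC iemExamplePushRax(PVMCPU pVCpu)
 *     {
 *         return iemMemStackPushU64(pVCpu, IEM_GET_CTX(pVCpu)->rax);
 *     }
 */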
9563
9564
9565/**
9566 * Pops a word from the stack.
9567 *
9568 * @returns Strict VBox status code.
9569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9570 * @param pu16Value Where to store the popped value.
9571 */
9572IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
9573{
9574 /* Increment the stack pointer. */
9575 uint64_t uNewRsp;
9576 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9577 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
9578
9579 /* Read the word the lazy way. */
9580 uint16_t const *pu16Src;
9581 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9582 if (rc == VINF_SUCCESS)
9583 {
9584 *pu16Value = *pu16Src;
9585 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9586
9587 /* Commit the new RSP value. */
9588 if (rc == VINF_SUCCESS)
9589 pCtx->rsp = uNewRsp;
9590 }
9591
9592 return rc;
9593}
9594
9595
9596/**
9597 * Pops a dword from the stack.
9598 *
9599 * @returns Strict VBox status code.
9600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9601 * @param pu32Value Where to store the popped value.
9602 */
9603IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
9604{
9605 /* Increment the stack pointer. */
9606 uint64_t uNewRsp;
9607 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9608 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
9609
9610 /* Read the dword the lazy way. */
9611 uint32_t const *pu32Src;
9612 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9613 if (rc == VINF_SUCCESS)
9614 {
9615 *pu32Value = *pu32Src;
9616 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9617
9618 /* Commit the new RSP value. */
9619 if (rc == VINF_SUCCESS)
9620 pCtx->rsp = uNewRsp;
9621 }
9622
9623 return rc;
9624}
9625
9626
9627/**
9628 * Pops a qword from the stack.
9629 *
9630 * @returns Strict VBox status code.
9631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9632 * @param pu64Value Where to store the popped value.
9633 */
9634IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
9635{
9636 /* Increment the stack pointer. */
9637 uint64_t uNewRsp;
9638 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9639 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
9640
9641 /* Read the qword the lazy way. */
9642 uint64_t const *pu64Src;
9643 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9644 if (rc == VINF_SUCCESS)
9645 {
9646 *pu64Value = *pu64Src;
9647 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9648
9649 /* Commit the new RSP value. */
9650 if (rc == VINF_SUCCESS)
9651 pCtx->rsp = uNewRsp;
9652 }
9653
9654 return rc;
9655}
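
/*
 * The pop helpers mirror the push pattern: the value is read through a mapped
 * pointer and RSP is only committed once the unmap succeeded. Sketch of a
 * caller (uValue is a hypothetical local):
 *
 *     uint64_t     uValue;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopU64(pVCpu, &uValue);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;   // RSP was not modified on failure
 */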
9656
9657
9658/**
9659 * Pushes a word onto the stack, using a temporary stack pointer.
9660 *
9661 * @returns Strict VBox status code.
9662 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9663 * @param u16Value The value to push.
9664 * @param pTmpRsp Pointer to the temporary stack pointer.
9665 */
9666IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
9667{
9668 /* Decrement the stack pointer. */
9669 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9670 RTUINT64U NewRsp = *pTmpRsp;
9671 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
9672
9673 /* Write the word the lazy way. */
9674 uint16_t *pu16Dst;
9675 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9676 if (rc == VINF_SUCCESS)
9677 {
9678 *pu16Dst = u16Value;
9679 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
9680 }
9681
9682 /* Commit the new RSP value unless an access handler made trouble. */
9683 if (rc == VINF_SUCCESS)
9684 *pTmpRsp = NewRsp;
9685
9686 return rc;
9687}
9688
9689
9690/**
9691 * Pushes a dword onto the stack, using a temporary stack pointer.
9692 *
9693 * @returns Strict VBox status code.
9694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9695 * @param u32Value The value to push.
9696 * @param pTmpRsp Pointer to the temporary stack pointer.
9697 */
9698IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
9699{
9700 /* Decrement the stack pointer. */
9701 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9702 RTUINT64U NewRsp = *pTmpRsp;
9703 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
9704
9705 /* Write the dword the lazy way. */
9706 uint32_t *pu32Dst;
9707 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9708 if (rc == VINF_SUCCESS)
9709 {
9710 *pu32Dst = u32Value;
9711 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
9712 }
9713
9714 /* Commit the new RSP value unless an access handler made trouble. */
9715 if (rc == VINF_SUCCESS)
9716 *pTmpRsp = NewRsp;
9717
9718 return rc;
9719}
9720
9721
9722/**
9723 * Pushes a qword onto the stack, using a temporary stack pointer.
9724 *
9725 * @returns Strict VBox status code.
9726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9727 * @param u64Value The value to push.
9728 * @param pTmpRsp Pointer to the temporary stack pointer.
9729 */
9730IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
9731{
9732 /* Decrement the stack pointer. */
9733 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9734 RTUINT64U NewRsp = *pTmpRsp;
9735 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
9736
9737 /* Write the qword the lazy way. */
9738 uint64_t *pu64Dst;
9739 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9740 if (rc == VINF_SUCCESS)
9741 {
9742 *pu64Dst = u64Value;
9743 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
9744 }
9745
9746 /* Commit the new RSP value unless an access handler made trouble. */
9747 if (rc == VINF_SUCCESS)
9748 *pTmpRsp = NewRsp;
9749
9750 return rc;
9751}
9752
9753
9754/**
9755 * Pops a word from the stack, using a temporary stack pointer.
9756 *
9757 * @returns Strict VBox status code.
9758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9759 * @param pu16Value Where to store the popped value.
9760 * @param pTmpRsp Pointer to the temporary stack pointer.
9761 */
9762IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
9763{
9764 /* Increment the stack pointer. */
9765 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9766 RTUINT64U NewRsp = *pTmpRsp;
9767 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
9768
9769 /* Read the word the lazy way. */
9770 uint16_t const *pu16Src;
9771 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9772 if (rc == VINF_SUCCESS)
9773 {
9774 *pu16Value = *pu16Src;
9775 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
9776
9777 /* Commit the new RSP value. */
9778 if (rc == VINF_SUCCESS)
9779 *pTmpRsp = NewRsp;
9780 }
9781
9782 return rc;
9783}
9784
9785
9786/**
9787 * Pops a dword from the stack, using a temporary stack pointer.
9788 *
9789 * @returns Strict VBox status code.
9790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9791 * @param pu32Value Where to store the popped value.
9792 * @param pTmpRsp Pointer to the temporary stack pointer.
9793 */
9794IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
9795{
9796 /* Increment the stack pointer. */
9797 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9798 RTUINT64U NewRsp = *pTmpRsp;
9799 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
9800
9801 /* Read the dword the lazy way. */
9802 uint32_t const *pu32Src;
9803 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9804 if (rc == VINF_SUCCESS)
9805 {
9806 *pu32Value = *pu32Src;
9807 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
9808
9809 /* Commit the new RSP value. */
9810 if (rc == VINF_SUCCESS)
9811 *pTmpRsp = NewRsp;
9812 }
9813
9814 return rc;
9815}
9816
9817
9818/**
9819 * Pops a qword from the stack, using a temporary stack pointer.
9820 *
9821 * @returns Strict VBox status code.
9822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9823 * @param pu64Value Where to store the popped value.
9824 * @param pTmpRsp Pointer to the temporary stack pointer.
9825 */
9826IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
9827{
9828 /* Increment the stack pointer. */
9829 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9830 RTUINT64U NewRsp = *pTmpRsp;
9831 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9832
9833 /* Read the qword the lazy way. */
9834 uint64_t const *pu64Src;
9835 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9836 if (rcStrict == VINF_SUCCESS)
9837 {
9838 *pu64Value = *pu64Src;
9839 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
9840
9841 /* Commit the new RSP value. */
9842 if (rcStrict == VINF_SUCCESS)
9843 *pTmpRsp = NewRsp;
9844 }
9845
9846 return rcStrict;
9847}
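
/*
 * The ...Ex variants let a caller work on a scratch copy of RSP and commit it
 * in one go, which is what multi-item operations such as far returns want.
 * Rough sketch (uNewIp and uNewCs are hypothetical locals):
 *
 *     uint16_t     uNewIp, uNewCs;
 *     RTUINT64U    TmpRsp;
 *     TmpRsp.u = IEM_GET_CTX(pVCpu)->rsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, &uNewIp, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = iemMemStackPopU16Ex(pVCpu, &uNewCs, &TmpRsp);
 *     if (rcStrict == VINF_SUCCESS)
 *         IEM_GET_CTX(pVCpu)->rsp = TmpRsp.u;   // commit only after every pop succeeded
 */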
9848
9849
9850/**
9851 * Begin a special stack push (used by interrupts, exceptions and such).
9852 *
9853 * This will raise \#SS or \#PF if appropriate.
9854 *
9855 * @returns Strict VBox status code.
9856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9857 * @param cbMem The number of bytes to push onto the stack.
9858 * @param ppvMem Where to return the pointer to the stack memory.
9859 * As with the other memory functions, this could be
9860 * direct access or bounce-buffered access, so
9861 * don't commit the register until the commit call
9862 * succeeds.
9863 * @param puNewRsp Where to return the new RSP value. This must be
9864 * passed unchanged to
9865 * iemMemStackPushCommitSpecial().
9866 */
9867IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
9868{
9869 Assert(cbMem < UINT8_MAX);
9870 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9871 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
9872 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
9873}
9874
9875
9876/**
9877 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
9878 *
9879 * This will update the rSP.
9880 *
9881 * @returns Strict VBox status code.
9882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9883 * @param pvMem The pointer returned by
9884 * iemMemStackPushBeginSpecial().
9885 * @param uNewRsp The new RSP value returned by
9886 * iemMemStackPushBeginSpecial().
9887 */
9888IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
9889{
9890 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
9891 if (rcStrict == VINF_SUCCESS)
9892 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
9893 return rcStrict;
9894}
9895
9896
9897/**
9898 * Begin a special stack pop (used by iret, retf and such).
9899 *
9900 * This will raise \#SS or \#PF if appropriate.
9901 *
9902 * @returns Strict VBox status code.
9903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9904 * @param cbMem The number of bytes to pop from the stack.
9905 * @param ppvMem Where to return the pointer to the stack memory.
9906 * @param puNewRsp Where to return the new RSP value. This must be
9907 * assigned to CPUMCTX::rsp manually some time
9908 * after iemMemStackPopDoneSpecial() has been
9909 * called.
9910 */
9911IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
9912{
9913 Assert(cbMem < UINT8_MAX);
9914 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9915 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
9916 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9917}
9918
9919
9920/**
9921 * Continue a special stack pop (used by iret and retf).
9922 *
9923 * This will raise \#SS or \#PF if appropriate.
9924 *
9925 * @returns Strict VBox status code.
9926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9927 * @param cbMem The number of bytes to pop from the stack.
9928 * @param ppvMem Where to return the pointer to the stack memory.
9929 * @param puNewRsp Where to return the new RSP value. This must be
9930 * assigned to CPUMCTX::rsp manually some time
9931 * after iemMemStackPopDoneSpecial() has been
9932 * called.
9933 */
9934IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
9935{
9936 Assert(cbMem < UINT8_MAX);
9937 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
9938 RTUINT64U NewRsp;
9939 NewRsp.u = *puNewRsp;
9940 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
9941 *puNewRsp = NewRsp.u;
9942 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
9943}
9944
9945
9946/**
9947 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
9948 * iemMemStackPopContinueSpecial).
9949 *
9950 * The caller will manually commit the rSP.
9951 *
9952 * @returns Strict VBox status code.
9953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9954 * @param pvMem The pointer returned by
9955 * iemMemStackPopBeginSpecial() or
9956 * iemMemStackPopContinueSpecial().
9957 */
9958IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
9959{
9960 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
9961}
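
/*
 * Sketch of the special push protocol above, as an exception/interrupt style
 * caller might use it (the frame layout and the uErrCode/uOldEip locals are
 * hypothetical):
 *
 *     void        *pvStackFrame;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, &pvStackFrame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ((uint32_t *)pvStackFrame)[0] = uErrCode;   // hypothetical error code
 *     ((uint32_t *)pvStackFrame)[1] = uOldEip;    // hypothetical return EIP
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp);
 *
 * The pop side is Begin (and optionally Continue) followed by Done, after
 * which the caller assigns the returned RSP value to CPUMCTX::rsp itself.
 */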
9962
9963
9964/**
9965 * Fetches a system table byte.
9966 *
9967 * @returns Strict VBox status code.
9968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9969 * @param pbDst Where to return the byte.
9970 * @param iSegReg The index of the segment register to use for
9971 * this access. The base and limits are checked.
9972 * @param GCPtrMem The address of the guest memory.
9973 */
9974IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9975{
9976 /* The lazy approach for now... */
9977 uint8_t const *pbSrc;
9978 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
9979 if (rc == VINF_SUCCESS)
9980 {
9981 *pbDst = *pbSrc;
9982 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
9983 }
9984 return rc;
9985}
9986
9987
9988/**
9989 * Fetches a system table word.
9990 *
9991 * @returns Strict VBox status code.
9992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9993 * @param pu16Dst Where to return the word.
9994 * @param iSegReg The index of the segment register to use for
9995 * this access. The base and limits are checked.
9996 * @param GCPtrMem The address of the guest memory.
9997 */
9998IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9999{
10000 /* The lazy approach for now... */
10001 uint16_t const *pu16Src;
10002 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10003 if (rc == VINF_SUCCESS)
10004 {
10005 *pu16Dst = *pu16Src;
10006 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10007 }
10008 return rc;
10009}
10010
10011
10012/**
10013 * Fetches a system table dword.
10014 *
10015 * @returns Strict VBox status code.
10016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10017 * @param pu32Dst Where to return the dword.
10018 * @param iSegReg The index of the segment register to use for
10019 * this access. The base and limits are checked.
10020 * @param GCPtrMem The address of the guest memory.
10021 */
10022IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10023{
10024 /* The lazy approach for now... */
10025 uint32_t const *pu32Src;
10026 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10027 if (rc == VINF_SUCCESS)
10028 {
10029 *pu32Dst = *pu32Src;
10030 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10031 }
10032 return rc;
10033}
10034
10035
10036/**
10037 * Fetches a system table qword.
10038 *
10039 * @returns Strict VBox status code.
10040 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10041 * @param pu64Dst Where to return the qword.
10042 * @param iSegReg The index of the segment register to use for
10043 * this access. The base and limits are checked.
10044 * @param GCPtrMem The address of the guest memory.
10045 */
10046IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10047{
10048 /* The lazy approach for now... */
10049 uint64_t const *pu64Src;
10050 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10051 if (rc == VINF_SUCCESS)
10052 {
10053 *pu64Dst = *pu64Src;
10054 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10055 }
10056 return rc;
10057}
10058
10059
10060/**
10061 * Fetches a descriptor table entry with caller specified error code.
10062 *
10063 * @returns Strict VBox status code.
10064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10065 * @param pDesc Where to return the descriptor table entry.
10066 * @param uSel The selector which table entry to fetch.
10067 * @param uXcpt The exception to raise on table lookup error.
10068 * @param uErrorCode The error code associated with the exception.
10069 */
10070IEM_STATIC VBOXSTRICTRC
10071iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10072{
10073 AssertPtr(pDesc);
10074 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10075
10076 /** @todo did the 286 require all 8 bytes to be accessible? */
10077 /*
10078 * Get the selector table base and check bounds.
10079 */
10080 RTGCPTR GCPtrBase;
10081 if (uSel & X86_SEL_LDT)
10082 {
10083 if ( !pCtx->ldtr.Attr.n.u1Present
10084 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10085 {
10086 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10087 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10088 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10089 uErrorCode, 0);
10090 }
10091
10092 Assert(pCtx->ldtr.Attr.n.u1Present);
10093 GCPtrBase = pCtx->ldtr.u64Base;
10094 }
10095 else
10096 {
10097 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10098 {
10099 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10100 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10101 uErrorCode, 0);
10102 }
10103 GCPtrBase = pCtx->gdtr.pGdt;
10104 }
10105
10106 /*
10107 * Read the legacy descriptor and maybe the long mode extensions if
10108 * required.
10109 */
10110 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10111 if (rcStrict == VINF_SUCCESS)
10112 {
10113 if ( !IEM_IS_LONG_MODE(pVCpu)
10114 || pDesc->Legacy.Gen.u1DescType)
10115 pDesc->Long.au64[1] = 0;
10116 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10117 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10118 else
10119 {
10120 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10121 /** @todo is this the right exception? */
10122 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10123 }
10124 }
10125 return rcStrict;
10126}
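
/*
 * Selector arithmetic behind the bounds checks above (worked example, not
 * code): a selector holds the RPL in bits 1:0, the table indicator
 * (X86_SEL_LDT) in bit 2 and the descriptor index in bits 15:3. For
 * uSel = 0x0010 that gives RPL=0, TI=0 (GDT) and byte offset 0x10 into the
 * table; since (uSel | X86_SEL_RPL_LDT) == 0x17, the check requires
 * gdtr.cbGdt to be at least 0x17 so all 8 descriptor bytes fit within the
 * limit.
 */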
10127
10128
10129/**
10130 * Fetches a descriptor table entry.
10131 *
10132 * @returns Strict VBox status code.
10133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10134 * @param pDesc Where to return the descriptor table entry.
10135 * @param uSel The selector which table entry to fetch.
10136 * @param uXcpt The exception to raise on table lookup error.
10137 */
10138IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10139{
10140 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10141}
10142
10143
10144/**
10145 * Fakes a long mode stack selector for SS = 0.
10146 *
10147 * @param pDescSs Where to return the fake stack descriptor.
10148 * @param uDpl The DPL we want.
10149 */
10150IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10151{
10152 pDescSs->Long.au64[0] = 0;
10153 pDescSs->Long.au64[1] = 0;
10154 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10155 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10156 pDescSs->Long.Gen.u2Dpl = uDpl;
10157 pDescSs->Long.Gen.u1Present = 1;
10158 pDescSs->Long.Gen.u1Long = 1;
10159}
10160
10161
10162/**
10163 * Marks the selector descriptor as accessed (only non-system descriptors).
10164 *
10165 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10166 * will therefore skip the limit checks.
10167 *
10168 * @returns Strict VBox status code.
10169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10170 * @param uSel The selector.
10171 */
10172IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10173{
10174 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10175
10176 /*
10177 * Get the selector table base and calculate the entry address.
10178 */
10179 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10180 ? pCtx->ldtr.u64Base
10181 : pCtx->gdtr.pGdt;
10182 GCPtr += uSel & X86_SEL_MASK;
10183
10184 /*
10185 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10186 * ugly stuff to avoid this. This will make sure it's an atomic access
10187 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10188 */
10189 VBOXSTRICTRC rcStrict;
10190 uint32_t volatile *pu32;
10191 if ((GCPtr & 3) == 0)
10192 {
10193 /* The normal case: map the 32 bits around the accessed bit (bit 40). */
10194 GCPtr += 2 + 2;
10195 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10196 if (rcStrict != VINF_SUCCESS)
10197 return rcStrict;
10198 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10199 }
10200 else
10201 {
10202 /* The misaligned GDT/LDT case, map the whole thing. */
10203 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10204 if (rcStrict != VINF_SUCCESS)
10205 return rcStrict;
10206 switch ((uintptr_t)pu32 & 3)
10207 {
10208 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10209 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10210 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10211 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10212 }
10213 }
10214
10215 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10216}
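
/*
 * Bit position arithmetic used above (sketch): the accessed flag is bit 40 of
 * the 8-byte descriptor, i.e. bit 0 of the type field in byte 5. In the
 * aligned case the code maps the high dword (offset +4) and sets bit
 * 40 - 32 = 8 there. In the misaligned case it maps all 8 bytes and rebases
 * the pointer so the target bit still falls inside an aligned 32-bit unit;
 * e.g. for (pu32 & 3) == 1 it advances 3 bytes and sets bit 40 - 24 = 16.
 */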
10217
10218/** @} */
10219
10220
10221/*
10222 * Include the C/C++ implementation of instruction.
10223 */
10224#include "IEMAllCImpl.cpp.h"
10225
10226
10227
10228/** @name "Microcode" macros.
10229 *
10230 * The idea is that we should be able to use the same code for interpreting
10231 * instructions as well as for recompiling them. Thus this obfuscation.
10232 *
10233 * @{
10234 */
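
/*
 * Rough shape of an instruction body written with these macros (schematic
 * only; the operands, X86_SREG_DS, X86_GREG_xAX and the GCPtrEffDst local are
 * stand-ins supplied by the decoder):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_STORE_MEM_U16(X86_SREG_DS, GCPtrEffDst, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */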
10235#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10236#define IEM_MC_END() }
10237#define IEM_MC_PAUSE() do {} while (0)
10238#define IEM_MC_CONTINUE() do {} while (0)
10239
10240/** Internal macro. */
10241#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10242 do \
10243 { \
10244 VBOXSTRICTRC rcStrict2 = a_Expr; \
10245 if (rcStrict2 != VINF_SUCCESS) \
10246 return rcStrict2; \
10247 } while (0)
10248
10249
10250#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10251#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10252#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10253#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10254#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10255#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10256#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10257#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10258#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10259 do { \
10260 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10261 return iemRaiseDeviceNotAvailable(pVCpu); \
10262 } while (0)
10263#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10264 do { \
10265 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10266 return iemRaiseMathFault(pVCpu); \
10267 } while (0)
10268#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10269 do { \
10270 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10271 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10272 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10273 return iemRaiseUndefinedOpcode(pVCpu); \
10274 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10275 return iemRaiseDeviceNotAvailable(pVCpu); \
10276 } while (0)
10277#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10278 do { \
10279 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10280 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10281 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10282 return iemRaiseUndefinedOpcode(pVCpu); \
10283 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10284 return iemRaiseDeviceNotAvailable(pVCpu); \
10285 } while (0)
10286#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10287 do { \
10288 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10289 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10290 return iemRaiseUndefinedOpcode(pVCpu); \
10291 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10292 return iemRaiseDeviceNotAvailable(pVCpu); \
10293 } while (0)
10294#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10295 do { \
10296 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10297 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10298 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10299 return iemRaiseUndefinedOpcode(pVCpu); \
10300 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10301 return iemRaiseDeviceNotAvailable(pVCpu); \
10302 } while (0)
10303#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10304 do { \
10305 if (pVCpu->iem.s.uCpl != 0) \
10306 return iemRaiseGeneralProtectionFault0(pVCpu); \
10307 } while (0)
10308
10309
10310#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10311#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10312#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10313#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10314#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10315#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10316#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10317 uint32_t a_Name; \
10318 uint32_t *a_pName = &a_Name
10319#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10320 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10321
10322#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10323#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10324
10325#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10326#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10327#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10328#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10329#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10330#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10331#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
10332#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10333#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10334#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
10335#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10336#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
10337#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10338#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
10339#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
10340#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
10341#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
10342#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10343#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10344#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
10345#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10346#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10347#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
10348#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10349#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10350#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
10351#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10352#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10353#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
10354/** @note Not for IOPL or IF testing or modification. */
10355#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10356#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10357#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
10358#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
10359
10360#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
10361#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
10362#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
10363#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
10364#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
10365#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
10366#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
10367#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
10368#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
10369#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
10370#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
10371 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
10372
10373#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
10374#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
10375/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
10376 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
10377#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
10378#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
10379/** @note Not for IOPL or IF testing or modification. */
10380#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
10381
10382#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
10383#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
10384#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
10385 do { \
10386 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10387 *pu32Reg += (a_u32Value); \
10388 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10389 } while (0)
10390#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
10391
10392#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
10393#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
10394#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
10395 do { \
10396 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10397 *pu32Reg -= (a_u32Value); \
10398 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10399 } while (0)
10400#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
10401#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
10402
10403#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
10404#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
10405#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
10406#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
10407#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
10408#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
10409#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
10410
10411#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
10412#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
10413#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10414#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
10415
10416#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
10417#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
10418#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
10419
10420#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
10421#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
10422#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10423
10424#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
10425#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
10426#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
10427
10428#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
10429#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
10430#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
10431
10432#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
10433
10434#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
10435
10436#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
10437#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
10438#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
10439 do { \
10440 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10441 *pu32Reg &= (a_u32Value); \
10442 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10443 } while (0)
10444#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
10445
10446#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
10447#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
10448#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
10449 do { \
10450 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
10451 *pu32Reg |= (a_u32Value); \
10452 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
10453 } while (0)
10454#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
10455
10456
10457/** @note Not for IOPL or IF modification. */
10458#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
10459/** @note Not for IOPL or IF modification. */
10460#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
10461/** @note Not for IOPL or IF modification. */
10462#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
10463
10464#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
10465
10466
10467#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
10468 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
10469#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
10470 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
10471#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
10472 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
10473#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
10474 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
10475#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
10476 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10477#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
10478 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10479#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
10480 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
10481
10482#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
10483 do { (a_u128Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm; } while (0)
10484#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
10485 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
10486#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
10487 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
10488#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
10489 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm = (a_u128Value); } while (0)
10490#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
10491 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
10492#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
10493 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
10494 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10495 } while (0)
10496#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
10497 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
10498 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
10499 } while (0)
10500#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
10501 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10502#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
10503 (a_pu128Dst) = ((uint128_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].xmm)
10504#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
10505 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
10506#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
10507 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].xmm \
10508 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].xmm; } while (0)
10509
10510#ifndef IEM_WITH_SETJMP
10511# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10512 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
10513# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10514 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
10515# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10516 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
10517#else
10518# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
10519 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10520# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
10521 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
10522# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
10523 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
10524#endif
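
/*
 * The same IEM_MC_FETCH_MEM_U8 statement thus expands differently in the two
 * build modes: without IEM_WITH_SETJMP it goes through
 * IEM_MC_RETURN_ON_FAILURE and propagates the strict status code, while with
 * IEM_WITH_SETJMP it becomes a plain assignment from iemMemFetchDataU8Jmp(),
 * which reports failures by longjmp'ing instead. E.g. (u8Value and
 * GCPtrEffSrc are hypothetical locals):
 *
 *     IEM_MC_FETCH_MEM_U8(u8Value, X86_SREG_DS, GCPtrEffSrc);
 */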
10525
10526#ifndef IEM_WITH_SETJMP
10527# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10528 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
10529# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10530 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10531# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10532 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
10533#else
10534# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10535 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10536# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10537 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10538# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
10539 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10540#endif
10541
10542#ifndef IEM_WITH_SETJMP
10543# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10544 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
10545# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10546 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10547# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10548 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
10549#else
10550# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10551 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10552# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10553 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10554# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
10555 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10556#endif
10557
10558#ifdef SOME_UNUSED_FUNCTION
10559# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10560 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10561#endif
10562
10563#ifndef IEM_WITH_SETJMP
10564# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10565 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10566# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10567 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10568# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10569 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
10570# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10571 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
10572#else
10573# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10574 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10575# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
10576 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
10577# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
10578 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10579# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
10580 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10581#endif
10582
10583#ifndef IEM_WITH_SETJMP
10584# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10585 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
10586# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10587 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
10588# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10589 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
10590#else
10591# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
10592 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10593# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
10594 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10595# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
10596 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
10597#endif
10598
10599#ifndef IEM_WITH_SETJMP
10600# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10601 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10602# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10603 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
10604#else
10605# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
10606 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10607# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
10608 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
10609#endif
10610
10611
10612
10613#ifndef IEM_WITH_SETJMP
10614# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10615 do { \
10616 uint8_t u8Tmp; \
10617 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10618 (a_u16Dst) = u8Tmp; \
10619 } while (0)
10620# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10621 do { \
10622 uint8_t u8Tmp; \
10623 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10624 (a_u32Dst) = u8Tmp; \
10625 } while (0)
10626# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10627 do { \
10628 uint8_t u8Tmp; \
10629 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10630 (a_u64Dst) = u8Tmp; \
10631 } while (0)
10632# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10633 do { \
10634 uint16_t u16Tmp; \
10635 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10636 (a_u32Dst) = u16Tmp; \
10637 } while (0)
10638# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10639 do { \
10640 uint16_t u16Tmp; \
10641 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10642 (a_u64Dst) = u16Tmp; \
10643 } while (0)
10644# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10645 do { \
10646 uint32_t u32Tmp; \
10647 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10648 (a_u64Dst) = u32Tmp; \
10649 } while (0)
10650#else /* IEM_WITH_SETJMP */
10651# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10652 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10653# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10654 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10655# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10656 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10657# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10658 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10659# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10660 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10661# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10662 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10663#endif /* IEM_WITH_SETJMP */
10664
10665#ifndef IEM_WITH_SETJMP
10666# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10667 do { \
10668 uint8_t u8Tmp; \
10669 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10670 (a_u16Dst) = (int8_t)u8Tmp; \
10671 } while (0)
10672# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10673 do { \
10674 uint8_t u8Tmp; \
10675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10676 (a_u32Dst) = (int8_t)u8Tmp; \
10677 } while (0)
10678# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10679 do { \
10680 uint8_t u8Tmp; \
10681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
10682 (a_u64Dst) = (int8_t)u8Tmp; \
10683 } while (0)
10684# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10685 do { \
10686 uint16_t u16Tmp; \
10687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10688 (a_u32Dst) = (int16_t)u16Tmp; \
10689 } while (0)
10690# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10691 do { \
10692 uint16_t u16Tmp; \
10693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
10694 (a_u64Dst) = (int16_t)u16Tmp; \
10695 } while (0)
10696# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10697 do { \
10698 uint32_t u32Tmp; \
10699 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
10700 (a_u64Dst) = (int32_t)u32Tmp; \
10701 } while (0)
10702#else /* IEM_WITH_SETJMP */
10703# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
10704 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10705# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10706 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10707# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10708 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10709# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
10710 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10711# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10712 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10713# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
10714 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
10715#endif /* IEM_WITH_SETJMP */
10716
10717#ifndef IEM_WITH_SETJMP
10718# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10719 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
10720# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10721 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
10722# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10723 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
10724# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10725 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
10726#else
10727# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
10728 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
10729# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
10730 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
10731# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
10732 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
10733# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
10734 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
10735#endif
10736
10737#ifndef IEM_WITH_SETJMP
10738# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10739 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
10740# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10741 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
10742# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10743 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
10744# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10745 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
10746#else
10747# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
10748 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
10749# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
10750 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
10751# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
10752 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
10753# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
10754 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
10755#endif
10756
10757#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
10758#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
10759#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
10760#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
10761#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
10762#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
10763#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
10764 do { \
10765 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
10766 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
10767 } while (0)
10768
10769#ifndef IEM_WITH_SETJMP
10770# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10771 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10772# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10773 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
10774#else
10775# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
10776 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10777# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
10778 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
10779#endif
10780
10781
10782#define IEM_MC_PUSH_U16(a_u16Value) \
10783 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
10784#define IEM_MC_PUSH_U32(a_u32Value) \
10785 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
10786#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
10787 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
10788#define IEM_MC_PUSH_U64(a_u64Value) \
10789 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
10790
10791#define IEM_MC_POP_U16(a_pu16Value) \
10792 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
10793#define IEM_MC_POP_U32(a_pu32Value) \
10794 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
10795#define IEM_MC_POP_U64(a_pu64Value) \
10796 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
10797
10798/** Maps guest memory for direct or bounce buffered access.
10799 * The purpose is to pass it to an operand implementation, hence the @a a_iArg parameter.
10800 * @remarks May return.
10801 */
10802#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
10803 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10804
10805/** Maps guest memory for direct or bounce buffered access.
10806 * The purpose is to pass it to an operand implementation, hence the @a a_iArg parameter.
10807 * @remarks May return.
10808 */
10809#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
10810 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
10811
10812/** Commits the memory and unmaps the guest memory.
10813 * @remarks May return.
10814 */
10815#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
10816 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
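/* A minimal usage sketch (the access flag and the local names below are
 * illustrative assumptions, not taken from a specific instruction):
 *
 *      uint32_t *pu32Dst;
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      // ... let the operand worker update *pu32Dst ...
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *
 * Both macros may return on failure, so they only make sense inside a block
 * that is allowed to return a strict status code.
 */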
10817
10818/** Commits the memory and unmaps the guest memory unless the FPU status word
10819 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
10820 * would prevent the store instruction from storing.
10821 *
10822 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
10823 * store, while \#P will not.
10824 *
10825 * @remarks May in theory return - for now.
10826 */
10827#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
10828 do { \
10829 if ( !(a_u16FSW & X86_FSW_ES) \
10830 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
10831 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
10832 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
10833 } while (0)
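/* Worked example of the condition above (a sketch, not tied to a specific
 * instruction): assume the assembly worker returned a_u16FSW with X86_FSW_ES
 * and X86_FSW_IE set.  If the guest FCW has X86_FCW_IM clear, the IE bit
 * survives the ~(FCW & X86_FCW_MASK_ALL) masking, the whole condition is
 * false and the commit is skipped - the unmasked invalid-operation exception
 * prevents the store.  With X86_FCW_IM set the exception is masked, the
 * masking clears the IE bit and the commit proceeds as usual. */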
10834
10835/** Calculates the effective address from R/M. */
10836#ifndef IEM_WITH_SETJMP
10837# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10838 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
10839#else
10840# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
10841 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
10842#endif
10843
10844#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
10845#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
10846#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
10847#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
10848#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
10849#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
10850#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
10851
10852/**
10853 * Defers the rest of the instruction emulation to a C implementation routine
10854 * and returns, only taking the standard parameters.
10855 *
10856 * @param a_pfnCImpl The pointer to the C routine.
10857 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
10858 */
10859#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
10860
10861/**
10862 * Defers the rest of instruction emulation to a C implementation routine and
10863 * returns, taking one argument in addition to the standard ones.
10864 *
10865 * @param a_pfnCImpl The pointer to the C routine.
10866 * @param a0 The argument.
10867 */
10868#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
10869
10870/**
10871 * Defers the rest of the instruction emulation to a C implementation routine
10872 * and returns, taking two arguments in addition to the standard ones.
10873 *
10874 * @param a_pfnCImpl The pointer to the C routine.
10875 * @param a0 The first extra argument.
10876 * @param a1 The second extra argument.
10877 */
10878#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
10879
10880/**
10881 * Defers the rest of the instruction emulation to a C implementation routine
10882 * and returns, taking three arguments in addition to the standard ones.
10883 *
10884 * @param a_pfnCImpl The pointer to the C routine.
10885 * @param a0 The first extra argument.
10886 * @param a1 The second extra argument.
10887 * @param a2 The third extra argument.
10888 */
10889#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
10890
10891/**
10892 * Defers the rest of the instruction emulation to a C implementation routine
10893 * and returns, taking four arguments in addition to the standard ones.
10894 *
10895 * @param a_pfnCImpl The pointer to the C routine.
10896 * @param a0 The first extra argument.
10897 * @param a1 The second extra argument.
10898 * @param a2 The third extra argument.
10899 * @param a3 The fourth extra argument.
10900 */
10901#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
10902
10903/**
10904 * Defers the rest of the instruction emulation to a C implementation routine
10905 * and returns, taking five arguments in addition to the standard ones.
10906 *
10907 * @param a_pfnCImpl The pointer to the C routine.
10908 * @param a0 The first extra argument.
10909 * @param a1 The second extra argument.
10910 * @param a2 The third extra argument.
10911 * @param a3 The fourth extra argument.
10912 * @param a4 The fifth extra argument.
10913 */
10914#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
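/* A minimal usage sketch (iemCImpl_SomeWorker is a made-up name for a
 * routine declared with IEM_CIMPL_DEF_2; the argument set-up is elided):
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint16_t, u16Arg0, 0);
 *      IEM_MC_ARG(uint8_t,  u8Arg1,  1);
 *      ...
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, u16Arg0, u8Arg1);
 *      IEM_MC_END();
 *
 * Since IEM_MC_CALL_CIMPL_N expands to a return statement, nothing placed
 * after it in the block will be executed. */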
10915
10916/**
10917 * Defers the entire instruction emulation to a C implementation routine and
10918 * returns, only taking the standard parameters.
10919 *
10920 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10921 *
10922 * @param a_pfnCImpl The pointer to the C routine.
10923 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
10924 */
10925#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
10926
10927/**
10928 * Defers the entire instruction emulation to a C implementation routine and
10929 * returns, taking one argument in addition to the standard ones.
10930 *
10931 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10932 *
10933 * @param a_pfnCImpl The pointer to the C routine.
10934 * @param a0 The argument.
10935 */
10936#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
10937
10938/**
10939 * Defers the entire instruction emulation to a C implementation routine and
10940 * returns, taking two arguments in addition to the standard ones.
10941 *
10942 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10943 *
10944 * @param a_pfnCImpl The pointer to the C routine.
10945 * @param a0 The first extra argument.
10946 * @param a1 The second extra argument.
10947 */
10948#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
10949
10950/**
10951 * Defers the entire instruction emulation to a C implementation routine and
10952 * returns, taking three arguments in addition to the standard ones.
10953 *
10954 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
10955 *
10956 * @param a_pfnCImpl The pointer to the C routine.
10957 * @param a0 The first extra argument.
10958 * @param a1 The second extra argument.
10959 * @param a2 The third extra argument.
10960 */
10961#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
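/* A minimal usage sketch (iemOp_Something and iemCImpl_Something are made-up
 * names): since the DEFER variants are used without an IEM_MC_BEGIN/IEM_MC_END
 * block, a decoder body can be as small as:
 *
 *      FNIEMOP_DEF(iemOp_Something)
 *      {
 *          IEMOP_MNEMONIC("something");
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_Something);
 *      }
 */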
10962
10963/**
10964 * Calls a FPU assembly implementation taking one visible argument.
10965 *
10966 * @param a_pfnAImpl Pointer to the assembly FPU routine.
10967 * @param a0 The first extra argument.
10968 */
10969#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
10970 do { \
10971 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
10972 } while (0)
10973
10974/**
10975 * Calls a FPU assembly implementation taking two visible arguments.
10976 *
10977 * @param a_pfnAImpl Pointer to the assembly FPU routine.
10978 * @param a0 The first extra argument.
10979 * @param a1 The second extra argument.
10980 */
10981#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
10982 do { \
10983 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
10984 } while (0)
10985
10986/**
10987 * Calls a FPU assembly implementation taking three visible arguments.
10988 *
10989 * @param a_pfnAImpl Pointer to the assembly FPU routine.
10990 * @param a0 The first extra argument.
10991 * @param a1 The second extra argument.
10992 * @param a2 The third extra argument.
10993 */
10994#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
10995 do { \
10996 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
10997 } while (0)
10998
10999#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11000 do { \
11001 (a_FpuData).FSW = (a_FSW); \
11002 (a_FpuData).r80Result = *(a_pr80Value); \
11003 } while (0)
11004
11005/** Pushes FPU result onto the stack. */
11006#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11007 iemFpuPushResult(pVCpu, &a_FpuData)
11008/** Pushes FPU result onto the stack and sets the FPUDP. */
11009#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11010 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11011
11012/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
11013#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11014 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11015
11016/** Stores FPU result in a stack register. */
11017#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11018 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11019/** Stores FPU result in a stack register and pops the stack. */
11020#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11021 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11022/** Stores FPU result in a stack register and sets the FPUDP. */
11023#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11024 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11025/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11026 * stack. */
11027#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11028 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
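/* A rough sketch of how the AIMPL call and the result macros combine in a
 * typical two-operand FPU instruction body (the worker name and locals are
 * made up; see the IEM_MC_IF_XXX macros further down for the branching):
 *
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
 *          IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_SomeFpuOp_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 */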
11029
11030/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11031#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11032 iemFpuUpdateOpcodeAndIp(pVCpu)
11033/** Free a stack register (for FFREE and FFREEP). */
11034#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11035 iemFpuStackFree(pVCpu, a_iStReg)
11036/** Increment the FPU stack pointer. */
11037#define IEM_MC_FPU_STACK_INC_TOP() \
11038 iemFpuStackIncTop(pVCpu)
11039/** Decrement the FPU stack pointer. */
11040#define IEM_MC_FPU_STACK_DEC_TOP() \
11041 iemFpuStackDecTop(pVCpu)
11042
11043/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11044#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11045 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11046/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11047#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11048 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11049/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11050#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11051 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11052/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11053#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11054 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11055/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11056 * stack. */
11057#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11058 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11059/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11060#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11061 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11062
11063/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11064#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11065 iemFpuStackUnderflow(pVCpu, a_iStDst)
11066/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11067 * stack. */
11068#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11069 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11070/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11071 * FPUDS. */
11072#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11073 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11074/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11075 * FPUDS. Pops stack. */
11076#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11077 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11078/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11079 * stack twice. */
11080#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11081 iemFpuStackUnderflowThenPopPop(pVCpu)
11082/** Raises a FPU stack underflow exception for an instruction pushing a result
11083 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11084#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11085 iemFpuStackPushUnderflow(pVCpu)
11086/** Raises a FPU stack underflow exception for an instruction pushing a result
11087 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11088#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11089 iemFpuStackPushUnderflowTwo(pVCpu)
11090
11091/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11092 * FPUIP, FPUCS and FOP. */
11093#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11094 iemFpuStackPushOverflow(pVCpu)
11095/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11096 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11097#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11098 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11099/** Prepares for using the FPU state.
11100 * Ensures that we can use the host FPU in the current context (RC+R0).
11101 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11102#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11103/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
11104#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11105/** Actualizes the guest FPU state so it can be accessed and modified. */
11106#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
11107
11108/** Prepares for using the SSE state.
11109 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11110 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11111#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11112/** Actualizes the guest XMM0..15 register state for read-only access. */
11113#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11114/** Actualizes the guest XMM0..15 register state for read-write access. */
11115#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11116
11117/**
11118 * Calls a MMX assembly implementation taking two visible arguments.
11119 *
11120 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11121 * @param a0 The first extra argument.
11122 * @param a1 The second extra argument.
11123 */
11124#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11125 do { \
11126 IEM_MC_PREPARE_FPU_USAGE(); \
11127 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11128 } while (0)
11129
11130/**
11131 * Calls a MMX assembly implementation taking three visible arguments.
11132 *
11133 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11134 * @param a0 The first extra argument.
11135 * @param a1 The second extra argument.
11136 * @param a2 The third extra argument.
11137 */
11138#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11139 do { \
11140 IEM_MC_PREPARE_FPU_USAGE(); \
11141 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11142 } while (0)
11143
11144
11145/**
11146 * Calls a SSE assembly implementation taking two visible arguments.
11147 *
11148 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11149 * @param a0 The first extra argument.
11150 * @param a1 The second extra argument.
11151 */
11152#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11153 do { \
11154 IEM_MC_PREPARE_SSE_USAGE(); \
11155 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11156 } while (0)
11157
11158/**
11159 * Calls a SSE assembly implementation taking three visible arguments.
11160 *
11161 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11162 * @param a0 The first extra argument.
11163 * @param a1 The second extra argument.
11164 * @param a2 The third extra argument.
11165 */
11166#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11167 do { \
11168 IEM_MC_PREPARE_SSE_USAGE(); \
11169 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11170 } while (0)
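/* A minimal usage sketch (the worker name is made up): after referencing the
 * XMM operands an SSE instruction body simply does
 *
 *      IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_SomeSseOp_u128, pDst, pSrc);
 *
 * Note that the MMX and SSE call macros above invoke the corresponding
 * PREPARE macro themselves, so the caller does not have to. */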
11171
11172/** @note Not for IOPL or IF testing. */
11173#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11174/** @note Not for IOPL or IF testing. */
11175#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11176/** @note Not for IOPL or IF testing. */
11177#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11178/** @note Not for IOPL or IF testing. */
11179#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11180/** @note Not for IOPL or IF testing. */
11181#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11182 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11183 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11184/** @note Not for IOPL or IF testing. */
11185#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11186 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11187 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11188/** @note Not for IOPL or IF testing. */
11189#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11190 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11191 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11192 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11193/** @note Not for IOPL or IF testing. */
11194#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11195 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11196 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11197 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11198#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11199#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11200#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11201/** @note Not for IOPL or IF testing. */
11202#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11203 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11204 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11205/** @note Not for IOPL or IF testing. */
11206#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11207 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11208 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11209/** @note Not for IOPL or IF testing. */
11210#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11211 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11212 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11213/** @note Not for IOPL or IF testing. */
11214#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11215 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11216 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11217/** @note Not for IOPL or IF testing. */
11218#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11219 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11220 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11221/** @note Not for IOPL or IF testing. */
11222#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11223 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11224 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11225#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11226#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11227
11228#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11229 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11230#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11231 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11232#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11233 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11234#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11235 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11236#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11237 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11238#define IEM_MC_IF_FCW_IM() \
11239 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11240
11241#define IEM_MC_ELSE() } else {
11242#define IEM_MC_ENDIF() } do {} while (0)
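/* The IEM_MC_IF_XXX macros open a brace that must be closed again by
 * IEM_MC_ELSE/IEM_MC_ENDIF.  A rough Jcc-style sketch (the jump and RIP
 * macros used here are defined elsewhere in this file):
 *
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 */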
11243
11244/** @} */
11245
11246
11247/** @name Opcode Debug Helpers.
11248 * @{
11249 */
11250#ifdef DEBUG
11251# define IEMOP_MNEMONIC(a_szMnemonic) \
11252 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11253 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions))
11254# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) \
11255 Log4(("decode - %04x:%RGv %s%s %s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11256 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, a_szOps, pVCpu->iem.s.cInstructions))
11257#else
11258# define IEMOP_MNEMONIC(a_szMnemonic) do { } while (0)
11259# define IEMOP_MNEMONIC2(a_szMnemonic, a_szOps) do { } while (0)
11260#endif
11261
11262/** @} */
11263
11264
11265/** @name Opcode Helpers.
11266 * @{
11267 */
11268
11269#ifdef IN_RING3
11270# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11271 do { \
11272 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11273 else \
11274 { \
11275 DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
11276 return IEMOP_RAISE_INVALID_OPCODE(); \
11277 } \
11278 } while (0)
11279#else
11280# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
11281 do { \
11282 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
11283 else return IEMOP_RAISE_INVALID_OPCODE(); \
11284 } while (0)
11285#endif
11286
11287/** The instruction requires a 186 or later. */
11288#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
11289# define IEMOP_HLP_MIN_186() do { } while (0)
11290#else
11291# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
11292#endif
11293
11294/** The instruction requires a 286 or later. */
11295#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
11296# define IEMOP_HLP_MIN_286() do { } while (0)
11297#else
11298# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
11299#endif
11300
11301/** The instruction requires a 386 or later. */
11302#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11303# define IEMOP_HLP_MIN_386() do { } while (0)
11304#else
11305# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
11306#endif
11307
11308/** The instruction requires a 386 or later if the given expression is true. */
11309#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
11310# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
11311#else
11312# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
11313#endif
11314
11315/** The instruction requires a 486 or later. */
11316#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
11317# define IEMOP_HLP_MIN_486() do { } while (0)
11318#else
11319# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
11320#endif
11321
11322/** The instruction requires a Pentium (586) or later. */
11323#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_586
11324# define IEMOP_HLP_MIN_586() do { } while (0)
11325#else
11326# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_586, true)
11327#endif
11328
11329/** The instruction requires a PentiumPro (686) or later. */
11330#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_686
11331# define IEMOP_HLP_MIN_686() do { } while (0)
11332#else
11333# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_686, true)
11334#endif
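/* Usage sketch: these helpers are typically placed near the top of an opcode
 * decoder, e.g. IEMOP_HLP_MIN_386() right after the mnemonic logging, so that
 * instructions absent on the configured target CPU raise \#UD before any
 * further decoding takes place. */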
11335
11336
11337/** The instruction raises an \#UD in real and V8086 mode. */
11338#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
11339 do \
11340 { \
11341 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
11342 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11343 } while (0)
11344
11345/** The instruction is not available in 64-bit mode, throw \#UD if we're in
11346 * 64-bit mode. */
11347#define IEMOP_HLP_NO_64BIT() \
11348 do \
11349 { \
11350 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11351 return IEMOP_RAISE_INVALID_OPCODE(); \
11352 } while (0)
11353
11354/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
11355 * 64-bit mode. */
11356#define IEMOP_HLP_ONLY_64BIT() \
11357 do \
11358 { \
11359 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
11360 return IEMOP_RAISE_INVALID_OPCODE(); \
11361 } while (0)
11362
11363/** The instruction defaults to 64-bit operand size if 64-bit mode. */
11364#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
11365 do \
11366 { \
11367 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11368 iemRecalEffOpSize64Default(pVCpu); \
11369 } while (0)
11370
11371/** The instruction has 64-bit operand size if 64-bit mode. */
11372#define IEMOP_HLP_64BIT_OP_SIZE() \
11373 do \
11374 { \
11375 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
11376 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
11377 } while (0)
11378
11379/** Only a REX prefix immediately preceding the first opcode byte takes
11380 * effect. This macro helps ensure this and logs bad guest code. */
11381#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
11382 do \
11383 { \
11384 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
11385 { \
11386 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
11387 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
11388 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
11389 pVCpu->iem.s.uRexB = 0; \
11390 pVCpu->iem.s.uRexIndex = 0; \
11391 pVCpu->iem.s.uRexReg = 0; \
11392 iemRecalEffOpSize(pVCpu); \
11393 } \
11394 } while (0)
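/* Worked example (a sketch): for the byte sequence 48 66 89 07 the REX.W
 * prefix is not immediately followed by the opcode but by an operand-size
 * prefix.  When that 0x66 prefix is decoded, this macro resets the REX state
 * and recalculates the effective operand size, so the MOV ends up behaving
 * as if no REX prefix had been given. */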
11395
11396/**
11397 * Done decoding.
11398 */
11399#define IEMOP_HLP_DONE_DECODING() \
11400 do \
11401 { \
11402 /*nothing for now, maybe later... */ \
11403 } while (0)
11404
11405/**
11406 * Done decoding, raise \#UD exception if lock prefix present.
11407 */
11408#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
11409 do \
11410 { \
11411 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11412 { /* likely */ } \
11413 else \
11414 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11415 } while (0)
11416#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
11417 do \
11418 { \
11419 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11420 { /* likely */ } \
11421 else \
11422 { \
11423 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
11424 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11425 } \
11426 } while (0)
11427#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
11428 do \
11429 { \
11430 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
11431 { /* likely */ } \
11432 else \
11433 { \
11434 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
11435 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
11436 } \
11437 } while (0)
11438
11439/**
11440 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
11441 * are present.
11442 */
11443#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
11444 do \
11445 { \
11446 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
11447 { /* likely */ } \
11448 else \
11449 return IEMOP_RAISE_INVALID_OPCODE(); \
11450 } while (0)
11451
11452
11453/**
11454 * Calculates the effective address of a ModR/M memory operand.
11455 *
11456 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11457 *
11458 * @return Strict VBox status code.
11459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11460 * @param bRm The ModRM byte.
11461 * @param cbImm The size of any immediate following the
11462 * effective address opcode bytes. Important for
11463 * RIP relative addressing.
11464 * @param pGCPtrEff Where to return the effective address.
11465 */
11466IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
11467{
11468 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
11469 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11470# define SET_SS_DEF() \
11471 do \
11472 { \
11473 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11474 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11475 } while (0)
11476
11477 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11478 {
11479/** @todo Check the effective address size crap! */
11480 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11481 {
11482 uint16_t u16EffAddr;
11483
11484 /* Handle the disp16 form with no registers first. */
11485 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11486 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11487 else
11488 {
11489 /* Get the displacement. */
11490 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11491 {
11492 case 0: u16EffAddr = 0; break;
11493 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11494 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11495 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11496 }
11497
11498 /* Add the base and index registers to the disp. */
11499 switch (bRm & X86_MODRM_RM_MASK)
11500 {
11501 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11502 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11503 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11504 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11505 case 4: u16EffAddr += pCtx->si; break;
11506 case 5: u16EffAddr += pCtx->di; break;
11507 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11508 case 7: u16EffAddr += pCtx->bx; break;
11509 }
11510 }
11511
11512 *pGCPtrEff = u16EffAddr;
11513 }
11514 else
11515 {
11516 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11517 uint32_t u32EffAddr;
11518
11519 /* Handle the disp32 form with no registers first. */
11520 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11521 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11522 else
11523 {
11524 /* Get the register (or SIB) value. */
11525 switch ((bRm & X86_MODRM_RM_MASK))
11526 {
11527 case 0: u32EffAddr = pCtx->eax; break;
11528 case 1: u32EffAddr = pCtx->ecx; break;
11529 case 2: u32EffAddr = pCtx->edx; break;
11530 case 3: u32EffAddr = pCtx->ebx; break;
11531 case 4: /* SIB */
11532 {
11533 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11534
11535 /* Get the index and scale it. */
11536 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11537 {
11538 case 0: u32EffAddr = pCtx->eax; break;
11539 case 1: u32EffAddr = pCtx->ecx; break;
11540 case 2: u32EffAddr = pCtx->edx; break;
11541 case 3: u32EffAddr = pCtx->ebx; break;
11542 case 4: u32EffAddr = 0; /*none */ break;
11543 case 5: u32EffAddr = pCtx->ebp; break;
11544 case 6: u32EffAddr = pCtx->esi; break;
11545 case 7: u32EffAddr = pCtx->edi; break;
11546 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11547 }
11548 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11549
11550 /* add base */
11551 switch (bSib & X86_SIB_BASE_MASK)
11552 {
11553 case 0: u32EffAddr += pCtx->eax; break;
11554 case 1: u32EffAddr += pCtx->ecx; break;
11555 case 2: u32EffAddr += pCtx->edx; break;
11556 case 3: u32EffAddr += pCtx->ebx; break;
11557 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
11558 case 5:
11559 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11560 {
11561 u32EffAddr += pCtx->ebp;
11562 SET_SS_DEF();
11563 }
11564 else
11565 {
11566 uint32_t u32Disp;
11567 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11568 u32EffAddr += u32Disp;
11569 }
11570 break;
11571 case 6: u32EffAddr += pCtx->esi; break;
11572 case 7: u32EffAddr += pCtx->edi; break;
11573 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11574 }
11575 break;
11576 }
11577 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11578 case 6: u32EffAddr = pCtx->esi; break;
11579 case 7: u32EffAddr = pCtx->edi; break;
11580 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11581 }
11582
11583 /* Get and add the displacement. */
11584 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11585 {
11586 case 0:
11587 break;
11588 case 1:
11589 {
11590 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11591 u32EffAddr += i8Disp;
11592 break;
11593 }
11594 case 2:
11595 {
11596 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11597 u32EffAddr += u32Disp;
11598 break;
11599 }
11600 default:
11601 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11602 }
11603
11604 }
11605 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11606 *pGCPtrEff = u32EffAddr;
11607 else
11608 {
11609 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11610 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11611 }
11612 }
11613 }
11614 else
11615 {
11616 uint64_t u64EffAddr;
11617
11618 /* Handle the rip+disp32 form with no registers first. */
11619 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11620 {
11621 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11622 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11623 }
11624 else
11625 {
11626 /* Get the register (or SIB) value. */
11627 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11628 {
11629 case 0: u64EffAddr = pCtx->rax; break;
11630 case 1: u64EffAddr = pCtx->rcx; break;
11631 case 2: u64EffAddr = pCtx->rdx; break;
11632 case 3: u64EffAddr = pCtx->rbx; break;
11633 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11634 case 6: u64EffAddr = pCtx->rsi; break;
11635 case 7: u64EffAddr = pCtx->rdi; break;
11636 case 8: u64EffAddr = pCtx->r8; break;
11637 case 9: u64EffAddr = pCtx->r9; break;
11638 case 10: u64EffAddr = pCtx->r10; break;
11639 case 11: u64EffAddr = pCtx->r11; break;
11640 case 13: u64EffAddr = pCtx->r13; break;
11641 case 14: u64EffAddr = pCtx->r14; break;
11642 case 15: u64EffAddr = pCtx->r15; break;
11643 /* SIB */
11644 case 4:
11645 case 12:
11646 {
11647 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11648
11649 /* Get the index and scale it. */
11650 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11651 {
11652 case 0: u64EffAddr = pCtx->rax; break;
11653 case 1: u64EffAddr = pCtx->rcx; break;
11654 case 2: u64EffAddr = pCtx->rdx; break;
11655 case 3: u64EffAddr = pCtx->rbx; break;
11656 case 4: u64EffAddr = 0; /*none */ break;
11657 case 5: u64EffAddr = pCtx->rbp; break;
11658 case 6: u64EffAddr = pCtx->rsi; break;
11659 case 7: u64EffAddr = pCtx->rdi; break;
11660 case 8: u64EffAddr = pCtx->r8; break;
11661 case 9: u64EffAddr = pCtx->r9; break;
11662 case 10: u64EffAddr = pCtx->r10; break;
11663 case 11: u64EffAddr = pCtx->r11; break;
11664 case 12: u64EffAddr = pCtx->r12; break;
11665 case 13: u64EffAddr = pCtx->r13; break;
11666 case 14: u64EffAddr = pCtx->r14; break;
11667 case 15: u64EffAddr = pCtx->r15; break;
11668 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11669 }
11670 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11671
11672 /* add base */
11673 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11674 {
11675 case 0: u64EffAddr += pCtx->rax; break;
11676 case 1: u64EffAddr += pCtx->rcx; break;
11677 case 2: u64EffAddr += pCtx->rdx; break;
11678 case 3: u64EffAddr += pCtx->rbx; break;
11679 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
11680 case 6: u64EffAddr += pCtx->rsi; break;
11681 case 7: u64EffAddr += pCtx->rdi; break;
11682 case 8: u64EffAddr += pCtx->r8; break;
11683 case 9: u64EffAddr += pCtx->r9; break;
11684 case 10: u64EffAddr += pCtx->r10; break;
11685 case 11: u64EffAddr += pCtx->r11; break;
11686 case 12: u64EffAddr += pCtx->r12; break;
11687 case 14: u64EffAddr += pCtx->r14; break;
11688 case 15: u64EffAddr += pCtx->r15; break;
11689 /* complicated encodings */
11690 case 5:
11691 case 13:
11692 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11693 {
11694 if (!pVCpu->iem.s.uRexB)
11695 {
11696 u64EffAddr += pCtx->rbp;
11697 SET_SS_DEF();
11698 }
11699 else
11700 u64EffAddr += pCtx->r13;
11701 }
11702 else
11703 {
11704 uint32_t u32Disp;
11705 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11706 u64EffAddr += (int32_t)u32Disp;
11707 }
11708 break;
11709 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11710 }
11711 break;
11712 }
11713 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11714 }
11715
11716 /* Get and add the displacement. */
11717 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11718 {
11719 case 0:
11720 break;
11721 case 1:
11722 {
11723 int8_t i8Disp;
11724 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11725 u64EffAddr += i8Disp;
11726 break;
11727 }
11728 case 2:
11729 {
11730 uint32_t u32Disp;
11731 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11732 u64EffAddr += (int32_t)u32Disp;
11733 break;
11734 }
11735 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
11736 }
11737
11738 }
11739
11740 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
11741 *pGCPtrEff = u64EffAddr;
11742 else
11743 {
11744 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11745 *pGCPtrEff = u64EffAddr & UINT32_MAX;
11746 }
11747 }
11748
11749 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
11750 return VINF_SUCCESS;
11751}
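/* Worked example (16-bit addressing): for bRm=0x42 (mod=1, reg=0, rm=2) the
 * helper above fetches a sign-extended 8-bit displacement, adds BP and SI to
 * it, and - because BP is involved - SET_SS_DEF() makes SS the default
 * segment unless a segment override prefix is in effect. */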
11752
11753
11754/**
11755 * Calculates the effective address of a ModR/M memory operand.
11756 *
11757 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
11758 *
11759 * @return Strict VBox status code.
11760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11761 * @param bRm The ModRM byte.
11762 * @param cbImm The size of any immediate following the
11763 * effective address opcode bytes. Important for
11764 * RIP relative addressing.
11765 * @param pGCPtrEff Where to return the effective address.
11766 * @param offRsp RSP displacement.
11767 */
11768IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
11769{
11770 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
11771 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
11772# define SET_SS_DEF() \
11773 do \
11774 { \
11775 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
11776 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
11777 } while (0)
11778
11779 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
11780 {
11781/** @todo Check the effective address size crap! */
11782 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
11783 {
11784 uint16_t u16EffAddr;
11785
11786 /* Handle the disp16 form with no registers first. */
11787 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
11788 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
11789 else
11790 {
11791 /* Get the displacement. */
11792 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11793 {
11794 case 0: u16EffAddr = 0; break;
11795 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
11796 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
11797 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
11798 }
11799
11800 /* Add the base and index registers to the disp. */
11801 switch (bRm & X86_MODRM_RM_MASK)
11802 {
11803 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
11804 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
11805 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
11806 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
11807 case 4: u16EffAddr += pCtx->si; break;
11808 case 5: u16EffAddr += pCtx->di; break;
11809 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
11810 case 7: u16EffAddr += pCtx->bx; break;
11811 }
11812 }
11813
11814 *pGCPtrEff = u16EffAddr;
11815 }
11816 else
11817 {
11818 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
11819 uint32_t u32EffAddr;
11820
11821 /* Handle the disp32 form with no registers first. */
11822 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11823 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
11824 else
11825 {
11826 /* Get the register (or SIB) value. */
11827 switch ((bRm & X86_MODRM_RM_MASK))
11828 {
11829 case 0: u32EffAddr = pCtx->eax; break;
11830 case 1: u32EffAddr = pCtx->ecx; break;
11831 case 2: u32EffAddr = pCtx->edx; break;
11832 case 3: u32EffAddr = pCtx->ebx; break;
11833 case 4: /* SIB */
11834 {
11835 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11836
11837 /* Get the index and scale it. */
11838 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
11839 {
11840 case 0: u32EffAddr = pCtx->eax; break;
11841 case 1: u32EffAddr = pCtx->ecx; break;
11842 case 2: u32EffAddr = pCtx->edx; break;
11843 case 3: u32EffAddr = pCtx->ebx; break;
11844 case 4: u32EffAddr = 0; /*none */ break;
11845 case 5: u32EffAddr = pCtx->ebp; break;
11846 case 6: u32EffAddr = pCtx->esi; break;
11847 case 7: u32EffAddr = pCtx->edi; break;
11848 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11849 }
11850 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11851
11852 /* add base */
11853 switch (bSib & X86_SIB_BASE_MASK)
11854 {
11855 case 0: u32EffAddr += pCtx->eax; break;
11856 case 1: u32EffAddr += pCtx->ecx; break;
11857 case 2: u32EffAddr += pCtx->edx; break;
11858 case 3: u32EffAddr += pCtx->ebx; break;
11859 case 4:
11860 u32EffAddr += pCtx->esp + offRsp;
11861 SET_SS_DEF();
11862 break;
11863 case 5:
11864 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11865 {
11866 u32EffAddr += pCtx->ebp;
11867 SET_SS_DEF();
11868 }
11869 else
11870 {
11871 uint32_t u32Disp;
11872 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11873 u32EffAddr += u32Disp;
11874 }
11875 break;
11876 case 6: u32EffAddr += pCtx->esi; break;
11877 case 7: u32EffAddr += pCtx->edi; break;
11878 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11879 }
11880 break;
11881 }
11882 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
11883 case 6: u32EffAddr = pCtx->esi; break;
11884 case 7: u32EffAddr = pCtx->edi; break;
11885 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11886 }
11887
11888 /* Get and add the displacement. */
11889 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
11890 {
11891 case 0:
11892 break;
11893 case 1:
11894 {
11895 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
11896 u32EffAddr += i8Disp;
11897 break;
11898 }
11899 case 2:
11900 {
11901 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
11902 u32EffAddr += u32Disp;
11903 break;
11904 }
11905 default:
11906 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
11907 }
11908
11909 }
11910 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
11911 *pGCPtrEff = u32EffAddr;
11912 else
11913 {
11914 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
11915 *pGCPtrEff = u32EffAddr & UINT16_MAX;
11916 }
11917 }
11918 }
11919 else
11920 {
11921 uint64_t u64EffAddr;
11922
11923 /* Handle the rip+disp32 form with no registers first. */
11924 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
11925 {
11926 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
11927 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
11928 }
11929 else
11930 {
11931 /* Get the register (or SIB) value. */
11932 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
11933 {
11934 case 0: u64EffAddr = pCtx->rax; break;
11935 case 1: u64EffAddr = pCtx->rcx; break;
11936 case 2: u64EffAddr = pCtx->rdx; break;
11937 case 3: u64EffAddr = pCtx->rbx; break;
11938 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
11939 case 6: u64EffAddr = pCtx->rsi; break;
11940 case 7: u64EffAddr = pCtx->rdi; break;
11941 case 8: u64EffAddr = pCtx->r8; break;
11942 case 9: u64EffAddr = pCtx->r9; break;
11943 case 10: u64EffAddr = pCtx->r10; break;
11944 case 11: u64EffAddr = pCtx->r11; break;
11945 case 13: u64EffAddr = pCtx->r13; break;
11946 case 14: u64EffAddr = pCtx->r14; break;
11947 case 15: u64EffAddr = pCtx->r15; break;
11948 /* SIB */
11949 case 4:
11950 case 12:
11951 {
11952 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
11953
11954 /* Get the index and scale it. */
11955 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
11956 {
11957 case 0: u64EffAddr = pCtx->rax; break;
11958 case 1: u64EffAddr = pCtx->rcx; break;
11959 case 2: u64EffAddr = pCtx->rdx; break;
11960 case 3: u64EffAddr = pCtx->rbx; break;
11961 case 4: u64EffAddr = 0; /*none */ break;
11962 case 5: u64EffAddr = pCtx->rbp; break;
11963 case 6: u64EffAddr = pCtx->rsi; break;
11964 case 7: u64EffAddr = pCtx->rdi; break;
11965 case 8: u64EffAddr = pCtx->r8; break;
11966 case 9: u64EffAddr = pCtx->r9; break;
11967 case 10: u64EffAddr = pCtx->r10; break;
11968 case 11: u64EffAddr = pCtx->r11; break;
11969 case 12: u64EffAddr = pCtx->r12; break;
11970 case 13: u64EffAddr = pCtx->r13; break;
11971 case 14: u64EffAddr = pCtx->r14; break;
11972 case 15: u64EffAddr = pCtx->r15; break;
11973 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11974 }
11975 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
11976
11977 /* add base */
11978 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
11979 {
11980 case 0: u64EffAddr += pCtx->rax; break;
11981 case 1: u64EffAddr += pCtx->rcx; break;
11982 case 2: u64EffAddr += pCtx->rdx; break;
11983 case 3: u64EffAddr += pCtx->rbx; break;
11984 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
11985 case 6: u64EffAddr += pCtx->rsi; break;
11986 case 7: u64EffAddr += pCtx->rdi; break;
11987 case 8: u64EffAddr += pCtx->r8; break;
11988 case 9: u64EffAddr += pCtx->r9; break;
11989 case 10: u64EffAddr += pCtx->r10; break;
11990 case 11: u64EffAddr += pCtx->r11; break;
11991 case 12: u64EffAddr += pCtx->r12; break;
11992 case 14: u64EffAddr += pCtx->r14; break;
11993 case 15: u64EffAddr += pCtx->r15; break;
11994 /* complicated encodings */
11995 case 5:
11996 case 13:
11997 if ((bRm & X86_MODRM_MOD_MASK) != 0)
11998 {
11999 if (!pVCpu->iem.s.uRexB)
12000 {
12001 u64EffAddr += pCtx->rbp;
12002 SET_SS_DEF();
12003 }
12004 else
12005 u64EffAddr += pCtx->r13;
12006 }
12007 else
12008 {
12009 uint32_t u32Disp;
12010 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12011 u64EffAddr += (int32_t)u32Disp;
12012 }
12013 break;
12014 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12015 }
12016 break;
12017 }
12018 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12019 }
12020
12021 /* Get and add the displacement. */
12022 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12023 {
12024 case 0:
12025 break;
12026 case 1:
12027 {
12028 int8_t i8Disp;
12029 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12030 u64EffAddr += i8Disp;
12031 break;
12032 }
12033 case 2:
12034 {
12035 uint32_t u32Disp;
12036 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12037 u64EffAddr += (int32_t)u32Disp;
12038 break;
12039 }
12040 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12041 }
12042
12043 }
12044
12045 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12046 *pGCPtrEff = u64EffAddr;
12047 else
12048 {
12049 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12050 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12051 }
12052 }
12053
12054 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
12055 return VINF_SUCCESS;
12056}
12057
12058
12059#ifdef IEM_WITH_SETJMP
12060/**
12061 * Calculates the effective address of a ModR/M memory operand.
12062 *
12063 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12064 *
12065 * May longjmp on internal error.
12066 *
12067 * @return The effective address.
12068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12069 * @param bRm The ModRM byte.
12070 * @param cbImm The size of any immediate following the
12071 * effective address opcode bytes. Important for
12072 * RIP relative addressing.
12073 */
12074IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12075{
12076 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12077 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12078# define SET_SS_DEF() \
12079 do \
12080 { \
12081 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12082 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12083 } while (0)
12084
12085 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12086 {
12087/** @todo Check the effective address size crap! */
12088 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12089 {
12090 uint16_t u16EffAddr;
12091
12092 /* Handle the disp16 form with no registers first. */
12093 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12094 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12095 else
12096 {
12097 /* Get the displacement. */
12098 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12099 {
12100 case 0: u16EffAddr = 0; break;
12101 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12102 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12103 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12104 }
12105
12106 /* Add the base and index registers to the disp. */
12107 switch (bRm & X86_MODRM_RM_MASK)
12108 {
12109 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12110 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12111 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12112 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12113 case 4: u16EffAddr += pCtx->si; break;
12114 case 5: u16EffAddr += pCtx->di; break;
12115 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12116 case 7: u16EffAddr += pCtx->bx; break;
12117 }
12118 }
12119
12120 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12121 return u16EffAddr;
12122 }
12123
12124 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12125 uint32_t u32EffAddr;
12126
12127 /* Handle the disp32 form with no registers first. */
12128 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12129 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12130 else
12131 {
12132 /* Get the register (or SIB) value. */
12133 switch ((bRm & X86_MODRM_RM_MASK))
12134 {
12135 case 0: u32EffAddr = pCtx->eax; break;
12136 case 1: u32EffAddr = pCtx->ecx; break;
12137 case 2: u32EffAddr = pCtx->edx; break;
12138 case 3: u32EffAddr = pCtx->ebx; break;
12139 case 4: /* SIB */
12140 {
12141 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12142
12143 /* Get the index and scale it. */
12144 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12145 {
12146 case 0: u32EffAddr = pCtx->eax; break;
12147 case 1: u32EffAddr = pCtx->ecx; break;
12148 case 2: u32EffAddr = pCtx->edx; break;
12149 case 3: u32EffAddr = pCtx->ebx; break;
12150 case 4: u32EffAddr = 0; /*none */ break;
12151 case 5: u32EffAddr = pCtx->ebp; break;
12152 case 6: u32EffAddr = pCtx->esi; break;
12153 case 7: u32EffAddr = pCtx->edi; break;
12154 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12155 }
12156 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12157
12158 /* add base */
12159 switch (bSib & X86_SIB_BASE_MASK)
12160 {
12161 case 0: u32EffAddr += pCtx->eax; break;
12162 case 1: u32EffAddr += pCtx->ecx; break;
12163 case 2: u32EffAddr += pCtx->edx; break;
12164 case 3: u32EffAddr += pCtx->ebx; break;
12165 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12166 case 5:
12167 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12168 {
12169 u32EffAddr += pCtx->ebp;
12170 SET_SS_DEF();
12171 }
12172 else
12173 {
12174 uint32_t u32Disp;
12175 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12176 u32EffAddr += u32Disp;
12177 }
12178 break;
12179 case 6: u32EffAddr += pCtx->esi; break;
12180 case 7: u32EffAddr += pCtx->edi; break;
12181 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12182 }
12183 break;
12184 }
12185 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12186 case 6: u32EffAddr = pCtx->esi; break;
12187 case 7: u32EffAddr = pCtx->edi; break;
12188 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12189 }
12190
12191 /* Get and add the displacement. */
12192 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12193 {
12194 case 0:
12195 break;
12196 case 1:
12197 {
12198 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12199 u32EffAddr += i8Disp;
12200 break;
12201 }
12202 case 2:
12203 {
12204 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12205 u32EffAddr += u32Disp;
12206 break;
12207 }
12208 default:
12209 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12210 }
12211 }
12212
12213 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12214 {
12215 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
12216 return u32EffAddr;
12217 }
12218 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12219 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
12220 return u32EffAddr & UINT16_MAX;
12221 }
12222
12223 uint64_t u64EffAddr;
12224
12225 /* Handle the rip+disp32 form with no registers first. */
12226 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12227 {
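/* RIP-relative addressing: the signed disp32 is relative to the address of the
   *next* instruction, i.e. the current RIP plus the opcode bytes decoded so far
   plus any immediate operand that follows (cbImm). */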
12228 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12229 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12230 }
12231 else
12232 {
12233 /* Get the register (or SIB) value. */
12234 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12235 {
12236 case 0: u64EffAddr = pCtx->rax; break;
12237 case 1: u64EffAddr = pCtx->rcx; break;
12238 case 2: u64EffAddr = pCtx->rdx; break;
12239 case 3: u64EffAddr = pCtx->rbx; break;
12240 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12241 case 6: u64EffAddr = pCtx->rsi; break;
12242 case 7: u64EffAddr = pCtx->rdi; break;
12243 case 8: u64EffAddr = pCtx->r8; break;
12244 case 9: u64EffAddr = pCtx->r9; break;
12245 case 10: u64EffAddr = pCtx->r10; break;
12246 case 11: u64EffAddr = pCtx->r11; break;
12247 case 13: u64EffAddr = pCtx->r13; break;
12248 case 14: u64EffAddr = pCtx->r14; break;
12249 case 15: u64EffAddr = pCtx->r15; break;
12250 /* SIB */
12251 case 4:
12252 case 12:
12253 {
12254 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12255
12256 /* Get the index and scale it. */
12257 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12258 {
12259 case 0: u64EffAddr = pCtx->rax; break;
12260 case 1: u64EffAddr = pCtx->rcx; break;
12261 case 2: u64EffAddr = pCtx->rdx; break;
12262 case 3: u64EffAddr = pCtx->rbx; break;
12263 case 4: u64EffAddr = 0; /*none */ break;
12264 case 5: u64EffAddr = pCtx->rbp; break;
12265 case 6: u64EffAddr = pCtx->rsi; break;
12266 case 7: u64EffAddr = pCtx->rdi; break;
12267 case 8: u64EffAddr = pCtx->r8; break;
12268 case 9: u64EffAddr = pCtx->r9; break;
12269 case 10: u64EffAddr = pCtx->r10; break;
12270 case 11: u64EffAddr = pCtx->r11; break;
12271 case 12: u64EffAddr = pCtx->r12; break;
12272 case 13: u64EffAddr = pCtx->r13; break;
12273 case 14: u64EffAddr = pCtx->r14; break;
12274 case 15: u64EffAddr = pCtx->r15; break;
12275 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12276 }
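/* Scale the index by 1, 2, 4 or 8 according to the 2-bit SIB.scale field. */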
12277 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12278
12279 /* add base */
12280 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12281 {
12282 case 0: u64EffAddr += pCtx->rax; break;
12283 case 1: u64EffAddr += pCtx->rcx; break;
12284 case 2: u64EffAddr += pCtx->rdx; break;
12285 case 3: u64EffAddr += pCtx->rbx; break;
12286 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12287 case 6: u64EffAddr += pCtx->rsi; break;
12288 case 7: u64EffAddr += pCtx->rdi; break;
12289 case 8: u64EffAddr += pCtx->r8; break;
12290 case 9: u64EffAddr += pCtx->r9; break;
12291 case 10: u64EffAddr += pCtx->r10; break;
12292 case 11: u64EffAddr += pCtx->r11; break;
12293 case 12: u64EffAddr += pCtx->r12; break;
12294 case 14: u64EffAddr += pCtx->r14; break;
12295 case 15: u64EffAddr += pCtx->r15; break;
12296 /* complicated encodings */
12297 case 5:
12298 case 13:
12299 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12300 {
12301 if (!pVCpu->iem.s.uRexB)
12302 {
12303 u64EffAddr += pCtx->rbp;
12304 SET_SS_DEF();
12305 }
12306 else
12307 u64EffAddr += pCtx->r13;
12308 }
12309 else
12310 {
12311 uint32_t u32Disp;
12312 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12313 u64EffAddr += (int32_t)u32Disp;
12314 }
12315 break;
12316 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12317 }
12318 break;
12319 }
12320 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12321 }
12322
12323 /* Get and add the displacement. */
12324 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12325 {
12326 case 0:
12327 break;
12328 case 1:
12329 {
12330 int8_t i8Disp;
12331 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12332 u64EffAddr += i8Disp;
12333 break;
12334 }
12335 case 2:
12336 {
12337 uint32_t u32Disp;
12338 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12339 u64EffAddr += (int32_t)u32Disp;
12340 break;
12341 }
12342 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12343 }
12344
12345 }
12346
12347 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12348 {
12349 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
12350 return u64EffAddr;
12351 }
12352 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12353 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
12354 return u64EffAddr & UINT32_MAX;
12355}
12356#endif /* IEM_WITH_SETJMP */
12357
12358
12359/** @} */
12360
12361
12362
12363/*
12364 * Include the instructions
12365 */
12366#include "IEMAllInstructions.cpp.h"
12367
12368
12369
12370
12371#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
12372
12373/**
12374 * Sets up execution verification mode.
12375 */
12376IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
12377{
12379 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
12380
12381 /*
12382 * Always note down the address of the current instruction.
12383 */
12384 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
12385 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
12386
12387 /*
12388 * Enable verification and/or logging.
12389 */
12390 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
12391 if ( fNewNoRem
12392 && ( 0
12393#if 0 /* auto enable on first paged protected mode interrupt */
12394 || ( pOrgCtx->eflags.Bits.u1IF
12395 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
12396 && TRPMHasTrap(pVCpu)
12397 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
12398#endif
12399#if 0
12400 || ( pOrgCtx->cs.Sel == 0x10
12401 && ( pOrgCtx->rip == 0x90119e3e
12402 || pOrgCtx->rip == 0x901d9810))
12403#endif
12404#if 0 /* Auto enable DSL - FPU stuff. */
12405 || ( pOrgCtx->cs.Sel == 0x10
12406 && (// pOrgCtx->rip == 0xc02ec07f
12407 //|| pOrgCtx->rip == 0xc02ec082
12408 //|| pOrgCtx->rip == 0xc02ec0c9
12409 0
12410 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
12411#endif
12412#if 0 /* Auto enable DSL - fstp st0 stuff. */
12413 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
12414#endif
12415#if 0
12416 || pOrgCtx->rip == 0x9022bb3a
12417#endif
12418#if 0
12419 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
12420#endif
12421#if 0
12422 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
12423 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
12424#endif
12425#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
12426 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
12427 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
12428 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
12429#endif
12430#if 0 /* NT4SP1 - xadd early boot. */
12431 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
12432#endif
12433#if 0 /* NT4SP1 - wrmsr (intel MSR). */
12434 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
12435#endif
12436#if 0 /* NT4SP1 - cmpxchg (AMD). */
12437 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
12438#endif
12439#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
12440 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
12441#endif
12442#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
12443 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
12444
12445#endif
12446#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
12447 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
12448
12449#endif
12450#if 0 /* NT4SP1 - frstor [ecx] */
12451 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
12452#endif
12453#if 0 /* xxxxxx - All long mode code. */
12454 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
12455#endif
12456#if 0 /* rep movsq linux 3.7 64-bit boot. */
12457 || (pOrgCtx->rip == 0x0000000000100241)
12458#endif
12459#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
12460 || (pOrgCtx->rip == 0x000000000215e240)
12461#endif
12462#if 0 /* DOS's size-overridden iret to v8086. */
12463 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
12464#endif
12465 )
12466 )
12467 {
12468 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
12469 RTLogFlags(NULL, "enabled");
12470 fNewNoRem = false;
12471 }
12472 if (fNewNoRem != pVCpu->iem.s.fNoRem)
12473 {
12474 pVCpu->iem.s.fNoRem = fNewNoRem;
12475 if (!fNewNoRem)
12476 {
12477 LogAlways(("Enabling verification mode!\n"));
12478 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
12479 }
12480 else
12481 LogAlways(("Disabling verification mode!\n"));
12482 }
12483
12484 /*
12485 * Switch state.
12486 */
12487 if (IEM_VERIFICATION_ENABLED(pVCpu))
12488 {
12489 static CPUMCTX s_DebugCtx; /* Ugly! */
12490
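/* When verifying, IEM runs on a copy of the guest context so that the result
   can afterwards be compared against the reference execution (HM or REM) done
   on the original context. */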
12491 s_DebugCtx = *pOrgCtx;
12492 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
12493 }
12494
12495 /*
12496 * See if there is an interrupt pending in TRPM and inject it if we can.
12497 */
12498 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
12499 if ( pOrgCtx->eflags.Bits.u1IF
12500 && TRPMHasTrap(pVCpu)
12501 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
12502 {
12503 uint8_t u8TrapNo;
12504 TRPMEVENT enmType;
12505 RTGCUINT uErrCode;
12506 RTGCPTR uCr2;
12507 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
12508 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
12509 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12510 TRPMResetTrap(pVCpu);
12511 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
12512 }
12513
12514 /*
12515 * Reset the counters.
12516 */
12517 pVCpu->iem.s.cIOReads = 0;
12518 pVCpu->iem.s.cIOWrites = 0;
12519 pVCpu->iem.s.fIgnoreRaxRdx = false;
12520 pVCpu->iem.s.fOverlappingMovs = false;
12521 pVCpu->iem.s.fProblematicMemory = false;
12522 pVCpu->iem.s.fUndefinedEFlags = 0;
12523
12524 if (IEM_VERIFICATION_ENABLED(pVCpu))
12525 {
12526 /*
12527 * Free all verification records.
12528 */
12529 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
12530 pVCpu->iem.s.pIemEvtRecHead = NULL;
12531 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
12532 do
12533 {
12534 while (pEvtRec)
12535 {
12536 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
12537 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
12538 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
12539 pEvtRec = pNext;
12540 }
12541 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
12542 pVCpu->iem.s.pOtherEvtRecHead = NULL;
12543 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
12544 } while (pEvtRec);
12545 }
12546}
12547
12548
12549/**
12550 * Allocate an event record.
12551 * @returns Pointer to a record.
12552 */
12553IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
12554{
12555 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12556 return NULL;
12557
12558 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
12559 if (pEvtRec)
12560 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
12561 else
12562 {
12563 if (!pVCpu->iem.s.ppIemEvtRecNext)
12564 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
12565
12566 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
12567 if (!pEvtRec)
12568 return NULL;
12569 }
12570 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
12571 pEvtRec->pNext = NULL;
12572 return pEvtRec;
12573}
12574
12575
12576/**
12577 * IOMMMIORead notification.
12578 */
12579VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
12580{
12581 PVMCPU pVCpu = VMMGetCpu(pVM);
12582 if (!pVCpu)
12583 return;
12584 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12585 if (!pEvtRec)
12586 return;
12587 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
12588 pEvtRec->u.RamRead.GCPhys = GCPhys;
12589 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
12590 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12591 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12592}
12593
12594
12595/**
12596 * IOMMMIOWrite notification.
12597 */
12598VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
12599{
12600 PVMCPU pVCpu = VMMGetCpu(pVM);
12601 if (!pVCpu)
12602 return;
12603 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12604 if (!pEvtRec)
12605 return;
12606 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
12607 pEvtRec->u.RamWrite.GCPhys = GCPhys;
12608 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
12609 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
12610 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
12611 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
12612 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
12613 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12614 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12615}
12616
12617
12618/**
12619 * IOMIOPortRead notification.
12620 */
12621VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
12622{
12623 PVMCPU pVCpu = VMMGetCpu(pVM);
12624 if (!pVCpu)
12625 return;
12626 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12627 if (!pEvtRec)
12628 return;
12629 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12630 pEvtRec->u.IOPortRead.Port = Port;
12631 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12632 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12633 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12634}
12635
12636/**
12637 * IOMIOPortWrite notification.
12638 */
12639VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12640{
12641 PVMCPU pVCpu = VMMGetCpu(pVM);
12642 if (!pVCpu)
12643 return;
12644 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12645 if (!pEvtRec)
12646 return;
12647 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12648 pEvtRec->u.IOPortWrite.Port = Port;
12649 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12650 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12651 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12652 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12653}
12654
12655
12656VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
12657{
12658 PVMCPU pVCpu = VMMGetCpu(pVM);
12659 if (!pVCpu)
12660 return;
12661 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12662 if (!pEvtRec)
12663 return;
12664 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
12665 pEvtRec->u.IOPortStrRead.Port = Port;
12666 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
12667 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
12668 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12669 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12670}
12671
12672
12673VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
12674{
12675 PVMCPU pVCpu = VMMGetCpu(pVM);
12676 if (!pVCpu)
12677 return;
12678 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12679 if (!pEvtRec)
12680 return;
12681 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
12682 pEvtRec->u.IOPortStrWrite.Port = Port;
12683 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
12684 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
12685 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
12686 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
12687}
12688
12689
12690/**
12691 * Fakes and records an I/O port read.
12692 *
12693 * @returns VINF_SUCCESS.
12694 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12695 * @param Port The I/O port.
12696 * @param pu32Value Where to store the fake value.
12697 * @param cbValue The size of the access.
12698 */
12699IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
12700{
12701 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12702 if (pEvtRec)
12703 {
12704 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
12705 pEvtRec->u.IOPortRead.Port = Port;
12706 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
12707 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12708 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12709 }
12710 pVCpu->iem.s.cIOReads++;
12711 *pu32Value = 0xcccccccc;
12712 return VINF_SUCCESS;
12713}
12714
12715
12716/**
12717 * Fakes and records an I/O port write.
12718 *
12719 * @returns VINF_SUCCESS.
12720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12721 * @param Port The I/O port.
12722 * @param u32Value The value being written.
12723 * @param cbValue The size of the access.
12724 */
12725IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
12726{
12727 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
12728 if (pEvtRec)
12729 {
12730 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
12731 pEvtRec->u.IOPortWrite.Port = Port;
12732 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
12733 pEvtRec->u.IOPortWrite.u32Value = u32Value;
12734 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
12735 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
12736 }
12737 pVCpu->iem.s.cIOWrites++;
12738 return VINF_SUCCESS;
12739}
12740
12741
12742/**
12743 * Used to add extra details about a stub case.
12744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12745 */
12746IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
12747{
12748 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12749 PVM pVM = pVCpu->CTX_SUFF(pVM);
12751 char szRegs[4096];
12752 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
12753 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
12754 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
12755 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
12756 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
12757 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
12758 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
12759 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
12760 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
12761 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
12762 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
12763 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
12764 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
12765 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
12766 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
12767 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
12768 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
12769 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
12770 " efer=%016VR{efer}\n"
12771 " pat=%016VR{pat}\n"
12772 " sf_mask=%016VR{sf_mask}\n"
12773 "krnl_gs_base=%016VR{krnl_gs_base}\n"
12774 " lstar=%016VR{lstar}\n"
12775 " star=%016VR{star} cstar=%016VR{cstar}\n"
12776 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
12777 );
12778
12779 char szInstr1[256];
12780 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
12781 DBGF_DISAS_FLAGS_DEFAULT_MODE,
12782 szInstr1, sizeof(szInstr1), NULL);
12783 char szInstr2[256];
12784 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
12785 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
12786 szInstr2, sizeof(szInstr2), NULL);
12787
12788 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
12789}
12790
12791
12792/**
12793 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
12794 * dump to the assertion info.
12795 *
12796 * @param pEvtRec The record to dump.
12797 */
12798IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
12799{
12800 switch (pEvtRec->enmEvent)
12801 {
12802 case IEMVERIFYEVENT_IOPORT_READ:
12803 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
12804 pEvtRec->u.IOPortRead.Port,
12805 pEvtRec->u.IOPortRead.cbValue);
12806 break;
12807 case IEMVERIFYEVENT_IOPORT_WRITE:
12808 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
12809 pEvtRec->u.IOPortWrite.Port,
12810 pEvtRec->u.IOPortWrite.cbValue,
12811 pEvtRec->u.IOPortWrite.u32Value);
12812 break;
12813 case IEMVERIFYEVENT_IOPORT_STR_READ:
12814 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
12815 pEvtRec->u.IOPortStrRead.Port,
12816 pEvtRec->u.IOPortStrRead.cbValue,
12817 pEvtRec->u.IOPortStrRead.cTransfers);
12818 break;
12819 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
12820 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
12821 pEvtRec->u.IOPortStrWrite.Port,
12822 pEvtRec->u.IOPortStrWrite.cbValue,
12823 pEvtRec->u.IOPortStrWrite.cTransfers);
12824 break;
12825 case IEMVERIFYEVENT_RAM_READ:
12826 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
12827 pEvtRec->u.RamRead.GCPhys,
12828 pEvtRec->u.RamRead.cb);
12829 break;
12830 case IEMVERIFYEVENT_RAM_WRITE:
12831 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
12832 pEvtRec->u.RamWrite.GCPhys,
12833 pEvtRec->u.RamWrite.cb,
12834 (int)pEvtRec->u.RamWrite.cb,
12835 pEvtRec->u.RamWrite.ab);
12836 break;
12837 default:
12838 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
12839 break;
12840 }
12841}
12842
12843
12844/**
12845 * Raises an assertion on the specified records, showing the given message with
12846 * dumps of both records attached.
12847 *
12848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12849 * @param pEvtRec1 The first record.
12850 * @param pEvtRec2 The second record.
12851 * @param pszMsg The message explaining why we're asserting.
12852 */
12853IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
12854{
12855 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12856 iemVerifyAssertAddRecordDump(pEvtRec1);
12857 iemVerifyAssertAddRecordDump(pEvtRec2);
12858 iemVerifyAssertMsg2(pVCpu);
12859 RTAssertPanic();
12860}
12861
12862
12863/**
12864 * Raises an assertion on the specified record, showing the given message with
12865 * a record dump attached.
12866 *
12867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12868 * @param pEvtRec1 The first record.
12869 * @param pszMsg The message explaining why we're asserting.
12870 */
12871IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
12872{
12873 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12874 iemVerifyAssertAddRecordDump(pEvtRec);
12875 iemVerifyAssertMsg2(pVCpu);
12876 RTAssertPanic();
12877}
12878
12879
12880/**
12881 * Verifies a write record.
12882 *
12883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12884 * @param pEvtRec The write record.
12885 * @param fRem Set if REM did the reference (other) execution. If clear
12886 * it was HM.
12887 */
12888IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
12889{
12890 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
12891 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
12892 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
12893 if ( RT_FAILURE(rc)
12894 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
12895 {
12896 /* fend off ins */
12897 if ( !pVCpu->iem.s.cIOReads
12898 || pEvtRec->u.RamWrite.ab[0] != 0xcc
12899 || ( pEvtRec->u.RamWrite.cb != 1
12900 && pEvtRec->u.RamWrite.cb != 2
12901 && pEvtRec->u.RamWrite.cb != 4) )
12902 {
12903 /* fend off ROMs and MMIO */
12904 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
12905 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
12906 {
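/* Only writes outside the legacy VGA/BIOS area (A0000h-FFFFFh) and outside
   what appears to be the BIOS flash mapping at the top of the 4GB address
   space make it here and get verified. */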
12907 /* fend off fxsave */
12908 if (pEvtRec->u.RamWrite.cb != 512)
12909 {
12910 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
12911 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
12912 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
12913 RTAssertMsg2Add("%s: %.*Rhxs\n"
12914 "iem: %.*Rhxs\n",
12915 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
12916 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
12917 iemVerifyAssertAddRecordDump(pEvtRec);
12918 iemVerifyAssertMsg2(pVCpu);
12919 RTAssertPanic();
12920 }
12921 }
12922 }
12923 }
12924
12925}
12926
12927/**
12928 * Performs the post-execution verification checks.
12929 */
12930IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
12931{
12932 if (!IEM_VERIFICATION_ENABLED(pVCpu))
12933 return rcStrictIem;
12934
12935 /*
12936 * Switch back the state.
12937 */
12938 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
12939 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
12940 Assert(pOrgCtx != pDebugCtx);
12941 IEM_GET_CTX(pVCpu) = pOrgCtx;
12942
12943 /*
12944 * Execute the instruction in REM.
12945 */
12946 bool fRem = false;
12947 PVM pVM = pVCpu->CTX_SUFF(pVM);
12949 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
12950#ifdef IEM_VERIFICATION_MODE_FULL_HM
12951 if ( HMIsEnabled(pVM)
12952 && pVCpu->iem.s.cIOReads == 0
12953 && pVCpu->iem.s.cIOWrites == 0
12954 && !pVCpu->iem.s.fProblematicMemory)
12955 {
12956 uint64_t uStartRip = pOrgCtx->rip;
12957 unsigned iLoops = 0;
12958 do
12959 {
12960 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
12961 iLoops++;
12962 } while ( rc == VINF_SUCCESS
12963 || ( rc == VINF_EM_DBG_STEPPED
12964 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
12965 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
12966 || ( pOrgCtx->rip != pDebugCtx->rip
12967 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
12968 && iLoops < 8) );
12969 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
12970 rc = VINF_SUCCESS;
12971 }
12972#endif
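/* If HM was not attempted or could not complete the instruction on its own
   (I/O, MMIO or MSR exits and the like), fall back on REM for the reference
   execution. */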
12973 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
12974 || rc == VINF_IOM_R3_IOPORT_READ
12975 || rc == VINF_IOM_R3_IOPORT_WRITE
12976 || rc == VINF_IOM_R3_MMIO_READ
12977 || rc == VINF_IOM_R3_MMIO_READ_WRITE
12978 || rc == VINF_IOM_R3_MMIO_WRITE
12979 || rc == VINF_CPUM_R3_MSR_READ
12980 || rc == VINF_CPUM_R3_MSR_WRITE
12981 || rc == VINF_EM_RESCHEDULE
12982 )
12983 {
12984 EMRemLock(pVM);
12985 rc = REMR3EmulateInstruction(pVM, pVCpu);
12986 AssertRC(rc);
12987 EMRemUnlock(pVM);
12988 fRem = true;
12989 }
12990
12991# if 1 /* Skip unimplemented instructions for now. */
12992 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
12993 {
12994 IEM_GET_CTX(pVCpu) = pOrgCtx;
12995 if (rc == VINF_EM_DBG_STEPPED)
12996 return VINF_SUCCESS;
12997 return rc;
12998 }
12999# endif
13000
13001 /*
13002 * Compare the register states.
13003 */
13004 unsigned cDiffs = 0;
13005 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13006 {
13007 //Log(("REM and IEM end up with different registers!\n"));
13008 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13009
13010# define CHECK_FIELD(a_Field) \
13011 do \
13012 { \
13013 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13014 { \
13015 switch (sizeof(pOrgCtx->a_Field)) \
13016 { \
13017 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13018 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13019 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13020 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13021 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13022 } \
13023 cDiffs++; \
13024 } \
13025 } while (0)
13026# define CHECK_XSTATE_FIELD(a_Field) \
13027 do \
13028 { \
13029 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13030 { \
13031 switch (sizeof(pOrgXState->a_Field)) \
13032 { \
13033 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13034 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13035 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13036 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13037 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13038 } \
13039 cDiffs++; \
13040 } \
13041 } while (0)
13042
13043# define CHECK_BIT_FIELD(a_Field) \
13044 do \
13045 { \
13046 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13047 { \
13048 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13049 cDiffs++; \
13050 } \
13051 } while (0)
13052
13053# define CHECK_SEL(a_Sel) \
13054 do \
13055 { \
13056 CHECK_FIELD(a_Sel.Sel); \
13057 CHECK_FIELD(a_Sel.Attr.u); \
13058 CHECK_FIELD(a_Sel.u64Base); \
13059 CHECK_FIELD(a_Sel.u32Limit); \
13060 CHECK_FIELD(a_Sel.fFlags); \
13061 } while (0)
13062
13063 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13064 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13065
13066#if 1 /* The recompiler doesn't update these the intel way. */
13067 if (fRem)
13068 {
13069 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13070 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13071 pOrgXState->x87.CS = pDebugXState->x87.CS;
13072 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13073 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13074 pOrgXState->x87.DS = pDebugXState->x87.DS;
13075 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13076 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13077 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13078 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13079 }
13080#endif
13081 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13082 {
13083 RTAssertMsg2Weak(" the FPU state differs\n");
13084 cDiffs++;
13085 CHECK_XSTATE_FIELD(x87.FCW);
13086 CHECK_XSTATE_FIELD(x87.FSW);
13087 CHECK_XSTATE_FIELD(x87.FTW);
13088 CHECK_XSTATE_FIELD(x87.FOP);
13089 CHECK_XSTATE_FIELD(x87.FPUIP);
13090 CHECK_XSTATE_FIELD(x87.CS);
13091 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13092 CHECK_XSTATE_FIELD(x87.FPUDP);
13093 CHECK_XSTATE_FIELD(x87.DS);
13094 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13095 CHECK_XSTATE_FIELD(x87.MXCSR);
13096 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13097 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13098 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13099 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13100 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13101 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13102 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13103 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13104 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13105 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13106 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13107 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13108 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13109 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13110 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13111 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13112 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13113 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13114 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13115 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13116 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13117 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13118 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13119 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13120 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13121 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13122 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13123 }
13124 CHECK_FIELD(rip);
13125 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13126 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13127 {
13128 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13129 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13130 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13131 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13132 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13133 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13134 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13135 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13136 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13137 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13138 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13139 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13140 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13141 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13142 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13143 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13144 if (0 && !fRem) /** @todo debug the occasionally cleared RF flag when running against VT-x. */
13145 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13146 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13147 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13148 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13149 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13150 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13151 }
13152
13153 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13154 CHECK_FIELD(rax);
13155 CHECK_FIELD(rcx);
13156 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13157 CHECK_FIELD(rdx);
13158 CHECK_FIELD(rbx);
13159 CHECK_FIELD(rsp);
13160 CHECK_FIELD(rbp);
13161 CHECK_FIELD(rsi);
13162 CHECK_FIELD(rdi);
13163 CHECK_FIELD(r8);
13164 CHECK_FIELD(r9);
13165 CHECK_FIELD(r10);
13166 CHECK_FIELD(r11);
13167 CHECK_FIELD(r12);
13168 CHECK_FIELD(r13);
13169 CHECK_SEL(cs);
13170 CHECK_SEL(ss);
13171 CHECK_SEL(ds);
13172 CHECK_SEL(es);
13173 CHECK_SEL(fs);
13174 CHECK_SEL(gs);
13175 CHECK_FIELD(cr0);
13176
13177 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
13178 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
13179 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
13180 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
13181 if (pOrgCtx->cr2 != pDebugCtx->cr2)
13182 {
13183 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
13184 { /* ignore */ }
13185 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
13186 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
13187 && fRem)
13188 { /* ignore */ }
13189 else
13190 CHECK_FIELD(cr2);
13191 }
13192 CHECK_FIELD(cr3);
13193 CHECK_FIELD(cr4);
13194 CHECK_FIELD(dr[0]);
13195 CHECK_FIELD(dr[1]);
13196 CHECK_FIELD(dr[2]);
13197 CHECK_FIELD(dr[3]);
13198 CHECK_FIELD(dr[6]);
13199 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
13200 CHECK_FIELD(dr[7]);
13201 CHECK_FIELD(gdtr.cbGdt);
13202 CHECK_FIELD(gdtr.pGdt);
13203 CHECK_FIELD(idtr.cbIdt);
13204 CHECK_FIELD(idtr.pIdt);
13205 CHECK_SEL(ldtr);
13206 CHECK_SEL(tr);
13207 CHECK_FIELD(SysEnter.cs);
13208 CHECK_FIELD(SysEnter.eip);
13209 CHECK_FIELD(SysEnter.esp);
13210 CHECK_FIELD(msrEFER);
13211 CHECK_FIELD(msrSTAR);
13212 CHECK_FIELD(msrPAT);
13213 CHECK_FIELD(msrLSTAR);
13214 CHECK_FIELD(msrCSTAR);
13215 CHECK_FIELD(msrSFMASK);
13216 CHECK_FIELD(msrKERNELGSBASE);
13217
13218 if (cDiffs != 0)
13219 {
13220 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13221 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
13222 RTAssertPanic();
13223 static bool volatile s_fEnterDebugger = true;
13224 if (s_fEnterDebugger)
13225 DBGFSTOP(pVM);
13226
13227# if 1 /* Ignore unimplemented instructions for now. */
13228 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13229 rcStrictIem = VINF_SUCCESS;
13230# endif
13231 }
13232# undef CHECK_FIELD
13233# undef CHECK_BIT_FIELD
13234 }
13235
13236 /*
13237 * If the register state compared fine, check the verification event
13238 * records.
13239 */
13240 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
13241 {
13242 /*
13243 * Compare verification event records.
13244 * - I/O port accesses should be a 1:1 match.
13245 */
13246 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
13247 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
13248 while (pIemRec && pOtherRec)
13249 {
13250 /* Since we might miss RAM writes and reads, ignore reads and verify any
13251 extra written-memory records against the actual guest memory. */
13252 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
13253 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
13254 && pIemRec->pNext)
13255 {
13256 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13257 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13258 pIemRec = pIemRec->pNext;
13259 }
13260
13261 /* Do the compare. */
13262 if (pIemRec->enmEvent != pOtherRec->enmEvent)
13263 {
13264 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
13265 break;
13266 }
13267 bool fEquals;
13268 switch (pIemRec->enmEvent)
13269 {
13270 case IEMVERIFYEVENT_IOPORT_READ:
13271 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
13272 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
13273 break;
13274 case IEMVERIFYEVENT_IOPORT_WRITE:
13275 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
13276 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
13277 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
13278 break;
13279 case IEMVERIFYEVENT_IOPORT_STR_READ:
13280 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
13281 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
13282 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
13283 break;
13284 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13285 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
13286 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
13287 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
13288 break;
13289 case IEMVERIFYEVENT_RAM_READ:
13290 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
13291 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
13292 break;
13293 case IEMVERIFYEVENT_RAM_WRITE:
13294 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
13295 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
13296 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
13297 break;
13298 default:
13299 fEquals = false;
13300 break;
13301 }
13302 if (!fEquals)
13303 {
13304 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
13305 break;
13306 }
13307
13308 /* advance */
13309 pIemRec = pIemRec->pNext;
13310 pOtherRec = pOtherRec->pNext;
13311 }
13312
13313 /* Ignore extra writes and reads. */
13314 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
13315 {
13316 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
13317 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
13318 pIemRec = pIemRec->pNext;
13319 }
13320 if (pIemRec != NULL)
13321 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
13322 else if (pOtherRec != NULL)
13323 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
13324 }
13325 IEM_GET_CTX(pVCpu) = pOrgCtx;
13326
13327 return rcStrictIem;
13328}
13329
13330#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13331
13332/* stubs */
13333IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13334{
13335 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
13336 return VERR_INTERNAL_ERROR;
13337}
13338
13339IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13340{
13341 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
13342 return VERR_INTERNAL_ERROR;
13343}
13344
13345#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
13346
13347
13348#ifdef LOG_ENABLED
13349/**
13350 * Logs the current instruction.
13351 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13352 * @param pCtx The current CPU context.
13353 * @param fSameCtx Set if we have the same context information as the VMM,
13354 * clear if we may have already executed an instruction in
13355 * our debug context. When clear, we assume IEMCPU holds
13356 * valid CPU mode info.
13357 */
13358IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13359{
13360# ifdef IN_RING3
13361 if (LogIs2Enabled())
13362 {
13363 char szInstr[256];
13364 uint32_t cbInstr = 0;
13365 if (fSameCtx)
13366 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13367 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13368 szInstr, sizeof(szInstr), &cbInstr);
13369 else
13370 {
13371 uint32_t fFlags = 0;
13372 switch (pVCpu->iem.s.enmCpuMode)
13373 {
13374 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13375 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13376 case IEMMODE_16BIT:
13377 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13378 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13379 else
13380 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13381 break;
13382 }
13383 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13384 szInstr, sizeof(szInstr), &cbInstr);
13385 }
13386
13387 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13388 Log2(("****\n"
13389 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13390 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13391 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13392 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13393 " %s\n"
13394 ,
13395 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13396 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13397 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13398 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13399 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13400 szInstr));
13401
13402 if (LogIs3Enabled())
13403 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13404 }
13405 else
13406# endif
13407 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13408 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13409}
13410#endif
13411
13412
13413/**
13414 * Makes status code adjustments (pass up from I/O and access handlers)
13415 * as well as maintaining statistics.
13416 *
13417 * @returns Strict VBox status code to pass up.
13418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13419 * @param rcStrict The status from executing an instruction.
13420 */
13421DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13422{
13423 if (rcStrict != VINF_SUCCESS)
13424 {
13425 if (RT_SUCCESS(rcStrict))
13426 {
13427 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13428 || rcStrict == VINF_IOM_R3_IOPORT_READ
13429 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13430 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13431 || rcStrict == VINF_IOM_R3_MMIO_READ
13432 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13433 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13434 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13435 || rcStrict == VINF_CPUM_R3_MSR_READ
13436 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13437 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13438 || rcStrict == VINF_EM_RAW_TO_R3
13439 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13440 /* raw-mode / virt handlers only: */
13441 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13442 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13443 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13444 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13445 || rcStrict == VINF_SELM_SYNC_GDT
13446 || rcStrict == VINF_CSAM_PENDING_ACTION
13447 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13448 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13449/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
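/* A status remembered in rcPassUp takes precedence when it is not an EM
   scheduling status, or when it is numerically lower (treated as higher
   priority here) than the status about to be returned. */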
13450 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13451 if (rcPassUp == VINF_SUCCESS)
13452 pVCpu->iem.s.cRetInfStatuses++;
13453 else if ( rcPassUp < VINF_EM_FIRST
13454 || rcPassUp > VINF_EM_LAST
13455 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13456 {
13457 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13458 pVCpu->iem.s.cRetPassUpStatus++;
13459 rcStrict = rcPassUp;
13460 }
13461 else
13462 {
13463 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13464 pVCpu->iem.s.cRetInfStatuses++;
13465 }
13466 }
13467 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13468 pVCpu->iem.s.cRetAspectNotImplemented++;
13469 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13470 pVCpu->iem.s.cRetInstrNotImplemented++;
13471#ifdef IEM_VERIFICATION_MODE_FULL
13472 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
13473 rcStrict = VINF_SUCCESS;
13474#endif
13475 else
13476 pVCpu->iem.s.cRetErrStatuses++;
13477 }
13478 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13479 {
13480 pVCpu->iem.s.cRetPassUpStatus++;
13481 rcStrict = pVCpu->iem.s.rcPassUp;
13482 }
13483
13484 return rcStrict;
13485}
13486
13487
13488/**
13489 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13490 * IEMExecOneWithPrefetchedByPC.
13491 *
13492 * Similar code is found in IEMExecLots.
13493 *
13494 * @return Strict VBox status code.
13495 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13497 * @param fExecuteInhibit If set, execute the instruction following CLI,
13498 * POP SS and MOV SS,GR.
13499 */
13500#ifdef __GNUC__
13501DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13502#else
13503DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13504#endif
13505{
13506#ifdef IEM_WITH_SETJMP
13507 VBOXSTRICTRC rcStrict;
13508 jmp_buf JmpBuf;
13509 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13510 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13511 if ((rcStrict = setjmp(JmpBuf)) == 0)
13512 {
13513 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13514 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13515 }
13516 else
13517 pVCpu->iem.s.cLongJumps++;
13518 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13519#else
13520 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13521 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13522#endif
13523 if (rcStrict == VINF_SUCCESS)
13524 pVCpu->iem.s.cInstructions++;
13525 if (pVCpu->iem.s.cActiveMappings > 0)
13526 {
13527 Assert(rcStrict != VINF_SUCCESS);
13528 iemMemRollback(pVCpu);
13529 }
13530//#ifdef DEBUG
13531// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13532//#endif
13533
13534 /* Execute the next instruction as well if a cli, pop ss or
13535 mov ss, Gr has just completed successfully. */
13536 if ( fExecuteInhibit
13537 && rcStrict == VINF_SUCCESS
13538 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13539 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13540 {
13541 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13542 if (rcStrict == VINF_SUCCESS)
13543 {
13544#ifdef LOG_ENABLED
13545 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13546#endif
13547#ifdef IEM_WITH_SETJMP
13548 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13549 if ((rcStrict = setjmp(JmpBuf)) == 0)
13550 {
13551 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13552 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13553 }
13554 else
13555 pVCpu->iem.s.cLongJumps++;
13556 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13557#else
13558 IEM_OPCODE_GET_NEXT_U8(&b);
13559 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13560#endif
13561 if (rcStrict == VINF_SUCCESS)
13562 pVCpu->iem.s.cInstructions++;
13563 if (pVCpu->iem.s.cActiveMappings > 0)
13564 {
13565 Assert(rcStrict != VINF_SUCCESS);
13566 iemMemRollback(pVCpu);
13567 }
13568 }
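/* Setting the inhibit-PC to an address RIP presumably never matches
   effectively clears the interrupt inhibition, as the force-flag only
   applies while RIP equals the recorded PC. */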
13569 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13570 }
13571
13572 /*
13573 * Return value fiddling, statistics and sanity assertions.
13574 */
13575 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13576
13577 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13578 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13579#if defined(IEM_VERIFICATION_MODE_FULL)
13580 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13581 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13582 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13583 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13584#endif
13585 return rcStrict;
13586}
13587
13588
13589#ifdef IN_RC
13590/**
13591 * Re-enters raw-mode or ensure we return to ring-3.
13592 *
13593 * @returns rcStrict, maybe modified.
13594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13595 * @param pCtx The current CPU context.
13596 * @param rcStrict The status code returned by the interpreter.
13597 */
13598DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13599{
13600 if ( !pVCpu->iem.s.fInPatchCode
13601 && ( rcStrict == VINF_SUCCESS
13602 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13603 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13604 {
13605 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13606 CPUMRawEnter(pVCpu);
13607 else
13608 {
13609 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13610 rcStrict = VINF_EM_RESCHEDULE;
13611 }
13612 }
13613 return rcStrict;
13614}
13615#endif
13616
13617
13618/**
13619 * Execute one instruction.
13620 *
13621 * @return Strict VBox status code.
13622 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13623 */
13624VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13625{
13626#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13627 if (++pVCpu->iem.s.cVerifyDepth == 1)
13628 iemExecVerificationModeSetup(pVCpu);
13629#endif
13630#ifdef LOG_ENABLED
13631 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13632 iemLogCurInstr(pVCpu, pCtx, true);
13633#endif
13634
13635 /*
13636 * Do the decoding and emulation.
13637 */
13638 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13639 if (rcStrict == VINF_SUCCESS)
13640 rcStrict = iemExecOneInner(pVCpu, true);
13641
13642#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13643 /*
13644 * Assert some sanity.
13645 */
13646 if (pVCpu->iem.s.cVerifyDepth == 1)
13647 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13648 pVCpu->iem.s.cVerifyDepth--;
13649#endif
13650#ifdef IN_RC
13651 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13652#endif
13653 if (rcStrict != VINF_SUCCESS)
13654 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13655 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13656 return rcStrict;
13657}
13658
13659
13660VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13661{
13662 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13663 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13664
13665 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13666 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13667 if (rcStrict == VINF_SUCCESS)
13668 {
13669 rcStrict = iemExecOneInner(pVCpu, true);
13670 if (pcbWritten)
13671 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13672 }
13673
13674#ifdef IN_RC
13675 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13676#endif
13677 return rcStrict;
13678}
13679
13680
13681VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13682 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13683{
13684 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13685 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13686
13687 VBOXSTRICTRC rcStrict;
13688 if ( cbOpcodeBytes
13689 && pCtx->rip == OpcodeBytesPC)
13690 {
13691 iemInitDecoder(pVCpu, false);
13692#ifdef IEM_WITH_CODE_TLB
13693 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13694 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13695 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13696 pVCpu->iem.s.offCurInstrStart = 0;
13697 pVCpu->iem.s.offInstrNextByte = 0;
13698#else
13699 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13700 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13701#endif
13702 rcStrict = VINF_SUCCESS;
13703 }
13704 else
13705 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13706 if (rcStrict == VINF_SUCCESS)
13707 {
13708 rcStrict = iemExecOneInner(pVCpu, true);
13709 }
13710
13711#ifdef IN_RC
13712 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13713#endif
13714 return rcStrict;
13715}
13716
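/*
 * Illustrative sketch (not part of the original source): feeding already
 * fetched opcode bytes to the interpreter so it can skip the prefetch when the
 * bytes match the current RIP. The pabBytes/cbBytes parameters are hypothetical
 * and would typically come from a VM-exit information buffer.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleExecPrefetched(PVMCPU pVCpu, const uint8_t *pabBytes, size_t cbBytes)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    /* If pCtx->rip does not match the PC passed in, the call falls back to a normal prefetch. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx), pCtx->rip, pabBytes, cbBytes);
}
#endif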
13717
13718VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
13719{
13720 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13721 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13722
13723 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13724 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13725 if (rcStrict == VINF_SUCCESS)
13726 {
13727 rcStrict = iemExecOneInner(pVCpu, false);
13728 if (pcbWritten)
13729 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13730 }
13731
13732#ifdef IN_RC
13733 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13734#endif
13735 return rcStrict;
13736}
13737
13738
13739VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13740 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
13741{
13742 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13743 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13744
13745 VBOXSTRICTRC rcStrict;
13746 if ( cbOpcodeBytes
13747 && pCtx->rip == OpcodeBytesPC)
13748 {
13749 iemInitDecoder(pVCpu, true);
13750#ifdef IEM_WITH_CODE_TLB
13751 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13752 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13753 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13754 pVCpu->iem.s.offCurInstrStart = 0;
13755 pVCpu->iem.s.offInstrNextByte = 0;
13756#else
13757 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13758 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13759#endif
13760 rcStrict = VINF_SUCCESS;
13761 }
13762 else
13763 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13764 if (rcStrict == VINF_SUCCESS)
13765 rcStrict = iemExecOneInner(pVCpu, false);
13766
13767#ifdef IN_RC
13768 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13769#endif
13770 return rcStrict;
13771}
13772
13773
13774/**
13775 * For debugging DISGetParamSize; may come in handy.
13776 *
13777 * @returns Strict VBox status code.
13778 * @param pVCpu The cross context virtual CPU structure of the
13779 * calling EMT.
13780 * @param pCtxCore The context core structure.
13781 * @param OpcodeBytesPC The PC of the opcode bytes.
13782 * @param pvOpcodeBytes Prefetched opcode bytes.
13783 * @param cbOpcodeBytes Number of prefetched bytes.
13784 * @param pcbWritten Where to return the number of bytes written.
13785 * Optional.
13786 */
13787VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
13788 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
13789 uint32_t *pcbWritten)
13790{
13791 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13792 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
13793
13794 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
13795 VBOXSTRICTRC rcStrict;
13796 if ( cbOpcodeBytes
13797 && pCtx->rip == OpcodeBytesPC)
13798 {
13799 iemInitDecoder(pVCpu, true);
13800#ifdef IEM_WITH_CODE_TLB
13801 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
13802 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
13803 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
13804 pVCpu->iem.s.offCurInstrStart = 0;
13805 pVCpu->iem.s.offInstrNextByte = 0;
13806#else
13807 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
13808 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
13809#endif
13810 rcStrict = VINF_SUCCESS;
13811 }
13812 else
13813 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
13814 if (rcStrict == VINF_SUCCESS)
13815 {
13816 rcStrict = iemExecOneInner(pVCpu, false);
13817 if (pcbWritten)
13818 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
13819 }
13820
13821#ifdef IN_RC
13822 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
13823#endif
13824 return rcStrict;
13825}
13826
13827
13828VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
13829{
13830 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
13831
13832#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13833 /*
13834 * See if there is an interrupt pending in TRPM, inject it if we can.
13835 */
13836 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13837# ifdef IEM_VERIFICATION_MODE_FULL
13838 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13839# endif
13840 if ( pCtx->eflags.Bits.u1IF
13841 && TRPMHasTrap(pVCpu)
13842 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13843 {
13844 uint8_t u8TrapNo;
13845 TRPMEVENT enmType;
13846 RTGCUINT uErrCode;
13847 RTGCPTR uCr2;
13848 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13849 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13850 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13851 TRPMResetTrap(pVCpu);
13852 }
13853
13854 /*
13855 * Log the state.
13856 */
13857# ifdef LOG_ENABLED
13858 iemLogCurInstr(pVCpu, pCtx, true);
13859# endif
13860
13861 /*
13862 * Do the decoding and emulation.
13863 */
13864 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13865 if (rcStrict == VINF_SUCCESS)
13866 rcStrict = iemExecOneInner(pVCpu, true);
13867
13868 /*
13869 * Assert some sanity.
13870 */
13871 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
13872
13873 /*
13874 * Log and return.
13875 */
13876 if (rcStrict != VINF_SUCCESS)
13877 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13878 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13879 if (pcInstructions)
13880 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
13881 return rcStrict;
13882
13883#else /* Not verification mode */
13884
13885 /*
13886 * See if there is an interrupt pending in TRPM, inject it if we can.
13887 */
13888 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13889# ifdef IEM_VERIFICATION_MODE_FULL
13890 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13891# endif
13892 if ( pCtx->eflags.Bits.u1IF
13893 && TRPMHasTrap(pVCpu)
13894 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
13895 {
13896 uint8_t u8TrapNo;
13897 TRPMEVENT enmType;
13898 RTGCUINT uErrCode;
13899 RTGCPTR uCr2;
13900 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13901 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13902 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13903 TRPMResetTrap(pVCpu);
13904 }
13905
13906 /*
13907 * Initial decoder init w/ prefetch, then setup setjmp.
13908 */
13909 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13910 if (rcStrict == VINF_SUCCESS)
13911 {
13912# ifdef IEM_WITH_SETJMP
13913 jmp_buf JmpBuf;
13914 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13915 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13916 pVCpu->iem.s.cActiveMappings = 0;
13917 if ((rcStrict = setjmp(JmpBuf)) == 0)
13918# endif
13919 {
13920 /*
13921 * The run loop. We limit ourselves to 4096 instructions right now.
13922 */
13923 PVM pVM = pVCpu->CTX_SUFF(pVM);
13924 uint32_t cInstr = 4096;
13925 for (;;)
13926 {
13927 /*
13928 * Log the state.
13929 */
13930# ifdef LOG_ENABLED
13931 iemLogCurInstr(pVCpu, pCtx, true);
13932# endif
13933
13934 /*
13935 * Do the decoding and emulation.
13936 */
13937 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13938 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13939 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
13940 {
13941 Assert(pVCpu->iem.s.cActiveMappings == 0);
13942 pVCpu->iem.s.cInstructions++;
13943 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
13944 {
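                        /* Gather the pending per-VCPU force flags, masking out the ones that do
                           not require leaving this inner loop (TLB/CR3 syncing, the raw-mode
                           selector/IDT sync flags, interrupt inhibition and NMI blocking are
                           dealt with elsewhere). */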
13945 uint32_t fCpu = pVCpu->fLocalForcedActions
13946 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
13947 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
13948 | VMCPU_FF_TLB_FLUSH
13949# ifdef VBOX_WITH_RAW_MODE
13950 | VMCPU_FF_TRPM_SYNC_IDT
13951 | VMCPU_FF_SELM_SYNC_TSS
13952 | VMCPU_FF_SELM_SYNC_GDT
13953 | VMCPU_FF_SELM_SYNC_LDT
13954# endif
13955 | VMCPU_FF_INHIBIT_INTERRUPTS
13956 | VMCPU_FF_BLOCK_NMIS ));
13957
13958 if (RT_LIKELY( ( !fCpu
13959 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
13960 && !pCtx->rflags.Bits.u1IF) )
13961 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
13962 {
13963 if (cInstr-- > 0)
13964 {
13965 Assert(pVCpu->iem.s.cActiveMappings == 0);
13966 iemReInitDecoder(pVCpu);
13967 continue;
13968 }
13969 }
13970 }
13971 Assert(pVCpu->iem.s.cActiveMappings == 0);
13972 }
13973 else if (pVCpu->iem.s.cActiveMappings > 0)
13974 iemMemRollback(pVCpu);
13975 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13976 break;
13977 }
13978 }
13979# ifdef IEM_WITH_SETJMP
13980 else
13981 {
13982 if (pVCpu->iem.s.cActiveMappings > 0)
13983 iemMemRollback(pVCpu);
13984 pVCpu->iem.s.cLongJumps++;
13985 }
13986 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13987# endif
13988
13989 /*
13990 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
13991 */
13992 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13993 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13994# if defined(IEM_VERIFICATION_MODE_FULL)
13995 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
13996 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
13997 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
13998 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
13999# endif
14000 }
14001
14002 /*
14003 * Maybe re-enter raw-mode and log.
14004 */
14005# ifdef IN_RC
14006 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14007# endif
14008 if (rcStrict != VINF_SUCCESS)
14009 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14010 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14011 if (pcInstructions)
14012 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14013 return rcStrict;
14014#endif /* Not verification mode */
14015}
14016
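/*
 * Illustrative sketch (not part of the original source): a hypothetical ring-3
 * helper letting IEM run a batch of instructions and reporting how many were
 * retired. The helper name is invented for illustration.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleRunBatch(PVMCPU pVCpu)
{
    uint32_t     cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("iemExampleRunBatch: executed %u instructions, rcStrict=%Rrc\n",
             cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif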
14017
14018
14019/**
14020 * Injects a trap, fault, abort, software interrupt or external interrupt.
14021 *
14022 * The parameter list matches TRPMQueryTrapAll pretty closely.
14023 *
14024 * @returns Strict VBox status code.
14025 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14026 * @param u8TrapNo The trap number.
14027 * @param enmType What type is it (trap/fault/abort), software
14028 * interrupt or hardware interrupt.
14029 * @param uErrCode The error code if applicable.
14030 * @param uCr2 The CR2 value if applicable.
14031 * @param cbInstr The instruction length (only relevant for
14032 * software interrupts).
14033 */
14034VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14035 uint8_t cbInstr)
14036{
14037 iemInitDecoder(pVCpu, false);
14038#ifdef DBGFTRACE_ENABLED
14039 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14040 u8TrapNo, enmType, uErrCode, uCr2);
14041#endif
14042
14043 uint32_t fFlags;
14044 switch (enmType)
14045 {
14046 case TRPM_HARDWARE_INT:
14047 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14048 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14049 uErrCode = uCr2 = 0;
14050 break;
14051
14052 case TRPM_SOFTWARE_INT:
14053 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14054 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14055 uErrCode = uCr2 = 0;
14056 break;
14057
14058 case TRPM_TRAP:
14059 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14060 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14061 if (u8TrapNo == X86_XCPT_PF)
14062 fFlags |= IEM_XCPT_FLAGS_CR2;
14063 switch (u8TrapNo)
14064 {
14065 case X86_XCPT_DF:
14066 case X86_XCPT_TS:
14067 case X86_XCPT_NP:
14068 case X86_XCPT_SS:
14069 case X86_XCPT_PF:
14070 case X86_XCPT_AC:
14071 fFlags |= IEM_XCPT_FLAGS_ERR;
14072 break;
14073
14074 case X86_XCPT_NMI:
14075 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14076 break;
14077 }
14078 break;
14079
14080 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14081 }
14082
14083 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14084}
14085
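/*
 * Illustrative sketch (not part of the original source): injecting an external
 * hardware interrupt via IEMInjectTrap. The helper name is invented; as the code
 * above shows, for TRPM_HARDWARE_INT the error code and CR2 are forced to zero
 * and the instruction length is only relevant for software interrupts.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleInjectExtInt(PVMCPU pVCpu, uint8_t u8Vector)
{
    return IEMInjectTrap(pVCpu, u8Vector, TRPM_HARDWARE_INT, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif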
14086
14087/**
14088 * Injects the active TRPM event.
14089 *
14090 * @returns Strict VBox status code.
14091 * @param pVCpu The cross context virtual CPU structure.
14092 */
14093VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14094{
14095#ifndef IEM_IMPLEMENTS_TASKSWITCH
14096 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14097#else
14098 uint8_t u8TrapNo;
14099 TRPMEVENT enmType;
14100 RTGCUINT uErrCode;
14101 RTGCUINTPTR uCr2;
14102 uint8_t cbInstr;
14103 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14104 if (RT_FAILURE(rc))
14105 return rc;
14106
14107 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14108
14109 /** @todo Are there any other codes that imply the event was successfully
14110 * delivered to the guest? See @bugref{6607}. */
14111 if ( rcStrict == VINF_SUCCESS
14112 || rcStrict == VINF_IEM_RAISED_XCPT)
14113 {
14114 TRPMResetTrap(pVCpu);
14115 }
14116 return rcStrict;
14117#endif
14118}
14119
14120
14121VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14122{
14123 return VERR_NOT_IMPLEMENTED;
14124}
14125
14126
14127VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14128{
14129 return VERR_NOT_IMPLEMENTED;
14130}
14131
14132
14133#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14134/**
14135 * Executes an IRET instruction with the default operand size.
14136 *
14137 * This is for PATM.
14138 *
14139 * @returns VBox status code.
14140 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14141 * @param pCtxCore The register frame.
14142 */
14143VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14144{
14145 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14146
14147 iemCtxCoreToCtx(pCtx, pCtxCore);
14148 iemInitDecoder(pVCpu);
14149 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14150 if (rcStrict == VINF_SUCCESS)
14151 iemCtxToCtxCore(pCtxCore, pCtx);
14152 else
14153 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14154 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14155 return rcStrict;
14156}
14157#endif
14158
14159
14160/**
14161 * Macro used by the IEMExec* method to check the given instruction length.
14162 *
14163 * Will return on failure!
14164 *
14165 * @param a_cbInstr The given instruction length.
14166 * @param a_cbMin The minimum length.
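 * @remarks The single unsigned compare below is a range check: it only passes
 *          when a_cbMin <= a_cbInstr <= 15 (the maximum x86 instruction
 *          length), since values below a_cbMin wrap around to large numbers.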
14167 */
14168#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14169 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14170 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14171
14172
14173/**
14174 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14175 *
14176 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14177 *
14178 * @returns Fiddled strict VBox status code, ready to return to the non-IEM caller.
14179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14180 * @param rcStrict The status code to fiddle.
14181 */
14182DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14183{
14184 iemUninitExec(pVCpu);
14185#ifdef IN_RC
14186 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14187 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14188#else
14189 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14190#endif
14191}
14192
14193
14194/**
14195 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14196 *
14197 * This API ASSUMES that the caller has already verified that the guest code is
14198 * allowed to access the I/O port. (The I/O port is in the DX register in the
14199 * guest state.)
14200 *
14201 * @returns Strict VBox status code.
14202 * @param pVCpu The cross context virtual CPU structure.
14203 * @param cbValue The size of the I/O port access (1, 2, or 4).
14204 * @param enmAddrMode The addressing mode.
14205 * @param fRepPrefix Indicates whether a repeat prefix is used
14206 * (doesn't matter which for this instruction).
14207 * @param cbInstr The instruction length in bytes.
14208 * @param iEffSeg The effective segment register (index).
14209 * @param fIoChecked Whether the access to the I/O port has been
14210 * checked or not. It's typically checked in the
14211 * HM scenario.
14212 */
14213VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14214 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14215{
14216 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14217 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14218
14219 /*
14220 * State init.
14221 */
14222 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14223
14224 /*
14225 * Switch orgy for getting to the right handler.
14226 */
14227 VBOXSTRICTRC rcStrict;
14228 if (fRepPrefix)
14229 {
14230 switch (enmAddrMode)
14231 {
14232 case IEMMODE_16BIT:
14233 switch (cbValue)
14234 {
14235 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14236 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14237 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14238 default:
14239 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14240 }
14241 break;
14242
14243 case IEMMODE_32BIT:
14244 switch (cbValue)
14245 {
14246 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14247 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14248 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14249 default:
14250 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14251 }
14252 break;
14253
14254 case IEMMODE_64BIT:
14255 switch (cbValue)
14256 {
14257 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14258 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14259 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14260 default:
14261 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14262 }
14263 break;
14264
14265 default:
14266 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14267 }
14268 }
14269 else
14270 {
14271 switch (enmAddrMode)
14272 {
14273 case IEMMODE_16BIT:
14274 switch (cbValue)
14275 {
14276 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14277 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14278 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14279 default:
14280 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14281 }
14282 break;
14283
14284 case IEMMODE_32BIT:
14285 switch (cbValue)
14286 {
14287 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14288 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14289 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14290 default:
14291 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14292 }
14293 break;
14294
14295 case IEMMODE_64BIT:
14296 switch (cbValue)
14297 {
14298 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14299 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14300 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14301 default:
14302 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14303 }
14304 break;
14305
14306 default:
14307 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14308 }
14309 }
14310
14311 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14312}
14313
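/*
 * Illustrative sketch (not part of the original source): how HM might hand a
 * decoded REP OUTSB over to IEM. The instruction length, segment and fIoChecked
 * flag are values the exit handler would already have; the ones used here are
 * hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleStringOut(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* Byte-sized accesses, 32-bit addressing, REP prefix, DS segment, I/O access already checked. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif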
14314
14315/**
14316 * Interface for HM and EM for executing string I/O IN (read) instructions.
14317 *
14318 * This API ASSUMES that the caller has already verified that the guest code is
14319 * allowed to access the I/O port. (The I/O port is in the DX register in the
14320 * guest state.)
14321 *
14322 * @returns Strict VBox status code.
14323 * @param pVCpu The cross context virtual CPU structure.
14324 * @param cbValue The size of the I/O port access (1, 2, or 4).
14325 * @param enmAddrMode The addressing mode.
14326 * @param fRepPrefix Indicates whether a repeat prefix is used
14327 * (doesn't matter which for this instruction).
14328 * @param cbInstr The instruction length in bytes.
14329 * @param fIoChecked Whether the access to the I/O port has been
14330 * checked or not. It's typically checked in the
14331 * HM scenario.
14332 */
14333VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14334 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14335{
14336 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14337
14338 /*
14339 * State init.
14340 */
14341 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14342
14343 /*
14344 * Switch orgy for getting to the right handler.
14345 */
14346 VBOXSTRICTRC rcStrict;
14347 if (fRepPrefix)
14348 {
14349 switch (enmAddrMode)
14350 {
14351 case IEMMODE_16BIT:
14352 switch (cbValue)
14353 {
14354 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14355 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14356 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14357 default:
14358 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14359 }
14360 break;
14361
14362 case IEMMODE_32BIT:
14363 switch (cbValue)
14364 {
14365 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14366 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14367 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14368 default:
14369 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14370 }
14371 break;
14372
14373 case IEMMODE_64BIT:
14374 switch (cbValue)
14375 {
14376 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14377 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14378 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14379 default:
14380 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14381 }
14382 break;
14383
14384 default:
14385 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14386 }
14387 }
14388 else
14389 {
14390 switch (enmAddrMode)
14391 {
14392 case IEMMODE_16BIT:
14393 switch (cbValue)
14394 {
14395 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14396 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14397 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14398 default:
14399 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14400 }
14401 break;
14402
14403 case IEMMODE_32BIT:
14404 switch (cbValue)
14405 {
14406 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14407 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14408 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14409 default:
14410 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14411 }
14412 break;
14413
14414 case IEMMODE_64BIT:
14415 switch (cbValue)
14416 {
14417 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14418 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14419 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14420 default:
14421 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14422 }
14423 break;
14424
14425 default:
14426 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14427 }
14428 }
14429
14430 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14431}
14432
14433
14434/**
14435 * Interface for raw-mode to execute an OUT instruction.
14436 *
14437 * @returns Strict VBox status code.
14438 * @param pVCpu The cross context virtual CPU structure.
14439 * @param cbInstr The instruction length in bytes.
14440 * @param u16Port The port to write to.
14441 * @param cbReg The register size.
14442 *
14443 * @remarks In ring-0 not all of the state needs to be synced in.
14444 */
14445VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14446{
14447 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14448 Assert(cbReg <= 4 && cbReg != 3);
14449
14450 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14451 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14452 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14453}
14454
14455
14456/**
14457 * Interface for raw-mode to execute an IN instruction.
14458 *
14459 * @returns Strict VBox status code.
14460 * @param pVCpu The cross context virtual CPU structure.
14461 * @param cbInstr The instruction length in bytes.
14462 * @param u16Port The port to read.
14463 * @param cbReg The register size.
14464 */
14465VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14466{
14467 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14468 Assert(cbReg <= 4 && cbReg != 3);
14469
14470 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14471 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14472 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14473}
14474
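/*
 * Illustrative sketch (not part of the original source): forwarding an already
 * decoded OUT DX,AL / IN AL,DX to IEM. The port number and instruction length
 * would come from the raw-mode or HM exit decoder; the helper name and the
 * byte-sized register width are made up for illustration.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleDecodedPortIo(PVMCPU pVCpu, bool fWrite, uint16_t u16Port, uint8_t cbInstr)
{
    if (fWrite)
        return IEMExecDecodedOut(pVCpu, cbInstr, u16Port, 1 /*cbReg*/);
    return IEMExecDecodedIn(pVCpu, cbInstr, u16Port, 1 /*cbReg*/);
}
#endif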
14475
14476/**
14477 * Interface for HM and EM to write to a CRx register.
14478 *
14479 * @returns Strict VBox status code.
14480 * @param pVCpu The cross context virtual CPU structure.
14481 * @param cbInstr The instruction length in bytes.
14482 * @param iCrReg The control register number (destination).
14483 * @param iGReg The general purpose register number (source).
14484 *
14485 * @remarks In ring-0 not all of the state needs to be synced in.
14486 */
14487VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14488{
14489 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14490 Assert(iCrReg < 16);
14491 Assert(iGReg < 16);
14492
14493 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14494 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14495 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14496}
14497
14498
14499/**
14500 * Interface for HM and EM to read from a CRx register.
14501 *
14502 * @returns Strict VBox status code.
14503 * @param pVCpu The cross context virtual CPU structure.
14504 * @param cbInstr The instruction length in bytes.
14505 * @param iGReg The general purpose register number (destination).
14506 * @param iCrReg The control register number (source).
14507 *
14508 * @remarks In ring-0 not all of the state needs to be synced in.
14509 */
14510VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14511{
14512 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14513 Assert(iCrReg < 16);
14514 Assert(iGReg < 16);
14515
14516 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14517 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14518 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14519}
14520
14521
14522/**
14523 * Interface for HM and EM to clear the CR0[TS] bit.
14524 *
14525 * @returns Strict VBox status code.
14526 * @param pVCpu The cross context virtual CPU structure.
14527 * @param cbInstr The instruction length in bytes.
14528 *
14529 * @remarks In ring-0 not all of the state needs to be synced in.
14530 */
14531VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14532{
14533 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14534
14535 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14536 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14537 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14538}
14539
14540
14541/**
14542 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14543 *
14544 * @returns Strict VBox status code.
14545 * @param pVCpu The cross context virtual CPU structure.
14546 * @param cbInstr The instruction length in bytes.
14547 * @param uValue The value to load into CR0.
14548 *
14549 * @remarks In ring-0 not all of the state needs to be synced in.
14550 */
14551VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14552{
14553 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14554
14555 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14556 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14557 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14558}
14559
14560
14561/**
14562 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14563 *
14564 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14565 *
14566 * @returns Strict VBox status code.
14567 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14568 * @param cbInstr The instruction length in bytes.
14569 * @remarks In ring-0 not all of the state needs to be synced in.
14570 * @thread EMT(pVCpu)
14571 */
14572VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14573{
14574 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14575
14576 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14577 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14578 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14579}
14580
14581#ifdef IN_RING3
14582
14583/**
14584 * Handles the unlikely and probably fatal merge cases.
14585 *
14586 * @returns Merged status code.
14587 * @param rcStrict Current EM status code.
14588 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14589 * with @a rcStrict.
14590 * @param iMemMap The memory mapping index. For error reporting only.
14591 * @param pVCpu The cross context virtual CPU structure of the calling
14592 * thread, for error reporting only.
14593 */
14594DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
14595 unsigned iMemMap, PVMCPU pVCpu)
14596{
14597 if (RT_FAILURE_NP(rcStrict))
14598 return rcStrict;
14599
14600 if (RT_FAILURE_NP(rcStrictCommit))
14601 return rcStrictCommit;
14602
14603 if (rcStrict == rcStrictCommit)
14604 return rcStrictCommit;
14605
14606 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
14607 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
14608 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
14609 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
14610 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
14611 return VERR_IOM_FF_STATUS_IPE;
14612}
14613
14614
14615/**
14616 * Helper for IOMR3ProcessForceFlag.
14617 *
14618 * @returns Merged status code.
14619 * @param rcStrict Current EM status code.
14620 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
14621 * with @a rcStrict.
14622 * @param iMemMap The memory mapping index. For error reporting only.
14623 * @param pVCpu The cross context virtual CPU structure of the calling
14624 * thread, for error reporting only.
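 * @remarks When both codes are EM scheduling statuses, the numerically lower
 *          of the two is returned, which corresponds to the more urgent
 *          scheduling request.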
14625 */
14626DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
14627{
14628 /* Simple. */
14629 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
14630 return rcStrictCommit;
14631
14632 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
14633 return rcStrict;
14634
14635 /* EM scheduling status codes. */
14636 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
14637 && rcStrict <= VINF_EM_LAST))
14638 {
14639 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
14640 && rcStrictCommit <= VINF_EM_LAST))
14641 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
14642 }
14643
14644 /* Unlikely */
14645 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
14646}
14647
14648
14649/**
14650 * Called by force-flag handling code when VMCPU_FF_IEM is set.
14651 *
14652 * @returns Merge between @a rcStrict and what the commit operation returned.
14653 * @param pVM The cross context VM structure.
14654 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14655 * @param rcStrict The status code returned by ring-0 or raw-mode.
14656 */
14657VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14658{
14659 /*
14660 * Reset the pending commit.
14661 */
14662 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
14663 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
14664 ("%#x %#x %#x\n",
14665 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14666 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
14667
14668 /*
14669 * Commit the pending bounce buffers (usually just one).
14670 */
14671 unsigned cBufs = 0;
14672 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
14673 while (iMemMap-- > 0)
14674 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
14675 {
14676 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
14677 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
14678 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
14679
14680 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
14681 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
14682 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
14683
14684 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
14685 {
14686 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
14687 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
14688 pbBuf,
14689 cbFirst,
14690 PGMACCESSORIGIN_IEM);
14691 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
14692 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
14693 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
14694 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
14695 }
14696
14697 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
14698 {
14699 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
14700 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
14701 pbBuf + cbFirst,
14702 cbSecond,
14703 PGMACCESSORIGIN_IEM);
14704 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
14705 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
14706 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
14707 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
14708 }
14709 cBufs++;
14710 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
14711 }
14712
14713 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
14714 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
14715 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
14716 pVCpu->iem.s.cActiveMappings = 0;
14717 return rcStrict;
14718}
14719
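/*
 * Illustrative sketch (not part of the original source): how ring-3 force-flag
 * processing might invoke IEMR3ProcessForceFlag once VMCPU_FF_IEM is seen after
 * returning from ring-0 or raw-mode. The helper name and the surrounding logic
 * are simplified for illustration.
 */
# if 0 /* example only */
static VBOXSTRICTRC iemExampleProcessPendingCommit(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (pVCpu->fLocalForcedActions & VMCPU_FF_IEM)  /* Pending bounce-buffer commit? */
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
# endif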
14720#endif /* IN_RING3 */
14721