VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 96247

Last change on this file was r96247, checked in by vboxsync, 3 years ago:

VMM/IEM: Start implementing floating point SSE instructions using addps, added some new infrastructure bits (mostly untested), bugref:9898

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 455.8 KB
1/* $Id: IEMAll.cpp 96247 2022-08-17 09:08:30Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
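 * As a rough illustration only (a sketch, not a verbatim caller), code in EM is
 * expected to hand the virtual CPU to IEM and propagate the strict status code,
 * e.g. through the single-instruction entry point:
 * @code
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict; // let the caller sort out informational statuses
 * @endcode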
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes (a usage sketch follows the list):
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow) : Basic enter/exit IEM state info.
65 * - Level 2 (Log2) : ?
66 * - Level 3 (Log3) : More detailed enter/exit IEM state info.
67 * - Level 4 (Log4) : Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5) : Decoding details.
69 * - Level 6 (Log6) : Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7) : iret++ execution logging.
71 * - Level 8 (Log8) : Memory writes.
72 * - Level 9 (Log9) : Memory reads.
73 * - Level 10 (Log10): TLBs.
74 * - Level 11 (Log11): Unmasked FPU exceptions.
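 *
 * The statements take the format string and its arguments in an extra set of
 * parentheses. A hedged sketch of what a couple of these levels might look like
 * in practice (the messages and variable names here are made up purely for
 * illustration):
 * @code
 *      Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));     // level 1: exceptions
 *      Log8(("IEM WR %RGv LB %#x\n", GCPtrMem, cbMem));     // level 8: memory writes
 *      Log9(("IEM RD %RGv LB %#x\n", GCPtrMem, cbMem));     // level 9: memory reads
 * @endcode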
75 */
76
77/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
78#ifdef _MSC_VER
79# pragma warning(disable:4505)
80#endif
81
82
83/*********************************************************************************************************************************
84* Header Files *
85*********************************************************************************************************************************/
86#define LOG_GROUP LOG_GROUP_IEM
87#define VMCPU_INCL_CPUM_GST_CTX
88#include <VBox/vmm/iem.h>
89#include <VBox/vmm/cpum.h>
90#include <VBox/vmm/apic.h>
91#include <VBox/vmm/pdm.h>
92#include <VBox/vmm/pgm.h>
93#include <VBox/vmm/iom.h>
94#include <VBox/vmm/em.h>
95#include <VBox/vmm/hm.h>
96#include <VBox/vmm/nem.h>
97#include <VBox/vmm/gim.h>
98#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
99# include <VBox/vmm/em.h>
100# include <VBox/vmm/hm_svm.h>
101#endif
102#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
103# include <VBox/vmm/hmvmxinline.h>
104#endif
105#include <VBox/vmm/tm.h>
106#include <VBox/vmm/dbgf.h>
107#include <VBox/vmm/dbgftrace.h>
108#include "IEMInternal.h"
109#include <VBox/vmm/vmcc.h>
110#include <VBox/log.h>
111#include <VBox/err.h>
112#include <VBox/param.h>
113#include <VBox/dis.h>
114#include <VBox/disopcode.h>
115#include <iprt/asm-math.h>
116#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
117# include <iprt/asm-amd64-x86.h>
118#elif defined(RT_ARCH_ARM64) || defined(RT_ARCH_ARM32)
119# include <iprt/asm-arm.h>
120#endif
121#include <iprt/assert.h>
122#include <iprt/string.h>
123#include <iprt/x86.h>
124
125#include "IEMInline.h"
126
127
128/*********************************************************************************************************************************
129* Structures and Typedefs *
130*********************************************************************************************************************************/
131/**
132 * CPU exception classes.
133 */
134typedef enum IEMXCPTCLASS
135{
136 IEMXCPTCLASS_BENIGN,
137 IEMXCPTCLASS_CONTRIBUTORY,
138 IEMXCPTCLASS_PAGE_FAULT,
139 IEMXCPTCLASS_DOUBLE_FAULT
140} IEMXCPTCLASS;
141
142
143/*********************************************************************************************************************************
144* Global Variables *
145*********************************************************************************************************************************/
146#if defined(IEM_LOG_MEMORY_WRITES)
147/** What IEM just wrote. */
148uint8_t g_abIemWrote[256];
149/** How much IEM just wrote. */
150size_t g_cbIemWrote;
151#endif
152
153
154/*********************************************************************************************************************************
155* Internal Functions *
156*********************************************************************************************************************************/
157static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
158 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT;
159
160
161/**
162 * Initializes the decoder state.
163 *
164 * iemReInitDecoder is mostly a copy of this function.
165 *
166 * @param pVCpu The cross context virtual CPU structure of the
167 * calling thread.
168 * @param fBypassHandlers Whether to bypass access handlers.
169 * @param fDisregardLock Whether to disregard the LOCK prefix.
170 */
171DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
172{
173 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
174 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
175 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
176 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
177 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
178 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
179 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
180 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
181 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
182 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
183
184 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
185 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
186 pVCpu->iem.s.enmCpuMode = enmMode;
187 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
188 pVCpu->iem.s.enmEffAddrMode = enmMode;
189 if (enmMode != IEMMODE_64BIT)
190 {
191 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
192 pVCpu->iem.s.enmEffOpSize = enmMode;
193 }
194 else
195 {
196 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
197 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
198 }
199 pVCpu->iem.s.fPrefixes = 0;
200 pVCpu->iem.s.uRexReg = 0;
201 pVCpu->iem.s.uRexB = 0;
202 pVCpu->iem.s.uRexIndex = 0;
203 pVCpu->iem.s.idxPrefix = 0;
204 pVCpu->iem.s.uVex3rdReg = 0;
205 pVCpu->iem.s.uVexLength = 0;
206 pVCpu->iem.s.fEvexStuff = 0;
207 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
208#ifdef IEM_WITH_CODE_TLB
209 pVCpu->iem.s.pbInstrBuf = NULL;
210 pVCpu->iem.s.offInstrNextByte = 0;
211 pVCpu->iem.s.offCurInstrStart = 0;
212# ifdef VBOX_STRICT
213 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
214 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
215 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
216# endif
217#else
218 pVCpu->iem.s.offOpcode = 0;
219 pVCpu->iem.s.cbOpcode = 0;
220#endif
221 pVCpu->iem.s.offModRm = 0;
222 pVCpu->iem.s.cActiveMappings = 0;
223 pVCpu->iem.s.iNextMapping = 0;
224 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
225 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
226 pVCpu->iem.s.fDisregardLock = fDisregardLock;
227
228#ifdef DBGFTRACE_ENABLED
229 switch (enmMode)
230 {
231 case IEMMODE_64BIT:
232 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
233 break;
234 case IEMMODE_32BIT:
235 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
236 break;
237 case IEMMODE_16BIT:
238 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
239 break;
240 }
241#endif
242}
243
244
245/**
246 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
247 *
248 * This is mostly a copy of iemInitDecoder.
249 *
250 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
251 */
252DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
253{
254 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
255 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
256 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
257 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
259 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
260 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
261 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
262 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
263
264 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
265 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
266 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
267 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
268 pVCpu->iem.s.enmEffAddrMode = enmMode;
269 if (enmMode != IEMMODE_64BIT)
270 {
271 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
272 pVCpu->iem.s.enmEffOpSize = enmMode;
273 }
274 else
275 {
276 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
277 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
278 }
279 pVCpu->iem.s.fPrefixes = 0;
280 pVCpu->iem.s.uRexReg = 0;
281 pVCpu->iem.s.uRexB = 0;
282 pVCpu->iem.s.uRexIndex = 0;
283 pVCpu->iem.s.idxPrefix = 0;
284 pVCpu->iem.s.uVex3rdReg = 0;
285 pVCpu->iem.s.uVexLength = 0;
286 pVCpu->iem.s.fEvexStuff = 0;
287 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
288#ifdef IEM_WITH_CODE_TLB
289 if (pVCpu->iem.s.pbInstrBuf)
290 {
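        /* Check whether the new RIP still falls inside the previously fetched
           instruction buffer; if it does we keep using the buffer, otherwise
           we drop it below and force a refetch. */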
291 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
292 - pVCpu->iem.s.uInstrBufPc;
293 if (off < pVCpu->iem.s.cbInstrBufTotal)
294 {
295 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
296 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
297 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
298 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
299 else
300 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
301 }
302 else
303 {
304 pVCpu->iem.s.pbInstrBuf = NULL;
305 pVCpu->iem.s.offInstrNextByte = 0;
306 pVCpu->iem.s.offCurInstrStart = 0;
307 pVCpu->iem.s.cbInstrBuf = 0;
308 pVCpu->iem.s.cbInstrBufTotal = 0;
309 }
310 }
311 else
312 {
313 pVCpu->iem.s.offInstrNextByte = 0;
314 pVCpu->iem.s.offCurInstrStart = 0;
315 pVCpu->iem.s.cbInstrBuf = 0;
316 pVCpu->iem.s.cbInstrBufTotal = 0;
317 }
318#else
319 pVCpu->iem.s.cbOpcode = 0;
320 pVCpu->iem.s.offOpcode = 0;
321#endif
322 pVCpu->iem.s.offModRm = 0;
323 Assert(pVCpu->iem.s.cActiveMappings == 0);
324 pVCpu->iem.s.iNextMapping = 0;
325 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
326 Assert(pVCpu->iem.s.fBypassHandlers == false);
327
328#ifdef DBGFTRACE_ENABLED
329 switch (enmMode)
330 {
331 case IEMMODE_64BIT:
332 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
333 break;
334 case IEMMODE_32BIT:
335 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
336 break;
337 case IEMMODE_16BIT:
338 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
339 break;
340 }
341#endif
342}
343
344
345
346/**
347 * Prefetches opcodes the first time execution is started.
348 *
349 * @returns Strict VBox status code.
350 * @param pVCpu The cross context virtual CPU structure of the
351 * calling thread.
352 * @param fBypassHandlers Whether to bypass access handlers.
353 * @param fDisregardLock Whether to disregard LOCK prefixes.
354 *
355 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
356 * store them as such.
357 */
358static VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock) RT_NOEXCEPT
359{
360 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
361
362#ifdef IEM_WITH_CODE_TLB
363 /** @todo Do ITLB lookup here. */
364
365#else /* !IEM_WITH_CODE_TLB */
366
367 /*
368 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
369 *
370 * First translate CS:rIP to a physical address.
371 */
372 uint32_t cbToTryRead;
373 RTGCPTR GCPtrPC;
374 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
375 {
376 cbToTryRead = GUEST_PAGE_SIZE;
377 GCPtrPC = pVCpu->cpum.GstCtx.rip;
378 if (IEM_IS_CANONICAL(GCPtrPC))
379 cbToTryRead = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
380 else
381 return iemRaiseGeneralProtectionFault0(pVCpu);
382 }
383 else
384 {
385 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
386 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
387 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
388 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
389 else
390 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
391 if (cbToTryRead) { /* likely */ }
392 else /* overflowed */
393 {
394 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
395 cbToTryRead = UINT32_MAX;
396 }
397 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
398 Assert(GCPtrPC <= UINT32_MAX);
399 }
400
401 PGMPTWALK Walk;
402 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
403 if (RT_SUCCESS(rc))
404 Assert(Walk.fSucceeded); /* probable. */
405 else
406 {
407 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
408#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
409 if (Walk.fFailed & PGM_WALKFAIL_EPT)
410 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
411#endif
412 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
413 }
414 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
415 else
416 {
417 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
418#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
419 if (Walk.fFailed & PGM_WALKFAIL_EPT)
420 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
421#endif
422 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
423 }
424 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
425 else
426 {
427 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
428#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
429 if (Walk.fFailed & PGM_WALKFAIL_EPT)
430 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
431#endif
432 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
433 }
434 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
435 /** @todo Check reserved bits and such stuff. PGM is better at doing
436 * that, so do it when implementing the guest virtual address
437 * TLB... */
438
439 /*
440 * Read the bytes at this address.
441 */
442 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrPC & GUEST_PAGE_OFFSET_MASK);
443 if (cbToTryRead > cbLeftOnPage)
444 cbToTryRead = cbLeftOnPage;
445 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
446 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
447
448 if (!pVCpu->iem.s.fBypassHandlers)
449 {
450 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
451 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
452 { /* likely */ }
453 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
454 {
455 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
456 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
457 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
458 }
459 else
460 {
461 Log((RT_SUCCESS(rcStrict)
462 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
463 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
464 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
465 return rcStrict;
466 }
467 }
468 else
469 {
470 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
471 if (RT_SUCCESS(rc))
472 { /* likely */ }
473 else
474 {
475 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
476 GCPtrPC, GCPhys, cbToTryRead, rc));
477 return rc;
478 }
479 }
480 pVCpu->iem.s.cbOpcode = cbToTryRead;
481#endif /* !IEM_WITH_CODE_TLB */
482 return VINF_SUCCESS;
483}
484
485
486/**
487 * Invalidates the IEM TLBs.
488 *
489 * This is called internally as well as by PGM when moving GC mappings.
490 *
492 * @param pVCpu The cross context virtual CPU structure of the calling
493 * thread.
494 */
495VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu)
496{
497#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
498 Log10(("IEMTlbInvalidateAll\n"));
499# ifdef IEM_WITH_CODE_TLB
500 pVCpu->iem.s.cbInstrBufTotal = 0;
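    /* Bumping the revision instantly invalidates every entry, since the entry
       tags embed the revision value; only on the rare wrap-around to zero do we
       actually sweep the table and clear the tags. */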
501 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
502 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
503 { /* very likely */ }
504 else
505 {
506 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
507 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
508 while (i-- > 0)
509 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
510 }
511# endif
512
513# ifdef IEM_WITH_DATA_TLB
514 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
515 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
516 { /* very likely */ }
517 else
518 {
519 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
520 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
521 while (i-- > 0)
522 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
523 }
524# endif
525#else
526 RT_NOREF(pVCpu);
527#endif
528}
529
530
531/**
532 * Invalidates a page in the TLBs.
533 *
534 * @param pVCpu The cross context virtual CPU structure of the calling
535 * thread.
536 * @param GCPtr The address of the page to invalidate
537 * @thread EMT(pVCpu)
538 */
539VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
540{
541#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
542 Log10(("IEMTlbInvalidatePage: GCPtr=%RGv\n", GCPtr));
543 GCPtr = IEMTLB_CALC_TAG_NO_REV(GCPtr);
544 Assert(!(GCPtr >> (48 - X86_PAGE_SHIFT)));
545 uintptr_t const idx = IEMTLB_TAG_TO_INDEX(GCPtr);
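    /* The TLBs are direct mapped, so at most one entry per TLB can hold this page. */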
546
547# ifdef IEM_WITH_CODE_TLB
548 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
549 {
550 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
551 if (GCPtr == IEMTLB_CALC_TAG_NO_REV(pVCpu->iem.s.uInstrBufPc))
552 pVCpu->iem.s.cbInstrBufTotal = 0;
553 }
554# endif
555
556# ifdef IEM_WITH_DATA_TLB
557 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
558 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
559# endif
560#else
561 NOREF(pVCpu); NOREF(GCPtr);
562#endif
563}
564
565
566#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
567/**
568 * Invalidates both TLBs the slow way following a physical revision rollover.
569 *
570 * Worker for IEMTlbInvalidateAllPhysical,
571 * IEMTlbInvalidateAllPhysicalAllCpus, iemOpcodeFetchBytesJmp, iemMemMap,
572 * iemMemMapJmp and others.
573 *
574 * @thread EMT(pVCpu)
575 */
576static void IEMTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu)
577{
578 Log10(("IEMTlbInvalidateAllPhysicalSlow\n"));
579 ASMAtomicWriteU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
580 ASMAtomicWriteU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, IEMTLB_PHYS_REV_INCR * 2);
581
582 unsigned i;
583# ifdef IEM_WITH_CODE_TLB
584 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
585 while (i-- > 0)
586 {
587 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
588 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
589 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
590 }
591# endif
592# ifdef IEM_WITH_DATA_TLB
593 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
594 while (i-- > 0)
595 {
596 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
597 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~( IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ
598 | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PHYS_REV);
599 }
600# endif
601
602}
603#endif
604
605
606/**
607 * Invalidates the host physical aspects of the IEM TLBs.
608 *
609 * This is called internally as well as by PGM when moving GC mappings.
610 *
611 * @param pVCpu The cross context virtual CPU structure of the calling
612 * thread.
613 * @note Currently not used.
614 */
615VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
616{
617#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
618 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
619 Log10(("IEMTlbInvalidateAllPhysical\n"));
620
621# ifdef IEM_WITH_CODE_TLB
622 pVCpu->iem.s.cbInstrBufTotal = 0;
623# endif
624 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
625 if (RT_LIKELY(uTlbPhysRev > IEMTLB_PHYS_REV_INCR * 2))
626 {
627 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
628 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
629 }
630 else
631 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
632#else
633 NOREF(pVCpu);
634#endif
635}
636
637
638/**
639 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
640 *
641 * This is called internally as well as by PGM when moving GC mappings.
642 *
643 * @param pVM The cross context VM structure.
644 * @param idCpuCaller The ID of the calling EMT if available to the caller,
645 * otherwise NIL_VMCPUID.
646 *
647 * @remarks Caller holds the PGM lock.
648 */
649VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVMCC pVM, VMCPUID idCpuCaller)
650{
651#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
652 PVMCPUCC const pVCpuCaller = idCpuCaller >= pVM->cCpus ? VMMGetCpu(pVM) : VMMGetCpuById(pVM, idCpuCaller);
653 if (pVCpuCaller)
654 VMCPU_ASSERT_EMT(pVCpuCaller);
655 Log10(("IEMTlbInvalidateAllPhysicalAllCpus\n"));
656
657 VMCC_FOR_EACH_VMCPU(pVM)
658 {
659# ifdef IEM_WITH_CODE_TLB
660 if (pVCpuCaller == pVCpu)
661 pVCpu->iem.s.cbInstrBufTotal = 0;
662# endif
663
664 uint64_t const uTlbPhysRevPrev = ASMAtomicUoReadU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev);
665 uint64_t uTlbPhysRevNew = uTlbPhysRevPrev + IEMTLB_PHYS_REV_INCR;
666 if (RT_LIKELY(uTlbPhysRevNew > IEMTLB_PHYS_REV_INCR * 2))
667 { /* likely */}
668 else if (pVCpuCaller == pVCpu)
669 uTlbPhysRevNew = IEMTLB_PHYS_REV_INCR;
670 else
671 {
672 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
673 continue;
674 }
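        /* Publish the new revision for the remote EMT; if that EMT changed the
           value itself in the meantime, the compare-exchange simply does nothing. */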
675 ASMAtomicCmpXchgU64(&pVCpu->iem.s.CodeTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
676 ASMAtomicCmpXchgU64(&pVCpu->iem.s.DataTlb.uTlbPhysRev, uTlbPhysRevNew, uTlbPhysRevPrev);
677 }
678 VMCC_FOR_EACH_VMCPU_END(pVM);
679
680#else
681 RT_NOREF(pVM, idCpuCaller);
682#endif
683}
684
685#ifdef IEM_WITH_CODE_TLB
686
687/**
688 * Tries to fetch @a cbDst opcode bytes, raises the appropriate exception on
689 * failure and longjmps.
690 *
691 * We end up here for a number of reasons:
692 * - pbInstrBuf isn't yet initialized.
693 * - Advancing beyond the buffer boundary (e.g. cross page).
694 * - Advancing beyond the CS segment limit.
695 * - Fetching from non-mappable page (e.g. MMIO).
696 *
697 * @param pVCpu The cross context virtual CPU structure of the
698 * calling thread.
699 * @param pvDst Where to return the bytes.
700 * @param cbDst Number of bytes to read.
701 *
702 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
703 */
704void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst) RT_NOEXCEPT
705{
706#ifdef IN_RING3
707 for (;;)
708 {
709 Assert(cbDst <= 8);
710 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
711
712 /*
713 * We might have a partial buffer match, deal with that first to make the
714 * rest simpler. This is the first part of the cross page/buffer case.
715 */
716 if (pVCpu->iem.s.pbInstrBuf != NULL)
717 {
718 if (offBuf < pVCpu->iem.s.cbInstrBuf)
719 {
720 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
721 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
722 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
723
724 cbDst -= cbCopy;
725 pvDst = (uint8_t *)pvDst + cbCopy;
726 offBuf += cbCopy;
727 pVCpu->iem.s.offInstrNextByte += cbCopy;
728 }
729 }
730
731 /*
732 * Check segment limit, figuring how much we're allowed to access at this point.
733 *
734 * We will fault immediately if RIP is past the segment limit / in non-canonical
735 * territory. If we do continue, there are one or more bytes to read before we
736 * end up in trouble and we need to do that first before faulting.
737 */
738 RTGCPTR GCPtrFirst;
739 uint32_t cbMaxRead;
740 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
741 {
742 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
743 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
744 { /* likely */ }
745 else
746 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
747 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
748 }
749 else
750 {
751 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
752 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
753 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
754 { /* likely */ }
755 else
756 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
757 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
758 if (cbMaxRead != 0)
759 { /* likely */ }
760 else
761 {
762 /* Overflowed because address is 0 and limit is max. */
763 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
764 cbMaxRead = X86_PAGE_SIZE;
765 }
766 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
767 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
768 if (cbMaxRead2 < cbMaxRead)
769 cbMaxRead = cbMaxRead2;
770 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
771 }
772
773 /*
774 * Get the TLB entry for this piece of code.
775 */
776 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.CodeTlb, GCPtrFirst);
777 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.CodeTlb, uTag);
778 if (pTlbe->uTag == uTag)
779 {
780 /* likely when executing lots of code, otherwise unlikely */
781# ifdef VBOX_WITH_STATISTICS
782 pVCpu->iem.s.CodeTlb.cTlbHits++;
783# endif
784 }
785 else
786 {
787 pVCpu->iem.s.CodeTlb.cTlbMisses++;
788 PGMPTWALK Walk;
789 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
790 if (RT_FAILURE(rc))
791 {
792#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
793 /** @todo Nested VMX: Need to handle EPT violation/misconfig here? */
794 Assert(!(Walk.fFailed & PGM_WALKFAIL_EPT));
795#endif
796 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
797 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
798 }
799
800 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
801 Assert(Walk.fSucceeded);
802 pTlbe->uTag = uTag;
803 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A))
804 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
805 pTlbe->GCPhys = Walk.GCPhys;
806 pTlbe->pbMappingR3 = NULL;
807 }
808
809 /*
810 * Check TLB page table level access flags.
811 */
812 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
813 {
814 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
815 {
816 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
817 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
818 }
819 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
820 {
821 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
822 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
823 }
824 }
825
826 /*
827 * Look up the physical page info if necessary.
828 */
829 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
830 { /* not necessary */ }
831 else
832 {
833 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
834 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
835 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
836 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
837 if (RT_LIKELY(pVCpu->iem.s.CodeTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
838 { /* likely */ }
839 else
840 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
841 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
842 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
843 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
844 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
845 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
846 }
847
848# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
849 /*
850 * Try do a direct read using the pbMappingR3 pointer.
851 */
852 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
853 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
854 {
855 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
856 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
857 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
858 {
859 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
860 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
861 }
862 else
863 {
864 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
865 Assert(cbInstr < cbMaxRead);
866 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
867 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
868 }
869 if (cbDst <= cbMaxRead)
870 {
871 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
872 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
873 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
874 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
875 return;
876 }
877 pVCpu->iem.s.pbInstrBuf = NULL;
878
879 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
880 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
881 }
882 else
883# endif
884#if 0
885 /*
886 * If there is no special read handling, we can read a bit more and
887 * put it in the prefetch buffer.
888 */
889 if ( cbDst < cbMaxRead
890 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
891 {
892 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
893 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
894 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
895 { /* likely */ }
896 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
897 {
898 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
899 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
900 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
901 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
902 }
903 else
904 {
905 Log((RT_SUCCESS(rcStrict)
906 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
907 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
908 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
909 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
910 }
911 }
912 /*
913 * Special read handling, so only read exactly what's needed.
914 * This is a highly unlikely scenario.
915 */
916 else
917#endif
918 {
919 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
920 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
921 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
922 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
923 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
924 { /* likely */ }
925 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
926 {
927 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
928 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
929 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
930 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
931 }
932 else
933 {
934 Log((RT_SUCCESS(rcStrict)
935 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
936 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
937 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
938 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
939 }
940 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
941 if (cbToRead == cbDst)
942 return;
943 }
944
945 /*
946 * More to read, loop.
947 */
948 cbDst -= cbMaxRead;
949 pvDst = (uint8_t *)pvDst + cbMaxRead;
950 }
951#else
952 RT_NOREF(pvDst, cbDst);
953 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
954#endif
955}
956
957#else
958
959/**
960 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
961 * exception if it fails.
962 *
963 * @returns Strict VBox status code.
964 * @param pVCpu The cross context virtual CPU structure of the
965 * calling thread.
966 * @param cbMin The minimum number of bytes relative to offOpcode
967 * that must be read.
968 */
969VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin) RT_NOEXCEPT
970{
971 /*
972 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
973 *
974 * First translate CS:rIP to a physical address.
975 */
976 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
977 uint32_t cbToTryRead;
978 RTGCPTR GCPtrNext;
979 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
980 {
981 cbToTryRead = GUEST_PAGE_SIZE;
982 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
983 if (!IEM_IS_CANONICAL(GCPtrNext))
984 return iemRaiseGeneralProtectionFault0(pVCpu);
985 }
986 else
987 {
988 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
989 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
990 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
991 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
992 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
993 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
994 if (!cbToTryRead) /* overflowed */
995 {
996 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
997 cbToTryRead = UINT32_MAX;
998 /** @todo check out wrapping around the code segment. */
999 }
1000 if (cbToTryRead < cbMin - cbLeft)
1001 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1002 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1003 }
1004
1005 /* Only read up to the end of the page, and make sure we don't read more
1006 than the opcode buffer can hold. */
1007 uint32_t cbLeftOnPage = GUEST_PAGE_SIZE - (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1008 if (cbToTryRead > cbLeftOnPage)
1009 cbToTryRead = cbLeftOnPage;
1010 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1011 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1012/** @todo r=bird: Convert assertion into undefined opcode exception? */
1013 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1014
1015 PGMPTWALK Walk;
1016 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1017 if (RT_FAILURE(rc))
1018 {
1019 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1020#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1021 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1022 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1023#endif
1024 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1025 }
1026 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1027 {
1028 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1029#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1030 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1031 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1032#endif
1033 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1034 }
1035 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1036 {
1037 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1038#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1039 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1040 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
1041#endif
1042 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1043 }
1044 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & GUEST_PAGE_OFFSET_MASK);
1045 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1046 /** @todo Check reserved bits and such stuff. PGM is better at doing
1047 * that, so do it when implementing the guest virtual address
1048 * TLB... */
1049
1050 /*
1051 * Read the bytes at this address.
1052 *
1053 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1054 * and since PATM should only patch the start of an instruction there
1055 * should be no need to check again here.
1056 */
1057 if (!pVCpu->iem.s.fBypassHandlers)
1058 {
1059 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1060 cbToTryRead, PGMACCESSORIGIN_IEM);
1061 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1062 { /* likely */ }
1063 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1064 {
1065 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1066 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1067 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1068 }
1069 else
1070 {
1071 Log((RT_SUCCESS(rcStrict)
1072 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1073 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1074 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1075 return rcStrict;
1076 }
1077 }
1078 else
1079 {
1080 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
1081 if (RT_SUCCESS(rc))
1082 { /* likely */ }
1083 else
1084 {
1085 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
1086 return rc;
1087 }
1088 }
1089 pVCpu->iem.s.cbOpcode += cbToTryRead;
1090 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
1091
1092 return VINF_SUCCESS;
1093}
1094
1095#endif /* !IEM_WITH_CODE_TLB */
1096#ifndef IEM_WITH_SETJMP
1097
1098/**
1099 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
1100 *
1101 * @returns Strict VBox status code.
1102 * @param pVCpu The cross context virtual CPU structure of the
1103 * calling thread.
1104 * @param pb Where to return the opcode byte.
1105 */
1106VBOXSTRICTRC iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb) RT_NOEXCEPT
1107{
1108 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1109 if (rcStrict == VINF_SUCCESS)
1110 {
1111 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1112 *pb = pVCpu->iem.s.abOpcode[offOpcode];
1113 pVCpu->iem.s.offOpcode = offOpcode + 1;
1114 }
1115 else
1116 *pb = 0;
1117 return rcStrict;
1118}
1119
1120#else /* IEM_WITH_SETJMP */
1121
1122/**
1123 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
1124 *
1125 * @returns The opcode byte.
1126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1127 */
1128uint8_t iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1129{
1130# ifdef IEM_WITH_CODE_TLB
1131 uint8_t u8;
1132 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
1133 return u8;
1134# else
1135 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
1136 if (rcStrict == VINF_SUCCESS)
1137 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
1138 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1139# endif
1140}
1141
1142#endif /* IEM_WITH_SETJMP */
1143
1144#ifndef IEM_WITH_SETJMP
1145
1146/**
1147 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
1148 *
1149 * @returns Strict VBox status code.
1150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1151 * @param pu16 Where to return the sign-extended opcode word.
1152 */
1153VBOXSTRICTRC iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1154{
1155 uint8_t u8;
1156 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1157 if (rcStrict == VINF_SUCCESS)
1158 *pu16 = (int8_t)u8;
1159 return rcStrict;
1160}
1161
1162
1163/**
1164 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
1165 *
1166 * @returns Strict VBox status code.
1167 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1168 * @param pu32 Where to return the opcode dword.
1169 */
1170VBOXSTRICTRC iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1171{
1172 uint8_t u8;
1173 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1174 if (rcStrict == VINF_SUCCESS)
1175 *pu32 = (int8_t)u8;
1176 return rcStrict;
1177}
1178
1179
1180/**
1181 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
1182 *
1183 * @returns Strict VBox status code.
1184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1185 * @param pu64 Where to return the opcode qword.
1186 */
1187VBOXSTRICTRC iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1188{
1189 uint8_t u8;
1190 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
1191 if (rcStrict == VINF_SUCCESS)
1192 *pu64 = (int8_t)u8;
1193 return rcStrict;
1194}
1195
1196#endif /* !IEM_WITH_SETJMP */
1197
1198
1199#ifndef IEM_WITH_SETJMP
1200
1201/**
1202 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
1203 *
1204 * @returns Strict VBox status code.
1205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1206 * @param pu16 Where to return the opcode word.
1207 */
1208VBOXSTRICTRC iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16) RT_NOEXCEPT
1209{
1210 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1211 if (rcStrict == VINF_SUCCESS)
1212 {
1213 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1214# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1215 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1216# else
1217 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1218# endif
1219 pVCpu->iem.s.offOpcode = offOpcode + 2;
1220 }
1221 else
1222 *pu16 = 0;
1223 return rcStrict;
1224}
1225
1226#else /* IEM_WITH_SETJMP */
1227
1228/**
1229 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
1230 *
1231 * @returns The opcode word.
1232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1233 */
1234uint16_t iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1235{
1236# ifdef IEM_WITH_CODE_TLB
1237 uint16_t u16;
1238 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
1239 return u16;
1240# else
1241 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1242 if (rcStrict == VINF_SUCCESS)
1243 {
1244 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1245 pVCpu->iem.s.offOpcode += 2;
1246# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1247 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1248# else
1249 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1250# endif
1251 }
1252 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1253# endif
1254}
1255
1256#endif /* IEM_WITH_SETJMP */
1257
1258#ifndef IEM_WITH_SETJMP
1259
1260/**
1261 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
1262 *
1263 * @returns Strict VBox status code.
1264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1265 * @param pu32 Where to return the opcode double word.
1266 */
1267VBOXSTRICTRC iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1268{
1269 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1270 if (rcStrict == VINF_SUCCESS)
1271 {
1272 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1273 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1274 pVCpu->iem.s.offOpcode = offOpcode + 2;
1275 }
1276 else
1277 *pu32 = 0;
1278 return rcStrict;
1279}
1280
1281
1282/**
1283 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
1284 *
1285 * @returns Strict VBox status code.
1286 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1287 * @param pu64 Where to return the opcode quad word.
1288 */
1289VBOXSTRICTRC iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1290{
1291 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
1292 if (rcStrict == VINF_SUCCESS)
1293 {
1294 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1295 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
1296 pVCpu->iem.s.offOpcode = offOpcode + 2;
1297 }
1298 else
1299 *pu64 = 0;
1300 return rcStrict;
1301}
1302
1303#endif /* !IEM_WITH_SETJMP */
1304
1305#ifndef IEM_WITH_SETJMP
1306
1307/**
1308 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
1309 *
1310 * @returns Strict VBox status code.
1311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1312 * @param pu32 Where to return the opcode dword.
1313 */
1314VBOXSTRICTRC iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32) RT_NOEXCEPT
1315{
1316 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1317 if (rcStrict == VINF_SUCCESS)
1318 {
1319 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1320# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1321 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1322# else
1323 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1324 pVCpu->iem.s.abOpcode[offOpcode + 1],
1325 pVCpu->iem.s.abOpcode[offOpcode + 2],
1326 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1327# endif
1328 pVCpu->iem.s.offOpcode = offOpcode + 4;
1329 }
1330 else
1331 *pu32 = 0;
1332 return rcStrict;
1333}
1334
1335#else /* IEM_WITH_SETJMP */
1336
1337/**
1338 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
1339 *
1340 * @returns The opcode dword.
1341 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1342 */
1343uint32_t iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1344{
1345# ifdef IEM_WITH_CODE_TLB
1346 uint32_t u32;
1347 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
1348 return u32;
1349# else
1350 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1351 if (rcStrict == VINF_SUCCESS)
1352 {
1353 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1354 pVCpu->iem.s.offOpcode = offOpcode + 4;
1355# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1356 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1357# else
1358 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1359 pVCpu->iem.s.abOpcode[offOpcode + 1],
1360 pVCpu->iem.s.abOpcode[offOpcode + 2],
1361 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1362# endif
1363 }
1364 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1365# endif
1366}
1367
1368#endif /* IEM_WITH_SETJMP */
1369
1370#ifndef IEM_WITH_SETJMP
1371
1372/**
1373 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
1374 *
1375 * @returns Strict VBox status code.
1376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1377 * @param pu64 Where to return the opcode dword.
1378 */
1379VBOXSTRICTRC iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1380{
1381 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1382 if (rcStrict == VINF_SUCCESS)
1383 {
1384 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1385 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1386 pVCpu->iem.s.abOpcode[offOpcode + 1],
1387 pVCpu->iem.s.abOpcode[offOpcode + 2],
1388 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1389 pVCpu->iem.s.offOpcode = offOpcode + 4;
1390 }
1391 else
1392 *pu64 = 0;
1393 return rcStrict;
1394}
1395
1396
1397/**
1398 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
1399 *
1400 * @returns Strict VBox status code.
1401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1402 * @param pu64 Where to return the opcode qword.
1403 */
1404VBOXSTRICTRC iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1405{
1406 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
1407 if (rcStrict == VINF_SUCCESS)
1408 {
1409 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1410 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1411 pVCpu->iem.s.abOpcode[offOpcode + 1],
1412 pVCpu->iem.s.abOpcode[offOpcode + 2],
1413 pVCpu->iem.s.abOpcode[offOpcode + 3]);
1414 pVCpu->iem.s.offOpcode = offOpcode + 4;
1415 }
1416 else
1417 *pu64 = 0;
1418 return rcStrict;
1419}
1420
1421#endif /* !IEM_WITH_SETJMP */
1422
1423#ifndef IEM_WITH_SETJMP
1424
1425/**
1426 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
1427 *
1428 * @returns Strict VBox status code.
1429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1430 * @param pu64 Where to return the opcode qword.
1431 */
1432VBOXSTRICTRC iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64) RT_NOEXCEPT
1433{
1434 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1435 if (rcStrict == VINF_SUCCESS)
1436 {
1437 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1438# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1439 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1440# else
1441 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1442 pVCpu->iem.s.abOpcode[offOpcode + 1],
1443 pVCpu->iem.s.abOpcode[offOpcode + 2],
1444 pVCpu->iem.s.abOpcode[offOpcode + 3],
1445 pVCpu->iem.s.abOpcode[offOpcode + 4],
1446 pVCpu->iem.s.abOpcode[offOpcode + 5],
1447 pVCpu->iem.s.abOpcode[offOpcode + 6],
1448 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1449# endif
1450 pVCpu->iem.s.offOpcode = offOpcode + 8;
1451 }
1452 else
1453 *pu64 = 0;
1454 return rcStrict;
1455}
1456
1457#else /* IEM_WITH_SETJMP */
1458
1459/**
1460 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
1461 *
1462 * @returns The opcode qword.
1463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1464 */
1465uint64_t iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
1466{
1467# ifdef IEM_WITH_CODE_TLB
1468 uint64_t u64;
1469 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
1470 return u64;
1471# else
1472 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
1473 if (rcStrict == VINF_SUCCESS)
1474 {
1475 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
1476 pVCpu->iem.s.offOpcode = offOpcode + 8;
1477# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
1478 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
1479# else
1480 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
1481 pVCpu->iem.s.abOpcode[offOpcode + 1],
1482 pVCpu->iem.s.abOpcode[offOpcode + 2],
1483 pVCpu->iem.s.abOpcode[offOpcode + 3],
1484 pVCpu->iem.s.abOpcode[offOpcode + 4],
1485 pVCpu->iem.s.abOpcode[offOpcode + 5],
1486 pVCpu->iem.s.abOpcode[offOpcode + 6],
1487 pVCpu->iem.s.abOpcode[offOpcode + 7]);
1488# endif
1489 }
1490 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1491# endif
1492}
1493
1494#endif /* IEM_WITH_SETJMP */
1495
1496
1497
1498/** @name Misc Worker Functions.
1499 * @{
1500 */
1501
1502/**
1503 * Gets the exception class for the specified exception vector.
1504 *
1505 * @returns The class of the specified exception.
1506 * @param uVector The exception vector.
1507 */
1508static IEMXCPTCLASS iemGetXcptClass(uint8_t uVector) RT_NOEXCEPT
1509{
1510 Assert(uVector <= X86_XCPT_LAST);
1511 switch (uVector)
1512 {
1513 case X86_XCPT_DE:
1514 case X86_XCPT_TS:
1515 case X86_XCPT_NP:
1516 case X86_XCPT_SS:
1517 case X86_XCPT_GP:
1518 case X86_XCPT_SX: /* AMD only */
1519 return IEMXCPTCLASS_CONTRIBUTORY;
1520
1521 case X86_XCPT_PF:
1522 case X86_XCPT_VE: /* Intel only */
1523 return IEMXCPTCLASS_PAGE_FAULT;
1524
1525 case X86_XCPT_DF:
1526 return IEMXCPTCLASS_DOUBLE_FAULT;
1527 }
1528 return IEMXCPTCLASS_BENIGN;
1529}
1530
1531
1532/**
1533 * Evaluates how to handle an exception caused during delivery of another event
1534 * (exception / interrupt).
1535 *
1536 * @returns How to handle the recursive exception.
1537 * @param pVCpu The cross context virtual CPU structure of the
1538 * calling thread.
1539 * @param fPrevFlags The flags of the previous event.
1540 * @param uPrevVector The vector of the previous event.
1541 * @param fCurFlags The flags of the current exception.
1542 * @param uCurVector The vector of the current exception.
1543 * @param pfXcptRaiseInfo Where to store additional information about the
1544 * exception condition. Optional.
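 *
 * @remarks For illustration: a \#GP raised while delivering a \#GP is folded
 *          into a \#DF, whereas a \#PF raised while delivering an external
 *          interrupt remains the current exception, with
 *          IEMXCPTRAISEINFO_EXT_INT_PF noted in the raise info.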
1545 */
1546VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
1547 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
1548{
1549 /*
1550 * Only CPU exceptions can be raised while delivering other events, software interrupt
1551 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
1552 */
1553 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
1554 Assert(pVCpu); RT_NOREF(pVCpu);
1555 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
1556
1557 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
1558 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
1559 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1560 {
1561 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
1562 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
1563 {
1564 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
1565 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
1566 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
1567 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
1568 {
1569 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1570 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
1571 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
1572 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
1573 uCurVector, pVCpu->cpum.GstCtx.cr2));
1574 }
1575 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1576 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
1577 {
1578 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
1579 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
1580 }
1581 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
1582 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
1583 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
1584 {
1585 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
1586 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
1587 }
1588 }
1589 else
1590 {
1591 if (uPrevVector == X86_XCPT_NMI)
1592 {
1593 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
1594 if (uCurVector == X86_XCPT_PF)
1595 {
1596 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
1597 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
1598 }
1599 }
1600 else if ( uPrevVector == X86_XCPT_AC
1601 && uCurVector == X86_XCPT_AC)
1602 {
1603 enmRaise = IEMXCPTRAISE_CPU_HANG;
1604 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
1605 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
1606 }
1607 }
1608 }
1609 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
1610 {
1611 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
1612 if (uCurVector == X86_XCPT_PF)
1613 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
1614 }
1615 else
1616 {
1617 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
1618 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
1619 }
1620
1621 if (pfXcptRaiseInfo)
1622 *pfXcptRaiseInfo = fRaiseInfo;
1623 return enmRaise;
1624}
1625
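/*
 * Illustrative sketch, compiled out: the escalation logic above boils down to the
 * classic benign/contributory/page-fault table from the Intel SDM.  The helper
 * below is a simplified restatement of that decision; the XCPTEXCLASS enum and
 * the function name are made up for illustration only.
 */
#if 0
typedef enum XCPTEXCLASS
{
    XCPTEXCLASS_BENIGN,
    XCPTEXCLASS_CONTRIBUTORY,
    XCPTEXCLASS_PAGE_FAULT,
    XCPTEXCLASS_DOUBLE_FAULT
} XCPTEXCLASS;

/** Returns true when delivering an exception of class enmCur while delivering one
 *  of class enmPrev should escalate to a double fault. */
static bool xcptExampleEscalatesToDoubleFault(XCPTEXCLASS enmPrev, XCPTEXCLASS enmCur)
{
    if (enmPrev == XCPTEXCLASS_PAGE_FAULT)      /* #PF then #PF or contributory -> #DF. */
        return enmCur == XCPTEXCLASS_PAGE_FAULT || enmCur == XCPTEXCLASS_CONTRIBUTORY;
    if (enmPrev == XCPTEXCLASS_CONTRIBUTORY)    /* Contributory then contributory -> #DF. */
        return enmCur == XCPTEXCLASS_CONTRIBUTORY;
    /* Benign previous events never escalate; a fault while delivering #DF is a
       triple fault and is handled separately above. */
    return false;
}
#endif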
1626
1627/**
1628 * Enters the CPU shutdown state initiated by a triple fault or other
1629 * unrecoverable conditions.
1630 *
1631 * @returns Strict VBox status code.
1632 * @param pVCpu The cross context virtual CPU structure of the
1633 * calling thread.
1634 */
1635static VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu) RT_NOEXCEPT
1636{
1637 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
1638 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
1639
1640 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
1641 {
1642 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
1643 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
1644 }
1645
1646 RT_NOREF(pVCpu);
1647 return VINF_EM_TRIPLE_FAULT;
1648}
1649
1650
1651/**
1652 * Validates a new SS segment.
1653 *
1654 * @returns VBox strict status code.
1655 * @param pVCpu The cross context virtual CPU structure of the
1656 * calling thread.
1657 * @param NewSS The new SS selector.
1658 * @param uCpl The CPL to load the stack for.
1659 * @param pDesc Where to return the descriptor.
1660 */
1661static VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc) RT_NOEXCEPT
1662{
1663 /* Null selectors are not allowed (we're not called for dispatching
1664 interrupts with SS=0 in long mode). */
1665 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
1666 {
1667 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
1668 return iemRaiseTaskSwitchFault0(pVCpu);
1669 }
1670
1671 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
1672 if ((NewSS & X86_SEL_RPL) != uCpl)
1673 {
1674 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
1675 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1676 }
1677
1678 /*
1679 * Read the descriptor.
1680 */
1681 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
1682 if (rcStrict != VINF_SUCCESS)
1683 return rcStrict;
1684
1685 /*
1686 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
1687 */
1688 if (!pDesc->Legacy.Gen.u1DescType)
1689 {
1690 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1691 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1692 }
1693
1694 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1695 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1696 {
1697 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
1698 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1699 }
1700 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
1701 {
1702 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
1703 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
1704 }
1705
1706 /* Is it there? */
1707 /** @todo testcase: Is this checked before the canonical / limit check below? */
1708 if (!pDesc->Legacy.Gen.u1Present)
1709 {
1710 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
1711 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
1712 }
1713
1714 return VINF_SUCCESS;
1715}
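/*
 * Illustrative sketch, compiled out: once the null-selector check above has passed,
 * the remaining SS validation reduces to a handful of bit tests on the descriptor.
 * Bit numbers follow the usual x86 descriptor type layout (bit 3 = code, bit 1 =
 * writable for data segments); the helper name is made up for illustration only.
 */
#if 0
/** Returns true if a descriptor with these fields would pass the SS checks above. */
static bool ssExampleIsLoadable(bool fCodeOrData, uint8_t u4Type, uint8_t uDpl, bool fPresent,
                                uint8_t uRpl, uint8_t uCpl)
{
    if (uRpl != uCpl)               /* The selector RPL must equal the CPL being loaded. */
        return false;
    if (!fCodeOrData)               /* Must be a code/data descriptor, not a system one. */
        return false;
    if (u4Type & 8 /* code */)      /* Code segments cannot be loaded into SS...         */
        return false;
    if (!(u4Type & 2 /* write */))  /* ...and the data segment must be writable.         */
        return false;
    if (uDpl != uCpl)               /* The descriptor DPL must match the CPL.            */
        return false;
    return fPresent;                /* Finally, the segment must be present.             */
}
#endif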
1716
1717/** @} */
1718
1719
1720/** @name Raising Exceptions.
1721 *
1722 * @{
1723 */
1724
1725
1726/**
1727 * Loads the specified stack far pointer from the TSS.
1728 *
1729 * @returns VBox strict status code.
1730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1731 * @param uCpl The CPL to load the stack for.
1732 * @param pSelSS Where to return the new stack segment.
1733 * @param puEsp Where to return the new stack pointer.
1734 */
1735static VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp) RT_NOEXCEPT
1736{
1737 VBOXSTRICTRC rcStrict;
1738 Assert(uCpl < 4);
1739
1740 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1741 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
1742 {
1743 /*
1744 * 16-bit TSS (X86TSS16).
1745 */
1746 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1747 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
1748 {
1749 uint32_t off = uCpl * 4 + 2;
1750 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1751 {
1752 /** @todo check actual access pattern here. */
1753 uint32_t u32Tmp = 0; /* gcc maybe... */
1754 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1755 if (rcStrict == VINF_SUCCESS)
1756 {
1757 *puEsp = RT_LOWORD(u32Tmp);
1758 *pSelSS = RT_HIWORD(u32Tmp);
1759 return VINF_SUCCESS;
1760 }
1761 }
1762 else
1763 {
1764 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1765 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1766 }
1767 break;
1768 }
1769
1770 /*
1771 * 32-bit TSS (X86TSS32).
1772 */
1773 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
1774 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
1775 {
1776 uint32_t off = uCpl * 8 + 4;
1777 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
1778 {
1779/** @todo check actual access pattern here. */
1780 uint64_t u64Tmp;
1781 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1782 if (rcStrict == VINF_SUCCESS)
1783 {
1784 *puEsp = u64Tmp & UINT32_MAX;
1785 *pSelSS = (RTSEL)(u64Tmp >> 32);
1786 return VINF_SUCCESS;
1787 }
1788 }
1789 else
1790 {
1791                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
1792 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1793 }
1794 break;
1795 }
1796
1797 default:
1798 AssertFailed();
1799 rcStrict = VERR_IEM_IPE_4;
1800 break;
1801 }
1802
1803 *puEsp = 0; /* make gcc happy */
1804 *pSelSS = 0; /* make gcc happy */
1805 return rcStrict;
1806}
1807
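/*
 * Illustrative sketch, compiled out: the offset arithmetic used above.  In a 16-bit
 * TSS the SP0:SS0 pairs start at byte 2 and are 4 bytes apart; in a 32-bit TSS the
 * ESP0:SS0 pairs start at byte 4 and are 8 bytes apart.  The function name is made
 * up for illustration only.
 */
#if 0
/** Returns the byte offset of the CPL stack slot inside a legacy (16/32-bit) TSS. */
static uint32_t tssExampleStackSlotOffset(bool fIs386Tss, uint8_t uCpl)
{
    return fIs386Tss
         ? (uint32_t)uCpl * 8 + 4   /* X86TSS32: esp0 at 4, ss0 at 8, esp1 at 12, ... */
         : (uint32_t)uCpl * 4 + 2;  /* X86TSS16: sp0 at 2,  ss0 at 4, sp1 at 6,   ... */
}
#endif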
1808
1809/**
1810 * Loads the specified stack pointer from the 64-bit TSS.
1811 *
1812 * @returns VBox strict status code.
1813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1814 * @param uCpl The CPL to load the stack for.
1815 * @param uIst The interrupt stack table index, 0 if to use uCpl.
1816 * @param puRsp Where to return the new stack pointer.
1817 */
1818static VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp) RT_NOEXCEPT
1819{
1820 Assert(uCpl < 4);
1821 Assert(uIst < 8);
1822 *puRsp = 0; /* make gcc happy */
1823
1824 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
1825 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
1826
1827 uint32_t off;
1828 if (uIst)
1829 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
1830 else
1831 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
1832 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
1833 {
1834 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
1835 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
1836 }
1837
1838 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
1839}
1840
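/*
 * Illustrative sketch, compiled out: the same offset arithmetic with the
 * RT_UOFFSETOF() terms spelled out as the usual 64-bit TSS layout constants
 * (rsp0 at byte 4, ist1 at byte 36).  The function name is made up for
 * illustration only.
 */
#if 0
/** Returns the byte offset of the stack slot selected by uCpl/uIst in a 64-bit TSS. */
static uint32_t tss64ExampleStackSlotOffset(uint8_t uCpl, uint8_t uIst)
{
    if (uIst)                                   /* Non-zero IST index selects IST1..IST7. */
        return (uint32_t)(uIst - 1) * 8 + 36;   /* ist1 at offset 36, 8 bytes per entry.  */
    return (uint32_t)uCpl * 8 + 4;              /* rsp0 at offset 4, 8 bytes per entry.   */
}
#endif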
1841
1842/**
1843 * Adjust the CPU state according to the exception being raised.
1844 *
1845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1846 * @param u8Vector The exception that has been raised.
1847 */
1848DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
1849{
1850 switch (u8Vector)
1851 {
1852 case X86_XCPT_DB:
1853 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
1854 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
1855 break;
1856 /** @todo Read the AMD and Intel exception reference... */
1857 }
1858}
1859
1860
1861/**
1862 * Implements exceptions and interrupts for real mode.
1863 *
1864 * @returns VBox strict status code.
1865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1866 * @param cbInstr The number of bytes to offset rIP by in the return
1867 * address.
1868 * @param u8Vector The interrupt / exception vector number.
1869 * @param fFlags The flags.
1870 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
1871 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
1872 */
1873static VBOXSTRICTRC
1874iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
1875 uint8_t cbInstr,
1876 uint8_t u8Vector,
1877 uint32_t fFlags,
1878 uint16_t uErr,
1879 uint64_t uCr2) RT_NOEXCEPT
1880{
1881 NOREF(uErr); NOREF(uCr2);
1882 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1883
1884 /*
1885 * Read the IDT entry.
1886 */
1887 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
1888 {
1889 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
1890 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
1891 }
1892 RTFAR16 Idte;
1893 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
1894 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1895 {
1896 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
1897 return rcStrict;
1898 }
1899
1900 /*
1901 * Push the stack frame.
1902 */
1903 uint16_t *pu16Frame;
1904 uint64_t uNewRsp;
1905 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, 3, (void **)&pu16Frame, &uNewRsp);
1906 if (rcStrict != VINF_SUCCESS)
1907 return rcStrict;
1908
1909 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
1910#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
1911 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
1912 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
1913 fEfl |= UINT16_C(0xf000);
1914#endif
1915 pu16Frame[2] = (uint16_t)fEfl;
1916 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
1917 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
1918 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
1919 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
1920 return rcStrict;
1921
1922 /*
1923 * Load the vector address into cs:ip and make exception specific state
1924 * adjustments.
1925 */
1926 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
1927 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
1928 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
1929 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
1930 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
1931 pVCpu->cpum.GstCtx.rip = Idte.off;
1932 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
1933 IEMMISC_SET_EFL(pVCpu, fEfl);
1934
1935 /** @todo do we actually do this in real mode? */
1936 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
1937 iemRaiseXcptAdjustState(pVCpu, u8Vector);
1938
1939 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
1940}
1941
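/*
 * Illustrative sketch, compiled out: the real-mode dispatch above amounts to a
 * 4-byte IVT lookup (16-bit offset at +0, 16-bit segment at +2) followed by a
 * 6-byte IRET frame push (IP, CS, FLAGS).  The function name is made up for
 * illustration only.
 */
#if 0
/** Returns the linear address of the real-mode IVT entry for a given vector,
 *  using the current IDTR base (0 after reset, but relocatable via LIDT). */
static uint64_t ivtExampleEntryAddr(uint64_t uIdtBase, uint8_t bVector)
{
    return uIdtBase + (uint32_t)bVector * 4;
}
#endif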
1942
1943/**
1944 * Loads a NULL data selector into a segment register when coming from V8086 mode.
1945 *
1946 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1947 * @param pSReg Pointer to the segment register.
1948 */
1949DECLINLINE(void) iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
1950{
1951 pSReg->Sel = 0;
1952 pSReg->ValidSel = 0;
1953 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
1954 {
1955        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the attributes as follows. */
1956 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
1957 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
1958 }
1959 else
1960 {
1961 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1962 /** @todo check this on AMD-V */
1963 pSReg->u64Base = 0;
1964 pSReg->u32Limit = 0;
1965 }
1966}
1967
1968
1969/**
1970 * Loads a segment selector during a task switch in V8086 mode.
1971 *
1972 * @param pSReg Pointer to the segment register.
1973 * @param uSel The selector value to load.
1974 */
1975DECLINLINE(void) iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
1976{
1977 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
1978 pSReg->Sel = uSel;
1979 pSReg->ValidSel = uSel;
1980 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
1981 pSReg->u64Base = uSel << 4;
1982 pSReg->u32Limit = 0xffff;
1983 pSReg->Attr.u = 0xf3;
1984}
1985
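/*
 * Illustrative sketch, compiled out: the V8086 segment state set up above follows
 * plain real-mode arithmetic: the base is the selector times 16, the limit is
 * 64 KB and the attribute byte (0xf3) is an accessed, read/write, DPL-3 data
 * segment.  The function name is made up for illustration only.
 */
#if 0
/** Returns the linear address of uSel:uOff under real-mode/V8086 addressing. */
static uint32_t v86ExampleLinearAddr(uint16_t uSel, uint16_t uOff)
{
    return ((uint32_t)uSel << 4) + uOff;    /* Base = selector shifted left by four bits. */
}
#endif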
1986
1987/**
1988 * Loads a segment selector during a task switch in protected mode.
1989 *
1990 * In this task switch scenario, we would throw \#TS exceptions rather than
1991 * \#GPs.
1992 *
1993 * @returns VBox strict status code.
1994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
1995 * @param pSReg Pointer to the segment register.
1996 * @param uSel The new selector value.
1997 *
1998 * @remarks This does _not_ handle CS or SS.
1999 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
2000 */
2001static VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel) RT_NOEXCEPT
2002{
2003 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2004
2005 /* Null data selector. */
2006 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2007 {
2008 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
2009 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2010 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2011 return VINF_SUCCESS;
2012 }
2013
2014 /* Fetch the descriptor. */
2015 IEMSELDESC Desc;
2016 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
2017 if (rcStrict != VINF_SUCCESS)
2018 {
2019 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
2020 VBOXSTRICTRC_VAL(rcStrict)));
2021 return rcStrict;
2022 }
2023
2024 /* Must be a data segment or readable code segment. */
2025 if ( !Desc.Legacy.Gen.u1DescType
2026 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2027 {
2028 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
2029 Desc.Legacy.Gen.u4Type));
2030 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2031 }
2032
2033 /* Check privileges for data segments and non-conforming code segments. */
2034 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2035 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2036 {
2037 /* The RPL and the new CPL must be less than or equal to the DPL. */
2038 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2039 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
2040 {
2041 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
2042 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
2043 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2044 }
2045 }
2046
2047 /* Is it there? */
2048 if (!Desc.Legacy.Gen.u1Present)
2049 {
2050 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
2051 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
2052 }
2053
2054 /* The base and limit. */
2055 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2056 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
2057
2058 /*
2059 * Ok, everything checked out fine. Now set the accessed bit before
2060 * committing the result into the registers.
2061 */
2062 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2063 {
2064 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
2065 if (rcStrict != VINF_SUCCESS)
2066 return rcStrict;
2067 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2068 }
2069
2070 /* Commit */
2071 pSReg->Sel = uSel;
2072 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2073 pSReg->u32Limit = cbLimit;
2074 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
2075 pSReg->ValidSel = uSel;
2076 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2077 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2078 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
2079
2080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
2081 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2082 return VINF_SUCCESS;
2083}
2084
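/*
 * Illustrative sketch, compiled out: the privilege rule enforced above for data
 * segments and non-conforming code segments, pulled out as a tiny predicate.
 * The function name is made up for illustration only.
 */
#if 0
/** Returns true when loading such a selector during a task switch must raise #TS,
 *  i.e. when either the selector RPL or the current CPL exceeds the descriptor DPL. */
static bool taskSwitchExampleDataSelPrivFault(uint8_t uRpl, uint8_t uCpl, uint8_t uDpl)
{
    return uRpl > uDpl || uCpl > uDpl;
}
#endif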
2085
2086/**
2087 * Performs a task switch.
2088 *
2089 * If the task switch is the result of a JMP, CALL or IRET instruction, the
2090 * caller is responsible for performing the necessary checks (like DPL, TSS
2091 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
2092 * reference for JMP, CALL, IRET.
2093 *
2094 * If the task switch is due to a software interrupt or hardware exception,
2095 * the caller is responsible for validating the TSS selector and descriptor. See
2096 * Intel Instruction reference for INT n.
2097 *
2098 * @returns VBox strict status code.
2099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2100 * @param enmTaskSwitch The cause of the task switch.
2101 * @param uNextEip The EIP effective after the task switch.
2102 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
2103 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2104 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2105 * @param SelTSS The TSS selector of the new task.
2106 * @param pNewDescTSS Pointer to the new TSS descriptor.
2107 */
2108VBOXSTRICTRC
2109iemTaskSwitch(PVMCPUCC pVCpu,
2110 IEMTASKSWITCH enmTaskSwitch,
2111 uint32_t uNextEip,
2112 uint32_t fFlags,
2113 uint16_t uErr,
2114 uint64_t uCr2,
2115 RTSEL SelTSS,
2116 PIEMSELDESC pNewDescTSS) RT_NOEXCEPT
2117{
2118 Assert(!IEM_IS_REAL_MODE(pVCpu));
2119 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
2120 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2121
2122 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
2123 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2124 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2125 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2126 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2127
2128 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
2129 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2130
2131 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
2132 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
2133
2134 /* Update CR2 in case it's a page-fault. */
2135 /** @todo This should probably be done much earlier in IEM/PGM. See
2136 * @bugref{5653#c49}. */
2137 if (fFlags & IEM_XCPT_FLAGS_CR2)
2138 pVCpu->cpum.GstCtx.cr2 = uCr2;
2139
2140 /*
2141 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
2142 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
2143 */
2144 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
2145 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
2146 if (uNewTSSLimit < uNewTSSLimitMin)
2147 {
2148 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
2149 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
2150 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2151 }
2152
2153 /*
2154     * Task switches in VMX non-root mode always cause a VM-exit.
2155 * The new TSS must have been read and validated (DPL, limits etc.) before a
2156 * task-switch VM-exit commences.
2157 *
2158 * See Intel spec. 25.4.2 "Treatment of Task Switches".
2159 */
2160 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
2161 {
2162 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
2163 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
2164 }
2165
2166 /*
2167 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
2168 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
2169 */
2170 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
2171 {
2172 uint32_t const uExitInfo1 = SelTSS;
2173 uint32_t uExitInfo2 = uErr;
2174 switch (enmTaskSwitch)
2175 {
2176 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
2177 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
2178 default: break;
2179 }
2180 if (fFlags & IEM_XCPT_FLAGS_ERR)
2181 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
2182 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
2183 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
2184
2185 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
2186 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
2187 RT_NOREF2(uExitInfo1, uExitInfo2);
2188 }
2189
2190 /*
2191 * Check the current TSS limit. The last written byte to the current TSS during the
2192 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
2193 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2194 *
2195     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
2196 * end up with smaller than "legal" TSS limits.
2197 */
2198 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
2199 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
2200 if (uCurTSSLimit < uCurTSSLimitMin)
2201 {
2202 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
2203 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
2204 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
2205 }
2206
2207 /*
2208 * Verify that the new TSS can be accessed and map it. Map only the required contents
2209 * and not the entire TSS.
2210 */
2211 void *pvNewTSS;
2212 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
2213 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
2214 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
2215 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
2216 * not perform correct translation if this happens. See Intel spec. 7.2.1
2217 * "Task-State Segment". */
2218 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW, 0);
2219 if (rcStrict != VINF_SUCCESS)
2220 {
2221 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
2222 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
2223 return rcStrict;
2224 }
2225
2226 /*
2227 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
2228 */
2229 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
2230 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
2231 || enmTaskSwitch == IEMTASKSWITCH_IRET)
2232 {
2233 PX86DESC pDescCurTSS;
2234 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
2235 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2236 if (rcStrict != VINF_SUCCESS)
2237 {
2238 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2239 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2240 return rcStrict;
2241 }
2242
2243 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2244 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
2245 if (rcStrict != VINF_SUCCESS)
2246 {
2247 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2248 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2249 return rcStrict;
2250 }
2251
2252 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
2253 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
2254 {
2255 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
2256 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
2257 u32EFlags &= ~X86_EFL_NT;
2258 }
2259 }
2260
2261 /*
2262 * Save the CPU state into the current TSS.
2263 */
2264 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
2265 if (GCPtrNewTSS == GCPtrCurTSS)
2266 {
2267 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
2268 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
2269 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
2270 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
2271 pVCpu->cpum.GstCtx.ldtr.Sel));
2272 }
2273 if (fIsNewTSS386)
2274 {
2275 /*
2276 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
2277 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
2278 */
2279 void *pvCurTSS32;
2280 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
2281 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
2282 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
2283 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2284 if (rcStrict != VINF_SUCCESS)
2285 {
2286 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2287 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2288 return rcStrict;
2289 }
2290
2291        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2292 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
2293 pCurTSS32->eip = uNextEip;
2294 pCurTSS32->eflags = u32EFlags;
2295 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
2296 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
2297 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
2298 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
2299 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
2300 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
2301 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
2302 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
2303 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
2304 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
2305 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
2306 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
2307 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
2308 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
2309
2310 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
2311 if (rcStrict != VINF_SUCCESS)
2312 {
2313 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2314 VBOXSTRICTRC_VAL(rcStrict)));
2315 return rcStrict;
2316 }
2317 }
2318 else
2319 {
2320 /*
2321 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
2322 */
2323 void *pvCurTSS16;
2324 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
2325 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
2326 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
2327 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW, 0);
2328 if (rcStrict != VINF_SUCCESS)
2329 {
2330 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
2331 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
2332 return rcStrict;
2333 }
2334
2335        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
2336 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
2337 pCurTSS16->ip = uNextEip;
2338 pCurTSS16->flags = u32EFlags;
2339 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
2340 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
2341 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
2342 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
2343 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
2344 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
2345 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
2346 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
2347 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
2348 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
2349 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
2350 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
2351
2352 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
2353 if (rcStrict != VINF_SUCCESS)
2354 {
2355 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
2356 VBOXSTRICTRC_VAL(rcStrict)));
2357 return rcStrict;
2358 }
2359 }
2360
2361 /*
2362 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
2363 */
2364 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2365 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2366 {
2367 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
2368 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
2369 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
2370 }
2371
2372 /*
2373 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
2374 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
2375 */
2376 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
2377 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
2378 bool fNewDebugTrap;
2379 if (fIsNewTSS386)
2380 {
2381 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
2382 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
2383 uNewEip = pNewTSS32->eip;
2384 uNewEflags = pNewTSS32->eflags;
2385 uNewEax = pNewTSS32->eax;
2386 uNewEcx = pNewTSS32->ecx;
2387 uNewEdx = pNewTSS32->edx;
2388 uNewEbx = pNewTSS32->ebx;
2389 uNewEsp = pNewTSS32->esp;
2390 uNewEbp = pNewTSS32->ebp;
2391 uNewEsi = pNewTSS32->esi;
2392 uNewEdi = pNewTSS32->edi;
2393 uNewES = pNewTSS32->es;
2394 uNewCS = pNewTSS32->cs;
2395 uNewSS = pNewTSS32->ss;
2396 uNewDS = pNewTSS32->ds;
2397 uNewFS = pNewTSS32->fs;
2398 uNewGS = pNewTSS32->gs;
2399 uNewLdt = pNewTSS32->selLdt;
2400 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
2401 }
2402 else
2403 {
2404 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
2405 uNewCr3 = 0;
2406 uNewEip = pNewTSS16->ip;
2407 uNewEflags = pNewTSS16->flags;
2408 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
2409 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
2410 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
2411 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
2412 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
2413 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
2414 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
2415 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
2416 uNewES = pNewTSS16->es;
2417 uNewCS = pNewTSS16->cs;
2418 uNewSS = pNewTSS16->ss;
2419 uNewDS = pNewTSS16->ds;
2420 uNewFS = 0;
2421 uNewGS = 0;
2422 uNewLdt = pNewTSS16->selLdt;
2423 fNewDebugTrap = false;
2424 }
2425
2426 if (GCPtrNewTSS == GCPtrCurTSS)
2427 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
2428 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
2429
2430 /*
2431 * We're done accessing the new TSS.
2432 */
2433 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
2434 if (rcStrict != VINF_SUCCESS)
2435 {
2436 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
2437 return rcStrict;
2438 }
2439
2440 /*
2441 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
2442 */
2443 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
2444 {
2445 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
2446 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW, 0);
2447 if (rcStrict != VINF_SUCCESS)
2448 {
2449 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2450 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2451 return rcStrict;
2452 }
2453
2454 /* Check that the descriptor indicates the new TSS is available (not busy). */
2455 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
2456 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
2457 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
2458
2459 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2460 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
2461 if (rcStrict != VINF_SUCCESS)
2462 {
2463 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
2464 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
2465 return rcStrict;
2466 }
2467 }
2468
2469 /*
2470 * From this point on, we're technically in the new task. We will defer exceptions
2471 * until the completion of the task switch but before executing any instructions in the new task.
2472 */
2473 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
2474 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
2475 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
2476 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
2477 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
2478 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
2479 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
2480
2481 /* Set the busy bit in TR. */
2482 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2483
2484 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
2485 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
2486 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
2487 {
2488 uNewEflags |= X86_EFL_NT;
2489 }
2490
2491 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
2492 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
2493 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
2494
2495 pVCpu->cpum.GstCtx.eip = uNewEip;
2496 pVCpu->cpum.GstCtx.eax = uNewEax;
2497 pVCpu->cpum.GstCtx.ecx = uNewEcx;
2498 pVCpu->cpum.GstCtx.edx = uNewEdx;
2499 pVCpu->cpum.GstCtx.ebx = uNewEbx;
2500 pVCpu->cpum.GstCtx.esp = uNewEsp;
2501 pVCpu->cpum.GstCtx.ebp = uNewEbp;
2502 pVCpu->cpum.GstCtx.esi = uNewEsi;
2503 pVCpu->cpum.GstCtx.edi = uNewEdi;
2504
2505 uNewEflags &= X86_EFL_LIVE_MASK;
2506 uNewEflags |= X86_EFL_RA1_MASK;
2507 IEMMISC_SET_EFL(pVCpu, uNewEflags);
2508
2509 /*
2510 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
2511 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
2512 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
2513 */
2514 pVCpu->cpum.GstCtx.es.Sel = uNewES;
2515 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
2516
2517 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2518 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
2519
2520 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2521 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
2522
2523 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
2524 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
2525
2526 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
2527 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
2528
2529 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
2530 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
2531 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
2532
2533 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
2534 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
2535 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
2536 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
2537
2538 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2539 {
2540 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
2541 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
2542 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
2543 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
2544 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
2545 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
2546 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
2547 }
2548
2549 /*
2550 * Switch CR3 for the new task.
2551 */
2552 if ( fIsNewTSS386
2553 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
2554 {
2555 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
2556 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
2557 AssertRCSuccessReturn(rc, rc);
2558
2559 /* Inform PGM. */
2560 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
2561 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
2562 AssertRCReturn(rc, rc);
2563 /* ignore informational status codes */
2564
2565 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
2566 }
2567
2568 /*
2569 * Switch LDTR for the new task.
2570 */
2571 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2572 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
2573 else
2574 {
2575 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
2576
2577 IEMSELDESC DescNewLdt;
2578 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
2579 if (rcStrict != VINF_SUCCESS)
2580 {
2581 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
2582 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
2583 return rcStrict;
2584 }
2585 if ( !DescNewLdt.Legacy.Gen.u1Present
2586 || DescNewLdt.Legacy.Gen.u1DescType
2587 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2588 {
2589 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
2590 uNewLdt, DescNewLdt.Legacy.u));
2591 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2592 }
2593
2594 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
2595 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2596 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
2597 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
2598 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
2599 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
2600 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
2601 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
2602 }
2603
2604 IEMSELDESC DescSS;
2605 if (IEM_IS_V86_MODE(pVCpu))
2606 {
2607 pVCpu->iem.s.uCpl = 3;
2608 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
2609 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
2610 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
2611 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
2612 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
2613 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
2614
2615 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
2616 DescSS.Legacy.u = 0;
2617 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
2618 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
2619 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
2620 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
2621 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
2622 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
2623 DescSS.Legacy.Gen.u2Dpl = 3;
2624 }
2625 else
2626 {
2627 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
2628
2629 /*
2630 * Load the stack segment for the new task.
2631 */
2632 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2633 {
2634 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
2635 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2636 }
2637
2638 /* Fetch the descriptor. */
2639 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
2640 if (rcStrict != VINF_SUCCESS)
2641 {
2642 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
2643 VBOXSTRICTRC_VAL(rcStrict)));
2644 return rcStrict;
2645 }
2646
2647 /* SS must be a data segment and writable. */
2648 if ( !DescSS.Legacy.Gen.u1DescType
2649 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2650 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
2651 {
2652 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
2653 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
2654 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2655 }
2656
2657 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
2658 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
2659 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
2660 {
2661 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
2662 uNewCpl));
2663 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2664 }
2665
2666 /* Is it there? */
2667 if (!DescSS.Legacy.Gen.u1Present)
2668 {
2669 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
2670 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
2671 }
2672
2673 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
2674 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
2675
2676 /* Set the accessed bit before committing the result into SS. */
2677 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2678 {
2679 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
2680 if (rcStrict != VINF_SUCCESS)
2681 return rcStrict;
2682 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2683 }
2684
2685 /* Commit SS. */
2686 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
2687 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
2688 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2689 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
2690 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
2691 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
2692 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
2693
2694 /* CPL has changed, update IEM before loading rest of segments. */
2695 pVCpu->iem.s.uCpl = uNewCpl;
2696
2697 /*
2698 * Load the data segments for the new task.
2699 */
2700 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
2701 if (rcStrict != VINF_SUCCESS)
2702 return rcStrict;
2703 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
2704 if (rcStrict != VINF_SUCCESS)
2705 return rcStrict;
2706 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
2707 if (rcStrict != VINF_SUCCESS)
2708 return rcStrict;
2709 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
2710 if (rcStrict != VINF_SUCCESS)
2711 return rcStrict;
2712
2713 /*
2714 * Load the code segment for the new task.
2715 */
2716 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
2717 {
2718 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
2719 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2720 }
2721
2722 /* Fetch the descriptor. */
2723 IEMSELDESC DescCS;
2724 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
2725 if (rcStrict != VINF_SUCCESS)
2726 {
2727 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
2728 return rcStrict;
2729 }
2730
2731 /* CS must be a code segment. */
2732 if ( !DescCS.Legacy.Gen.u1DescType
2733 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2734 {
2735 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
2736 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
2737 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2738 }
2739
2740 /* For conforming CS, DPL must be less than or equal to the RPL. */
2741 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2742 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
2743 {
2744            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
2745 DescCS.Legacy.Gen.u2Dpl));
2746 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2747 }
2748
2749 /* For non-conforming CS, DPL must match RPL. */
2750 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2751 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
2752 {
2753            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
2754 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
2755 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2756 }
2757
2758 /* Is it there? */
2759 if (!DescCS.Legacy.Gen.u1Present)
2760 {
2761 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
2762 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
2763 }
2764
2765 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
2766 u64Base = X86DESC_BASE(&DescCS.Legacy);
2767
2768 /* Set the accessed bit before committing the result into CS. */
2769 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2770 {
2771 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
2772 if (rcStrict != VINF_SUCCESS)
2773 return rcStrict;
2774 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2775 }
2776
2777 /* Commit CS. */
2778 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
2779 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
2780 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2781 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
2782 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
2783 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
2784 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
2785 }
2786
2787 /** @todo Debug trap. */
2788 if (fIsNewTSS386 && fNewDebugTrap)
2789 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
2790
2791 /*
2792 * Construct the error code masks based on what caused this task switch.
2793 * See Intel Instruction reference for INT.
2794 */
2795 uint16_t uExt;
2796 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
2797 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2798 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
2799 {
2800 uExt = 1;
2801 }
2802 else
2803 uExt = 0;
2804
2805 /*
2806 * Push any error code on to the new stack.
2807 */
2808 if (fFlags & IEM_XCPT_FLAGS_ERR)
2809 {
2810 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
2811 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
2812 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
2813
2814 /* Check that there is sufficient space on the stack. */
2815 /** @todo Factor out segment limit checking for normal/expand down segments
2816 * into a separate function. */
2817 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
2818 {
2819 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
2820 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
2821 {
2822 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2823 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
2824 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2825 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2826 }
2827 }
2828 else
2829 {
2830 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
2831 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
2832 {
2833 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
2834 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
2835 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
2836 }
2837 }
2838
2839
2840 if (fIsNewTSS386)
2841 rcStrict = iemMemStackPushU32(pVCpu, uErr);
2842 else
2843 rcStrict = iemMemStackPushU16(pVCpu, uErr);
2844 if (rcStrict != VINF_SUCCESS)
2845 {
2846 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
2847 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
2848 return rcStrict;
2849 }
2850 }
2851
2852 /* Check the new EIP against the new CS limit. */
2853 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
2854 {
2855        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
2856 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
2857 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
2858 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
2859 }
2860
2861 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
2862 pVCpu->cpum.GstCtx.ss.Sel));
2863 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
2864}
2865
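/*
 * Illustrative sketch, compiled out: the per-source bookkeeping iemTaskSwitch does
 * on the TSS busy bits, the previous task link and EFLAGS.NT, condensed into a
 * small table-style helper.  It reuses the IEMTASKSWITCH enum from this file; the
 * struct and function names are made up for illustration only.
 */
#if 0
typedef struct TASKSWITCHEXAMPLEACTIONS
{
    bool fClearOldBusy;     /* Clear the busy bit in the outgoing TSS descriptor.   */
    bool fSetNewBusy;       /* Set the busy bit in the incoming TSS descriptor.     */
    bool fWriteBacklink;    /* Store the old TR selector in the new TSS (selPrev).  */
    bool fSetNt;            /* Set EFLAGS.NT in the flags loaded from the new TSS.  */
} TASKSWITCHEXAMPLEACTIONS;

static TASKSWITCHEXAMPLEACTIONS taskSwitchExampleActions(IEMTASKSWITCH enmSource)
{
    TASKSWITCHEXAMPLEACTIONS Actions;
    Actions.fClearOldBusy  = enmSource == IEMTASKSWITCH_JUMP || enmSource == IEMTASKSWITCH_IRET;
    Actions.fSetNewBusy    = enmSource != IEMTASKSWITCH_IRET;   /* IRET returns to an already busy TSS.   */
    Actions.fWriteBacklink = enmSource == IEMTASKSWITCH_CALL || enmSource == IEMTASKSWITCH_INT_XCPT;
    Actions.fSetNt         = Actions.fWriteBacklink;            /* NT accompanies the previous task link. */
    return Actions;
}
#endif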
2866
2867/**
2868 * Implements exceptions and interrupts for protected mode.
2869 *
2870 * @returns VBox strict status code.
2871 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2872 * @param cbInstr The number of bytes to offset rIP by in the return
2873 * address.
2874 * @param u8Vector The interrupt / exception vector number.
2875 * @param fFlags The flags.
2876 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
2877 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
2878 */
2879static VBOXSTRICTRC
2880iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
2881 uint8_t cbInstr,
2882 uint8_t u8Vector,
2883 uint32_t fFlags,
2884 uint16_t uErr,
2885 uint64_t uCr2) RT_NOEXCEPT
2886{
2887 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2888
2889 /*
2890 * Read the IDT entry.
2891 */
2892 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
2893 {
2894 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
2895 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2896 }
2897 X86DESC Idte;
2898 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
2899 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
2900 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2901 {
2902 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
2903 return rcStrict;
2904 }
2905 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
2906 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
2907 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
2908
2909 /*
2910 * Check the descriptor type, DPL and such.
2911 * ASSUMES this is done in the same order as described for call-gate calls.
2912 */
2913 if (Idte.Gate.u1DescType)
2914 {
2915 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2916 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2917 }
2918 bool fTaskGate = false;
2919 uint8_t f32BitGate = true;
2920 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
2921 switch (Idte.Gate.u4Type)
2922 {
2923 case X86_SEL_TYPE_SYS_UNDEFINED:
2924 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
2925 case X86_SEL_TYPE_SYS_LDT:
2926 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
2927 case X86_SEL_TYPE_SYS_286_CALL_GATE:
2928 case X86_SEL_TYPE_SYS_UNDEFINED2:
2929 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
2930 case X86_SEL_TYPE_SYS_UNDEFINED3:
2931 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
2932 case X86_SEL_TYPE_SYS_386_CALL_GATE:
2933 case X86_SEL_TYPE_SYS_UNDEFINED4:
2934 {
2935 /** @todo check what actually happens when the type is wrong...
2936 * esp. call gates. */
2937 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
2938 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2939 }
2940
2941 case X86_SEL_TYPE_SYS_286_INT_GATE:
2942 f32BitGate = false;
2943 RT_FALL_THRU();
2944 case X86_SEL_TYPE_SYS_386_INT_GATE:
2945 fEflToClear |= X86_EFL_IF;
2946 break;
2947
2948 case X86_SEL_TYPE_SYS_TASK_GATE:
2949 fTaskGate = true;
2950#ifndef IEM_IMPLEMENTS_TASKSWITCH
2951 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
2952#endif
2953 break;
2954
2955 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
2956            f32BitGate = false; RT_FALL_THRU();
2957 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
2958 break;
2959
2960 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2961 }
2962
2963 /* Check DPL against CPL if applicable. */
2964 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
2965 {
2966 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
2967 {
2968 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
2969 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2970 }
2971 }
2972
2973 /* Is it there? */
2974 if (!Idte.Gate.u1Present)
2975 {
2976 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
2977 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
2978 }
2979
2980 /* Is it a task-gate? */
2981 if (fTaskGate)
2982 {
2983 /*
2984 * Construct the error code masks based on what caused this task switch.
2985 * See Intel Instruction reference for INT.
2986 */
2987 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
2988 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
2989 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
2990 RTSEL SelTSS = Idte.Gate.u16Sel;
2991
2992 /*
2993 * Fetch the TSS descriptor in the GDT.
2994 */
2995 IEMSELDESC DescTSS;
2996 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
2997 if (rcStrict != VINF_SUCCESS)
2998 {
2999 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
3000 VBOXSTRICTRC_VAL(rcStrict)));
3001 return rcStrict;
3002 }
3003
3004 /* The TSS descriptor must be a system segment and be available (not busy). */
3005 if ( DescTSS.Legacy.Gen.u1DescType
3006 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3007 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
3008 {
3009 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
3010 u8Vector, SelTSS, DescTSS.Legacy.au64));
3011 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
3012 }
3013
3014 /* The TSS must be present. */
3015 if (!DescTSS.Legacy.Gen.u1Present)
3016 {
3017 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
3018 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
3019 }
3020
3021 /* Do the actual task switch. */
3022 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
3023 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
3024 fFlags, uErr, uCr2, SelTSS, &DescTSS);
3025 }
3026
3027 /* A null CS is bad. */
3028 RTSEL NewCS = Idte.Gate.u16Sel;
3029 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3030 {
3031 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3032 return iemRaiseGeneralProtectionFault0(pVCpu);
3033 }
3034
3035 /* Fetch the descriptor for the new CS. */
3036 IEMSELDESC DescCS;
3037 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
3038 if (rcStrict != VINF_SUCCESS)
3039 {
3040 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3041 return rcStrict;
3042 }
3043
3044 /* Must be a code segment. */
3045 if (!DescCS.Legacy.Gen.u1DescType)
3046 {
3047 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3048 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3049 }
3050 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
3051 {
3052 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3053 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3054 }
3055
3056 /* Don't allow lowering the privilege level. */
3057 /** @todo Does the lowering of privileges apply to software interrupts
3058 * only? This has bearings on the more-privileged or
3059 * same-privilege stack behavior further down. A testcase would
3060 * be nice. */
3061 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3062 {
3063 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3064 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3065 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3066 }
3067
3068 /* Make sure the selector is present. */
3069 if (!DescCS.Legacy.Gen.u1Present)
3070 {
3071 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3072 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3073 }
3074
3075 /* Check the new EIP against the new CS limit. */
3076 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
3077 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
3078 ? Idte.Gate.u16OffsetLow
3079 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
3080 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
3081 if (uNewEip > cbLimitCS)
3082 {
3083 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
3084 u8Vector, uNewEip, cbLimitCS, NewCS));
3085 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3086 }
3087 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
3088
3089 /* Calc the flag image to push. */
3090 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3091 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3092 fEfl &= ~X86_EFL_RF;
3093 else
3094 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3095
3096 /* From V8086 mode only go to CPL 0. */
3097 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3098 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3099 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
3100 {
3101 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
3102 return iemRaiseGeneralProtectionFault(pVCpu, 0);
3103 }
3104
3105 /*
3106 * If the privilege level changes, we need to get a new stack from the TSS.
3107 * This in turns means validating the new SS and ESP...
3108 */
3109 if (uNewCpl != pVCpu->iem.s.uCpl)
3110 {
3111 RTSEL NewSS;
3112 uint32_t uNewEsp;
3113 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
3114 if (rcStrict != VINF_SUCCESS)
3115 return rcStrict;
3116
3117 IEMSELDESC DescSS;
3118 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
3119 if (rcStrict != VINF_SUCCESS)
3120 return rcStrict;
3121 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
3122 if (!DescSS.Legacy.Gen.u1DefBig)
3123 {
3124 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
3125 uNewEsp = (uint16_t)uNewEsp;
3126 }
3127
3128 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3129
3130 /* Check that there is sufficient space for the stack frame. */
3131 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
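        /* Informational note: the frame is 5 words (return EIP/IP, CS, EFLAGS, old
           ESP/SP, old SS) plus an optional error code, i.e. 10 or 12 bytes for a
           16-bit gate and twice that for a 32-bit gate.  When interrupting V8086
           code the CPU additionally pushes ES, DS, FS and GS, giving 9 or 10 slots. */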
3132 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
3133 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
3134 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
3135
3136 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
3137 {
3138 if ( uNewEsp - 1 > cbLimitSS
3139 || uNewEsp < cbStackFrame)
3140 {
3141 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
3142 u8Vector, NewSS, uNewEsp, cbStackFrame));
3143 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3144 }
3145 }
3146 else
3147 {
3148 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
3149 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
3150 {
3151 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
3152 u8Vector, NewSS, uNewEsp, cbStackFrame));
3153 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
3154 }
3155 }
3156
3157 /*
3158 * Start making changes.
3159 */
3160
3161 /* Set the new CPL so that stack accesses use it. */
3162 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3163 pVCpu->iem.s.uCpl = uNewCpl;
3164
3165 /* Create the stack frame. */
3166 RTPTRUNION uStackFrame;
3167 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3168 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy),
3169 IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3170 if (rcStrict != VINF_SUCCESS)
3171 return rcStrict;
3172 void * const pvStackFrame = uStackFrame.pv;
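        /* The layout written below matches the architectural privilege-change frame
           (ascending addresses): [error code,] EIP, CS, EFLAGS, old ESP, old SS, and
           when coming from V8086 mode also ES, DS, FS and GS.  The array indices are
           relative to the slot following the optional error code. */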
3173 if (f32BitGate)
3174 {
3175 if (fFlags & IEM_XCPT_FLAGS_ERR)
3176 *uStackFrame.pu32++ = uErr;
3177 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3178 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3179 uStackFrame.pu32[2] = fEfl;
3180 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
3181 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
3182 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
3183 if (fEfl & X86_EFL_VM)
3184 {
3185 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
3186 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
3187 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
3188 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
3189 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
3190 }
3191 }
3192 else
3193 {
3194 if (fFlags & IEM_XCPT_FLAGS_ERR)
3195 *uStackFrame.pu16++ = uErr;
3196 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3197 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
3198 uStackFrame.pu16[2] = fEfl;
3199 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
3200 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
3201 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
3202 if (fEfl & X86_EFL_VM)
3203 {
3204 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
3205 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
3206 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
3207 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
3208 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
3209 }
3210 }
3211 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3212 if (rcStrict != VINF_SUCCESS)
3213 return rcStrict;
3214
3215 /* Mark the selectors 'accessed' (hope this is the correct time). */
3216        /** @todo testcase: exactly _when_ are the accessed bits set - before or
3217 * after pushing the stack frame? (Write protect the gdt + stack to
3218 * find out.) */
3219 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3220 {
3221 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3222 if (rcStrict != VINF_SUCCESS)
3223 return rcStrict;
3224 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3225 }
3226
3227 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3228 {
3229 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
3230 if (rcStrict != VINF_SUCCESS)
3231 return rcStrict;
3232 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3233 }
3234
3235 /*
3236         * Start committing the register changes (joins with the DPL=CPL branch).
3237 */
3238 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
3239 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
3240 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3241 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
3242 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
3243 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
3244 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
3245 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
3246 * SP is loaded).
3247 * Need to check the other combinations too:
3248 * - 16-bit TSS, 32-bit handler
3249 * - 32-bit TSS, 16-bit handler */
3250 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
3251 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
3252 else
3253 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
3254
3255 if (fEfl & X86_EFL_VM)
3256 {
3257 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
3258 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
3259 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
3260 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
3261 }
3262 }
3263 /*
3264 * Same privilege, no stack change and smaller stack frame.
3265 */
3266 else
3267 {
3268 uint64_t uNewRsp;
3269 RTPTRUNION uStackFrame;
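        /* Without a stack switch only EIP/IP, CS and EFLAGS (plus an optional error
           code) are pushed: 6 or 8 bytes for a 16-bit gate, 12 or 16 for a 32-bit one. */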
3270 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
3271 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, f32BitGate ? 3 : 1, &uStackFrame.pv, &uNewRsp);
3272 if (rcStrict != VINF_SUCCESS)
3273 return rcStrict;
3274 void * const pvStackFrame = uStackFrame.pv;
3275
3276 if (f32BitGate)
3277 {
3278 if (fFlags & IEM_XCPT_FLAGS_ERR)
3279 *uStackFrame.pu32++ = uErr;
3280 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3281 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3282 uStackFrame.pu32[2] = fEfl;
3283 }
3284 else
3285 {
3286 if (fFlags & IEM_XCPT_FLAGS_ERR)
3287 *uStackFrame.pu16++ = uErr;
3288 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
3289 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
3290 uStackFrame.pu16[2] = fEfl;
3291 }
3292 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
3293 if (rcStrict != VINF_SUCCESS)
3294 return rcStrict;
3295
3296 /* Mark the CS selector as 'accessed'. */
3297 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3298 {
3299 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3300 if (rcStrict != VINF_SUCCESS)
3301 return rcStrict;
3302 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3303 }
3304
3305 /*
3306 * Start committing the register changes (joins with the other branch).
3307 */
3308 pVCpu->cpum.GstCtx.rsp = uNewRsp;
3309 }
3310
3311 /* ... register committing continues. */
3312 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3313 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3314 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3315 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
3316 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3317 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3318
3319 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
3320 fEfl &= ~fEflToClear;
3321 IEMMISC_SET_EFL(pVCpu, fEfl);
3322
3323 if (fFlags & IEM_XCPT_FLAGS_CR2)
3324 pVCpu->cpum.GstCtx.cr2 = uCr2;
3325
3326 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3327 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3328
3329 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3330}
3331
3332
3333/**
3334 * Implements exceptions and interrupts for long mode.
3335 *
3336 * @returns VBox strict status code.
3337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3338 * @param cbInstr The number of bytes to offset rIP by in the return
3339 * address.
3340 * @param u8Vector The interrupt / exception vector number.
3341 * @param fFlags The flags.
3342 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3343 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3344 */
3345static VBOXSTRICTRC
3346iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
3347 uint8_t cbInstr,
3348 uint8_t u8Vector,
3349 uint32_t fFlags,
3350 uint16_t uErr,
3351 uint64_t uCr2) RT_NOEXCEPT
3352{
3353 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3354
3355 /*
3356 * Read the IDT entry.
3357 */
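    /* Long-mode IDT entries are 16 bytes wide, hence the shift by 4 below and the
       two 8-byte fetches. */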
3358 uint16_t offIdt = (uint16_t)u8Vector << 4;
3359 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
3360 {
3361 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3362 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3363 }
3364 X86DESC64 Idte;
3365#ifdef _MSC_VER /* Shut up silly compiler warning. */
3366 Idte.au64[0] = 0;
3367 Idte.au64[1] = 0;
3368#endif
3369 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
3370 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
3371 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
3372 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3373 {
3374 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3375 return rcStrict;
3376 }
3377 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
3378 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
3379 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
3380
3381 /*
3382 * Check the descriptor type, DPL and such.
3383 * ASSUMES this is done in the same order as described for call-gate calls.
3384 */
3385 if (Idte.Gate.u1DescType)
3386 {
3387 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3388 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3389 }
3390 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
3391 switch (Idte.Gate.u4Type)
3392 {
3393 case AMD64_SEL_TYPE_SYS_INT_GATE:
3394 fEflToClear |= X86_EFL_IF;
3395 break;
3396 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
3397 break;
3398
3399 default:
3400 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
3401 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3402 }
3403
3404 /* Check DPL against CPL if applicable. */
3405 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
3406 {
3407 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
3408 {
3409 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
3410 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3411 }
3412 }
3413
3414 /* Is it there? */
3415 if (!Idte.Gate.u1Present)
3416 {
3417 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
3418 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3419 }
3420
3421 /* A null CS is bad. */
3422 RTSEL NewCS = Idte.Gate.u16Sel;
3423 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
3424 {
3425 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
3426 return iemRaiseGeneralProtectionFault0(pVCpu);
3427 }
3428
3429 /* Fetch the descriptor for the new CS. */
3430 IEMSELDESC DescCS;
3431 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
3432 if (rcStrict != VINF_SUCCESS)
3433 {
3434 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
3435 return rcStrict;
3436 }
3437
3438 /* Must be a 64-bit code segment. */
3439 if (!DescCS.Long.Gen.u1DescType)
3440 {
3441 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
3442 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3443 }
3444 if ( !DescCS.Long.Gen.u1Long
3445 || DescCS.Long.Gen.u1DefBig
3446 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
3447 {
3448 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
3449 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
3450 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3451 }
3452
3453 /* Don't allow lowering the privilege level. For non-conforming CS
3454 selectors, the CS.DPL sets the privilege level the trap/interrupt
3455 handler runs at. For conforming CS selectors, the CPL remains
3456 unchanged, but the CS.DPL must be <= CPL. */
3457 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
3458 * when CPU in Ring-0. Result \#GP? */
3459 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
3460 {
3461 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
3462 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3463 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
3464 }
3465
3466
3467 /* Make sure the selector is present. */
3468 if (!DescCS.Legacy.Gen.u1Present)
3469 {
3470 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
3471 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
3472 }
3473
3474 /* Check that the new RIP is canonical. */
3475 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
3476 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
3477 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
3478 if (!IEM_IS_CANONICAL(uNewRip))
3479 {
3480 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
3481 return iemRaiseGeneralProtectionFault0(pVCpu);
3482 }
3483
3484 /*
3485 * If the privilege level changes or if the IST isn't zero, we need to get
3486 * a new stack from the TSS.
3487 */
3488 uint64_t uNewRsp;
3489 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
3490 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
3491 if ( uNewCpl != pVCpu->iem.s.uCpl
3492 || Idte.Gate.u3IST != 0)
3493 {
3494 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
3495 if (rcStrict != VINF_SUCCESS)
3496 return rcStrict;
3497 }
3498 else
3499 uNewRsp = pVCpu->cpum.GstCtx.rsp;
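    /* In 64-bit mode the CPU aligns the stack frame on a 16-byte boundary before
       pushing, regardless of whether a stack switch took place. */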
3500 uNewRsp &= ~(uint64_t)0xf;
3501
3502 /*
3503 * Calc the flag image to push.
3504 */
3505 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3506 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
3507 fEfl &= ~X86_EFL_RF;
3508 else
3509 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
3510
3511 /*
3512 * Start making changes.
3513 */
3514 /* Set the new CPL so that stack accesses use it. */
3515 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
3516 pVCpu->iem.s.uCpl = uNewCpl;
3517
3518 /* Create the stack frame. */
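    /* The long-mode frame is always five qwords - SS, RSP, RFLAGS, CS and RIP - plus
       an optional error code qword; SS:RSP is pushed even when no privilege change
       occurs. */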
3519 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
3520 RTPTRUNION uStackFrame;
3521 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
3522 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS, 0); /* _SYS is a hack ... */
3523 if (rcStrict != VINF_SUCCESS)
3524 return rcStrict;
3525 void * const pvStackFrame = uStackFrame.pv;
3526
3527 if (fFlags & IEM_XCPT_FLAGS_ERR)
3528 *uStackFrame.pu64++ = uErr;
3529 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
3530 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
3531 uStackFrame.pu64[2] = fEfl;
3532 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
3533 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
3534 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
3535 if (rcStrict != VINF_SUCCESS)
3536 return rcStrict;
3537
3538    /* Mark the CS selector 'accessed' (hope this is the correct time). */
3539    /** @todo testcase: exactly _when_ are the accessed bits set - before or
3540 * after pushing the stack frame? (Write protect the gdt + stack to
3541 * find out.) */
3542 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3543 {
3544 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
3545 if (rcStrict != VINF_SUCCESS)
3546 return rcStrict;
3547 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3548 }
3549
3550 /*
3551     * Start committing the register changes.
3552 */
3553 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
3554 * hidden registers when interrupting 32-bit or 16-bit code! */
3555 if (uNewCpl != uOldCpl)
3556 {
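        /* On a privilege change in long mode SS is loaded with a null selector whose
           RPL equals the new CPL; the hidden attributes are marked unusable to
           reflect that. */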
3557 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
3558 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
3559 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
3560 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
3561 pVCpu->cpum.GstCtx.ss.u64Base = 0;
3562 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
3563 }
3564 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
3565 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3566 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
3567 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3568 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
3569 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
3570 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
3571 pVCpu->cpum.GstCtx.rip = uNewRip;
3572
3573 fEfl &= ~fEflToClear;
3574 IEMMISC_SET_EFL(pVCpu, fEfl);
3575
3576 if (fFlags & IEM_XCPT_FLAGS_CR2)
3577 pVCpu->cpum.GstCtx.cr2 = uCr2;
3578
3579 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3580 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3581
3582 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3583}
3584
3585
3586/**
3587 * Implements exceptions and interrupts.
3588 *
3589 * All exceptions and interrupts go through this function!
3590 *
3591 * @returns VBox strict status code.
3592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3593 * @param cbInstr The number of bytes to offset rIP by in the return
3594 * address.
3595 * @param u8Vector The interrupt / exception vector number.
3596 * @param fFlags The flags.
3597 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3598 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3599 */
3600VBOXSTRICTRC
3601iemRaiseXcptOrInt(PVMCPUCC pVCpu,
3602 uint8_t cbInstr,
3603 uint8_t u8Vector,
3604 uint32_t fFlags,
3605 uint16_t uErr,
3606 uint64_t uCr2) RT_NOEXCEPT
3607{
3608 /*
3609 * Get all the state that we might need here.
3610 */
3611 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3612 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3613
3614#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
3615 /*
3616 * Flush prefetch buffer
3617 */
3618 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
3619#endif
3620
3621 /*
3622 * Perform the V8086 IOPL check and upgrade the fault without nesting.
3623 */
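    /* (Without VME redirection, a software INT executed in V8086 mode with IOPL < 3
       takes a #GP(0) rather than being dispatched through the IDT; that upgrade is
       what the conversion below implements.) */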
3624 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
3625 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
3626 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3627 | IEM_XCPT_FLAGS_BP_INSTR
3628 | IEM_XCPT_FLAGS_ICEBP_INSTR
3629 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3630 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
3631 {
3632 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
3633 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3634 u8Vector = X86_XCPT_GP;
3635 uErr = 0;
3636 }
3637#ifdef DBGFTRACE_ENABLED
3638 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
3639 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
3640 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
3641#endif
3642
3643 /*
3644 * Evaluate whether NMI blocking should be in effect.
3645 * Normally, NMI blocking is in effect whenever we inject an NMI.
3646 */
3647 bool fBlockNmi;
3648 if ( u8Vector == X86_XCPT_NMI
3649 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3650 fBlockNmi = true;
3651 else
3652 fBlockNmi = false;
3653
3654#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3655 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3656 {
3657 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
3658 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3659 return rcStrict0;
3660
3661 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
3662 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
3663 {
3664 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
3665 fBlockNmi = false;
3666 }
3667 }
3668#endif
3669
3670#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
3671 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
3672 {
3673 /*
3674 * If the event is being injected as part of VMRUN, it isn't subject to event
3675 * intercepts in the nested-guest. However, secondary exceptions that occur
3676 * during injection of any event -are- subject to exception intercepts.
3677 *
3678 * See AMD spec. 15.20 "Event Injection".
3679 */
3680 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
3681 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
3682 else
3683 {
3684 /*
3685 * Check and handle if the event being raised is intercepted.
3686 */
3687 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
3688 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
3689 return rcStrict0;
3690 }
3691 }
3692#endif
3693
3694 /*
3695 * Set NMI blocking if necessary.
3696 */
3697 if ( fBlockNmi
3698 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
3699 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
3700
3701 /*
3702 * Do recursion accounting.
3703 */
3704 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
3705 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
3706 if (pVCpu->iem.s.cXcptRecursions == 0)
3707 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
3708 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
3709 else
3710 {
3711 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
3712 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
3713 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
3714
3715 if (pVCpu->iem.s.cXcptRecursions >= 4)
3716 {
3717#ifdef DEBUG_bird
3718 AssertFailed();
3719#endif
3720 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
3721 }
3722
3723 /*
3724 * Evaluate the sequence of recurring events.
3725 */
3726 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
3727 NULL /* pXcptRaiseInfo */);
3728 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
3729 { /* likely */ }
3730 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
3731 {
3732 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
3733 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
3734 u8Vector = X86_XCPT_DF;
3735 uErr = 0;
3736#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
3737 /* VMX nested-guest #DF intercept needs to be checked here. */
3738 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3739 {
3740 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
3741 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
3742 return rcStrict0;
3743 }
3744#endif
3745 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
3746 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
3747 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3748 }
3749 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
3750 {
3751 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
3752 return iemInitiateCpuShutdown(pVCpu);
3753 }
3754 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
3755 {
3756 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
3757 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
3758 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
3759 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
3760 return VERR_EM_GUEST_CPU_HANG;
3761 }
3762 else
3763 {
3764 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
3765 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
3766 return VERR_IEM_IPE_9;
3767 }
3768
3769 /*
3770         * The 'EXT' bit is set when an exception occurs during delivery of an external
3771         * event (such as an interrupt or an earlier exception)[1]. The privileged
3772         * software exception (INT1) also sets the EXT bit[2]. For exceptions generated
3773         * by software interrupts and the INTO and INT3 instructions, the 'EXT' bit
3774         * will not be set.
3775 * [1] - Intel spec. 6.13 "Error Code"
3776 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
3777 * [3] - Intel Instruction reference for INT n.
3778 */
3779 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
3780 && (fFlags & IEM_XCPT_FLAGS_ERR)
3781 && u8Vector != X86_XCPT_PF
3782 && u8Vector != X86_XCPT_DF)
3783 {
3784 uErr |= X86_TRAP_ERR_EXTERNAL;
3785 }
3786 }
3787
3788 pVCpu->iem.s.cXcptRecursions++;
3789 pVCpu->iem.s.uCurXcpt = u8Vector;
3790 pVCpu->iem.s.fCurXcpt = fFlags;
3791 pVCpu->iem.s.uCurXcptErr = uErr;
3792 pVCpu->iem.s.uCurXcptCr2 = uCr2;
3793
3794 /*
3795 * Extensive logging.
3796 */
3797#if defined(LOG_ENABLED) && defined(IN_RING3)
3798 if (LogIs3Enabled())
3799 {
3800 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
3801 PVM pVM = pVCpu->CTX_SUFF(pVM);
3802 char szRegs[4096];
3803 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
3804 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
3805 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
3806 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
3807 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
3808 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
3809 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
3810 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
3811 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
3812 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
3813 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
3814 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
3815 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
3816 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
3817 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
3818 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
3819 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
3820 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
3821 " efer=%016VR{efer}\n"
3822 " pat=%016VR{pat}\n"
3823 " sf_mask=%016VR{sf_mask}\n"
3824 "krnl_gs_base=%016VR{krnl_gs_base}\n"
3825 " lstar=%016VR{lstar}\n"
3826 " star=%016VR{star} cstar=%016VR{cstar}\n"
3827 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
3828 );
3829
3830 char szInstr[256];
3831 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
3832 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
3833 szInstr, sizeof(szInstr), NULL);
3834 Log3(("%s%s\n", szRegs, szInstr));
3835 }
3836#endif /* LOG_ENABLED */
3837
3838 /*
3839 * Stats.
3840 */
3841 if (!(fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
3842 STAM_REL_STATS({ pVCpu->iem.s.aStatInts[u8Vector] += 1; });
3843 else if (u8Vector <= X86_XCPT_LAST)
3844 {
3845 STAM_REL_COUNTER_INC(&pVCpu->iem.s.aStatXcpts[u8Vector]);
3846 EMHistoryAddExit(pVCpu, EMEXIT_MAKE_FT(EMEXIT_F_KIND_XCPT, u8Vector),
3847 pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base, ASMReadTSC());
3848 }
3849
3850 /*
3851     * A #PF implies an INVLPG for the CR2 value (see 4.10.1.1 in Intel SDM Vol 3)
3852 * to ensure that a stale TLB or paging cache entry will only cause one
3853 * spurious #PF.
3854 */
3855 if ( u8Vector == X86_XCPT_PF
3856 && (fFlags & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2)) == (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_CR2))
3857 IEMTlbInvalidatePage(pVCpu, uCr2);
3858
3859 /*
3860 * Call the mode specific worker function.
3861 */
3862 VBOXSTRICTRC rcStrict;
3863 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
3864 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3865 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
3866 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3867 else
3868 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3869
3870 /* Flush the prefetch buffer. */
3871#ifdef IEM_WITH_CODE_TLB
3872 pVCpu->iem.s.pbInstrBuf = NULL;
3873#else
3874 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
3875#endif
3876
3877 /*
3878 * Unwind.
3879 */
3880 pVCpu->iem.s.cXcptRecursions--;
3881 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
3882 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
3883 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
3884 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
3885 pVCpu->iem.s.cXcptRecursions + 1));
3886 return rcStrict;
3887}
3888
3889#ifdef IEM_WITH_SETJMP
3890/**
3891 * See iemRaiseXcptOrInt. Will not return.
3892 */
3893DECL_NO_RETURN(void)
3894iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
3895 uint8_t cbInstr,
3896 uint8_t u8Vector,
3897 uint32_t fFlags,
3898 uint16_t uErr,
3899 uint64_t uCr2) RT_NOEXCEPT
3900{
3901 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
3902 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3903}
3904#endif
3905
3906
3907/** \#DE - 00. */
3908VBOXSTRICTRC iemRaiseDivideError(PVMCPUCC pVCpu) RT_NOEXCEPT
3909{
3910 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3911}
3912
3913
3914/** \#DB - 01.
3915 * @note This automatically clears DR7.GD. */
3916VBOXSTRICTRC iemRaiseDebugException(PVMCPUCC pVCpu) RT_NOEXCEPT
3917{
3918 /** @todo set/clear RF. */
3919 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3920 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3921}
3922
3923
3924/** \#BR - 05. */
3925VBOXSTRICTRC iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu) RT_NOEXCEPT
3926{
3927 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3928}
3929
3930
3931/** \#UD - 06. */
3932VBOXSTRICTRC iemRaiseUndefinedOpcode(PVMCPUCC pVCpu) RT_NOEXCEPT
3933{
3934 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3935}
3936
3937
3938/** \#NM - 07. */
3939VBOXSTRICTRC iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu) RT_NOEXCEPT
3940{
3941 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
3942}
3943
3944
3945/** \#TS(err) - 0a. */
3946VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3947{
3948 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3949}
3950
3951
3952/** \#TS(tr) - 0a. */
3953VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu) RT_NOEXCEPT
3954{
3955 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3956 pVCpu->cpum.GstCtx.tr.Sel, 0);
3957}
3958
3959
3960/** \#TS(0) - 0a. */
3961VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
3962{
3963 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3964 0, 0);
3965}
3966
3967
3968/** \#TS(err) - 0a. */
3969VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3970{
3971 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3972 uSel & X86_SEL_MASK_OFF_RPL, 0);
3973}
3974
3975
3976/** \#NP(err) - 0b. */
3977VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
3978{
3979 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
3980}
3981
3982
3983/** \#NP(sel) - 0b. */
3984VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3985{
3986 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3987 uSel & ~X86_SEL_RPL, 0);
3988}
3989
3990
3991/** \#SS(seg) - 0c. */
3992VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
3993{
3994 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
3995 uSel & ~X86_SEL_RPL, 0);
3996}
3997
3998
3999/** \#SS(err) - 0c. */
4000VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4001{
4002 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4003}
4004
4005
4006/** \#GP(n) - 0d. */
4007VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr) RT_NOEXCEPT
4008{
4009 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
4010}
4011
4012
4013/** \#GP(0) - 0d. */
4014VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu) RT_NOEXCEPT
4015{
4016 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4017}
4018
4019#ifdef IEM_WITH_SETJMP
4020/** \#GP(0) - 0d. */
4021DECL_NO_RETURN(void) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4022{
4023 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4024}
4025#endif
4026
4027
4028/** \#GP(sel) - 0d. */
4029VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4030{
4031 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
4032 Sel & ~X86_SEL_RPL, 0);
4033}
4034
4035
4036/** \#GP(0) - 0d. */
4037VBOXSTRICTRC iemRaiseNotCanonical(PVMCPUCC pVCpu) RT_NOEXCEPT
4038{
4039 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4040}
4041
4042
4043/** \#GP(sel) - 0d. */
4044VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4045{
4046 NOREF(iSegReg); NOREF(fAccess);
4047 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4048 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4049}
4050
4051#ifdef IEM_WITH_SETJMP
4052/** \#GP(sel) - 0d, longjmp. */
4053DECL_NO_RETURN(void) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4054{
4055 NOREF(iSegReg); NOREF(fAccess);
4056 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
4057 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4058}
4059#endif
4060
4061/** \#GP(sel) - 0d. */
4062VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4063{
4064 NOREF(Sel);
4065 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4066}
4067
4068#ifdef IEM_WITH_SETJMP
4069/** \#GP(sel) - 0d, longjmp. */
4070DECL_NO_RETURN(void) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel) RT_NOEXCEPT
4071{
4072 NOREF(Sel);
4073 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4074}
4075#endif
4076
4077
4078/** \#GP(sel) - 0d. */
4079VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4080{
4081 NOREF(iSegReg); NOREF(fAccess);
4082 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4083}
4084
4085#ifdef IEM_WITH_SETJMP
4086/** \#GP(sel) - 0d, longjmp. */
4087DECL_NO_RETURN(void) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess) RT_NOEXCEPT
4088{
4089 NOREF(iSegReg); NOREF(fAccess);
4090 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4091}
4092#endif
4093
4094
4095/** \#PF(n) - 0e. */
4096VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4097{
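    /* Translate the paging status code into the architectural #PF error code bits:
       P (bit 0) = set for protection violations, clear for not-present pages;
       W/R (bit 1) = write access; U/S (bit 2) = CPL 3 access;
       I/D (bit 4) = instruction fetch (only when NX/PAE paging is in effect). */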
4098 uint16_t uErr;
4099 switch (rc)
4100 {
4101 case VERR_PAGE_NOT_PRESENT:
4102 case VERR_PAGE_TABLE_NOT_PRESENT:
4103 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
4104 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
4105 uErr = 0;
4106 break;
4107
4108 default:
4109 AssertMsgFailed(("%Rrc\n", rc));
4110 RT_FALL_THRU();
4111 case VERR_ACCESS_DENIED:
4112 uErr = X86_TRAP_PF_P;
4113 break;
4114
4115 /** @todo reserved */
4116 }
4117
4118 if (pVCpu->iem.s.uCpl == 3)
4119 uErr |= X86_TRAP_PF_US;
4120
4121 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
4122 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
4123 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
4124 uErr |= X86_TRAP_PF_ID;
4125
4126#if 0 /* This is so much non-sense, really. Why was it done like that? */
4127    /* Note! RW access callers reporting a WRITE protection fault will clear
4128 the READ flag before calling. So, read-modify-write accesses (RW)
4129 can safely be reported as READ faults. */
4130 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
4131 uErr |= X86_TRAP_PF_RW;
4132#else
4133 if (fAccess & IEM_ACCESS_TYPE_WRITE)
4134 {
4135 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
4136 /// (regardless of outcome of the comparison in the latter case).
4137 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
4138 uErr |= X86_TRAP_PF_RW;
4139 }
4140#endif
4141
4142 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
4143 uErr, GCPtrWhere);
4144}
4145
4146#ifdef IEM_WITH_SETJMP
4147/** \#PF(n) - 0e, longjmp. */
4148DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc) RT_NOEXCEPT
4149{
4150 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
4151}
4152#endif
4153
4154
4155/** \#MF(0) - 10. */
4156VBOXSTRICTRC iemRaiseMathFault(PVMCPUCC pVCpu) RT_NOEXCEPT
4157{
4158 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4159}
4160
4161
4162/** \#AC(0) - 11. */
4163VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu) RT_NOEXCEPT
4164{
4165 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
4166}
4167
4168#ifdef IEM_WITH_SETJMP
4169/** \#AC(0) - 11, longjmp. */
4170DECL_NO_RETURN(void) iemRaiseAlignmentCheckExceptionJmp(PVMCPUCC pVCpu) RT_NOEXCEPT
4171{
4172 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaiseAlignmentCheckException(pVCpu)));
4173}
4174#endif
4175
4176
4177/** \#XF(0)/\#XM(0) - 19. */
4178VBOXSTRICTRC iemRaiseSimdFpException(PVMCPUCC pVCpu) RT_NOEXCEPT
4179{
4180 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_XF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4181}
4182
4183
4184/** Accessed via IEMOP_RAISE_DIVIDE_ERROR. */
4185IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
4186{
4187 NOREF(cbInstr);
4188 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4189}
4190
4191
4192/** Accessed via IEMOP_RAISE_INVALID_LOCK_PREFIX. */
4193IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
4194{
4195 NOREF(cbInstr);
4196 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4197}
4198
4199
4200/** Accessed via IEMOP_RAISE_INVALID_OPCODE. */
4201IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
4202{
4203 NOREF(cbInstr);
4204 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
4205}
4206
4207
4208/** @} */
4209
4210/** @name Common opcode decoders.
4211 * @{
4212 */
4213//#include <iprt/mem.h>
4214
4215/**
4216 * Used to add extra details about a stub case.
4217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4218 */
4219void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT
4220{
4221#if defined(LOG_ENABLED) && defined(IN_RING3)
4222 PVM pVM = pVCpu->CTX_SUFF(pVM);
4223 char szRegs[4096];
4224 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
4225 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
4226 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
4227 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
4228 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
4229 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
4230 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
4231 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
4232 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
4233 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
4234 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
4235 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
4236 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
4237 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
4238 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
4239 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
4240 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
4241 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
4242 " efer=%016VR{efer}\n"
4243 " pat=%016VR{pat}\n"
4244 " sf_mask=%016VR{sf_mask}\n"
4245 "krnl_gs_base=%016VR{krnl_gs_base}\n"
4246 " lstar=%016VR{lstar}\n"
4247 " star=%016VR{star} cstar=%016VR{cstar}\n"
4248 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
4249 );
4250
4251 char szInstr[256];
4252 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
4253 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
4254 szInstr, sizeof(szInstr), NULL);
4255
4256 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
4257#else
4258    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
4259#endif
4260}
4261
4262/** @} */
4263
4264
4265
4266/** @name Register Access.
4267 * @{
4268 */
4269
4270/**
4271 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
4272 *
4273 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4274 * segment limit.
4275 *
4276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4277 * @param offNextInstr The offset of the next instruction.
4278 */
4279VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr) RT_NOEXCEPT
4280{
4281 switch (pVCpu->iem.s.enmEffOpSize)
4282 {
4283 case IEMMODE_16BIT:
4284 {
4285 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4286 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4287 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4288 return iemRaiseGeneralProtectionFault0(pVCpu);
4289 pVCpu->cpum.GstCtx.rip = uNewIp;
4290 break;
4291 }
4292
4293 case IEMMODE_32BIT:
4294 {
4295 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4296 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4297
4298 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4299 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4300 return iemRaiseGeneralProtectionFault0(pVCpu);
4301 pVCpu->cpum.GstCtx.rip = uNewEip;
4302 break;
4303 }
4304
4305 case IEMMODE_64BIT:
4306 {
4307 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4308
4309 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4310 if (!IEM_IS_CANONICAL(uNewRip))
4311 return iemRaiseGeneralProtectionFault0(pVCpu);
4312 pVCpu->cpum.GstCtx.rip = uNewRip;
4313 break;
4314 }
4315
4316 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4317 }
4318
4319 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4320
4321#ifndef IEM_WITH_CODE_TLB
4322 /* Flush the prefetch buffer. */
4323 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4324#endif
4325
4326 return VINF_SUCCESS;
4327}
4328
4329
4330/**
4331 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
4332 *
4333 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4334 * segment limit.
4335 *
4336 * @returns Strict VBox status code.
4337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4338 * @param offNextInstr The offset of the next instruction.
4339 */
4340VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr) RT_NOEXCEPT
4341{
4342 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
4343
4344 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4345 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
4346 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4347 return iemRaiseGeneralProtectionFault0(pVCpu);
4348 /** @todo Test 16-bit jump in 64-bit mode. possible? */
4349 pVCpu->cpum.GstCtx.rip = uNewIp;
4350 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4351
4352#ifndef IEM_WITH_CODE_TLB
4353 /* Flush the prefetch buffer. */
4354 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4355#endif
4356
4357 return VINF_SUCCESS;
4358}
4359
4360
4361/**
4362 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
4363 *
4364 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4365 * segment limit.
4366 *
4367 * @returns Strict VBox status code.
4368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4369 * @param offNextInstr The offset of the next instruction.
4370 */
4371VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr) RT_NOEXCEPT
4372{
4373 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
4374
4375 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
4376 {
4377 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4378
4379 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4380 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
4381 return iemRaiseGeneralProtectionFault0(pVCpu);
4382 pVCpu->cpum.GstCtx.rip = uNewEip;
4383 }
4384 else
4385 {
4386 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4387
4388 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
4389 if (!IEM_IS_CANONICAL(uNewRip))
4390 return iemRaiseGeneralProtectionFault0(pVCpu);
4391 pVCpu->cpum.GstCtx.rip = uNewRip;
4392 }
4393 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4394
4395#ifndef IEM_WITH_CODE_TLB
4396 /* Flush the prefetch buffer. */
4397 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4398#endif
4399
4400 return VINF_SUCCESS;
4401}
4402
4403
4404/**
4405 * Performs a near jump to the specified address.
4406 *
4407 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
4408 * segment limit.
4409 *
4410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4411 * @param uNewRip The new RIP value.
4412 */
4413VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip) RT_NOEXCEPT
4414{
4415 switch (pVCpu->iem.s.enmEffOpSize)
4416 {
4417 case IEMMODE_16BIT:
4418 {
4419 Assert(uNewRip <= UINT16_MAX);
4420 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
4421 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
4422 return iemRaiseGeneralProtectionFault0(pVCpu);
4423 /** @todo Test 16-bit jump in 64-bit mode. */
4424 pVCpu->cpum.GstCtx.rip = uNewRip;
4425 break;
4426 }
4427
4428 case IEMMODE_32BIT:
4429 {
4430 Assert(uNewRip <= UINT32_MAX);
4431 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
4432 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4433
4434 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
4435 return iemRaiseGeneralProtectionFault0(pVCpu);
4436 pVCpu->cpum.GstCtx.rip = uNewRip;
4437 break;
4438 }
4439
4440 case IEMMODE_64BIT:
4441 {
4442 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
4443
4444 if (!IEM_IS_CANONICAL(uNewRip))
4445 return iemRaiseGeneralProtectionFault0(pVCpu);
4446 pVCpu->cpum.GstCtx.rip = uNewRip;
4447 break;
4448 }
4449
4450 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4451 }
4452
4453 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
4454
4455#ifndef IEM_WITH_CODE_TLB
4456 /* Flush the prefetch buffer. */
4457 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
4458#endif
4459
4460 return VINF_SUCCESS;
4461}
4462
4463/** @} */
4464
4465
4466/** @name FPU access and helpers.
4467 *
4468 * @{
4469 */
4470
4471/**
4472 * Updates the x87.DS and FPUDP registers.
4473 *
4474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4475 * @param pFpuCtx The FPU context.
4476 * @param iEffSeg The effective segment register.
4477 * @param GCPtrEff The effective address relative to @a iEffSeg.
4478 */
4479DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
4480{
4481 RTSEL sel;
4482 switch (iEffSeg)
4483 {
4484 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
4485 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
4486 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
4487 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
4488 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
4489 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
4490 default:
4491 AssertMsgFailed(("%d\n", iEffSeg));
4492 sel = pVCpu->cpum.GstCtx.ds.Sel;
4493 }
4494 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
4495 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
4496 {
4497 pFpuCtx->DS = 0;
4498 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
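        /* I.e. the real-mode linear address: selector * 16 + offset, e.g. sel=0x0123 and GCPtrEff=0x0045 gives FPUDP=0x1275. */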
4499 }
4500 else if (!IEM_IS_LONG_MODE(pVCpu))
4501 {
4502 pFpuCtx->DS = sel;
4503 pFpuCtx->FPUDP = GCPtrEff;
4504 }
4505 else
4506 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
4507}
4508
4509
4510/**
4511 * Rotates the stack registers in the push direction.
4512 *
4513 * @param pFpuCtx The FPU context.
4514 * @remarks This is a complete waste of time, but fxsave stores the registers in
4515 * stack order.
4516 */
4517DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
4518{
4519 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
4520 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
4521 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
4522 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
4523 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
4524 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
4525 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
4526 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
4527 pFpuCtx->aRegs[0].r80 = r80Tmp;
4528}
4529
4530
4531/**
4532 * Rotates the stack registers in the pop direction.
4533 *
4534 * @param pFpuCtx The FPU context.
4535 * @remarks This is a complete waste of time, but fxsave stores the registers in
4536 * stack order.
4537 */
4538DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
4539{
4540 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
4541 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
4542 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
4543 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
4544 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
4545 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
4546 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
4547 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
4548 pFpuCtx->aRegs[7].r80 = r80Tmp;
4549}
4550
4551
4552/**
4553 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
4554 * exception prevents it.
4555 *
4556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4557 * @param pResult The FPU operation result to push.
4558 * @param pFpuCtx The FPU context.
4559 */
4560static void iemFpuMaybePushResult(PVMCPU pVCpu, PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4561{
4562 /* Update FSW and bail if there are pending exceptions afterwards. */
4563 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4564 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4565 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4566 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4567 {
4568 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4569 Log11(("iemFpuMaybePushResult: %04x:%08RX64: FSW %#x -> %#x\n",
4570 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4571 pFpuCtx->FSW = fFsw;
4572 return;
4573 }
4574
4575 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
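    /* Adding 7 modulo 8 decrements TOP, i.e. iNewTop is the slot the push will occupy (TOP=0 wraps to 7). */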
4576 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4577 {
4578 /* All is fine, push the actual value. */
4579 pFpuCtx->FTW |= RT_BIT(iNewTop);
4580 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
4581 }
4582 else if (pFpuCtx->FCW & X86_FCW_IM)
4583 {
4584 /* Masked stack overflow, push QNaN. */
4585 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4586 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4587 }
4588 else
4589 {
4590 /* Raise stack overflow, don't push anything. */
4591 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4592 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4593 Log11(("iemFpuMaybePushResult: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4594 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4595 return;
4596 }
4597
4598 fFsw &= ~X86_FSW_TOP_MASK;
4599 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4600 pFpuCtx->FSW = fFsw;
4601
4602 iemFpuRotateStackPush(pFpuCtx);
4603 RT_NOREF(pVCpu);
4604}
4605
4606
4607/**
4608 * Stores a result in a FPU register and updates the FSW and FTW.
4609 *
4610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4611 * @param pFpuCtx The FPU context.
4612 * @param pResult The result to store.
4613 * @param iStReg Which FPU register to store it in.
4614 */
4615static void iemFpuStoreResultOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4616{
4617 Assert(iStReg < 8);
4618 uint16_t fNewFsw = pFpuCtx->FSW;
4619 uint16_t const iReg = (X86_FSW_TOP_GET(fNewFsw) + iStReg) & X86_FSW_TOP_SMASK;
4620 fNewFsw &= ~X86_FSW_C_MASK;
4621 fNewFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4622 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4623 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4624 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4625 pFpuCtx->FSW = fNewFsw;
4626 pFpuCtx->FTW |= RT_BIT(iReg);
4627 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
4628 RT_NOREF(pVCpu);
4629}
4630
4631
4632/**
4633 * Only updates the FPU status word (FSW) with the result of the current
4634 * instruction.
4635 *
4636 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4637 * @param pFpuCtx The FPU context.
4638 * @param u16FSW The FSW output of the current instruction.
4639 */
4640static void iemFpuUpdateFSWOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint16_t u16FSW) RT_NOEXCEPT
4641{
4642 uint16_t fNewFsw = pFpuCtx->FSW;
4643 fNewFsw &= ~X86_FSW_C_MASK;
4644 fNewFsw |= u16FSW & ~X86_FSW_TOP_MASK;
4645 if ((fNewFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4646 Log11(("iemFpuStoreResultOnly: %04x:%08RX64: FSW %#x -> %#x\n",
4647 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fNewFsw));
4648 pFpuCtx->FSW = fNewFsw;
4649 RT_NOREF(pVCpu);
4650}
4651
4652
4653/**
4654 * Pops one item off the FPU stack if no pending exception prevents it.
4655 *
4656 * @param pFpuCtx The FPU context.
4657 */
4658static void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx) RT_NOEXCEPT
4659{
4660 /* Check pending exceptions. */
4661 uint16_t uFSW = pFpuCtx->FSW;
4662 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4663 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4664 return;
4665
4666 /* TOP++ - a pop removes ST(0) by incrementing TOP. */
4667 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
4668 uFSW &= ~X86_FSW_TOP_MASK;
4669 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
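    /* Adding 9 in the 3-bit TOP field and masking is TOP+1 modulo 8, e.g. TOP=7 wraps back to 0. */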
4670 pFpuCtx->FSW = uFSW;
4671
4672 /* Mark the previous ST0 as empty. */
4673 iOldTop >>= X86_FSW_TOP_SHIFT;
4674 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
4675
4676 /* Rotate the registers. */
4677 iemFpuRotateStackPop(pFpuCtx);
4678}
4679
4680
4681/**
4682 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
4683 *
4684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4685 * @param pResult The FPU operation result to push.
4686 */
4687void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult) RT_NOEXCEPT
4688{
4689 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4690 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4691 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4692}
4693
4694
4695/**
4696 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
4697 * and sets FPUDP and FPUDS.
4698 *
4699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4700 * @param pResult The FPU operation result to push.
4701 * @param iEffSeg The effective segment register.
4702 * @param GCPtrEff The effective address relative to @a iEffSeg.
4703 */
4704void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4705{
4706 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4707 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4708 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4709 iemFpuMaybePushResult(pVCpu, pResult, pFpuCtx);
4710}
4711
4712
4713/**
4714 * Replace ST0 with the first value and push the second onto the FPU stack,
4715 * unless a pending exception prevents it.
4716 *
4717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4718 * @param pResult The FPU operation result to store and push.
4719 */
4720void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult) RT_NOEXCEPT
4721{
4722 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4723 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4724
4725 /* Update FSW and bail if there are pending exceptions afterwards. */
4726 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
4727 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
4728 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
4729 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
4730 {
4731 if ((fFsw & X86_FSW_ES) && !(pFpuCtx->FSW & X86_FSW_ES))
4732 Log11(("iemFpuPushResultTwo: %04x:%08RX64: FSW %#x -> %#x\n",
4733 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW, fFsw));
4734 pFpuCtx->FSW = fFsw;
4735 return;
4736 }
4737
4738 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
4739 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
4740 {
4741 /* All is fine, push the actual value. */
4742 pFpuCtx->FTW |= RT_BIT(iNewTop);
4743 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
4744 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
4745 }
4746 else if (pFpuCtx->FCW & X86_FCW_IM)
4747 {
4748 /* Masked stack overflow, push QNaN. */
4749 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
4750 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
4751 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
4752 }
4753 else
4754 {
4755 /* Raise stack overflow, don't push anything. */
4756 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
4757 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
4758 Log11(("iemFpuPushResultTwo: %04x:%08RX64: stack overflow (FSW=%#x)\n",
4759 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4760 return;
4761 }
4762
4763 fFsw &= ~X86_FSW_TOP_MASK;
4764 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
4765 pFpuCtx->FSW = fFsw;
4766
4767 iemFpuRotateStackPush(pFpuCtx);
4768}
4769
4770
4771/**
4772 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4773 * FOP.
4774 *
4775 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4776 * @param pResult The result to store.
4777 * @param iStReg Which FPU register to store it in.
4778 */
4779void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4780{
4781 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4782 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4783 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4784}
4785
4786
4787/**
4788 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
4789 * FOP, and then pops the stack.
4790 *
4791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4792 * @param pResult The result to store.
4793 * @param iStReg Which FPU register to store it in.
4794 */
4795void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg) RT_NOEXCEPT
4796{
4797 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4798 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4799 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4800 iemFpuMaybePopOne(pFpuCtx);
4801}
4802
4803
4804/**
4805 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4806 * FPUDP, and FPUDS.
4807 *
4808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4809 * @param pResult The result to store.
4810 * @param iStReg Which FPU register to store it in.
4811 * @param iEffSeg The effective memory operand selector register.
4812 * @param GCPtrEff The effective memory operand offset.
4813 */
4814void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
4815 uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4816{
4817 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4818 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4819 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4820 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4821}
4822
4823
4824/**
4825 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
4826 * FPUDP, and FPUDS, and then pops the stack.
4827 *
4828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4829 * @param pResult The result to store.
4830 * @param iStReg Which FPU register to store it in.
4831 * @param iEffSeg The effective memory operand selector register.
4832 * @param GCPtrEff The effective memory operand offset.
4833 */
4834void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
4835 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4836{
4837 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4838 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4839 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4840 iemFpuStoreResultOnly(pVCpu, pFpuCtx, pResult, iStReg);
4841 iemFpuMaybePopOne(pFpuCtx);
4842}
4843
4844
4845/**
4846 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
4847 *
4848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4849 */
4850void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu) RT_NOEXCEPT
4851{
4852 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4853 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4854}
4855
4856
4857/**
4858 * Updates the FSW, FOP, FPUIP, and FPUCS.
4859 *
4860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4861 * @param u16FSW The FSW from the current instruction.
4862 */
4863void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4864{
4865 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4866 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4867 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4868}
4869
4870
4871/**
4872 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
4873 *
4874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4875 * @param u16FSW The FSW from the current instruction.
4876 */
4877void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4878{
4879 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4880 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4881 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4882 iemFpuMaybePopOne(pFpuCtx);
4883}
4884
4885
4886/**
4887 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
4888 *
4889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4890 * @param u16FSW The FSW from the current instruction.
4891 * @param iEffSeg The effective memory operand selector register.
4892 * @param GCPtrEff The effective memory operand offset.
4893 */
4894void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4895{
4896 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4897 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4898 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4899 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4900}
4901
4902
4903/**
4904 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
4905 *
4906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4907 * @param u16FSW The FSW from the current instruction.
4908 */
4909void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW) RT_NOEXCEPT
4910{
4911 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4912 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4913 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4914 iemFpuMaybePopOne(pFpuCtx);
4915 iemFpuMaybePopOne(pFpuCtx);
4916}
4917
4918
4919/**
4920 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
4921 *
4922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4923 * @param u16FSW The FSW from the current instruction.
4924 * @param iEffSeg The effective memory operand selector register.
4925 * @param GCPtrEff The effective memory operand offset.
4926 */
4927void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4928{
4929 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4930 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4931 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4932 iemFpuUpdateFSWOnly(pVCpu, pFpuCtx, u16FSW);
4933 iemFpuMaybePopOne(pFpuCtx);
4934}
4935
4936
4937/**
4938 * Worker routine for raising an FPU stack underflow exception.
4939 *
4940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4941 * @param pFpuCtx The FPU context.
4942 * @param iStReg The stack register being accessed.
4943 */
4944static void iemFpuStackUnderflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iStReg)
4945{
4946 Assert(iStReg < 8 || iStReg == UINT8_MAX);
4947 if (pFpuCtx->FCW & X86_FCW_IM)
4948 {
4949 /* Masked underflow. */
4950 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4951 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
4952 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
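        /* iReg is the absolute x87 register index backing ST(iStReg), i.e. (TOP + iStReg) modulo 8. */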
4953 if (iStReg != UINT8_MAX)
4954 {
4955 pFpuCtx->FTW |= RT_BIT(iReg);
4956 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
4957 }
4958 }
4959 else
4960 {
4961 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
4962 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4963 Log11(("iemFpuStackUnderflowOnly: %04x:%08RX64: underflow (FSW=%#x)\n",
4964 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
4965 }
4966 RT_NOREF(pVCpu);
4967}
4968
4969
4970/**
4971 * Raises a FPU stack underflow exception.
4972 *
4973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4974 * @param iStReg The destination register that should be loaded
4975 * with QNaN if \#IS is not masked. Specify
4976 * UINT8_MAX if none (like for fcom).
4977 */
4978void iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4979{
4980 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4981 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4982 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
4983}
4984
4985
4986void iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
4987{
4988 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4989 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
4990 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4991 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
4992}
4993
4994
4995void iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg) RT_NOEXCEPT
4996{
4997 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
4998 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
4999 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5000 iemFpuMaybePopOne(pFpuCtx);
5001}
5002
5003
5004void iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5005{
5006 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5007 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5008 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5009 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, iStReg);
5010 iemFpuMaybePopOne(pFpuCtx);
5011}
5012
5013
5014void iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu) RT_NOEXCEPT
5015{
5016 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5017 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5018 iemFpuStackUnderflowOnly(pVCpu, pFpuCtx, UINT8_MAX);
5019 iemFpuMaybePopOne(pFpuCtx);
5020 iemFpuMaybePopOne(pFpuCtx);
5021}
5022
5023
5024void iemFpuStackPushUnderflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5025{
5026 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5027 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5028
5029 if (pFpuCtx->FCW & X86_FCW_IM)
5030 {
5031 /* Masked underflow - Push QNaN. */
5032 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5033 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5034 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5035 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5036 pFpuCtx->FTW |= RT_BIT(iNewTop);
5037 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5038 iemFpuRotateStackPush(pFpuCtx);
5039 }
5040 else
5041 {
5042 /* Exception pending - don't change TOP or the register stack. */
5043 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5044 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5045 Log11(("iemFpuStackPushUnderflow: %04x:%08RX64: underflow (FSW=%#x)\n",
5046 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5047 }
5048}
5049
5050
5051void iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu) RT_NOEXCEPT
5052{
5053 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5054 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5055
5056 if (pFpuCtx->FCW & X86_FCW_IM)
5057 {
5058 /* Masked underflow - Push QNaN. */
5059 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5060 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5061 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
5062 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5063 pFpuCtx->FTW |= RT_BIT(iNewTop);
5064 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
5065 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5066 iemFpuRotateStackPush(pFpuCtx);
5067 }
5068 else
5069 {
5070 /* Exception pending - don't change TOP or the register stack. */
5071 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5072 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5073 Log11(("iemFpuStackPushUnderflowTwo: %04x:%08RX64: underflow (FSW=%#x)\n",
5074 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5075 }
5076}
5077
5078
5079/**
5080 * Worker routine for raising an FPU stack overflow exception on a push.
5081 *
5082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5083 * @param pFpuCtx The FPU context.
5084 */
5085static void iemFpuStackPushOverflowOnly(PVMCPU pVCpu, PX86FXSTATE pFpuCtx) RT_NOEXCEPT
5086{
5087 if (pFpuCtx->FCW & X86_FCW_IM)
5088 {
5089 /* Masked overflow. */
5090 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
5091 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
5092 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
5093 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
5094 pFpuCtx->FTW |= RT_BIT(iNewTop);
5095 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
5096 iemFpuRotateStackPush(pFpuCtx);
5097 }
5098 else
5099 {
5100 /* Exception pending - don't change TOP or the register stack. */
5101 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
5102 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
5103 Log11(("iemFpuStackPushOverflowOnly: %04x:%08RX64: overflow (FSW=%#x)\n",
5104 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pFpuCtx->FSW));
5105 }
5106 RT_NOREF(pVCpu);
5107}
5108
5109
5110/**
5111 * Raises a FPU stack overflow exception on a push.
5112 *
5113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5114 */
5115void iemFpuStackPushOverflow(PVMCPUCC pVCpu) RT_NOEXCEPT
5116{
5117 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5118 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5119 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5120}
5121
5122
5123/**
5124 * Raises a FPU stack overflow exception on a push with a memory operand.
5125 *
5126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5127 * @param iEffSeg The effective memory operand selector register.
5128 * @param GCPtrEff The effective memory operand offset.
5129 */
5130void iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff) RT_NOEXCEPT
5131{
5132 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5133 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
5134 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
5135 iemFpuStackPushOverflowOnly(pVCpu, pFpuCtx);
5136}
5137
5138/** @} */
5139
5140
5141/** @name SSE+AVX SIMD access and helpers.
5142 *
5143 * @{
5144 */
5145/**
5146 * Stores a result in a SIMD XMM register, updates the MXCSR.
5147 *
5148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5149 * @param pResult The result to store.
5150 * @param iXmmReg Which SIMD XMM register to store the result in.
5151 */
5152void iemSseStoreResult(PVMCPUCC pVCpu, PCIEMSSERESULT pResult, uint8_t iXmmReg) RT_NOEXCEPT
5153{
5154 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
5155 pFpuCtx->MXCSR |= pResult->MXCSR & X86_MXCSR_XCPT_FLAGS;
5156 pVCpu->cpum.GstCtx.XState.x87.aXMM[iXmmReg] = pResult->uResult;
5157}
5158
5159/** @} */
5160
5161
5162/** @name Memory access.
5163 *
5164 * @{
5165 */
5166
5167
5168/**
5169 * Updates the IEMCPU::cbWritten counter if applicable.
5170 *
5171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5172 * @param fAccess The access being accounted for.
5173 * @param cbMem The access size.
5174 */
5175DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
5176{
5177 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
5178 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
5179 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
5180}
5181
5182
5183/**
5184 * Applies the segment limit, base and attributes.
5185 *
5186 * This may raise a \#GP or \#SS.
5187 *
5188 * @returns VBox strict status code.
5189 *
5190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5191 * @param fAccess The kind of access which is being performed.
5192 * @param iSegReg The index of the segment register to apply.
5193 * This is UINT8_MAX if none (for IDT, GDT, LDT,
5194 * TSS, ++).
5195 * @param cbMem The access size.
5196 * @param pGCPtrMem Pointer to the guest memory address to apply
5197 * segmentation to. Input and output parameter.
5198 */
5199VBOXSTRICTRC iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem) RT_NOEXCEPT
5200{
5201 if (iSegReg == UINT8_MAX)
5202 return VINF_SUCCESS;
5203
5204 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
5205 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
5206 switch (pVCpu->iem.s.enmCpuMode)
5207 {
5208 case IEMMODE_16BIT:
5209 case IEMMODE_32BIT:
5210 {
5211 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
5212 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
5213
5214 if ( pSel->Attr.n.u1Present
5215 && !pSel->Attr.n.u1Unusable)
5216 {
5217 Assert(pSel->Attr.n.u1DescType);
5218 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
5219 {
5220 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5221 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
5222 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5223
5224 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5225 {
5226 /** @todo CPL check. */
5227 }
5228
5229 /*
5230 * There are two kinds of data selectors, normal and expand down.
5231 */
5232 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
5233 {
5234 if ( GCPtrFirst32 > pSel->u32Limit
5235 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5236 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5237 }
5238 else
5239 {
5240 /*
5241 * The upper boundary is defined by the B bit, not the G bit!
5242 */
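                    /* E.g. limit=0x0fff with the B bit set: valid offsets are 0x1000 thru 0xffffffff. */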
5243 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
5244 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
5245 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5246 }
5247 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5248 }
5249 else
5250 {
5251                 * A code selector can usually be used to read through it; writing is
5252                 * only permitted in real and V8086 mode.
5253 * only permitted in real and V8086 mode.
5254 */
5255 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5256 || ( (fAccess & IEM_ACCESS_TYPE_READ)
5257 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
5258 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
5259 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
5260
5261 if ( GCPtrFirst32 > pSel->u32Limit
5262 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
5263 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
5264
5265 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
5266 {
5267 /** @todo CPL check. */
5268 }
5269
5270 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
5271 }
5272 }
5273 else
5274 return iemRaiseGeneralProtectionFault0(pVCpu);
5275 return VINF_SUCCESS;
5276 }
5277
5278 case IEMMODE_64BIT:
5279 {
5280 RTGCPTR GCPtrMem = *pGCPtrMem;
5281 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
5282 *pGCPtrMem = GCPtrMem + pSel->u64Base;
5283
5284 Assert(cbMem >= 1);
5285 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
5286 return VINF_SUCCESS;
5287 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
5288 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
5289 return iemRaiseGeneralProtectionFault0(pVCpu);
5290 }
5291
5292 default:
5293 AssertFailedReturn(VERR_IEM_IPE_7);
5294 }
5295}
5296
5297
5298/**
5299 * Translates a virtual address to a physical address and checks if we
5300 * can access the page as specified.
5301 *
5302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5303 * @param GCPtrMem The virtual address.
5304 * @param fAccess The intended access.
5305 * @param pGCPhysMem Where to return the physical address.
5306 */
5307VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem) RT_NOEXCEPT
5308{
5309 /** @todo Need a different PGM interface here. We're currently using
5310 * generic / REM interfaces. this won't cut it for R0. */
5311 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
5312 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
5313 * here. */
5314 PGMPTWALK Walk;
5315 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5316 if (RT_FAILURE(rc))
5317 {
5318 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5319 /** @todo Check unassigned memory in unpaged mode. */
5320 /** @todo Reserved bits in page tables. Requires new PGM interface. */
5321#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5322 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5323 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5324#endif
5325 *pGCPhysMem = NIL_RTGCPHYS;
5326 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
5327 }
5328
5329 /* If the page is writable and does not have the no-exec bit set, all
5330 access is allowed. Otherwise we'll have to check more carefully... */
5331 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
5332 {
5333 /* Write to read only memory? */
5334 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
5335 && !(Walk.fEffective & X86_PTE_RW)
5336 && ( ( pVCpu->iem.s.uCpl == 3
5337 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5338 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
5339 {
5340 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
5341 *pGCPhysMem = NIL_RTGCPHYS;
5342#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5343 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5344 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5345#endif
5346 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
5347 }
5348
5349 /* Kernel memory accessed by userland? */
5350 if ( !(Walk.fEffective & X86_PTE_US)
5351 && pVCpu->iem.s.uCpl == 3
5352 && !(fAccess & IEM_ACCESS_WHAT_SYS))
5353 {
5354 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
5355 *pGCPhysMem = NIL_RTGCPHYS;
5356#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5357 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5358 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5359#endif
5360 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
5361 }
5362
5363 /* Executing non-executable memory? */
5364 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
5365 && (Walk.fEffective & X86_PTE_PAE_NX)
5366 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
5367 {
5368 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
5369 *pGCPhysMem = NIL_RTGCPHYS;
5370#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5371 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5372 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
5373#endif
5374 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
5375 VERR_ACCESS_DENIED);
5376 }
5377 }
5378
5379 /*
5380 * Set the dirty / access flags.
5381 * ASSUMES this is set when the address is translated rather than on commit...
5382 */
5383 /** @todo testcase: check when A and D bits are actually set by the CPU. */
5384 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
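    /* A write requires both the Accessed and Dirty bits, a read/exec only the Accessed bit - set them below if missing. */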
5385 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
5386 {
5387 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
5388 AssertRC(rc2);
5389 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
5390 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
5391 }
5392
5393 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
5394 *pGCPhysMem = GCPhys;
5395 return VINF_SUCCESS;
5396}
5397
5398
5399/**
5400 * Looks up a memory mapping entry.
5401 *
5402 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
5403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5404 * @param pvMem The memory address.
5405 * @param fAccess The access type and purpose to match (IEM_ACCESS_TYPE_XXX and IEM_ACCESS_WHAT_XXX).
5406 */
5407DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
5408{
5409 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5410 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
5411 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
5412 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5413 return 0;
5414 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
5415 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5416 return 1;
5417 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
5418 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
5419 return 2;
5420 return VERR_NOT_FOUND;
5421}
5422
5423
5424/**
5425 * Finds a free memmap entry when using iNextMapping doesn't work.
5426 *
5427 * @returns Memory mapping index, 1024 on failure.
5428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5429 */
5430static unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
5431{
5432 /*
5433 * The easy case.
5434 */
5435 if (pVCpu->iem.s.cActiveMappings == 0)
5436 {
5437 pVCpu->iem.s.iNextMapping = 1;
5438 return 0;
5439 }
5440
5441 /* There should be enough mappings for all instructions. */
5442 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
5443
5444 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
5445 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
5446 return i;
5447
5448 AssertFailedReturn(1024);
5449}
5450
5451
5452/**
5453 * Commits a bounce buffer that needs writing back and unmaps it.
5454 *
5455 * @returns Strict VBox status code.
5456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5457 * @param iMemMap The index of the buffer to commit.
5458 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
5459 * Always false in ring-3, obviously.
5460 */
5461static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
5462{
5463 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
5464 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
5465#ifdef IN_RING3
5466 Assert(!fPostponeFail);
5467 RT_NOREF_PV(fPostponeFail);
5468#endif
5469
5470 /*
5471 * Do the writing.
5472 */
5473 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5474 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
5475 {
5476 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
5477 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5478 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5479 if (!pVCpu->iem.s.fBypassHandlers)
5480 {
5481 /*
5482 * Carefully and efficiently dealing with access handler return
5483              * codes makes this a little bloated.
5484 */
5485 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
5486 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5487 pbBuf,
5488 cbFirst,
5489 PGMACCESSORIGIN_IEM);
5490 if (rcStrict == VINF_SUCCESS)
5491 {
5492 if (cbSecond)
5493 {
5494 rcStrict = PGMPhysWrite(pVM,
5495 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5496 pbBuf + cbFirst,
5497 cbSecond,
5498 PGMACCESSORIGIN_IEM);
5499 if (rcStrict == VINF_SUCCESS)
5500 { /* nothing */ }
5501 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5502 {
5503 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
5504 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5505 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5506 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5507 }
5508#ifndef IN_RING3
5509 else if (fPostponeFail)
5510 {
5511 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5512 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5513 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5514 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5515 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5516 return iemSetPassUpStatus(pVCpu, rcStrict);
5517 }
5518#endif
5519 else
5520 {
5521 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5522 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5523 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5524 return rcStrict;
5525 }
5526 }
5527 }
5528 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5529 {
5530 if (!cbSecond)
5531 {
5532 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
5533 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5534 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5535 }
5536 else
5537 {
5538 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
5539 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5540 pbBuf + cbFirst,
5541 cbSecond,
5542 PGMACCESSORIGIN_IEM);
5543 if (rcStrict2 == VINF_SUCCESS)
5544 {
5545 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
5546 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5547 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5548 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5549 }
5550 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5551 {
5552 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
5553 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5554 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5555 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5556 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5557 }
5558#ifndef IN_RING3
5559 else if (fPostponeFail)
5560 {
5561 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5562 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5563 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5564 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
5565 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5566 return iemSetPassUpStatus(pVCpu, rcStrict);
5567 }
5568#endif
5569 else
5570 {
5571 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5572 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5573 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
5574 return rcStrict2;
5575 }
5576 }
5577 }
5578#ifndef IN_RING3
5579 else if (fPostponeFail)
5580 {
5581 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
5582 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5583 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5584 if (!cbSecond)
5585 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
5586 else
5587 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
5588 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
5589 return iemSetPassUpStatus(pVCpu, rcStrict);
5590 }
5591#endif
5592 else
5593 {
5594 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5595 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
5596 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5597 return rcStrict;
5598 }
5599 }
5600 else
5601 {
5602 /*
5603 * No access handlers, much simpler.
5604 */
5605 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
5606 if (RT_SUCCESS(rc))
5607 {
5608 if (cbSecond)
5609 {
5610 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
5611 if (RT_SUCCESS(rc))
5612 { /* likely */ }
5613 else
5614 {
5615 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
5616 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
5617 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
5618 return rc;
5619 }
5620 }
5621 }
5622 else
5623 {
5624 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
5625 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
5626 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
5627 return rc;
5628 }
5629 }
5630 }
5631
5632#if defined(IEM_LOG_MEMORY_WRITES)
5633 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
5634 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
5635 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
5636 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
5637 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
5638 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
5639
5640 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
5641 g_cbIemWrote = cbWrote;
5642 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
5643#endif
5644
5645 /*
5646 * Free the mapping entry.
5647 */
5648 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
5649 Assert(pVCpu->iem.s.cActiveMappings != 0);
5650 pVCpu->iem.s.cActiveMappings--;
5651 return VINF_SUCCESS;
5652}
5653
5654
5655/**
5656 * iemMemMap worker that deals with a request crossing pages.
5657 */
5658static VBOXSTRICTRC
5659iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
5660{
5661 /*
5662 * Do the address translations.
5663 */
5664 RTGCPHYS GCPhysFirst;
5665 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
5666 if (rcStrict != VINF_SUCCESS)
5667 return rcStrict;
5668
5669 RTGCPHYS GCPhysSecond;
5670 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
5671 fAccess, &GCPhysSecond);
5672 if (rcStrict != VINF_SUCCESS)
5673 return rcStrict;
5674 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
5675
5676 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
5677
5678 /*
5679 * Read in the current memory content if it's a read, execute or partial
5680 * write access.
5681 */
5682 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5683 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (GCPhysFirst & GUEST_PAGE_OFFSET_MASK);
5684 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
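    /* E.g. with 4 KiB guest pages, an 8 byte access at page offset 0xffc splits into cbFirstPage=4 and cbSecondPage=4. */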
5685
5686 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5687 {
5688 if (!pVCpu->iem.s.fBypassHandlers)
5689 {
5690 /*
5691              * Must carefully deal with access handler status codes here,
5692              * which makes the code a bit bloated.
5693 */
5694 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
5695 if (rcStrict == VINF_SUCCESS)
5696 {
5697 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5698 if (rcStrict == VINF_SUCCESS)
5699 { /* likely */ }
5700 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5701 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5702 else
5703 {
5704 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
5705 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
5706 return rcStrict;
5707 }
5708 }
5709 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5710 {
5711 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
5712 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
5713 {
5714 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
5715 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5716 }
5717 else
5718 {
5719 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
5720                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
5721 return rcStrict2;
5722 }
5723 }
5724 else
5725 {
5726 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5727 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5728 return rcStrict;
5729 }
5730 }
5731 else
5732 {
5733 /*
5734              * No informational status codes here, much more straightforward.
5735 */
5736 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
5737 if (RT_SUCCESS(rc))
5738 {
5739 Assert(rc == VINF_SUCCESS);
5740 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
5741 if (RT_SUCCESS(rc))
5742 Assert(rc == VINF_SUCCESS);
5743 else
5744 {
5745 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
5746 return rc;
5747 }
5748 }
5749 else
5750 {
5751 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
5752 return rc;
5753 }
5754 }
5755 }
5756#ifdef VBOX_STRICT
5757 else
5758 memset(pbBuf, 0xcc, cbMem);
5759 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5760 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5761#endif
5762
5763 /*
5764 * Commit the bounce buffer entry.
5765 */
5766 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5767 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
5768 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
5769 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
5770 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
5771 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5772 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5773 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5774 pVCpu->iem.s.cActiveMappings++;
5775
5776 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5777 *ppvMem = pbBuf;
5778 return VINF_SUCCESS;
5779}
5780
5781
5782/**
5783 * iemMemMap worker that deals with iemMemPageMap failures.
5784 */
5785static VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
5786 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
5787{
5788 /*
5789 * Filter out conditions we can handle and the ones which shouldn't happen.
5790 */
5791 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
5792 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
5793 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
5794 {
5795 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
5796 return rcMap;
5797 }
5798 pVCpu->iem.s.cPotentialExits++;
5799
5800 /*
5801 * Read in the current memory content if it's a read, execute or partial
5802 * write access.
5803 */
5804 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
5805 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
5806 {
5807 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
5808 memset(pbBuf, 0xff, cbMem);
5809 else
5810 {
5811 int rc;
5812 if (!pVCpu->iem.s.fBypassHandlers)
5813 {
5814 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
5815 if (rcStrict == VINF_SUCCESS)
5816 { /* nothing */ }
5817 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
5818 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
5819 else
5820 {
5821 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5822 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
5823 return rcStrict;
5824 }
5825 }
5826 else
5827 {
5828 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
5829 if (RT_SUCCESS(rc))
5830 { /* likely */ }
5831 else
5832 {
5833 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
5834 GCPhysFirst, rc));
5835 return rc;
5836 }
5837 }
5838 }
5839 }
5840#ifdef VBOX_STRICT
5841 else
5842 memset(pbBuf, 0xcc, cbMem);
5843#endif
5844#ifdef VBOX_STRICT
5845 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
5846 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
5847#endif
5848
5849 /*
5850 * Commit the bounce buffer entry.
5851 */
5852 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
5853 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
5854 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
5855 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
5856 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
5857 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
5858 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
5859 pVCpu->iem.s.iNextMapping = iMemMap + 1;
5860 pVCpu->iem.s.cActiveMappings++;
5861
5862 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
5863 *ppvMem = pbBuf;
5864 return VINF_SUCCESS;
5865}
5866
5867
5868
5869/**
5870 * Maps the specified guest memory for the given kind of access.
5871 *
5872 * This may be using bounce buffering of the memory if it's crossing a page
5873 * boundary or if there is an access handler installed for any of it. Because
5874 * of lock prefix guarantees, we're in for some extra clutter when this
5875 * happens.
5876 *
5877 * This may raise a \#GP, \#SS, \#PF or \#AC.
5878 *
5879 * @returns VBox strict status code.
5880 *
5881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5882 * @param ppvMem Where to return the pointer to the mapped memory.
5883 * @param cbMem The number of bytes to map. This is usually 1, 2, 4, 6,
5884 * 8, 12, 16, 32 or 512. When used by string operations
5885 * it can be up to a page.
5886 * @param iSegReg The index of the segment register to use for this
5887 * access. The base and limits are checked. Use UINT8_MAX
5888 * to indicate that no segmentation is required (for IDT,
5889 * GDT and LDT accesses).
5890 * @param GCPtrMem The address of the guest memory.
5891 * @param fAccess How the memory is being accessed. The
5892 * IEM_ACCESS_TYPE_XXX bit is used to figure out how to map
5893 * the memory, while the IEM_ACCESS_WHAT_XXX bit is used
5894 * when raising exceptions.
5895 * @param uAlignCtl Alignment control:
5896 * - Bits 15:0 is the alignment mask.
5897 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
5898 * IEM_MEMMAP_F_ALIGN_SSE, and
5899 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
5900 * Pass zero to skip alignment.
5901 */
5902VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem,
5903 uint32_t fAccess, uint32_t uAlignCtl) RT_NOEXCEPT
5904{
5905 /*
5906 * Check the input and figure out which mapping entry to use.
5907 */
5908 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
5909 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
5910 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
5911
5912 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
5913 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
5914 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
5915 {
5916 iMemMap = iemMemMapFindFree(pVCpu);
5917 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
5918 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
5919 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
5920 pVCpu->iem.s.aMemMappings[2].fAccess),
5921 VERR_IEM_IPE_9);
5922 }
5923
5924 /*
5925 * Map the memory, checking that we can actually access it. If something
5926 * slightly complicated happens, fall back on bounce buffering.
5927 */
5928 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
5929 if (rcStrict == VINF_SUCCESS)
5930 { /* likely */ }
5931 else
5932 return rcStrict;
5933
5934 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE) /* Crossing a page boundary? */
5935 { /* likely */ }
5936 else
5937 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
5938
5939 /*
5940 * Alignment check.
5941 */
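    /* The low 16 bits of uAlignCtl are the alignment mask, e.g. a mask of 0x000f enforces 16-byte alignment. */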
5942 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
5943 { /* likelyish */ }
5944 else
5945 {
5946 /* Misaligned access. */
5947 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
5948 {
5949 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
5950 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
5951 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
5952 {
5953 AssertCompile(X86_CR0_AM == X86_EFL_AC);
5954
5955 if (iemMemAreAlignmentChecksEnabled(pVCpu))
5956 return iemRaiseAlignmentCheckException(pVCpu);
5957 }
5958 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
5959 && iemMemAreAlignmentChecksEnabled(pVCpu)
5960/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
5961 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
5962 )
5963 return iemRaiseAlignmentCheckException(pVCpu);
5964 else
5965 return iemRaiseGeneralProtectionFault0(pVCpu);
5966 }
5967 }
5968
5969#ifdef IEM_WITH_DATA_TLB
5970 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
5971
5972 /*
5973 * Get the TLB entry for this page.
5974 */
5975 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
5976 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
5977 if (pTlbe->uTag == uTag)
5978 {
5979# ifdef VBOX_WITH_STATISTICS
5980 pVCpu->iem.s.DataTlb.cTlbHits++;
5981# endif
5982 }
5983 else
5984 {
5985 pVCpu->iem.s.DataTlb.cTlbMisses++;
5986 PGMPTWALK Walk;
5987 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
5988 if (RT_FAILURE(rc))
5989 {
5990 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
5991# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
5992 if (Walk.fFailed & PGM_WALKFAIL_EPT)
5993 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
5994# endif
5995 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
5996 }
5997
5998 Assert(Walk.fSucceeded);
5999 pTlbe->uTag = uTag;
6000 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6001 pTlbe->GCPhys = Walk.GCPhys;
6002 pTlbe->pbMappingR3 = NULL;
6003 }
6004
6005 /*
6006 * Check TLB page table level access flags.
6007 */
6008 /* If the page is either supervisor only or non-writable, we need to do
6009 more careful access checks. */
6010 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_WRITE))
6011 {
6012 /* Write to read only memory? */
6013 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_WRITE)
6014 && (fAccess & IEM_ACCESS_TYPE_WRITE)
6015 && ( ( pVCpu->iem.s.uCpl == 3
6016 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6017 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
6018 {
6019 Log(("iemMemMap: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6020# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6021 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6022 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6023# endif
6024 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6025 }
6026
6027 /* Kernel memory accessed by userland? */
6028 if ( (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER)
6029 && pVCpu->iem.s.uCpl == 3
6030 && !(fAccess & IEM_ACCESS_WHAT_SYS))
6031 {
6032 Log(("iemMemMap: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6033# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6034 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6035 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6036# endif
6037 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6038 }
6039 }
6040
6041 /*
6042 * Set the dirty / access flags.
6043 * ASSUMES this is set when the address is translated rather than on commit...
6044 */
6045 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6046 uint64_t const fTlbAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PT_NO_DIRTY : 0) | IEMTLBE_F_PT_NO_ACCESSED;
6047 if (pTlbe->fFlagsAndPhysRev & fTlbAccessedDirty)
6048 {
6049 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6050 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6051 AssertRC(rc2);
6052 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6053 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6054 pTlbe->fFlagsAndPhysRev &= ~fTlbAccessedDirty;
6055 }
6056
6057 /*
6058 * Look up the physical page info if necessary.
6059 */
6060 uint8_t *pbMem = NULL;
6061 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6062# ifdef IN_RING3
6063 pbMem = pTlbe->pbMappingR3;
6064# else
6065 pbMem = NULL;
6066# endif
6067 else
6068 {
6069 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6070 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6071 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6072 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6073        if (RT_LIKELY(pVCpu->iem.s.DataTlb.uTlbPhysRev > IEMTLB_PHYS_REV_INCR))
6074 { /* likely */ }
6075 else
6076 IEMTlbInvalidateAllPhysicalSlow(pVCpu);
6077 pTlbe->pbMappingR3 = NULL;
6078 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6079 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6080 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6081 &pbMem, &pTlbe->fFlagsAndPhysRev);
6082 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6083# ifdef IN_RING3
6084 pTlbe->pbMappingR3 = pbMem;
6085# endif
6086 }
6087
6088 /*
6089 * Check the physical page level access and mapping.
6090 */
6091 if ( !(pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))
6092 || !(pTlbe->fFlagsAndPhysRev & ( (fAccess & IEM_ACCESS_TYPE_WRITE ? IEMTLBE_F_PG_NO_WRITE : 0)
6093 | (fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0))) )
6094 { /* probably likely */ }
6095 else
6096 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem,
6097 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6098 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6099 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6100 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6101 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6102
6103 if (pbMem)
6104 {
6105 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6106 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6107 fAccess |= IEM_ACCESS_NOT_LOCKED;
6108 }
6109 else
6110 {
6111 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6112 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6113 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6114 if (rcStrict != VINF_SUCCESS)
6115 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6116 }
6117
6118 void * const pvMem = pbMem;
6119
6120 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6121 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6122 if (fAccess & IEM_ACCESS_TYPE_READ)
6123 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6124
6125#else /* !IEM_WITH_DATA_TLB */
6126
6127 RTGCPHYS GCPhysFirst;
6128 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6129 if (rcStrict != VINF_SUCCESS)
6130 return rcStrict;
6131
6132 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6133 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6134 if (fAccess & IEM_ACCESS_TYPE_READ)
6135 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6136
6137 void *pvMem;
6138 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6139 if (rcStrict != VINF_SUCCESS)
6140 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6141
6142#endif /* !IEM_WITH_DATA_TLB */
6143
6144 /*
6145 * Fill in the mapping table entry.
6146 */
6147 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6148 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6149 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6150 pVCpu->iem.s.cActiveMappings += 1;
6151
6152 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6153 *ppvMem = pvMem;
6154
6155 return VINF_SUCCESS;
6156}
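
/*
 * Illustrative sketch (assumption, not from the original sources): how an
 * instruction with a memory destination would typically drive iemMemMap for a
 * read-modify-write access.  The helper name iemExampleAddMemU32 is purely
 * hypothetical, and IEM_ACCESS_DATA_RW is assumed to be defined alongside
 * IEM_ACCESS_DATA_R / IEM_ACCESS_DATA_W; bits 15:0 of the last argument are
 * the alignment mask described above.
 */
#if 0 /* illustration only */
static VBOXSTRICTRC iemExampleAddMemU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Addend)
{
    uint32_t *pu32Dst;
    /* Map 4 bytes for read+write with a natural alignment mask and no #GP/#AC flags. */
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
                                IEM_ACCESS_DATA_RW, sizeof(*pu32Dst) - 1);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst += u32Addend;                                         /* modify the mapped guest memory in place */
        rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_RW); /* commits bounce buffered data if needed */
    }
    return rc;
}
#endif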
6157
6158
6159/**
6160 * Commits the guest memory if bounce buffered and unmaps it.
6161 *
6162 * @returns Strict VBox status code.
6163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6164 * @param pvMem The mapping.
6165 * @param fAccess The kind of access.
6166 */
6167VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6168{
6169 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6170 AssertReturn(iMemMap >= 0, iMemMap);
6171
6172 /* If it's bounce buffered, we may need to write back the buffer. */
6173 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6174 {
6175 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6176 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6177 }
6178 /* Otherwise unlock it. */
6179 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6180 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6181
6182 /* Free the entry. */
6183 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6184 Assert(pVCpu->iem.s.cActiveMappings != 0);
6185 pVCpu->iem.s.cActiveMappings--;
6186 return VINF_SUCCESS;
6187}
6188
6189#ifdef IEM_WITH_SETJMP
6190
6191/**
6192 * Maps the specified guest memory for the given kind of access, longjmp on
6193 * error.
6194 *
6195 * This may be using bounce buffering of the memory if it's crossing a page
6196 * boundary or if there is an access handler installed for any of it. Because
6197 * of lock prefix guarantees, we're in for some extra clutter when this
6198 * happens.
6199 *
6200 * This may raise a \#GP, \#SS, \#PF or \#AC.
6201 *
6202 * @returns Pointer to the mapped memory.
6203 *
6204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6205 * @param cbMem The number of bytes to map. This is usually 1,
6206 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
6207 * string operations it can be up to a page.
6208 * @param iSegReg The index of the segment register to use for
6209 * this access. The base and limits are checked.
6210 * Use UINT8_MAX to indicate that no segmentation
6211 * is required (for IDT, GDT and LDT accesses).
6212 * @param GCPtrMem The address of the guest memory.
6213 * @param fAccess How the memory is being accessed. The
6214 * IEM_ACCESS_TYPE_XXX bit is used to figure out
6215 * how to map the memory, while the
6216 * IEM_ACCESS_WHAT_XXX bit is used when raising
6217 * exceptions.
6218 * @param uAlignCtl Alignment control:
6219 * - Bits 15:0 is the alignment mask.
6220 * - Bits 31:16 for flags like IEM_MEMMAP_F_ALIGN_GP,
6221 * IEM_MEMMAP_F_ALIGN_SSE, and
6222 * IEM_MEMMAP_F_ALIGN_GP_OR_AC.
6223 * Pass zero to skip alignment.
6224 */
6225void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess,
6226 uint32_t uAlignCtl) RT_NOEXCEPT
6227{
6228 /*
6229 * Check the input, check segment access and adjust address
6230 * with segment base.
6231 */
6232 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
6233 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
6234 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
6235
6236 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
6237 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6238 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6239
6240 /*
6241 * Alignment check.
6242 */
6243 if ( (GCPtrMem & (uAlignCtl & UINT16_MAX)) == 0 )
6244 { /* likelyish */ }
6245 else
6246 {
6247 /* Misaligned access. */
6248 if ((fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6249 {
6250 if ( !(uAlignCtl & IEM_MEMMAP_F_ALIGN_GP)
6251 || ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_SSE)
6252 && (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) )
6253 {
6254 AssertCompile(X86_CR0_AM == X86_EFL_AC);
6255
6256 if (iemMemAreAlignmentChecksEnabled(pVCpu))
6257 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6258 }
6259 else if ( (uAlignCtl & IEM_MEMMAP_F_ALIGN_GP_OR_AC)
6260 && iemMemAreAlignmentChecksEnabled(pVCpu)
6261/** @todo may only apply to 2, 4 or 8 byte misalignments depending on the CPU
6262 * implementation. See FXSAVE/FRSTOR/XSAVE/XRSTOR/++. */
6263 )
6264 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6265 else
6266 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
6267 }
6268 }
6269
6270 /*
6271 * Figure out which mapping entry to use.
6272 */
6273 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
6274 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
6275 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
6276 {
6277 iMemMap = iemMemMapFindFree(pVCpu);
6278 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
6279 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
6280 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
6281 pVCpu->iem.s.aMemMappings[2].fAccess),
6282 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
6283 }
6284
6285 /*
6286 * Crossing a page boundary?
6287 */
6288 if ((GCPtrMem & GUEST_PAGE_OFFSET_MASK) + cbMem <= GUEST_PAGE_SIZE)
6289 { /* No (likely). */ }
6290 else
6291 {
6292 void *pvMem;
6293 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
6294 if (rcStrict == VINF_SUCCESS)
6295 return pvMem;
6296 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6297 }
6298
6299#ifdef IEM_WITH_DATA_TLB
6300 Assert(!(fAccess & IEM_ACCESS_TYPE_EXEC));
6301
6302 /*
6303 * Get the TLB entry for this page.
6304 */
6305 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrMem);
6306 PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6307 if (pTlbe->uTag == uTag)
6308 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6309 else
6310 {
6311 pVCpu->iem.s.DataTlb.cTlbMisses++;
6312 PGMPTWALK Walk;
6313 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
6314 if (RT_FAILURE(rc))
6315 {
6316 Log(("iemMemMap: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
6317# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6318 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6319 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
6320# endif
6321 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, rc);
6322 }
6323
6324 Assert(Walk.fSucceeded);
6325 pTlbe->uTag = uTag;
6326 pTlbe->fFlagsAndPhysRev = ~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D | X86_PTE_A); /* skipping NX */
6327 pTlbe->GCPhys = Walk.GCPhys;
6328 pTlbe->pbMappingR3 = NULL;
6329 }
6330
6331 /*
6332 * Check the flags and physical revision.
6333 */
6334 /** @todo make the caller pass these in with fAccess. */
6335 uint64_t const fNoUser = (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS && pVCpu->iem.s.uCpl == 3
6336 ? IEMTLBE_F_PT_NO_USER : 0;
6337 uint64_t const fNoWriteNoDirty = fAccess & IEM_ACCESS_TYPE_WRITE
6338 ? IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY
6339 | ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)
6340 || (pVCpu->iem.s.uCpl == 3 && (fAccess & IEM_ACCESS_WHAT_MASK) != IEM_ACCESS_WHAT_SYS)
6341 ? IEMTLBE_F_PT_NO_WRITE : 0)
6342 : 0;
6343 uint64_t const fNoRead = fAccess & IEM_ACCESS_TYPE_READ ? IEMTLBE_F_PG_NO_READ : 0;
6344 uint8_t *pbMem = NULL;
6345 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PT_NO_ACCESSED | fNoRead | fNoWriteNoDirty | fNoUser))
6346 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6347# ifdef IN_RING3
6348 pbMem = pTlbe->pbMappingR3;
6349# else
6350 pbMem = NULL;
6351# endif
6352 else
6353 {
6354 /*
6355 * Okay, something isn't quite right or needs refreshing.
6356 */
6357 /* Write to read only memory? */
6358 if (pTlbe->fFlagsAndPhysRev & fNoWriteNoDirty & IEMTLBE_F_PT_NO_WRITE)
6359 {
6360 Log(("iemMemMapJmp: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
6361# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6362 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6363 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6364# endif
6365 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
6366 }
6367
6368 /* Kernel memory accessed by userland? */
6369 if (pTlbe->fFlagsAndPhysRev & fNoUser & IEMTLBE_F_PT_NO_USER)
6370 {
6371 Log(("iemMemMapJmp: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
6372# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
6373 if (Walk.fFailed & PGM_WALKFAIL_EPT)
6374 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, fAccess, IEM_SLAT_FAIL_LINEAR_TO_PAGE_TABLE, 0 /* cbInstr */);
6375# endif
6376 iemRaisePageFaultJmp(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
6377 }
6378
6379 /* Set the dirty / access flags.
6380 ASSUMES this is set when the address is translated rather than on commit... */
6381 /** @todo testcase: check when A and D bits are actually set by the CPU. */
6382 if (pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED))
6383 {
6384 uint32_t const fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
6385 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
6386 AssertRC(rc2);
6387 /** @todo Nested VMX: Accessed/dirty bit currently not supported, asserted below. */
6388 Assert(!(CPUMGetGuestIa32VmxEptVpidCap(pVCpu) & VMX_BF_EPT_VPID_CAP_ACCESS_DIRTY_MASK));
6389 pTlbe->fFlagsAndPhysRev &= ~((fNoWriteNoDirty & IEMTLBE_F_PT_NO_DIRTY) | IEMTLBE_F_PT_NO_ACCESSED);
6390 }
6391
6392 /*
6393 * Check if the physical page info needs updating.
6394 */
6395 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6396# ifdef IN_RING3
6397 pbMem = pTlbe->pbMappingR3;
6398# else
6399 pbMem = NULL;
6400# endif
6401 else
6402 {
6403 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
6404 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
6405 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
6406 AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED == IEMTLBE_F_PG_UNASSIGNED);
6407 pTlbe->pbMappingR3 = NULL;
6408 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
6409 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_UNASSIGNED);
6410 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.DataTlb.uTlbPhysRev,
6411 &pbMem, &pTlbe->fFlagsAndPhysRev);
6412 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
6413# ifdef IN_RING3
6414 pTlbe->pbMappingR3 = pbMem;
6415# endif
6416 }
6417
6418 /*
6419 * Check the physical page level access and mapping.
6420 */
6421 if (!(pTlbe->fFlagsAndPhysRev & ((fNoWriteNoDirty | fNoRead) & (IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ))))
6422 { /* probably likely */ }
6423 else
6424 {
6425 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, (void **)&pbMem, cbMem,
6426 pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), fAccess,
6427 pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_UNASSIGNED ? VERR_PGM_PHYS_TLB_UNASSIGNED
6428 : pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PG_NO_READ ? VERR_PGM_PHYS_TLB_CATCH_ALL
6429 : VERR_PGM_PHYS_TLB_CATCH_WRITE);
6430 if (rcStrict == VINF_SUCCESS)
6431 return pbMem;
6432 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6433 }
6434 }
6435 Assert(!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_NO_MAPPINGR3)); /* ASSUMPTIONS about PGMPhysIemGCPhys2PtrNoLock behaviour. */
6436
6437 if (pbMem)
6438 {
6439 Assert(!((uintptr_t)pbMem & GUEST_PAGE_OFFSET_MASK));
6440 pbMem = pbMem + (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6441 fAccess |= IEM_ACCESS_NOT_LOCKED;
6442 }
6443 else
6444 {
6445 Assert(!(fAccess & IEM_ACCESS_NOT_LOCKED));
6446 RTGCPHYS const GCPhysFirst = pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK);
6447 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, (void **)&pbMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6448 if (rcStrict == VINF_SUCCESS)
6449 return pbMem;
6450 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6451 }
6452
6453 void * const pvMem = pbMem;
6454
6455 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6456 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6457 if (fAccess & IEM_ACCESS_TYPE_READ)
6458 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, pTlbe->GCPhys | (GCPtrMem & GUEST_PAGE_OFFSET_MASK), cbMem));
6459
6460#else /* !IEM_WITH_DATA_TLB */
6461
6462
6463 RTGCPHYS GCPhysFirst;
6464 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
6465 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
6466 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6467
6468 if (fAccess & IEM_ACCESS_TYPE_WRITE)
6469 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6470 if (fAccess & IEM_ACCESS_TYPE_READ)
6471 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
6472
6473 void *pvMem;
6474 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6475 if (rcStrict == VINF_SUCCESS)
6476 { /* likely */ }
6477 else
6478 {
6479 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
6480 if (rcStrict == VINF_SUCCESS)
6481 return pvMem;
6482 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6483 }
6484
6485#endif /* !IEM_WITH_DATA_TLB */
6486
6487 /*
6488 * Fill in the mapping table entry.
6489 */
6490 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
6491 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
6492 pVCpu->iem.s.iNextMapping = iMemMap + 1;
6493 pVCpu->iem.s.cActiveMappings++;
6494
6495 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
6496 return pvMem;
6497}
6498
6499
6500/**
6501 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
6502 *
6503 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6504 * @param pvMem The mapping.
6505 * @param fAccess The kind of access.
6506 */
6507void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6508{
6509 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6510 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
6511
6512 /* If it's bounce buffered, we may need to write back the buffer. */
6513 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6514 {
6515 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6516 {
6517 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
6518 if (rcStrict == VINF_SUCCESS)
6519 return;
6520 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
6521 }
6522 }
6523 /* Otherwise unlock it. */
6524 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6525 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6526
6527 /* Free the entry. */
6528 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6529 Assert(pVCpu->iem.s.cActiveMappings != 0);
6530 pVCpu->iem.s.cActiveMappings--;
6531}
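
/*
 * Illustrative sketch (assumption, not from the original sources): the *Jmp
 * accessors above expect the caller to have armed pVCpu->iem.s.pJmpBuf via
 * setjmp, roughly as below.  The surrounding function, GCPtrSrc and the exact
 * rollback placement are hypothetical.
 */
# if 0 /* illustration only */
    jmp_buf         JmpBuf;
    jmp_buf * const pOldJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
    int rcJmp;
    if ((rcJmp = setjmp(JmpBuf)) == 0)
    {
        /* Any exception raised below longjmps back with a strict status code in rcJmp. */
        uint8_t const bValue = iemMemFetchDataU8Jmp(pVCpu, X86_SREG_DS, GCPtrSrc);
        RT_NOREF(bValue);
    }
    else
    {
        /* Undo whatever mappings the interrupted access left behind. */
        if (pVCpu->iem.s.cActiveMappings > 0)
            iemMemRollback(pVCpu);
    }
    pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pOldJmpBuf;
# endif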
6532
6533#endif /* IEM_WITH_SETJMP */
6534
6535#ifndef IN_RING3
6536/**
6537 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
6538 * buffer part shows trouble, it will be postponed to ring-3 (sets FF and stuff).
6539 *
6540 * Allows the instruction to be completed and retired, while the IEM user will
6541 * return to ring-3 immediately afterwards and do the postponed writes there.
6542 *
6543 * @returns VBox status code (no strict statuses). Caller must check
6544 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
6545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6546 * @param pvMem The mapping.
6547 * @param fAccess The kind of access.
6548 */
6549VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess) RT_NOEXCEPT
6550{
6551 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
6552 AssertReturn(iMemMap >= 0, iMemMap);
6553
6554 /* If it's bounce buffered, we may need to write back the buffer. */
6555 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
6556 {
6557 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
6558 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
6559 }
6560 /* Otherwise unlock it. */
6561 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
6562 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6563
6564 /* Free the entry. */
6565 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6566 Assert(pVCpu->iem.s.cActiveMappings != 0);
6567 pVCpu->iem.s.cActiveMappings--;
6568 return VINF_SUCCESS;
6569}
6570#endif
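
/*
 * Illustrative sketch (assumption, not from the original sources): a ring-0/RC
 * caller of iemMemCommitAndUnmapPostponeTroubleToR3 is expected to check
 * VMCPU_FF_IEM before iterating e.g. a string instruction again, since a
 * pending force flag means the last write was postponed to ring-3.  The
 * variables pvDst and rcStrict are hypothetical.
 */
#if 0 /* illustration only */
    rcStrict = iemMemCommitAndUnmapPostponeTroubleToR3(pVCpu, pvDst, IEM_ACCESS_DATA_W);
    if (   rcStrict == VINF_SUCCESS
        && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
    { /* safe to do the next iteration here */ }
    else
    { /* leave the remainder of the operation to ring-3 */ }
#endif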
6571
6572
6573/**
6574 * Rolls back mappings, releasing page locks and such.
6575 *
6576 * The caller shall only call this after checking cActiveMappings.
6577 *
6579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6580 */
6581void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
6582{
6583 Assert(pVCpu->iem.s.cActiveMappings > 0);
6584
6585 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
6586 while (iMemMap-- > 0)
6587 {
6588 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
6589 if (fAccess != IEM_ACCESS_INVALID)
6590 {
6591 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
6592 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
6593 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
6594 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
6595 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
6596 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
6597 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
6598 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
6599 pVCpu->iem.s.cActiveMappings--;
6600 }
6601 }
6602}
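
/*
 * Illustrative sketch (assumption, not from the original sources): on the
 * status-code (non-longjmp) path, a failing instruction is typically followed
 * by the rollback above.  iemExecuteInstructionSomehow is a hypothetical
 * placeholder for whatever produced rcStrict.
 */
#if 0 /* illustration only */
    VBOXSTRICTRC rcStrict = iemExecuteInstructionSomehow(pVCpu);    /* hypothetical */
    if (   rcStrict != VINF_SUCCESS
        && pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu); /* releases page locks and drops uncommitted mappings */
#endif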
6603
6604
6605/**
6606 * Fetches a data byte.
6607 *
6608 * @returns Strict VBox status code.
6609 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6610 * @param pu8Dst Where to return the byte.
6611 * @param iSegReg The index of the segment register to use for
6612 * this access. The base and limits are checked.
6613 * @param GCPtrMem The address of the guest memory.
6614 */
6615VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6616{
6617 /* The lazy approach for now... */
6618 uint8_t const *pu8Src;
6619 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6620 if (rc == VINF_SUCCESS)
6621 {
6622 *pu8Dst = *pu8Src;
6623 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6624 }
6625 return rc;
6626}
6627
6628
6629#ifdef IEM_WITH_SETJMP
6630/**
6631 * Fetches a data byte, longjmp on error.
6632 *
6633 * @returns The byte.
6634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6635 * @param iSegReg The index of the segment register to use for
6636 * this access. The base and limits are checked.
6637 * @param GCPtrMem The address of the guest memory.
6638 */
6639uint8_t iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6640{
6641 /* The lazy approach for now... */
6642 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 0);
6643 uint8_t const bRet = *pu8Src;
6644 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
6645 return bRet;
6646}
6647#endif /* IEM_WITH_SETJMP */
6648
6649
6650/**
6651 * Fetches a data word.
6652 *
6653 * @returns Strict VBox status code.
6654 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6655 * @param pu16Dst Where to return the word.
6656 * @param iSegReg The index of the segment register to use for
6657 * this access. The base and limits are checked.
6658 * @param GCPtrMem The address of the guest memory.
6659 */
6660VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6661{
6662 /* The lazy approach for now... */
6663 uint16_t const *pu16Src;
6664 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem,
6665 IEM_ACCESS_DATA_R, sizeof(*pu16Src) - 1);
6666 if (rc == VINF_SUCCESS)
6667 {
6668 *pu16Dst = *pu16Src;
6669 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6670 }
6671 return rc;
6672}
6673
6674
6675#ifdef IEM_WITH_SETJMP
6676/**
6677 * Fetches a data word, longjmp on error.
6678 *
6679 * @returns The word
6680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6681 * @param iSegReg The index of the segment register to use for
6682 * this access. The base and limits are checked.
6683 * @param GCPtrMem The address of the guest memory.
6684 */
6685uint16_t iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6686{
6687 /* The lazy approach for now... */
6688 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6689 sizeof(*pu16Src) - 1);
6690 uint16_t const u16Ret = *pu16Src;
6691 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
6692 return u16Ret;
6693}
6694#endif
6695
6696
6697/**
6698 * Fetches a data dword.
6699 *
6700 * @returns Strict VBox status code.
6701 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6702 * @param pu32Dst Where to return the dword.
6703 * @param iSegReg The index of the segment register to use for
6704 * this access. The base and limits are checked.
6705 * @param GCPtrMem The address of the guest memory.
6706 */
6707VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6708{
6709 /* The lazy approach for now... */
6710 uint32_t const *pu32Src;
6711 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6712 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6713 if (rc == VINF_SUCCESS)
6714 {
6715 *pu32Dst = *pu32Src;
6716 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6717 }
6718 return rc;
6719}
6720
6721
6722/**
6723 * Fetches a data dword and zero extends it to a qword.
6724 *
6725 * @returns Strict VBox status code.
6726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6727 * @param pu64Dst Where to return the qword.
6728 * @param iSegReg The index of the segment register to use for
6729 * this access. The base and limits are checked.
6730 * @param GCPtrMem The address of the guest memory.
6731 */
6732VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6733{
6734 /* The lazy approach for now... */
6735 uint32_t const *pu32Src;
6736 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem,
6737 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6738 if (rc == VINF_SUCCESS)
6739 {
6740 *pu64Dst = *pu32Src;
6741 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6742 }
6743 return rc;
6744}
6745
6746
6747#ifdef IEM_WITH_SETJMP
6748
6749/**
6750 * Fetches a data dword, longjmp on error, fallback/safe version.
6751 *
6752 * @returns The dword
6753 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6754 * @param iSegReg The index of the segment register to use for
6755 * this access. The base and limits are checked.
6756 * @param GCPtrMem The address of the guest memory.
6757 */
6758uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6759{
6760 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6761 sizeof(*pu32Src) - 1);
6762 uint32_t const u32Ret = *pu32Src;
6763 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6764 return u32Ret;
6765}
6766
6767
6768/**
6769 * Fetches a data dword, longjmp on error.
6770 *
6771 * @returns The dword
6772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6773 * @param iSegReg The index of the segment register to use for
6774 * this access. The base and limits are checked.
6775 * @param GCPtrMem The address of the guest memory.
6776 */
6777uint32_t iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6778{
6779# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
6780 /*
6781     * Convert from segmented to flat address and check that it doesn't cross a page boundary.
6782 */
6783 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
6784 if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(uint32_t)))
6785 {
6786 /*
6787 * TLB lookup.
6788 */
6789 uint64_t const uTag = IEMTLB_CALC_TAG( &pVCpu->iem.s.DataTlb, GCPtrEff);
6790 PIEMTLBENTRY pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
6791 if (pTlbe->uTag == uTag)
6792 {
6793 /*
6794 * Check TLB page table level access flags.
6795 */
6796 uint64_t const fNoUser = pVCpu->iem.s.uCpl == 3 ? IEMTLBE_F_PT_NO_USER : 0;
6797 if ( (pTlbe->fFlagsAndPhysRev & ( IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
6798 | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_NO_MAPPINGR3 | fNoUser))
6799 == pVCpu->iem.s.DataTlb.uTlbPhysRev)
6800 {
6801 STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
6802
6803 /*
6804 * Alignment check:
6805 */
6806 /** @todo check priority \#AC vs \#PF */
6807 if ( !(GCPtrEff & (sizeof(uint32_t) - 1))
6808 || !(pVCpu->cpum.GstCtx.cr0 & X86_CR0_AM)
6809 || !pVCpu->cpum.GstCtx.eflags.Bits.u1AC
6810 || pVCpu->iem.s.uCpl != 3)
6811 {
6812 /*
6813 * Fetch and return the dword
6814 */
6815 Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
6816 Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
6817 return *(uint32_t const *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
6818 }
6819 Log10(("iemMemFetchDataU32Jmp: Raising #AC for %RGv\n", GCPtrEff));
6820 iemRaiseAlignmentCheckExceptionJmp(pVCpu);
6821 }
6822 }
6823 }
6824
6825    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception,
6826       outdated page pointer, or other troubles. */
6827 Log10(("iemMemFetchDataU32Jmp: %u:%RGv fallback\n", iSegReg, GCPtrMem));
6828 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
6829
6830# else
6831 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem,
6832 IEM_ACCESS_DATA_R, sizeof(*pu32Src) - 1);
6833 uint32_t const u32Ret = *pu32Src;
6834 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
6835 return u32Ret;
6836# endif
6837}
6838#endif
6839
6840
6841#ifdef SOME_UNUSED_FUNCTION
6842/**
6843 * Fetches a data dword and sign extends it to a qword.
6844 *
6845 * @returns Strict VBox status code.
6846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6847 * @param pu64Dst Where to return the sign extended value.
6848 * @param iSegReg The index of the segment register to use for
6849 * this access. The base and limits are checked.
6850 * @param GCPtrMem The address of the guest memory.
6851 */
6852VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6853{
6854 /* The lazy approach for now... */
6855 int32_t const *pi32Src;
6856 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem,
6857 IEM_ACCESS_DATA_R, sizeof(*pi32Src) - 1);
6858 if (rc == VINF_SUCCESS)
6859 {
6860 *pu64Dst = *pi32Src;
6861 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
6862 }
6863#ifdef __GNUC__ /* warning: GCC may be a royal pain */
6864 else
6865 *pu64Dst = 0;
6866#endif
6867 return rc;
6868}
6869#endif
6870
6871
6872/**
6873 * Fetches a data qword.
6874 *
6875 * @returns Strict VBox status code.
6876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6877 * @param pu64Dst Where to return the qword.
6878 * @param iSegReg The index of the segment register to use for
6879 * this access. The base and limits are checked.
6880 * @param GCPtrMem The address of the guest memory.
6881 */
6882VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6883{
6884 /* The lazy approach for now... */
6885 uint64_t const *pu64Src;
6886 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6887 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6888 if (rc == VINF_SUCCESS)
6889 {
6890 *pu64Dst = *pu64Src;
6891 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6892 }
6893 return rc;
6894}
6895
6896
6897#ifdef IEM_WITH_SETJMP
6898/**
6899 * Fetches a data qword, longjmp on error.
6900 *
6901 * @returns The qword.
6902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6903 * @param iSegReg The index of the segment register to use for
6904 * this access. The base and limits are checked.
6905 * @param GCPtrMem The address of the guest memory.
6906 */
6907uint64_t iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6908{
6909 /* The lazy approach for now... */
6910 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem,
6911 IEM_ACCESS_DATA_R, sizeof(*pu64Src) - 1);
6912 uint64_t const u64Ret = *pu64Src;
6913 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6914 return u64Ret;
6915}
6916#endif
6917
6918
6919/**
6920 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
6921 *
6922 * @returns Strict VBox status code.
6923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6924 * @param pu64Dst Where to return the qword.
6925 * @param iSegReg The index of the segment register to use for
6926 * this access. The base and limits are checked.
6927 * @param GCPtrMem The address of the guest memory.
6928 */
6929VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6930{
6931 /* The lazy approach for now... */
6932 uint64_t const *pu64Src;
6933 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem,
6934 IEM_ACCESS_DATA_R, 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6935 if (rc == VINF_SUCCESS)
6936 {
6937 *pu64Dst = *pu64Src;
6938 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6939 }
6940 return rc;
6941}
6942
6943
6944#ifdef IEM_WITH_SETJMP
6945/**
6946 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
6947 *
6948 * @returns The qword.
6949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6950 * @param iSegReg The index of the segment register to use for
6951 * this access. The base and limits are checked.
6952 * @param GCPtrMem The address of the guest memory.
6953 */
6954uint64_t iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6955{
6956 /* The lazy approach for now... */
6957 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
6958 15 | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
6959 uint64_t const u64Ret = *pu64Src;
6960 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
6961 return u64Ret;
6962}
6963#endif
6964
6965
6966/**
6967 * Fetches a data tword.
6968 *
6969 * @returns Strict VBox status code.
6970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6971 * @param pr80Dst Where to return the tword.
6972 * @param iSegReg The index of the segment register to use for
6973 * this access. The base and limits are checked.
6974 * @param GCPtrMem The address of the guest memory.
6975 */
6976VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
6977{
6978 /* The lazy approach for now... */
6979 PCRTFLOAT80U pr80Src;
6980 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
6981 if (rc == VINF_SUCCESS)
6982 {
6983 *pr80Dst = *pr80Src;
6984 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
6985 }
6986 return rc;
6987}
6988
6989
6990#ifdef IEM_WITH_SETJMP
6991/**
6992 * Fetches a data tword, longjmp on error.
6993 *
6994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6995 * @param pr80Dst Where to return the tword.
6996 * @param iSegReg The index of the segment register to use for
6997 * this access. The base and limits are checked.
6998 * @param GCPtrMem The address of the guest memory.
6999 */
7000void iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7001{
7002 /* The lazy approach for now... */
7003 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, 7);
7004 *pr80Dst = *pr80Src;
7005 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
7006}
7007#endif
7008
7009
7010/**
7011 * Fetches a data decimal tword.
7012 *
7013 * @returns Strict VBox status code.
7014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7015 * @param pd80Dst Where to return the tword.
7016 * @param iSegReg The index of the segment register to use for
7017 * this access. The base and limits are checked.
7018 * @param GCPtrMem The address of the guest memory.
7019 */
7020VBOXSTRICTRC iemMemFetchDataD80(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7021{
7022 /* The lazy approach for now... */
7023 PCRTPBCD80U pd80Src;
7024 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pd80Src, sizeof(*pd80Src), iSegReg, GCPtrMem,
7025 IEM_ACCESS_DATA_R, 7 /** @todo FBLD alignment check */);
7026 if (rc == VINF_SUCCESS)
7027 {
7028 *pd80Dst = *pd80Src;
7029 rc = iemMemCommitAndUnmap(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7030 }
7031 return rc;
7032}
7033
7034
7035#ifdef IEM_WITH_SETJMP
7036/**
7037 * Fetches a data decimal tword, longjmp on error.
7038 *
7039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7040 * @param pd80Dst Where to return the tword.
7041 * @param iSegReg The index of the segment register to use for
7042 * this access. The base and limits are checked.
7043 * @param GCPtrMem The address of the guest memory.
7044 */
7045void iemMemFetchDataD80Jmp(PVMCPUCC pVCpu, PRTPBCD80U pd80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7046{
7047 /* The lazy approach for now... */
7048 PCRTPBCD80U pd80Src = (PCRTPBCD80U)iemMemMapJmp(pVCpu, sizeof(*pd80Src), iSegReg, GCPtrMem,
7049 IEM_ACCESS_DATA_R, 7 /** @todo FBSTP alignment check */);
7050 *pd80Dst = *pd80Src;
7051 iemMemCommitAndUnmapJmp(pVCpu, (void *)pd80Src, IEM_ACCESS_DATA_R);
7052}
7053#endif
7054
7055
7056/**
7057 * Fetches a data dqword (double qword), generally SSE related.
7058 *
7059 * @returns Strict VBox status code.
7060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7061 * @param   pu128Dst            Where to return the dqword.
7062 * @param iSegReg The index of the segment register to use for
7063 * this access. The base and limits are checked.
7064 * @param GCPtrMem The address of the guest memory.
7065 */
7066VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7067{
7068 /* The lazy approach for now... */
7069 PCRTUINT128U pu128Src;
7070 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7071 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7072 if (rc == VINF_SUCCESS)
7073 {
7074 pu128Dst->au64[0] = pu128Src->au64[0];
7075 pu128Dst->au64[1] = pu128Src->au64[1];
7076 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7077 }
7078 return rc;
7079}
7080
7081
7082#ifdef IEM_WITH_SETJMP
7083/**
7084 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
7085 *
7086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7087 * @param   pu128Dst            Where to return the dqword.
7088 * @param iSegReg The index of the segment register to use for
7089 * this access. The base and limits are checked.
7090 * @param GCPtrMem The address of the guest memory.
7091 */
7092void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7093{
7094 /* The lazy approach for now... */
7095 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem,
7096 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7097 pu128Dst->au64[0] = pu128Src->au64[0];
7098 pu128Dst->au64[1] = pu128Src->au64[1];
7099 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7100}
7101#endif
7102
7103
7104/**
7105 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7106 * related.
7107 *
7108 * Raises \#GP(0) if not aligned.
7109 *
7110 * @returns Strict VBox status code.
7111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7112 * @param   pu128Dst            Where to return the dqword.
7113 * @param iSegReg The index of the segment register to use for
7114 * this access. The base and limits are checked.
7115 * @param GCPtrMem The address of the guest memory.
7116 */
7117VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7118{
7119 /* The lazy approach for now... */
7120 PCRTUINT128U pu128Src;
7121 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem,
7122 IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7123 if (rc == VINF_SUCCESS)
7124 {
7125 pu128Dst->au64[0] = pu128Src->au64[0];
7126 pu128Dst->au64[1] = pu128Src->au64[1];
7127 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7128 }
7129 return rc;
7130}
7131
7132
7133#ifdef IEM_WITH_SETJMP
7134/**
7135 * Fetches a data dqword (double qword) at an aligned address, generally SSE
7136 * related, longjmp on error.
7137 *
7138 * Raises \#GP(0) if not aligned.
7139 *
7140 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7141 * @param   pu128Dst            Where to return the dqword.
7142 * @param iSegReg The index of the segment register to use for
7143 * this access. The base and limits are checked.
7144 * @param GCPtrMem The address of the guest memory.
7145 */
7146void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7147{
7148 /* The lazy approach for now... */
7149 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7150 (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7151 pu128Dst->au64[0] = pu128Src->au64[0];
7152 pu128Dst->au64[1] = pu128Src->au64[1];
7153 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
7154}
7155#endif
7156
7157
7158/**
7159 * Fetches a data oword (octo word), generally AVX related.
7160 *
7161 * @returns Strict VBox status code.
7162 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7163 * @param   pu256Dst            Where to return the oword.
7164 * @param iSegReg The index of the segment register to use for
7165 * this access. The base and limits are checked.
7166 * @param GCPtrMem The address of the guest memory.
7167 */
7168VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7169{
7170 /* The lazy approach for now... */
7171 PCRTUINT256U pu256Src;
7172 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7173 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7174 if (rc == VINF_SUCCESS)
7175 {
7176 pu256Dst->au64[0] = pu256Src->au64[0];
7177 pu256Dst->au64[1] = pu256Src->au64[1];
7178 pu256Dst->au64[2] = pu256Src->au64[2];
7179 pu256Dst->au64[3] = pu256Src->au64[3];
7180 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7181 }
7182 return rc;
7183}
7184
7185
7186#ifdef IEM_WITH_SETJMP
7187/**
7188 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
7189 *
7190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7191 * @param   pu256Dst            Where to return the oword.
7192 * @param iSegReg The index of the segment register to use for
7193 * this access. The base and limits are checked.
7194 * @param GCPtrMem The address of the guest memory.
7195 */
7196void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7197{
7198 /* The lazy approach for now... */
7199 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem,
7200 IEM_ACCESS_DATA_R, 0 /* NO_AC variant */);
7201 pu256Dst->au64[0] = pu256Src->au64[0];
7202 pu256Dst->au64[1] = pu256Src->au64[1];
7203 pu256Dst->au64[2] = pu256Src->au64[2];
7204 pu256Dst->au64[3] = pu256Src->au64[3];
7205 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7206}
7207#endif
7208
7209
7210/**
7211 * Fetches a data oword (octo word) at an aligned address, generally AVX
7212 * related.
7213 *
7214 * Raises \#GP(0) if not aligned.
7215 *
7216 * @returns Strict VBox status code.
7217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7218 * @param   pu256Dst            Where to return the oword.
7219 * @param iSegReg The index of the segment register to use for
7220 * this access. The base and limits are checked.
7221 * @param GCPtrMem The address of the guest memory.
7222 */
7223VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7224{
7225 /* The lazy approach for now... */
7226 PCRTUINT256U pu256Src;
7227 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem,
7228 IEM_ACCESS_DATA_R, (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7229 if (rc == VINF_SUCCESS)
7230 {
7231 pu256Dst->au64[0] = pu256Src->au64[0];
7232 pu256Dst->au64[1] = pu256Src->au64[1];
7233 pu256Dst->au64[2] = pu256Src->au64[2];
7234 pu256Dst->au64[3] = pu256Src->au64[3];
7235 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7236 }
7237 return rc;
7238}
7239
7240
7241#ifdef IEM_WITH_SETJMP
7242/**
7243 * Fetches a data oword (octo word) at an aligned address, generally AVX
7244 * related, longjmp on error.
7245 *
7246 * Raises \#GP(0) if not aligned.
7247 *
7248 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7249 * @param   pu256Dst            Where to return the oword.
7250 * @param iSegReg The index of the segment register to use for
7251 * this access. The base and limits are checked.
7252 * @param GCPtrMem The address of the guest memory.
7253 */
7254void iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7255{
7256 /* The lazy approach for now... */
7257 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
7258 (sizeof(*pu256Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7259 pu256Dst->au64[0] = pu256Src->au64[0];
7260 pu256Dst->au64[1] = pu256Src->au64[1];
7261 pu256Dst->au64[2] = pu256Src->au64[2];
7262 pu256Dst->au64[3] = pu256Src->au64[3];
7263 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
7264}
7265#endif
7266
7267
7268
7269/**
7270 * Fetches a descriptor register (lgdt, lidt).
7271 *
7272 * @returns Strict VBox status code.
7273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7274 * @param pcbLimit Where to return the limit.
7275 * @param pGCPtrBase Where to return the base.
7276 * @param iSegReg The index of the segment register to use for
7277 * this access. The base and limits are checked.
7278 * @param GCPtrMem The address of the guest memory.
7279 * @param enmOpSize The effective operand size.
7280 */
7281VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
7282 RTGCPTR GCPtrMem, IEMMODE enmOpSize) RT_NOEXCEPT
7283{
7284 /*
7285 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
7286 * little special:
7287 * - The two reads are done separately.
7288     *      - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
7289 * - We suspect the 386 to actually commit the limit before the base in
7290 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
7291     *        don't try to emulate this eccentric behavior, because it's not well
7292 * enough understood and rather hard to trigger.
7293 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
7294 */
7295 VBOXSTRICTRC rcStrict;
7296 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7297 {
7298 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7299 if (rcStrict == VINF_SUCCESS)
7300 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
7301 }
7302 else
7303 {
7304        uint32_t uTmp = 0; /* (silences a Visual C++ may-be-used-uninitialized warning) */
7305 if (enmOpSize == IEMMODE_32BIT)
7306 {
7307 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
7308 {
7309 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7310 if (rcStrict == VINF_SUCCESS)
7311 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7312 }
7313 else
7314 {
7315 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
7316 if (rcStrict == VINF_SUCCESS)
7317 {
7318 *pcbLimit = (uint16_t)uTmp;
7319 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7320 }
7321 }
7322 if (rcStrict == VINF_SUCCESS)
7323 *pGCPtrBase = uTmp;
7324 }
7325 else
7326 {
7327 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
7328 if (rcStrict == VINF_SUCCESS)
7329 {
7330 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
7331 if (rcStrict == VINF_SUCCESS)
7332 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
7333 }
7334 }
7335 }
7336 return rcStrict;
7337}
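
/*
 * Illustrative sketch (assumption, not from the original sources): roughly how
 * an LGDT-style implementation would consume iemMemFetchDataXdtr.  The
 * surrounding context, GCPtrEffSrc and the CPUMSetGuestGDTR call are
 * assumptions made for illustration only.
 */
#if 0 /* illustration only */
    uint16_t     cbLimit;
    RTGCPTR      GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iSegReg, GCPtrEffSrc, enmOpSize);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit); /* the 16-bit operand size case is already masked to 24 bits above */
#endif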
7338
7339
7340
7341/**
7342 * Stores a data byte.
7343 *
7344 * @returns Strict VBox status code.
7345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7346 * @param iSegReg The index of the segment register to use for
7347 * this access. The base and limits are checked.
7348 * @param GCPtrMem The address of the guest memory.
7349 * @param u8Value The value to store.
7350 */
7351VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7352{
7353 /* The lazy approach for now... */
7354 uint8_t *pu8Dst;
7355 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7356 if (rc == VINF_SUCCESS)
7357 {
7358 *pu8Dst = u8Value;
7359 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7360 }
7361 return rc;
7362}
7363
7364
7365#ifdef IEM_WITH_SETJMP
7366/**
7367 * Stores a data byte, longjmp on error.
7368 *
7369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7370 * @param iSegReg The index of the segment register to use for
7371 * this access. The base and limits are checked.
7372 * @param GCPtrMem The address of the guest memory.
7373 * @param u8Value The value to store.
7374 */
7375void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value) RT_NOEXCEPT
7376{
7377 /* The lazy approach for now... */
7378 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0);
7379 *pu8Dst = u8Value;
7380 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
7381}
7382#endif
7383
7384
7385/**
7386 * Stores a data word.
7387 *
7388 * @returns Strict VBox status code.
7389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7390 * @param iSegReg The index of the segment register to use for
7391 * this access. The base and limits are checked.
7392 * @param GCPtrMem The address of the guest memory.
7393 * @param u16Value The value to store.
7394 */
7395VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7396{
7397 /* The lazy approach for now... */
7398 uint16_t *pu16Dst;
7399 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7400 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7401 if (rc == VINF_SUCCESS)
7402 {
7403 *pu16Dst = u16Value;
7404 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7405 }
7406 return rc;
7407}
7408
7409
7410#ifdef IEM_WITH_SETJMP
7411/**
7412 * Stores a data word, longjmp on error.
7413 *
7414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7415 * @param iSegReg The index of the segment register to use for
7416 * this access. The base and limits are checked.
7417 * @param GCPtrMem The address of the guest memory.
7418 * @param u16Value The value to store.
7419 */
7420void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value) RT_NOEXCEPT
7421{
7422 /* The lazy approach for now... */
7423 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem,
7424 IEM_ACCESS_DATA_W, sizeof(*pu16Dst) - 1);
7425 *pu16Dst = u16Value;
7426 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
7427}
7428#endif
7429
7430
7431/**
7432 * Stores a data dword.
7433 *
7434 * @returns Strict VBox status code.
7435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7436 * @param iSegReg The index of the segment register to use for
7437 * this access. The base and limits are checked.
7438 * @param GCPtrMem The address of the guest memory.
7439 * @param u32Value The value to store.
7440 */
7441VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7442{
7443 /* The lazy approach for now... */
7444 uint32_t *pu32Dst;
7445 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7446 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7447 if (rc == VINF_SUCCESS)
7448 {
7449 *pu32Dst = u32Value;
7450 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7451 }
7452 return rc;
7453}
7454
7455
7456#ifdef IEM_WITH_SETJMP
7457/**
7458 * Stores a data dword, longjmp on error.
7459 *
7461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7462 * @param iSegReg The index of the segment register to use for
7463 * this access. The base and limits are checked.
7464 * @param GCPtrMem The address of the guest memory.
7465 * @param u32Value The value to store.
7466 */
7467void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value) RT_NOEXCEPT
7468{
7469 /* The lazy approach for now... */
7470 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem,
7471 IEM_ACCESS_DATA_W, sizeof(*pu32Dst) - 1);
7472 *pu32Dst = u32Value;
7473 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
7474}
7475#endif
7476
7477
7478/**
7479 * Stores a data qword.
7480 *
7481 * @returns Strict VBox status code.
7482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7483 * @param iSegReg The index of the segment register to use for
7484 * this access. The base and limits are checked.
7485 * @param GCPtrMem The address of the guest memory.
7486 * @param u64Value The value to store.
7487 */
7488VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7489{
7490 /* The lazy approach for now... */
7491 uint64_t *pu64Dst;
7492 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7493 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7494 if (rc == VINF_SUCCESS)
7495 {
7496 *pu64Dst = u64Value;
7497 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7498 }
7499 return rc;
7500}
7501
7502
7503#ifdef IEM_WITH_SETJMP
7504/**
7505 * Stores a data qword, longjmp on error.
7506 *
7507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7508 * @param iSegReg The index of the segment register to use for
7509 * this access. The base and limits are checked.
7510 * @param GCPtrMem The address of the guest memory.
7511 * @param u64Value The value to store.
7512 */
7513void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) RT_NOEXCEPT
7514{
7515 /* The lazy approach for now... */
7516 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem,
7517 IEM_ACCESS_DATA_W, sizeof(*pu64Dst) - 1);
7518 *pu64Dst = u64Value;
7519 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
7520}
7521#endif
7522
7523
7524/**
7525 * Stores a data dqword.
7526 *
7527 * @returns Strict VBox status code.
7528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7529 * @param iSegReg The index of the segment register to use for
7530 * this access. The base and limits are checked.
7531 * @param GCPtrMem The address of the guest memory.
7532 * @param u128Value The value to store.
7533 */
7534VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7535{
7536 /* The lazy approach for now... */
7537 PRTUINT128U pu128Dst;
7538 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7539 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7540 if (rc == VINF_SUCCESS)
7541 {
7542 pu128Dst->au64[0] = u128Value.au64[0];
7543 pu128Dst->au64[1] = u128Value.au64[1];
7544 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7545 }
7546 return rc;
7547}
7548
7549
7550#ifdef IEM_WITH_SETJMP
7551/**
7552 * Stores a data dqword, longjmp on error.
7553 *
7554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7555 * @param iSegReg The index of the segment register to use for
7556 * this access. The base and limits are checked.
7557 * @param GCPtrMem The address of the guest memory.
7558 * @param u128Value The value to store.
7559 */
7560void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7561{
7562 /* The lazy approach for now... */
7563 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem,
7564 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7565 pu128Dst->au64[0] = u128Value.au64[0];
7566 pu128Dst->au64[1] = u128Value.au64[1];
7567 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7568}
7569#endif
7570
7571
7572/**
7573 * Stores a data dqword, SSE aligned.
7574 *
7575 * @returns Strict VBox status code.
7576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7577 * @param iSegReg The index of the segment register to use for
7578 * this access. The base and limits are checked.
7579 * @param GCPtrMem The address of the guest memory.
7580 * @param u128Value The value to store.
7581 */
7582VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7583{
7584 /* The lazy approach for now... */
7585 PRTUINT128U pu128Dst;
7586 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7587 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7588 if (rc == VINF_SUCCESS)
7589 {
7590 pu128Dst->au64[0] = u128Value.au64[0];
7591 pu128Dst->au64[1] = u128Value.au64[1];
7592 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7593 }
7594 return rc;
7595}
7596
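/*
 * Illustration (not part of the build): the last iemMemMap() argument above packs
 * an alignment mask in its low bits (sizeof(RTUINT128U) - 1 = 15 for a dqword) plus
 * flags selecting how a misaligned access faults; for an SSE-aligned store that is
 * \#GP(0) rather than \#AC.  A minimal sketch of the address check this implies,
 * assuming the real work stays inside iemMemMap() (the helper name below is ours):
 *
 * @code
 *      static bool iemSketchIsDqwordAligned(RTGCPTR GCPtrMem)
 *      {
 *          RTGCPTR const fAlignMask = sizeof(RTUINT128U) - 1;  // 15
 *          return (GCPtrMem & fAlignMask) == 0;                // false => #GP(0) for MOVAPS & friends
 *      }
 * @endcode
 */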
7597
7598#ifdef IEM_WITH_SETJMP
7599/**
7600 * Stores a data dqword, SSE aligned, longjmp on error.
7601 *
7603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7604 * @param iSegReg The index of the segment register to use for
7605 * this access. The base and limits are checked.
7606 * @param GCPtrMem The address of the guest memory.
7607 * @param u128Value The value to store.
7608 */
7609void iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) RT_NOEXCEPT
7610{
7611 /* The lazy approach for now... */
7612 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W,
7613 (sizeof(*pu128Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
7614 pu128Dst->au64[0] = u128Value.au64[0];
7615 pu128Dst->au64[1] = u128Value.au64[1];
7616 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
7617}
7618#endif
7619
7620
7621/**
7622 * Stores a data qqword (256 bits).
7623 *
7624 * @returns Strict VBox status code.
7625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7626 * @param iSegReg The index of the segment register to use for
7627 * this access. The base and limits are checked.
7628 * @param GCPtrMem The address of the guest memory.
7629 * @param pu256Value Pointer to the value to store.
7630 */
7631VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7632{
7633 /* The lazy approach for now... */
7634 PRTUINT256U pu256Dst;
7635 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7636 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7637 if (rc == VINF_SUCCESS)
7638 {
7639 pu256Dst->au64[0] = pu256Value->au64[0];
7640 pu256Dst->au64[1] = pu256Value->au64[1];
7641 pu256Dst->au64[2] = pu256Value->au64[2];
7642 pu256Dst->au64[3] = pu256Value->au64[3];
7643 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7644 }
7645 return rc;
7646}
7647
7648
7649#ifdef IEM_WITH_SETJMP
7650/**
7651 * Stores a data qqword (256 bits), longjmp on error.
7652 *
7653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7654 * @param iSegReg The index of the segment register to use for
7655 * this access. The base and limits are checked.
7656 * @param GCPtrMem The address of the guest memory.
7657 * @param pu256Value Pointer to the value to store.
7658 */
7659void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7660{
7661 /* The lazy approach for now... */
7662 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7663 IEM_ACCESS_DATA_W, 0 /* NO_AC variant */);
7664 pu256Dst->au64[0] = pu256Value->au64[0];
7665 pu256Dst->au64[1] = pu256Value->au64[1];
7666 pu256Dst->au64[2] = pu256Value->au64[2];
7667 pu256Dst->au64[3] = pu256Value->au64[3];
7668 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7669}
7670#endif
7671
7672
7673/**
7674 * Stores a data qqword (256 bits), AVX \#GP(0) aligned.
7675 *
7676 * @returns Strict VBox status code.
7677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7678 * @param iSegReg The index of the segment register to use for
7679 * this access. The base and limits are checked.
7680 * @param GCPtrMem The address of the guest memory.
7681 * @param pu256Value Pointer to the value to store.
7682 */
7683VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7684{
7685 /* The lazy approach for now... */
7686 PRTUINT256U pu256Dst;
7687 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7688 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7689 if (rc == VINF_SUCCESS)
7690 {
7691 pu256Dst->au64[0] = pu256Value->au64[0];
7692 pu256Dst->au64[1] = pu256Value->au64[1];
7693 pu256Dst->au64[2] = pu256Value->au64[2];
7694 pu256Dst->au64[3] = pu256Value->au64[3];
7695 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7696 }
7697 return rc;
7698}
7699
7700
7701#ifdef IEM_WITH_SETJMP
7702/**
7703 * Stores a data qqword (256 bits), AVX aligned, longjmp on error.
7704 *
7706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7707 * @param iSegReg The index of the segment register to use for
7708 * this access. The base and limits are checked.
7709 * @param GCPtrMem The address of the guest memory.
7710 * @param pu256Value Pointer to the value to store.
7711 */
7712void iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) RT_NOEXCEPT
7713{
7714 /* The lazy approach for now... */
7715 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem,
7716 IEM_ACCESS_DATA_W, (sizeof(*pu256Dst) - 1) | IEM_MEMMAP_F_ALIGN_GP);
7717 pu256Dst->au64[0] = pu256Value->au64[0];
7718 pu256Dst->au64[1] = pu256Value->au64[1];
7719 pu256Dst->au64[2] = pu256Value->au64[2];
7720 pu256Dst->au64[3] = pu256Value->au64[3];
7721 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
7722}
7723#endif
7724
7725
7726/**
7727 * Stores a descriptor register (sgdt, sidt).
7728 *
7729 * @returns Strict VBox status code.
7730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7731 * @param cbLimit The limit.
7732 * @param GCPtrBase The base address.
7733 * @param iSegReg The index of the segment register to use for
7734 * this access. The base and limits are checked.
7735 * @param GCPtrMem The address of the guest memory.
7736 */
7737VBOXSTRICTRC iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
7738{
7739 /*
7740 * The SIDT and SGDT instructions actually store the data using two
7741 * independent writes (see bs3CpuBasic2_sidt_sgdt_One), and they do not
7742 * respond to operand-size prefixes. A layout sketch follows this function.
7743 */
7744 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
7745 if (rcStrict == VINF_SUCCESS)
7746 {
7747 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
7748 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
7749 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
7750 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
7751 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
7752 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
7753 else
7754 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
7755 }
7756 return rcStrict;
7757}
7758
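/*
 * Illustration (not part of the build): the two stores above produce the classic
 * descriptor-register image in guest memory: a 16-bit limit at offset 0 followed by
 * the base at offset 2 (a dword for 16/32-bit code, a qword in 64-bit mode; on a 286
 * target the top base byte is forced to 0xff).  A hypothetical packed struct mirroring
 * the 64-bit layout (the name is ours, not from the headers):
 *
 * @code
 *      #pragma pack(1)
 *      typedef struct IEMSKETCHXDTR64
 *      {
 *          uint16_t cbLimit;   // offset 0: written by iemMemStoreDataU16
 *          uint64_t uBase;     // offset 2: written by iemMemStoreDataU64
 *      } IEMSKETCHXDTR64;
 *      #pragma pack()
 * @endcode
 */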
7759
7760/**
7761 * Pushes a word onto the stack.
7762 *
7763 * @returns Strict VBox status code.
7764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7765 * @param u16Value The value to push.
7766 */
7767VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value) RT_NOEXCEPT
7768{
7769 /* Decrement the stack pointer. */
7770 uint64_t uNewRsp;
7771 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
7772
7773 /* Write the word the lazy way. */
7774 uint16_t *pu16Dst;
7775 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
7776 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
7777 if (rc == VINF_SUCCESS)
7778 {
7779 *pu16Dst = u16Value;
7780 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
7781 }
7782
7783 /* Commit the new RSP value unless an access handler made trouble. */
7784 if (rc == VINF_SUCCESS)
7785 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7786
7787 return rc;
7788}
7789
7790
7791/**
7792 * Pushes a dword onto the stack.
7793 *
7794 * @returns Strict VBox status code.
7795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7796 * @param u32Value The value to push.
7797 */
7798VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7799{
7800 /* Decrement the stack pointer. */
7801 uint64_t uNewRsp;
7802 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7803
7804 /* Write the dword the lazy way. */
7805 uint32_t *pu32Dst;
7806 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
7807 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
7808 if (rc == VINF_SUCCESS)
7809 {
7810 *pu32Dst = u32Value;
7811 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
7812 }
7813
7814 /* Commit the new RSP value unless an access handler made trouble. */
7815 if (rc == VINF_SUCCESS)
7816 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7817
7818 return rc;
7819}
7820
7821
7822/**
7823 * Pushes a dword segment register value onto the stack.
7824 *
7825 * @returns Strict VBox status code.
7826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7827 * @param u32Value The value to push.
7828 */
7829VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value) RT_NOEXCEPT
7830{
7831 /* Decrement the stack pointer. */
7832 uint64_t uNewRsp;
7833 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
7834
7835 /* The Intel docs talk about zero extending the selector register
7836 value. My actual Intel CPU here might be zero extending the value,
7837 but it still only writes the lower word (see the sketch after this function). */
7838 /** @todo Test this on newer HW and on AMD and in 64-bit mode. Also test what
7839 * happens when crossing a page boundary: is the high word checked
7840 * for write accessibility or not? Probably it is. What about segment limits?
7841 * It appears this behavior is also shared with trap error codes.
7842 *
7843 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
7844 * ancient hardware to pin down when it actually changed. */
7845 uint16_t *pu16Dst;
7846 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop,
7847 IEM_ACCESS_STACK_RW, sizeof(*pu16Dst) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
7848 if (rc == VINF_SUCCESS)
7849 {
7850 *pu16Dst = (uint16_t)u32Value;
7851 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
7852 }
7853
7854 /* Commit the new RSP value unless an access handler made trouble. */
7855 if (rc == VINF_SUCCESS)
7856 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7857
7858 return rc;
7859}
7860
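/*
 * Illustration (not part of the build): with a 32-bit operand size the code above
 * reserves a full dword stack slot (hence mapping sizeof(uint32_t) bytes read-write)
 * but stores only the low word of the selector, so the upper word of the slot keeps
 * its previous contents.  Rough effect, assuming ESP = 0x1000 before a PUSH FS:
 *
 * @code
 *      //  ESP afterwards:            0x0FFC
 *      //  bytes [0x0FFC..0x0FFD]:    low word of the FS selector (written)
 *      //  bytes [0x0FFE..0x0FFF]:    previous stack contents (mapped RW, written back unchanged)
 * @endcode
 */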
7861
7862/**
7863 * Pushes a qword onto the stack.
7864 *
7865 * @returns Strict VBox status code.
7866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7867 * @param u64Value The value to push.
7868 */
7869VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value) RT_NOEXCEPT
7870{
7871 /* Decrement the stack pointer. */
7872 uint64_t uNewRsp;
7873 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
7874
7875 /* Write the qword the lazy way. */
7876 uint64_t *pu64Dst;
7877 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
7878 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
7879 if (rc == VINF_SUCCESS)
7880 {
7881 *pu64Dst = u64Value;
7882 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
7883 }
7884
7885 /* Commit the new RSP value unless an access handler made trouble. */
7886 if (rc == VINF_SUCCESS)
7887 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7888
7889 return rc;
7890}
7891
7892
7893/**
7894 * Pops a word from the stack.
7895 *
7896 * @returns Strict VBox status code.
7897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7898 * @param pu16Value Where to store the popped value.
7899 */
7900VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value) RT_NOEXCEPT
7901{
7902 /* Increment the stack pointer. */
7903 uint64_t uNewRsp;
7904 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
7905
7906 /* Read the word the lazy way. */
7907 uint16_t const *pu16Src;
7908 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
7909 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
7910 if (rc == VINF_SUCCESS)
7911 {
7912 *pu16Value = *pu16Src;
7913 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
7914
7915 /* Commit the new RSP value. */
7916 if (rc == VINF_SUCCESS)
7917 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7918 }
7919
7920 return rc;
7921}
7922
7923
7924/**
7925 * Pops a dword from the stack.
7926 *
7927 * @returns Strict VBox status code.
7928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7929 * @param pu32Value Where to store the popped value.
7930 */
7931VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value) RT_NOEXCEPT
7932{
7933 /* Increment the stack pointer. */
7934 uint64_t uNewRsp;
7935 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
7936
7937 /* Read the dword the lazy way. */
7938 uint32_t const *pu32Src;
7939 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
7940 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
7941 if (rc == VINF_SUCCESS)
7942 {
7943 *pu32Value = *pu32Src;
7944 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
7945
7946 /* Commit the new RSP value. */
7947 if (rc == VINF_SUCCESS)
7948 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7949 }
7950
7951 return rc;
7952}
7953
7954
7955/**
7956 * Pops a qword from the stack.
7957 *
7958 * @returns Strict VBox status code.
7959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7960 * @param pu64Value Where to store the popped value.
7961 */
7962VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value) RT_NOEXCEPT
7963{
7964 /* Increment the stack pointer. */
7965 uint64_t uNewRsp;
7966 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
7967
7968 /* Read the qword the lazy way. */
7969 uint64_t const *pu64Src;
7970 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
7971 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
7972 if (rc == VINF_SUCCESS)
7973 {
7974 *pu64Value = *pu64Src;
7975 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
7976
7977 /* Commit the new RSP value. */
7978 if (rc == VINF_SUCCESS)
7979 pVCpu->cpum.GstCtx.rsp = uNewRsp;
7980 }
7981
7982 return rc;
7983}
7984
7985
7986/**
7987 * Pushes a word onto the stack, using a temporary stack pointer.
7988 *
7989 * @returns Strict VBox status code.
7990 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7991 * @param u16Value The value to push.
7992 * @param pTmpRsp Pointer to the temporary stack pointer.
7993 */
7994VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
7995{
7996 /* Decrement the stack pointer. */
7997 RTUINT64U NewRsp = *pTmpRsp;
7998 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
7999
8000 /* Write the word the lazy way. */
8001 uint16_t *pu16Dst;
8002 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop,
8003 IEM_ACCESS_STACK_W, sizeof(*pu16Dst) - 1);
8004 if (rc == VINF_SUCCESS)
8005 {
8006 *pu16Dst = u16Value;
8007 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
8008 }
8009
8010 /* Commit the new RSP value unless an access handler made trouble. */
8011 if (rc == VINF_SUCCESS)
8012 *pTmpRsp = NewRsp;
8013
8014 return rc;
8015}
8016
8017
8018/**
8019 * Pushes a dword onto the stack, using a temporary stack pointer.
8020 *
8021 * @returns Strict VBox status code.
8022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8023 * @param u32Value The value to push.
8024 * @param pTmpRsp Pointer to the temporary stack pointer.
8025 */
8026VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8027{
8028 /* Decrement the stack pointer. */
8029 RTUINT64U NewRsp = *pTmpRsp;
8030 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
8031
8032 /* Write the dword the lazy way. */
8033 uint32_t *pu32Dst;
8034 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop,
8035 IEM_ACCESS_STACK_W, sizeof(*pu32Dst) - 1);
8036 if (rc == VINF_SUCCESS)
8037 {
8038 *pu32Dst = u32Value;
8039 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
8040 }
8041
8042 /* Commit the new RSP value unless an access handler made trouble. */
8043 if (rc == VINF_SUCCESS)
8044 *pTmpRsp = NewRsp;
8045
8046 return rc;
8047}
8048
8049
8050/**
8051 * Pushes a qword onto the stack, using a temporary stack pointer.
8052 *
8053 * @returns Strict VBox status code.
8054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8055 * @param u64Value The value to push.
8056 * @param pTmpRsp Pointer to the temporary stack pointer.
8057 */
8058VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8059{
8060 /* Decrement the stack pointer. */
8061 RTUINT64U NewRsp = *pTmpRsp;
8062 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
8063
8064 /* Write the qword the lazy way. */
8065 uint64_t *pu64Dst;
8066 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop,
8067 IEM_ACCESS_STACK_W, sizeof(*pu64Dst) - 1);
8068 if (rc == VINF_SUCCESS)
8069 {
8070 *pu64Dst = u64Value;
8071 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
8072 }
8073
8074 /* Commit the new RSP value unless an access handler made trouble. */
8075 if (rc == VINF_SUCCESS)
8076 *pTmpRsp = NewRsp;
8077
8078 return rc;
8079}
8080
8081
8082/**
8083 * Pops a word from the stack, using a temporary stack pointer.
8084 *
8085 * @returns Strict VBox status code.
8086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8087 * @param pu16Value Where to store the popped value.
8088 * @param pTmpRsp Pointer to the temporary stack pointer.
8089 */
8090VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8091{
8092 /* Increment the stack pointer. */
8093 RTUINT64U NewRsp = *pTmpRsp;
8094 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
8095
8096 /* Read the word the lazy way. */
8097 uint16_t const *pu16Src;
8098 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop,
8099 IEM_ACCESS_STACK_R, sizeof(*pu16Src) - 1);
8100 if (rc == VINF_SUCCESS)
8101 {
8102 *pu16Value = *pu16Src;
8103 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
8104
8105 /* Commit the new RSP value. */
8106 if (rc == VINF_SUCCESS)
8107 *pTmpRsp = NewRsp;
8108 }
8109
8110 return rc;
8111}
8112
8113
8114/**
8115 * Pops a dword from the stack, using a temporary stack pointer.
8116 *
8117 * @returns Strict VBox status code.
8118 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8119 * @param pu32Value Where to store the popped value.
8120 * @param pTmpRsp Pointer to the temporary stack pointer.
8121 */
8122VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8123{
8124 /* Increment the stack pointer. */
8125 RTUINT64U NewRsp = *pTmpRsp;
8126 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
8127
8128 /* Read the dword the lazy way. */
8129 uint32_t const *pu32Src;
8130 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop,
8131 IEM_ACCESS_STACK_R, sizeof(*pu32Src) - 1);
8132 if (rc == VINF_SUCCESS)
8133 {
8134 *pu32Value = *pu32Src;
8135 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
8136
8137 /* Commit the new RSP value. */
8138 if (rc == VINF_SUCCESS)
8139 *pTmpRsp = NewRsp;
8140 }
8141
8142 return rc;
8143}
8144
8145
8146/**
8147 * Pops a qword from the stack, using a temporary stack pointer.
8148 *
8149 * @returns Strict VBox status code.
8150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8151 * @param pu64Value Where to store the popped value.
8152 * @param pTmpRsp Pointer to the temporary stack pointer.
8153 */
8154VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp) RT_NOEXCEPT
8155{
8156 /* Increment the stack pointer. */
8157 RTUINT64U NewRsp = *pTmpRsp;
8158 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8159
8160 /* Read the qword the lazy way. */
8161 uint64_t const *pu64Src;
8162 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop,
8163 IEM_ACCESS_STACK_R, sizeof(*pu64Src) - 1);
8164 if (rcStrict == VINF_SUCCESS)
8165 {
8166 *pu64Value = *pu64Src;
8167 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
8168
8169 /* Commit the new RSP value. */
8170 if (rcStrict == VINF_SUCCESS)
8171 *pTmpRsp = NewRsp;
8172 }
8173
8174 return rcStrict;
8175}
8176
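/*
 * Illustration (not part of the build): the *Ex push/pop variants above operate on a
 * caller supplied RTUINT64U instead of CPUMCTX::rsp, so a multi-step operation can be
 * abandoned simply by never committing the temporary value.  A minimal usage sketch,
 * popping two hypothetical words:
 *
 * @code
 *      RTUINT64U    TmpRsp;
 *      TmpRsp.u = pVCpu->cpum.GstCtx.rsp;                  // start from the real RSP
 *      uint16_t     u16First = 0, u16Second = 0;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopU16Ex(pVCpu, &u16First, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          rcStrict = iemMemStackPopU16Ex(pVCpu, &u16Second, &TmpRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = TmpRsp.u;              // commit only on full success
 * @endcode
 */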
8177
8178/**
8179 * Begin a special stack push (used by interrupts, exceptions and such).
8180 *
8181 * This will raise \#SS or \#PF if appropriate.
8182 *
8183 * @returns Strict VBox status code.
8184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8185 * @param cbMem The number of bytes to push onto the stack.
8186 * @param cbAlign The alignment mask (7, 3, 1).
8187 * @param ppvMem Where to return the pointer to the stack memory.
8188 * As with the other memory functions this could be
8189 * direct access or bounce buffered access, so
8190 * don't commit the register until the commit call
8191 * succeeds.
8192 * @param puNewRsp Where to return the new RSP value. This must be
8193 * passed unchanged to
8194 * iemMemStackPushCommitSpecial().
8195 */
8196VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8197 void **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8198{
8199 Assert(cbMem < UINT8_MAX);
8200 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
8201 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop,
8202 IEM_ACCESS_STACK_W, cbAlign);
8203}
8204
8205
8206/**
8207 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
8208 *
8209 * This will update the rSP.
8210 *
8211 * @returns Strict VBox status code.
8212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8213 * @param pvMem The pointer returned by
8214 * iemMemStackPushBeginSpecial().
8215 * @param uNewRsp The new RSP value returned by
8216 * iemMemStackPushBeginSpecial().
8217 */
8218VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp) RT_NOEXCEPT
8219{
8220 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
8221 if (rcStrict == VINF_SUCCESS)
8222 pVCpu->cpum.GstCtx.rsp = uNewRsp;
8223 return rcStrict;
8224}
8225
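/*
 * Illustration (not part of the build): the special push API is a begin / fill / commit
 * sequence; RSP is not updated until iemMemStackPushCommitSpecial() succeeds.  A minimal
 * sketch pushing a hypothetical 8-byte frame:
 *
 * @code
 *      uint64_t    *pu64Frame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, 7, // cbAlign=7 for a qword
 *                                                          (void **)&pu64Frame, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu64Frame = UINT64_C(0);                       // hypothetical frame contents
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu64Frame, uNewRsp);
 *      }
 * @endcode
 */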
8226
8227/**
8228 * Begin a special stack pop (used by iret, retf and such).
8229 *
8230 * This will raise \#SS or \#PF if appropriate.
8231 *
8232 * @returns Strict VBox status code.
8233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8234 * @param cbMem The number of bytes to pop from the stack.
8235 * @param cbAlign The alignment mask (7, 3, 1).
8236 * @param ppvMem Where to return the pointer to the stack memory.
8237 * @param puNewRsp Where to return the new RSP value. This must be
8238 * assigned to CPUMCTX::rsp manually some time
8239 * after iemMemStackPopDoneSpecial() has been
8240 * called.
8241 */
8242VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, uint32_t cbAlign,
8243 void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8244{
8245 Assert(cbMem < UINT8_MAX);
8246 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
8247 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R, cbAlign);
8248}
8249
8250
8251/**
8252 * Continue a special stack pop (used by iret and retf).
8253 *
8254 * This will raise \#SS or \#PF if appropriate.
8255 *
8256 * @returns Strict VBox status code.
8257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8258 * @param cbMem The number of bytes to pop from the stack.
8259 * @param ppvMem Where to return the pointer to the stack memory.
8260 * @param puNewRsp Where to return the new RSP value. This must be
8261 * assigned to CPUMCTX::rsp manually some time
8262 * after iemMemStackPopDoneSpecial() has been
8263 * called.
8264 */
8265VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp) RT_NOEXCEPT
8266{
8267 Assert(cbMem < UINT8_MAX);
8268 RTUINT64U NewRsp;
8269 NewRsp.u = *puNewRsp;
8270 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
8271 *puNewRsp = NewRsp.u;
8272 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R,
8273 0 /* checked in iemMemStackPopBeginSpecial */);
8274}
8275
8276
8277/**
8278 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
8279 * iemMemStackPopContinueSpecial).
8280 *
8281 * The caller will manually commit the rSP.
8282 *
8283 * @returns Strict VBox status code.
8284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8285 * @param pvMem The pointer returned by
8286 * iemMemStackPopBeginSpecial() or
8287 * iemMemStackPopContinueSpecial().
8288 */
8289VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem) RT_NOEXCEPT
8290{
8291 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
8292}
8293
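/*
 * Illustration (not part of the build): the special pop API reads the stack without
 * touching RSP; as documented above, the caller assigns the new RSP itself once the
 * whole operation (an IRET, say) is known to complete.  A minimal sketch:
 *
 * @code
 *      uint64_t const *pu64Frame;
 *      uint64_t        uNewRsp;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, 7, // cbAlign=7 for a qword
 *                                                            (void const **)&pu64Frame, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint64_t const uPopped = *pu64Frame;            // consume the data
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp;           // manual commit
 *          NOREF(uPopped);
 *      }
 * @endcode
 */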
8294
8295/**
8296 * Fetches a system table byte.
8297 *
8298 * @returns Strict VBox status code.
8299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8300 * @param pbDst Where to return the byte.
8301 * @param iSegReg The index of the segment register to use for
8302 * this access. The base and limits are checked.
8303 * @param GCPtrMem The address of the guest memory.
8304 */
8305VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8306{
8307 /* The lazy approach for now... */
8308 uint8_t const *pbSrc;
8309 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8310 if (rc == VINF_SUCCESS)
8311 {
8312 *pbDst = *pbSrc;
8313 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
8314 }
8315 return rc;
8316}
8317
8318
8319/**
8320 * Fetches a system table word.
8321 *
8322 * @returns Strict VBox status code.
8323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8324 * @param pu16Dst Where to return the word.
8325 * @param iSegReg The index of the segment register to use for
8326 * this access. The base and limits are checked.
8327 * @param GCPtrMem The address of the guest memory.
8328 */
8329VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8330{
8331 /* The lazy approach for now... */
8332 uint16_t const *pu16Src;
8333 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8334 if (rc == VINF_SUCCESS)
8335 {
8336 *pu16Dst = *pu16Src;
8337 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
8338 }
8339 return rc;
8340}
8341
8342
8343/**
8344 * Fetches a system table dword.
8345 *
8346 * @returns Strict VBox status code.
8347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8348 * @param pu32Dst Where to return the dword.
8349 * @param iSegReg The index of the segment register to use for
8350 * this access. The base and limits are checked.
8351 * @param GCPtrMem The address of the guest memory.
8352 */
8353VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8354{
8355 /* The lazy approach for now... */
8356 uint32_t const *pu32Src;
8357 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8358 if (rc == VINF_SUCCESS)
8359 {
8360 *pu32Dst = *pu32Src;
8361 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
8362 }
8363 return rc;
8364}
8365
8366
8367/**
8368 * Fetches a system table qword.
8369 *
8370 * @returns Strict VBox status code.
8371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8372 * @param pu64Dst Where to return the qword.
8373 * @param iSegReg The index of the segment register to use for
8374 * this access. The base and limits are checked.
8375 * @param GCPtrMem The address of the guest memory.
8376 */
8377VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
8378{
8379 /* The lazy approach for now... */
8380 uint64_t const *pu64Src;
8381 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R, 0);
8382 if (rc == VINF_SUCCESS)
8383 {
8384 *pu64Dst = *pu64Src;
8385 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
8386 }
8387 return rc;
8388}
8389
8390
8391/**
8392 * Fetches a descriptor table entry with caller specified error code.
8393 *
8394 * @returns Strict VBox status code.
8395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8396 * @param pDesc Where to return the descriptor table entry.
8397 * @param uSel The selector which table entry to fetch.
8398 * @param uXcpt The exception to raise on table lookup error.
8399 * @param uErrorCode The error code associated with the exception.
8400 */
8401static VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel,
8402 uint8_t uXcpt, uint16_t uErrorCode) RT_NOEXCEPT
8403{
8404 AssertPtr(pDesc);
8405 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
8406
8407 /** @todo did the 286 require all 8 bytes to be accessible? */
8408 /*
8409 * Get the selector table base and check bounds.
8410 */
8411 RTGCPTR GCPtrBase;
8412 if (uSel & X86_SEL_LDT)
8413 {
8414 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
8415 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
8416 {
8417 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
8418 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
8419 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8420 uErrorCode, 0);
8421 }
8422
8423 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
8424 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
8425 }
8426 else
8427 {
8428 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
8429 {
8430 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
8431 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
8432 uErrorCode, 0);
8433 }
8434 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
8435 }
8436
8437 /*
8438 * Read the legacy descriptor and maybe the long mode extensions if
8439 * required.
8440 */
8441 VBOXSTRICTRC rcStrict;
8442 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
8443 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
8444 else
8445 {
8446 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
8447 if (rcStrict == VINF_SUCCESS)
8448 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
8449 if (rcStrict == VINF_SUCCESS)
8450 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
8451 if (rcStrict == VINF_SUCCESS)
8452 pDesc->Legacy.au16[3] = 0;
8453 else
8454 return rcStrict;
8455 }
8456
8457 if (rcStrict == VINF_SUCCESS)
8458 {
8459 if ( !IEM_IS_LONG_MODE(pVCpu)
8460 || pDesc->Legacy.Gen.u1DescType)
8461 pDesc->Long.au64[1] = 0;
8462 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
8463 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
8464 else
8465 {
8466 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
8467 /** @todo is this the right exception? */
8468 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
8469 }
8470 }
8471 return rcStrict;
8472}
8473
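/*
 * Illustration (not part of the build): the bounds checks and table-base selection above
 * follow directly from the selector layout: bits 3..15 index the table in 8-byte units,
 * bit 2 picks GDT vs LDT, and bits 0..1 are the RPL.  A small sketch of that decomposition
 * for a hypothetical selector (the real code uses the same X86_SEL_* masks):
 *
 * @code
 *      uint16_t const uSel    = 0x002b;                        // hypothetical selector
 *      uint16_t const idxDesc = uSel >> 3;                     // descriptor index: 5
 *      bool     const fLdt    = RT_BOOL(uSel & X86_SEL_LDT);   // table indicator: LDT
 *      uint8_t  const uRpl    = uSel & X86_SEL_RPL;            // requested privilege level: 3
 *      uint16_t const offDesc = uSel & X86_SEL_MASK;           // byte offset into the table: 0x28
 * @endcode
 */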
8474
8475/**
8476 * Fetches a descriptor table entry.
8477 *
8478 * @returns Strict VBox status code.
8479 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8480 * @param pDesc Where to return the descriptor table entry.
8481 * @param uSel The selector which table entry to fetch.
8482 * @param uXcpt The exception to raise on table lookup error.
8483 */
8484VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt) RT_NOEXCEPT
8485{
8486 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
8487}
8488
8489
8490/**
8491 * Marks the selector descriptor as accessed (only non-system descriptors).
8492 *
8493 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
8494 * will therefore skip the limit checks.
8495 *
8496 * @returns Strict VBox status code.
8497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8498 * @param uSel The selector.
8499 */
8500VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEPT
8501{
8502 /*
8503 * Get the selector table base and calculate the entry address.
8504 */
8505 RTGCPTR GCPtr = uSel & X86_SEL_LDT
8506 ? pVCpu->cpum.GstCtx.ldtr.u64Base
8507 : pVCpu->cpum.GstCtx.gdtr.pGdt;
8508 GCPtr += uSel & X86_SEL_MASK;
8509
8510 /*
8511 * ASMAtomicBitSet will assert if the address is misaligned, so do some
8512 * ugly stuff to avoid this. This will make sure it's an atomic access
8513 * as well as more or less remove any question about 8-bit or 32-bit accesses.
8514 */
8515 VBOXSTRICTRC rcStrict;
8516 uint32_t volatile *pu32;
8517 if ((GCPtr & 3) == 0)
8518 {
8519 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
8520 GCPtr += 2 + 2;
8521 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8522 if (rcStrict != VINF_SUCCESS)
8523 return rcStrict;
8524 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1; see the sketch after this function. */
8525 }
8526 else
8527 {
8528 /* The misaligned GDT/LDT case, map the whole thing. */
8529 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW, 0);
8530 if (rcStrict != VINF_SUCCESS)
8531 return rcStrict;
8532 switch ((uintptr_t)pu32 & 3)
8533 {
8534 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
8535 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
8536 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
8537 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
8538 }
8539 }
8540
8541 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
8542}
8543
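/*
 * Illustration (not part of the build): the accessed flag is bit 40 of the 8-byte
 * descriptor, i.e. bit 0 of the type byte at offset 5.  After mapping the dword at
 * offset 4 (the aligned fast path above) it therefore becomes bit 8 of that dword,
 * which is exactly what ASMAtomicBitSet(pu32, 8) sets.  The same arithmetic:
 *
 * @code
 *      unsigned const iDescBit  = 40;                      // accessed bit within the descriptor
 *      unsigned const offDword  = 4;                       // offset of the dword mapped above
 *      unsigned const iDwordBit = iDescBit - offDword * 8; // = 8
 * @endcode
 */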
8544/** @} */
8545
8546/** @name Opcode Helpers.
8547 * @{
8548 */
8549
8550/**
8551 * Calculates the effective address of a ModR/M memory operand.
8552 *
8553 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8554 *
8555 * @return Strict VBox status code.
8556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8557 * @param bRm The ModRM byte.
8558 * @param cbImm The size of any immediate following the
8559 * effective address opcode bytes. Important for
8560 * RIP relative addressing.
8561 * @param pGCPtrEff Where to return the effective address.
8562 */
8563VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
8564{
8565 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
8566# define SET_SS_DEF() \
8567 do \
8568 { \
8569 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8570 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8571 } while (0)
8572
8573 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8574 {
8575/** @todo Check the effective address size crap! */
8576 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8577 {
8578 uint16_t u16EffAddr;
8579
8580 /* Handle the disp16 form with no registers first. */
8581 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8582 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8583 else
8584 {
8585 /* Get the displacement. */
8586 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8587 {
8588 case 0: u16EffAddr = 0; break;
8589 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8590 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8591 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8592 }
8593
8594 /* Add the base and index registers to the disp. */
8595 switch (bRm & X86_MODRM_RM_MASK)
8596 {
8597 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8598 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8599 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8600 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8601 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8602 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8603 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8604 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8605 }
8606 }
8607
8608 *pGCPtrEff = u16EffAddr;
8609 }
8610 else
8611 {
8612 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8613 uint32_t u32EffAddr;
8614
8615 /* Handle the disp32 form with no registers first. */
8616 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8617 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8618 else
8619 {
8620 /* Get the register (or SIB) value. */
8621 switch ((bRm & X86_MODRM_RM_MASK))
8622 {
8623 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8624 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8625 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8626 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8627 case 4: /* SIB */
8628 {
8629 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8630
8631 /* Get the index and scale it. */
8632 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8633 {
8634 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8635 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8636 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8637 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8638 case 4: u32EffAddr = 0; /*none */ break;
8639 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8640 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8641 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8642 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8643 }
8644 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8645
8646 /* add base */
8647 switch (bSib & X86_SIB_BASE_MASK)
8648 {
8649 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8650 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8651 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8652 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8653 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
8654 case 5:
8655 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8656 {
8657 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8658 SET_SS_DEF();
8659 }
8660 else
8661 {
8662 uint32_t u32Disp;
8663 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8664 u32EffAddr += u32Disp;
8665 }
8666 break;
8667 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8668 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8669 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8670 }
8671 break;
8672 }
8673 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8674 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8675 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8676 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8677 }
8678
8679 /* Get and add the displacement. */
8680 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8681 {
8682 case 0:
8683 break;
8684 case 1:
8685 {
8686 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8687 u32EffAddr += i8Disp;
8688 break;
8689 }
8690 case 2:
8691 {
8692 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8693 u32EffAddr += u32Disp;
8694 break;
8695 }
8696 default:
8697 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
8698 }
8699
8700 }
8701 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
8702 *pGCPtrEff = u32EffAddr;
8703 else
8704 {
8705 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
8706 *pGCPtrEff = u32EffAddr & UINT16_MAX;
8707 }
8708 }
8709 }
8710 else
8711 {
8712 uint64_t u64EffAddr;
8713
8714 /* Handle the rip+disp32 form with no registers first. */
8715 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8716 {
8717 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
8718 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
8719 }
8720 else
8721 {
8722 /* Get the register (or SIB) value. */
8723 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
8724 {
8725 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8726 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8727 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8728 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8729 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
8730 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8731 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8732 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8733 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8734 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8735 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8736 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8737 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8738 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8739 /* SIB */
8740 case 4:
8741 case 12:
8742 {
8743 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8744
8745 /* Get the index and scale it. */
8746 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
8747 {
8748 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
8749 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
8750 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
8751 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
8752 case 4: u64EffAddr = 0; /*none */ break;
8753 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
8754 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
8755 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
8756 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
8757 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
8758 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
8759 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
8760 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
8761 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
8762 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
8763 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
8764 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8765 }
8766 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8767
8768 /* add base */
8769 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
8770 {
8771 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
8772 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
8773 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
8774 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
8775 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
8776 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
8777 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
8778 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
8779 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
8780 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
8781 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
8782 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
8783 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
8784 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
8785 /* complicated encodings */
8786 case 5:
8787 case 13:
8788 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8789 {
8790 if (!pVCpu->iem.s.uRexB)
8791 {
8792 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
8793 SET_SS_DEF();
8794 }
8795 else
8796 u64EffAddr += pVCpu->cpum.GstCtx.r13;
8797 }
8798 else
8799 {
8800 uint32_t u32Disp;
8801 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8802 u64EffAddr += (int32_t)u32Disp;
8803 }
8804 break;
8805 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8806 }
8807 break;
8808 }
8809 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8810 }
8811
8812 /* Get and add the displacement. */
8813 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8814 {
8815 case 0:
8816 break;
8817 case 1:
8818 {
8819 int8_t i8Disp;
8820 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8821 u64EffAddr += i8Disp;
8822 break;
8823 }
8824 case 2:
8825 {
8826 uint32_t u32Disp;
8827 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8828 u64EffAddr += (int32_t)u32Disp;
8829 break;
8830 }
8831 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
8832 }
8833
8834 }
8835
8836 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
8837 *pGCPtrEff = u64EffAddr;
8838 else
8839 {
8840 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8841 *pGCPtrEff = u64EffAddr & UINT32_MAX;
8842 }
8843 }
8844
8845 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
8846 return VINF_SUCCESS;
8847}
8848
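/*
 * Illustration (not part of the build): a worked 32-bit decode through the function
 * above for the hypothetical byte sequence ModRM=0x44, SIB=0x98, disp8=0x10:
 *
 * @code
 *      //  ModRM 0x44: mod=01 (disp8 follows), rm=100 (a SIB byte follows)
 *      //  SIB   0x98: scale=10 (x4), index=011 (EBX), base=000 (EAX)
 *      //  =>  *pGCPtrEff = EAX + EBX * 4 + 0x10, default segment DS
 *      //  (an ESP or EBP base would have hit SET_SS_DEF() and defaulted to SS)
 * @endcode
 */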
8849
8850/**
8851 * Calculates the effective address of a ModR/M memory operand.
8852 *
8853 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
8854 *
8855 * @return Strict VBox status code.
8856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8857 * @param bRm The ModRM byte.
8858 * @param cbImm The size of any immediate following the
8859 * effective address opcode bytes. Important for
8860 * RIP relative addressing.
8861 * @param pGCPtrEff Where to return the effective address.
8862 * @param offRsp RSP displacement.
8863 */
8864VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
8865{
8866 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
8867# define SET_SS_DEF() \
8868 do \
8869 { \
8870 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
8871 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
8872 } while (0)
8873
8874 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
8875 {
8876/** @todo Check the effective address size crap! */
8877 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
8878 {
8879 uint16_t u16EffAddr;
8880
8881 /* Handle the disp16 form with no registers first. */
8882 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
8883 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
8884 else
8885 {
8886 /* Get the displacement. */
8887 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8888 {
8889 case 0: u16EffAddr = 0; break;
8890 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
8891 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
8892 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
8893 }
8894
8895 /* Add the base and index registers to the disp. */
8896 switch (bRm & X86_MODRM_RM_MASK)
8897 {
8898 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
8899 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
8900 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
8901 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
8902 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
8903 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
8904 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
8905 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
8906 }
8907 }
8908
8909 *pGCPtrEff = u16EffAddr;
8910 }
8911 else
8912 {
8913 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
8914 uint32_t u32EffAddr;
8915
8916 /* Handle the disp32 form with no registers first. */
8917 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
8918 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
8919 else
8920 {
8921 /* Get the register (or SIB) value. */
8922 switch ((bRm & X86_MODRM_RM_MASK))
8923 {
8924 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8925 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8926 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8927 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8928 case 4: /* SIB */
8929 {
8930 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
8931
8932 /* Get the index and scale it. */
8933 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
8934 {
8935 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
8936 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
8937 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
8938 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
8939 case 4: u32EffAddr = 0; /*none */ break;
8940 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
8941 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8942 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8943 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8944 }
8945 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
8946
8947 /* add base */
8948 switch (bSib & X86_SIB_BASE_MASK)
8949 {
8950 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
8951 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
8952 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
8953 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
8954 case 4:
8955 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
8956 SET_SS_DEF();
8957 break;
8958 case 5:
8959 if ((bRm & X86_MODRM_MOD_MASK) != 0)
8960 {
8961 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
8962 SET_SS_DEF();
8963 }
8964 else
8965 {
8966 uint32_t u32Disp;
8967 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8968 u32EffAddr += u32Disp;
8969 }
8970 break;
8971 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
8972 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
8973 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8974 }
8975 break;
8976 }
8977 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
8978 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
8979 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
8980 IEM_NOT_REACHED_DEFAULT_CASE_RET();
8981 }
8982
8983 /* Get and add the displacement. */
8984 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
8985 {
8986 case 0:
8987 break;
8988 case 1:
8989 {
8990 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
8991 u32EffAddr += i8Disp;
8992 break;
8993 }
8994 case 2:
8995 {
8996 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
8997 u32EffAddr += u32Disp;
8998 break;
8999 }
9000 default:
9001 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
9002 }
9003
9004 }
9005 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9006 *pGCPtrEff = u32EffAddr;
9007 else
9008 {
9009 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9010 *pGCPtrEff = u32EffAddr & UINT16_MAX;
9011 }
9012 }
9013 }
9014 else
9015 {
9016 uint64_t u64EffAddr;
9017
9018 /* Handle the rip+disp32 form with no registers first. */
9019 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9020 {
9021 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
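            /* Note: RIP-relative displacements are relative to the next instruction.  The
               immediate bytes (cbImm) have not been fetched yet and thus are not included
               in IEM_GET_INSTR_LEN, so they must be added explicitly here. */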
9022 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9023 }
9024 else
9025 {
9026 /* Get the register (or SIB) value. */
9027 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9028 {
9029 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9030 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9031 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9032 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9033 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9034 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9035 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9036 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9037 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9038 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9039 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9040 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9041 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9042 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9043 /* SIB */
9044 case 4:
9045 case 12:
9046 {
9047 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9048
9049 /* Get the index and scale it. */
9050 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9051 {
9052 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9053 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9054 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9055 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9056 case 4: u64EffAddr = 0; /*none */ break;
9057 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9058 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9059 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9060 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9061 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9062 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9063 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9064 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9065 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9066 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9067 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9068 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9069 }
9070 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9071
9072 /* add base */
9073 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9074 {
9075 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9076 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9077 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9078 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9079 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
9080 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9081 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9082 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9083 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9084 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9085 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9086 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9087 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9088 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9089                         /* Complicated encodings: with mod=0 there is no base register, just a disp32; otherwise the base is rBP or r13 (REX.B). */
9090 case 5:
9091 case 13:
9092 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9093 {
9094 if (!pVCpu->iem.s.uRexB)
9095 {
9096 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9097 SET_SS_DEF();
9098 }
9099 else
9100 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9101 }
9102 else
9103 {
9104 uint32_t u32Disp;
9105 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9106 u64EffAddr += (int32_t)u32Disp;
9107 }
9108 break;
9109 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9110 }
9111 break;
9112 }
9113 IEM_NOT_REACHED_DEFAULT_CASE_RET();
9114 }
9115
9116 /* Get and add the displacement. */
9117 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9118 {
9119 case 0:
9120 break;
9121 case 1:
9122 {
9123 int8_t i8Disp;
9124 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9125 u64EffAddr += i8Disp;
9126 break;
9127 }
9128 case 2:
9129 {
9130 uint32_t u32Disp;
9131 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9132 u64EffAddr += (int32_t)u32Disp;
9133 break;
9134 }
9135 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
9136 }
9137
9138 }
9139
9140 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9141 *pGCPtrEff = u64EffAddr;
9142 else
9143 {
9144 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9145 *pGCPtrEff = u64EffAddr & UINT32_MAX;
9146 }
9147 }
9148
9149 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
9150 return VINF_SUCCESS;
9151}
9152
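/*
 * Illustrative sketch, not built: stripped of the special cases (index=4 means no
 * index, base=5 with mod=0 means disp32 only), the SIB paths above compute
 * EffAddr = Base + Index * 2^Scale + Disp.  The helper name and parameters below
 * are made up for illustration and assume plain integer register values rather
 * than the CPUMCTX fields used above.
 */
#if 0
static uint32_t iemSketchCalcSibEffAddrU32(uint32_t uBase, uint32_t uIndex, uint8_t cScaleShift, int32_t i32Disp)
{
    Assert(cScaleShift <= 3); /* The SIB scale field encodes a shift of 0..3, i.e. a scale of 1, 2, 4 or 8. */
    return uBase + (uIndex << cScaleShift) + (uint32_t)i32Disp;
}
#endif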
9153
9154#ifdef IEM_WITH_SETJMP
9155/**
9156 * Calculates the effective address of a ModR/M memory operand.
9157 *
9158 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
9159 *
9160 * May longjmp on internal error.
9161 *
9162 * @return The effective address.
9163 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9164 * @param bRm The ModRM byte.
9165 * @param cbImm The size of any immediate following the
9166 * effective address opcode bytes. Important for
9167 * RIP relative addressing.
9168 */
9169RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) RT_NOEXCEPT
9170{
9171 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
9172# define SET_SS_DEF() \
9173 do \
9174 { \
9175 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
9176 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
9177 } while (0)
9178
9179 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
9180 {
9181/** @todo Check the effective address size crap! */
9182 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
9183 {
9184 uint16_t u16EffAddr;
9185
9186 /* Handle the disp16 form with no registers first. */
9187 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
9188 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
9189 else
9190 {
9191             /* Get the displacement. */
9192 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9193 {
9194 case 0: u16EffAddr = 0; break;
9195 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
9196 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
9197 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
9198 }
9199
9200 /* Add the base and index registers to the disp. */
9201 switch (bRm & X86_MODRM_RM_MASK)
9202 {
9203 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
9204 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
9205 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
9206 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
9207 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
9208 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
9209 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
9210 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
9211 }
9212 }
9213
9214 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
9215 return u16EffAddr;
9216 }
9217
9218 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9219 uint32_t u32EffAddr;
9220
9221 /* Handle the disp32 form with no registers first. */
9222 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9223 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
9224 else
9225 {
9226 /* Get the register (or SIB) value. */
9227 switch ((bRm & X86_MODRM_RM_MASK))
9228 {
9229 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9230 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9231 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9232 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9233 case 4: /* SIB */
9234 {
9235 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9236
9237 /* Get the index and scale it. */
9238 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
9239 {
9240 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
9241 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
9242 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
9243 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
9244 case 4: u32EffAddr = 0; /*none */ break;
9245 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
9246 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9247 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9248 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9249 }
9250 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9251
9252 /* add base */
9253 switch (bSib & X86_SIB_BASE_MASK)
9254 {
9255 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
9256 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
9257 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
9258 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
9259 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
9260 case 5:
9261 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9262 {
9263 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
9264 SET_SS_DEF();
9265 }
9266 else
9267 {
9268 uint32_t u32Disp;
9269 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9270 u32EffAddr += u32Disp;
9271 }
9272 break;
9273 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
9274 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
9275 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9276 }
9277 break;
9278 }
9279 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
9280 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
9281 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
9282 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9283 }
9284
9285 /* Get and add the displacement. */
9286 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9287 {
9288 case 0:
9289 break;
9290 case 1:
9291 {
9292 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9293 u32EffAddr += i8Disp;
9294 break;
9295 }
9296 case 2:
9297 {
9298 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9299 u32EffAddr += u32Disp;
9300 break;
9301 }
9302 default:
9303 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
9304 }
9305 }
9306
9307 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
9308 {
9309 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
9310 return u32EffAddr;
9311 }
9312 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
9313 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
9314 return u32EffAddr & UINT16_MAX;
9315 }
9316
9317 uint64_t u64EffAddr;
9318
9319 /* Handle the rip+disp32 form with no registers first. */
9320 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
9321 {
9322 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
9323 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
9324 }
9325 else
9326 {
9327 /* Get the register (or SIB) value. */
9328 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
9329 {
9330 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9331 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9332 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9333 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9334 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
9335 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9336 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9337 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9338 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9339 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9340 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9341 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9342 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9343 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9344 /* SIB */
9345 case 4:
9346 case 12:
9347 {
9348 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
9349
9350 /* Get the index and scale it. */
9351 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
9352 {
9353 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
9354 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
9355 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
9356 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
9357 case 4: u64EffAddr = 0; /*none */ break;
9358 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
9359 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
9360 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
9361 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
9362 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
9363 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
9364 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
9365 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
9366 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
9367 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
9368 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
9369 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9370 }
9371 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
9372
9373 /* add base */
9374 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
9375 {
9376 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
9377 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
9378 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
9379 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
9380 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
9381 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
9382 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
9383 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
9384 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
9385 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
9386 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
9387 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
9388 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
9389 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
9390                         /* Complicated encodings: with mod=0 there is no base register, just a disp32; otherwise the base is rBP or r13 (REX.B). */
9391 case 5:
9392 case 13:
9393 if ((bRm & X86_MODRM_MOD_MASK) != 0)
9394 {
9395 if (!pVCpu->iem.s.uRexB)
9396 {
9397 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
9398 SET_SS_DEF();
9399 }
9400 else
9401 u64EffAddr += pVCpu->cpum.GstCtx.r13;
9402 }
9403 else
9404 {
9405 uint32_t u32Disp;
9406 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9407 u64EffAddr += (int32_t)u32Disp;
9408 }
9409 break;
9410 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9411 }
9412 break;
9413 }
9414 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
9415 }
9416
9417 /* Get and add the displacement. */
9418 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
9419 {
9420 case 0:
9421 break;
9422 case 1:
9423 {
9424 int8_t i8Disp;
9425 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
9426 u64EffAddr += i8Disp;
9427 break;
9428 }
9429 case 2:
9430 {
9431 uint32_t u32Disp;
9432 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
9433 u64EffAddr += (int32_t)u32Disp;
9434 break;
9435 }
9436 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
9437 }
9438
9439 }
9440
9441 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
9442 {
9443 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
9444 return u64EffAddr;
9445 }
9446 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
9447 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
9448 return u64EffAddr & UINT32_MAX;
9449}
9450#endif /* IEM_WITH_SETJMP */
9451
9452/** @} */
9453
9454
9455#ifdef LOG_ENABLED
9456/**
9457 * Logs the current instruction.
9458 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9459 * @param fSameCtx Set if we have the same context information as the VMM,
9460 * clear if we may have already executed an instruction in
9461 * our debug context. When clear, we assume IEMCPU holds
9462 * valid CPU mode info.
9463 *
9464 * The @a fSameCtx parameter is now misleading and obsolete.
9465 * @param pszFunction The IEM function doing the execution.
9466 */
9467static void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction) RT_NOEXCEPT
9468{
9469# ifdef IN_RING3
9470 if (LogIs2Enabled())
9471 {
9472 char szInstr[256];
9473 uint32_t cbInstr = 0;
9474 if (fSameCtx)
9475 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
9476 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
9477 szInstr, sizeof(szInstr), &cbInstr);
9478 else
9479 {
9480 uint32_t fFlags = 0;
9481 switch (pVCpu->iem.s.enmCpuMode)
9482 {
9483 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
9484 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
9485 case IEMMODE_16BIT:
9486 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
9487 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
9488 else
9489 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
9490 break;
9491 }
9492 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
9493 szInstr, sizeof(szInstr), &cbInstr);
9494 }
9495
9496 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
9497 Log2(("**** %s\n"
9498 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
9499 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
9500 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
9501 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
9502 " %s\n"
9503 , pszFunction,
9504 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
9505 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
9506 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
9507 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
9508 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
9509 szInstr));
9510
9511 if (LogIs3Enabled())
9512 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
9513 }
9514 else
9515# endif
9516 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
9517 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
9518 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
9519}
9520#endif /* LOG_ENABLED */
9521
9522
9523#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9524/**
9525 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
9526 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
9527 *
9528 * @returns Modified rcStrict.
9529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9530 * @param rcStrict The instruction execution status.
9531 */
9532static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict) RT_NOEXCEPT
9533{
9534 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
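    /* Handled below in priority order (highest first): APIC-write emulation,
       monitor trap flag (MTF), VMX-preemption timer, NMI-window, interrupt-window. */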
9535 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
9536 {
9537 /* VMX preemption timer takes priority over NMI-window exits. */
9538 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
9539 {
9540 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
9541 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
9542 }
9543 /*
9544 * Check remaining intercepts.
9545 *
9546 * NMI-window and Interrupt-window VM-exits.
9547 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
9548 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
9549 *
9550 * See Intel spec. 26.7.6 "NMI-Window Exiting".
9551 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
9552 */
9553 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
9554 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9555 && !TRPMHasTrap(pVCpu))
9556 {
9557 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
9558 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
9559 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
9560 {
9561 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
9562 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
9563 }
9564 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
9565 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
9566 {
9567 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
9568 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
9569 }
9570 }
9571 }
9572 /* TPR-below threshold/APIC write has the highest priority. */
9573 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
9574 {
9575 rcStrict = iemVmxApicWriteEmulation(pVCpu);
9576 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9577 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
9578 }
9579 /* MTF takes priority over VMX-preemption timer. */
9580 else
9581 {
9582 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
9583 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
9584 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
9585 }
9586 return rcStrict;
9587}
9588#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
9589
9590
9591/**
9592 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
9593 * IEMExecOneWithPrefetchedByPC.
9594 *
9595 * Similar code is found in IEMExecLots.
9596 *
9597 * @return Strict VBox status code.
9598 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9599 * @param fExecuteInhibit If set, execute the instruction following CLI,
9600 * POP SS and MOV SS,GR.
9601 * @param pszFunction The calling function name.
9602 */
9603DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
9604{
9605 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9606 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9607 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9608 RT_NOREF_PV(pszFunction);
9609
9610#ifdef IEM_WITH_SETJMP
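    /* Install our own jump buffer so deeply nested code (e.g. opcode fetching) can
       bail out via longjmp; the status code comes back as the setjmp return value
       and the caller's buffer is restored again below. */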
9611 VBOXSTRICTRC rcStrict;
9612 jmp_buf JmpBuf;
9613 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9614 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9615 if ((rcStrict = setjmp(JmpBuf)) == 0)
9616 {
9617 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9618 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9619 }
9620 else
9621 pVCpu->iem.s.cLongJumps++;
9622 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9623#else
9624 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9625 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9626#endif
9627 if (rcStrict == VINF_SUCCESS)
9628 pVCpu->iem.s.cInstructions++;
9629 if (pVCpu->iem.s.cActiveMappings > 0)
9630 {
9631 Assert(rcStrict != VINF_SUCCESS);
9632 iemMemRollback(pVCpu);
9633 }
9634 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9635 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9636 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9637
9638//#ifdef DEBUG
9639// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
9640//#endif
9641
9642#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
9643 /*
9644 * Perform any VMX nested-guest instruction boundary actions.
9645 *
9646 * If any of these causes a VM-exit, we must skip executing the next
9647 * instruction (would run into stale page tables). A VM-exit makes sure
9648     * there is no interrupt-inhibition, so that should ensure we don't go on
9649     * to execute the next instruction. Clearing fExecuteInhibit is
9650 * problematic because of the setjmp/longjmp clobbering above.
9651 */
9652 if ( rcStrict == VINF_SUCCESS
9653 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
9654 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
9655 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
9656#endif
9657
9658 /* Execute the next instruction as well if a cli, pop ss or
9659 mov ss, Gr has just completed successfully. */
9660 if ( fExecuteInhibit
9661 && rcStrict == VINF_SUCCESS
9662 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
9663 && EMIsInhibitInterruptsActive(pVCpu))
9664 {
9665 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
9666 if (rcStrict == VINF_SUCCESS)
9667 {
9668#ifdef LOG_ENABLED
9669 iemLogCurInstr(pVCpu, false, pszFunction);
9670#endif
9671#ifdef IEM_WITH_SETJMP
9672 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9673 if ((rcStrict = setjmp(JmpBuf)) == 0)
9674 {
9675 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
9676 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9677 }
9678 else
9679 pVCpu->iem.s.cLongJumps++;
9680 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
9681#else
9682 IEM_OPCODE_GET_NEXT_U8(&b);
9683 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
9684#endif
9685 if (rcStrict == VINF_SUCCESS)
9686 pVCpu->iem.s.cInstructions++;
9687 if (pVCpu->iem.s.cActiveMappings > 0)
9688 {
9689 Assert(rcStrict != VINF_SUCCESS);
9690 iemMemRollback(pVCpu);
9691 }
9692 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
9693 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
9694 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
9695 }
9696 else if (pVCpu->iem.s.cActiveMappings > 0)
9697 iemMemRollback(pVCpu);
9698 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
9699 }
9700
9701 /*
9702 * Return value fiddling, statistics and sanity assertions.
9703 */
9704 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
9705
9706 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
9707 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
9708 return rcStrict;
9709}
9710
9711
9712/**
9713 * Execute one instruction.
9714 *
9715 * @return Strict VBox status code.
9716 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9717 */
9718VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
9719{
9720    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
9721#ifdef LOG_ENABLED
9722 iemLogCurInstr(pVCpu, true, "IEMExecOne");
9723#endif
9724
9725 /*
9726 * Do the decoding and emulation.
9727 */
9728 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9729 if (rcStrict == VINF_SUCCESS)
9730 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
9731 else if (pVCpu->iem.s.cActiveMappings > 0)
9732 iemMemRollback(pVCpu);
9733
9734 if (rcStrict != VINF_SUCCESS)
9735 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9736 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9737 return rcStrict;
9738}
9739
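/*
 * Illustrative sketch, not built: the minimal calling pattern for IEMExecOne.
 * The wrapper name and the logging are made up for illustration.
 */
#if 0
static int emSketchInterpretOneInstruction(PVMCPUCC pVCpu)
{
    VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
    if (rcStrict != VINF_SUCCESS)
        Log(("emSketchInterpretOneInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return VBOXSTRICTRC_TODO(rcStrict);
}
#endif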
9740
9741VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9742{
9743 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9744
9745 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9746 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9747 if (rcStrict == VINF_SUCCESS)
9748 {
9749 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
9750 if (pcbWritten)
9751 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9752 }
9753 else if (pVCpu->iem.s.cActiveMappings > 0)
9754 iemMemRollback(pVCpu);
9755
9756 return rcStrict;
9757}
9758
9759
9760VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9761 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9762{
9763 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9764
9765 VBOXSTRICTRC rcStrict;
9766 if ( cbOpcodeBytes
9767 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9768 {
9769 iemInitDecoder(pVCpu, false, false);
9770#ifdef IEM_WITH_CODE_TLB
9771 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9772 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9773 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9774 pVCpu->iem.s.offCurInstrStart = 0;
9775 pVCpu->iem.s.offInstrNextByte = 0;
9776#else
9777 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9778 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9779#endif
9780 rcStrict = VINF_SUCCESS;
9781 }
9782 else
9783 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9784 if (rcStrict == VINF_SUCCESS)
9785 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
9786 else if (pVCpu->iem.s.cActiveMappings > 0)
9787 iemMemRollback(pVCpu);
9788
9789 return rcStrict;
9790}
9791
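/*
 * Illustrative sketch, not built: how a caller that already fetched the opcode
 * bytes (e.g. from an exit record) might hand them to IEM.  The wrapper name and
 * the single NOP opcode byte are made up for illustration.
 */
#if 0
static VBOXSTRICTRC emSketchExecPrefetched(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
{
    static uint8_t const s_abNop[] = { 0x90 }; /* Pretend a NOP sits at the current RIP. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, pVCpu->cpum.GstCtx.rip, &s_abNop[0], sizeof(s_abNop));
}
#endif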
9792
9793VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
9794{
9795 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9796
9797 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9798 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9799 if (rcStrict == VINF_SUCCESS)
9800 {
9801 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
9802 if (pcbWritten)
9803 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9804 }
9805 else if (pVCpu->iem.s.cActiveMappings > 0)
9806 iemMemRollback(pVCpu);
9807
9808 return rcStrict;
9809}
9810
9811
9812VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9813 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
9814{
9815 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9816
9817 VBOXSTRICTRC rcStrict;
9818 if ( cbOpcodeBytes
9819 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9820 {
9821 iemInitDecoder(pVCpu, true, false);
9822#ifdef IEM_WITH_CODE_TLB
9823 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9824 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9825 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9826 pVCpu->iem.s.offCurInstrStart = 0;
9827 pVCpu->iem.s.offInstrNextByte = 0;
9828#else
9829 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9830 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9831#endif
9832 rcStrict = VINF_SUCCESS;
9833 }
9834 else
9835 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9836 if (rcStrict == VINF_SUCCESS)
9837 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
9838 else if (pVCpu->iem.s.cActiveMappings > 0)
9839 iemMemRollback(pVCpu);
9840
9841 return rcStrict;
9842}
9843
9844
9845/**
9846 * For debugging DISGetParamSize; may come in handy.
9847 *
9848 * @returns Strict VBox status code.
9849 * @param pVCpu The cross context virtual CPU structure of the
9850 * calling EMT.
9851 * @param pCtxCore The context core structure.
9852 * @param OpcodeBytesPC The PC of the opcode bytes.
9853 * @param   pvOpcodeBytes   Prefetched opcode bytes.
9854 * @param cbOpcodeBytes Number of prefetched bytes.
9855 * @param pcbWritten Where to return the number of bytes written.
9856 * Optional.
9857 */
9858VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
9859 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
9860 uint32_t *pcbWritten)
9861{
9862 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
9863
9864 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
9865 VBOXSTRICTRC rcStrict;
9866 if ( cbOpcodeBytes
9867 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
9868 {
9869 iemInitDecoder(pVCpu, true, false);
9870#ifdef IEM_WITH_CODE_TLB
9871 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
9872 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
9873 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
9874 pVCpu->iem.s.offCurInstrStart = 0;
9875 pVCpu->iem.s.offInstrNextByte = 0;
9876#else
9877 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
9878 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
9879#endif
9880 rcStrict = VINF_SUCCESS;
9881 }
9882 else
9883 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
9884 if (rcStrict == VINF_SUCCESS)
9885 {
9886 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
9887 if (pcbWritten)
9888 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
9889 }
9890 else if (pVCpu->iem.s.cActiveMappings > 0)
9891 iemMemRollback(pVCpu);
9892
9893 return rcStrict;
9894}
9895
9896
9897/**
9898 * For handling split cacheline lock operations when the host has split-lock
9899 * detection enabled.
9900 *
9901 * This will cause the interpreter to disregard the lock prefix and implicit
9902 * locking (xchg).
9903 *
9904 * @returns Strict VBox status code.
9905 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
9906 */
9907VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
9908{
9909 /*
9910 * Do the decoding and emulation.
9911 */
9912 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
9913 if (rcStrict == VINF_SUCCESS)
9914 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
9915 else if (pVCpu->iem.s.cActiveMappings > 0)
9916 iemMemRollback(pVCpu);
9917
9918 if (rcStrict != VINF_SUCCESS)
9919 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
9920 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
9921 return rcStrict;
9922}
9923
9924
9925VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
9926{
9927 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
9928 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
9929
9930 /*
9931     * See if there is an interrupt pending in TRPM, and inject it if we can.
9932 */
9933 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
9934#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9935 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
9936 if (fIntrEnabled)
9937 {
9938 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
9939 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9940 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
9941 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
9942 else
9943 {
9944 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
9945 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
9946 }
9947 }
9948#else
9949 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
9950#endif
9951
9952 /** @todo What if we are injecting an exception and not an interrupt? Is that
9953 * possible here? For now we assert it is indeed only an interrupt. */
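    /* Note: if the inhibit-interrupts PC equals the current RIP we are still in the
       STI / MOV SS / POP SS interrupt shadow and must not inject anything yet. */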
9954 if ( fIntrEnabled
9955 && TRPMHasTrap(pVCpu)
9956 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
9957 {
9958 uint8_t u8TrapNo;
9959 TRPMEVENT enmType;
9960 uint32_t uErrCode;
9961 RTGCPTR uCr2;
9962 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
9963 AssertRC(rc2);
9964 Assert(enmType == TRPM_HARDWARE_INT);
9965 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
9966 TRPMResetTrap(pVCpu);
9967#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
9968 /* Injecting an event may cause a VM-exit. */
9969 if ( rcStrict != VINF_SUCCESS
9970 && rcStrict != VINF_IEM_RAISED_XCPT)
9971 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
9972#else
9973 NOREF(rcStrict);
9974#endif
9975 }
9976
9977 /*
9978 * Initial decoder init w/ prefetch, then setup setjmp.
9979 */
9980 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
9981 if (rcStrict == VINF_SUCCESS)
9982 {
9983#ifdef IEM_WITH_SETJMP
9984 jmp_buf JmpBuf;
9985 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
9986 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
9987 pVCpu->iem.s.cActiveMappings = 0;
9988 if ((rcStrict = setjmp(JmpBuf)) == 0)
9989#endif
9990 {
9991 /*
9992             * The run loop.  We limit ourselves to the caller-specified instruction count (cMaxInstructions).
9993 */
9994 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
9995 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
9996 for (;;)
9997 {
9998 /*
9999 * Log the state.
10000 */
10001#ifdef LOG_ENABLED
10002 iemLogCurInstr(pVCpu, true, "IEMExecLots");
10003#endif
10004
10005 /*
10006 * Do the decoding and emulation.
10007 */
10008 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10009 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10010 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10011 {
10012 Assert(pVCpu->iem.s.cActiveMappings == 0);
10013 pVCpu->iem.s.cInstructions++;
10014 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10015 {
10016 uint64_t fCpu = pVCpu->fLocalForcedActions
10017 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10018 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10019 | VMCPU_FF_TLB_FLUSH
10020 | VMCPU_FF_INHIBIT_INTERRUPTS
10021 | VMCPU_FF_BLOCK_NMIS
10022 | VMCPU_FF_UNHALT ));
10023
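                    /* Keep iterating only if nothing needs servicing: either no relevant
                       FFs at all, or only PIC/APIC interrupt FFs while EFLAGS.IF is clear
                       (so they cannot be delivered anyway), and no VM-wide FFs. */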
10024 if (RT_LIKELY( ( !fCpu
10025 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10026 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
10027 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
10028 {
10029 if (cMaxInstructionsGccStupidity-- > 0)
10030 {
10031                         /* Poll timers every now and then according to the caller's specs. */
10032 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
10033 || !TMTimerPollBool(pVM, pVCpu))
10034 {
10035 Assert(pVCpu->iem.s.cActiveMappings == 0);
10036 iemReInitDecoder(pVCpu);
10037 continue;
10038 }
10039 }
10040 }
10041 }
10042 Assert(pVCpu->iem.s.cActiveMappings == 0);
10043 }
10044 else if (pVCpu->iem.s.cActiveMappings > 0)
10045 iemMemRollback(pVCpu);
10046 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10047 break;
10048 }
10049 }
10050#ifdef IEM_WITH_SETJMP
10051 else
10052 {
10053 if (pVCpu->iem.s.cActiveMappings > 0)
10054 iemMemRollback(pVCpu);
10055# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10056 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10057# endif
10058 pVCpu->iem.s.cLongJumps++;
10059 }
10060 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10061#endif
10062
10063 /*
10064 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10065 */
10066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10067 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10068 }
10069 else
10070 {
10071 if (pVCpu->iem.s.cActiveMappings > 0)
10072 iemMemRollback(pVCpu);
10073
10074#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10075 /*
10076 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10077 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10078 */
10079 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10080#endif
10081 }
10082
10083 /*
10084 * Maybe re-enter raw-mode and log.
10085 */
10086 if (rcStrict != VINF_SUCCESS)
10087 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10088 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10089 if (pcInstructions)
10090 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
10091 return rcStrict;
10092}
10093
10094
10095/**
10096 * Interface used by EMExecuteExec, does exit statistics and limits.
10097 *
10098 * @returns Strict VBox status code.
10099 * @param pVCpu The cross context virtual CPU structure.
10100 * @param fWillExit To be defined.
10101 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
10102 * @param cMaxInstructions Maximum number of instructions to execute.
10103 * @param cMaxInstructionsWithoutExits
10104 * The max number of instructions without exits.
10105 * @param pStats Where to return statistics.
10106 */
10107VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
10108 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
10109{
10110 NOREF(fWillExit); /** @todo define flexible exit crits */
10111
10112 /*
10113 * Initialize return stats.
10114 */
10115 pStats->cInstructions = 0;
10116 pStats->cExits = 0;
10117 pStats->cMaxExitDistance = 0;
10118 pStats->cReserved = 0;
10119
10120 /*
10121 * Initial decoder init w/ prefetch, then setup setjmp.
10122 */
10123 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
10124 if (rcStrict == VINF_SUCCESS)
10125 {
10126#ifdef IEM_WITH_SETJMP
10127 jmp_buf JmpBuf;
10128 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
10129 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
10130 pVCpu->iem.s.cActiveMappings = 0;
10131 if ((rcStrict = setjmp(JmpBuf)) == 0)
10132#endif
10133 {
10134#ifdef IN_RING0
10135 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
10136#endif
10137 uint32_t cInstructionSinceLastExit = 0;
10138
10139 /*
10140             * The run loop.  We limit ourselves to the caller-specified instruction limits.
10141 */
10142 PVM pVM = pVCpu->CTX_SUFF(pVM);
10143 for (;;)
10144 {
10145 /*
10146 * Log the state.
10147 */
10148#ifdef LOG_ENABLED
10149 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
10150#endif
10151
10152 /*
10153 * Do the decoding and emulation.
10154 */
10155 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
10156
10157 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
10158 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
10159
10160 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
10161 && cInstructionSinceLastExit > 0 /* don't count the first */ )
10162 {
10163 pStats->cExits += 1;
10164 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
10165 pStats->cMaxExitDistance = cInstructionSinceLastExit;
10166 cInstructionSinceLastExit = 0;
10167 }
10168
10169 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
10170 {
10171 Assert(pVCpu->iem.s.cActiveMappings == 0);
10172 pVCpu->iem.s.cInstructions++;
10173 pStats->cInstructions++;
10174 cInstructionSinceLastExit++;
10175 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
10176 {
10177 uint64_t fCpu = pVCpu->fLocalForcedActions
10178 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
10179 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
10180 | VMCPU_FF_TLB_FLUSH
10181 | VMCPU_FF_INHIBIT_INTERRUPTS
10182 | VMCPU_FF_BLOCK_NMIS
10183 | VMCPU_FF_UNHALT ));
10184
10185 if (RT_LIKELY( ( ( !fCpu
10186 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
10187 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
10188 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
10189 || pStats->cInstructions < cMinInstructions))
10190 {
10191 if (pStats->cInstructions < cMaxInstructions)
10192 {
10193 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
10194 {
10195#ifdef IN_RING0
10196 if ( !fCheckPreemptionPending
10197 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
10198#endif
10199 {
10200 Assert(pVCpu->iem.s.cActiveMappings == 0);
10201 iemReInitDecoder(pVCpu);
10202 continue;
10203 }
10204#ifdef IN_RING0
10205 rcStrict = VINF_EM_RAW_INTERRUPT;
10206 break;
10207#endif
10208 }
10209 }
10210 }
10211 Assert(!(fCpu & VMCPU_FF_IEM));
10212 }
10213 Assert(pVCpu->iem.s.cActiveMappings == 0);
10214 }
10215 else if (pVCpu->iem.s.cActiveMappings > 0)
10216 iemMemRollback(pVCpu);
10217 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10218 break;
10219 }
10220 }
10221#ifdef IEM_WITH_SETJMP
10222 else
10223 {
10224 if (pVCpu->iem.s.cActiveMappings > 0)
10225 iemMemRollback(pVCpu);
10226 pVCpu->iem.s.cLongJumps++;
10227 }
10228 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
10229#endif
10230
10231 /*
10232 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
10233 */
10234 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
10235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
10236 }
10237 else
10238 {
10239 if (pVCpu->iem.s.cActiveMappings > 0)
10240 iemMemRollback(pVCpu);
10241
10242#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
10243 /*
10244 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
10245 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
10246 */
10247 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
10248#endif
10249 }
10250
10251 /*
10252 * Maybe re-enter raw-mode and log.
10253 */
10254 if (rcStrict != VINF_SUCCESS)
10255 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
10256 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
10257 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
10258 return rcStrict;
10259}
10260
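/*
 * Illustrative sketch, not built: how a caller might drive IEMExecForExits and
 * consume the returned statistics.  The wrapper name and the limit values are
 * made up for illustration.
 */
#if 0
static VBOXSTRICTRC emSketchExecForExits(PVMCPUCC pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 1 /*cMinInstructions*/, 4096 /*cMaxInstructions*/,
                                            512 /*cMaxInstructionsWithoutExits*/, &Stats);
    Log(("emSketchExecForExits: %u instructions, %u exits, max exit distance %u, %Rrc\n",
         Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif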
10261
10262/**
10263 * Injects a trap, fault, abort, software interrupt or external interrupt.
10264 *
10265 * The parameter list matches TRPMQueryTrapAll pretty closely.
10266 *
10267 * @returns Strict VBox status code.
10268 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10269 * @param u8TrapNo The trap number.
10270 * @param enmType What type is it (trap/fault/abort), software
10271 * interrupt or hardware interrupt.
10272 * @param uErrCode The error code if applicable.
10273 * @param uCr2 The CR2 value if applicable.
10274 * @param cbInstr The instruction length (only relevant for
10275 * software interrupts).
10276 */
10277VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
10278 uint8_t cbInstr)
10279{
10280 iemInitDecoder(pVCpu, false, false);
10281#ifdef DBGFTRACE_ENABLED
10282 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
10283 u8TrapNo, enmType, uErrCode, uCr2);
10284#endif
10285
10286 uint32_t fFlags;
10287 switch (enmType)
10288 {
10289 case TRPM_HARDWARE_INT:
10290 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
10291 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
10292 uErrCode = uCr2 = 0;
10293 break;
10294
10295 case TRPM_SOFTWARE_INT:
10296 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
10297 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
10298 uErrCode = uCr2 = 0;
10299 break;
10300
10301 case TRPM_TRAP:
10302 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
10303 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
10304 if (u8TrapNo == X86_XCPT_PF)
10305 fFlags |= IEM_XCPT_FLAGS_CR2;
10306 switch (u8TrapNo)
10307 {
10308 case X86_XCPT_DF:
10309 case X86_XCPT_TS:
10310 case X86_XCPT_NP:
10311 case X86_XCPT_SS:
10312 case X86_XCPT_PF:
10313 case X86_XCPT_AC:
10314 case X86_XCPT_GP:
10315 fFlags |= IEM_XCPT_FLAGS_ERR;
10316 break;
10317 }
10318 break;
10319
10320 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10321 }
10322
10323 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
10324
10325 if (pVCpu->iem.s.cActiveMappings > 0)
10326 iemMemRollback(pVCpu);
10327
10328 return rcStrict;
10329}
10330
10331
10332/**
10333 * Injects the active TRPM event.
10334 *
10335 * @returns Strict VBox status code.
10336 * @param pVCpu The cross context virtual CPU structure.
10337 */
10338VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
10339{
10340#ifndef IEM_IMPLEMENTS_TASKSWITCH
10341 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
10342#else
10343 uint8_t u8TrapNo;
10344 TRPMEVENT enmType;
10345 uint32_t uErrCode;
10346 RTGCUINTPTR uCr2;
10347 uint8_t cbInstr;
10348 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
10349 if (RT_FAILURE(rc))
10350 return rc;
10351
10352 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
10353 * ICEBP \#DB injection as a special case. */
10354 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
10355#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
10356 if (rcStrict == VINF_SVM_VMEXIT)
10357 rcStrict = VINF_SUCCESS;
10358#endif
10359#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
10360 if (rcStrict == VINF_VMX_VMEXIT)
10361 rcStrict = VINF_SUCCESS;
10362#endif
10363 /** @todo Are there any other codes that imply the event was successfully
10364 * delivered to the guest? See @bugref{6607}. */
10365 if ( rcStrict == VINF_SUCCESS
10366 || rcStrict == VINF_IEM_RAISED_XCPT)
10367 TRPMResetTrap(pVCpu);
10368
10369 return rcStrict;
10370#endif
10371}
10372
10373
10374VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
10375{
10376 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10377 return VERR_NOT_IMPLEMENTED;
10378}
10379
10380
10381VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
10382{
10383 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
10384 return VERR_NOT_IMPLEMENTED;
10385}
10386
10387
10388#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
10389/**
10390 * Executes an IRET instruction with default operand size.
10391 *
10392 * This is for PATM.
10393 *
10394 * @returns VBox status code.
10395 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10396 * @param pCtxCore The register frame.
10397 */
10398VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
10399{
10400 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10401
10402 iemCtxCoreToCtx(pCtx, pCtxCore);
10403 iemInitDecoder(pVCpu);
10404 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
10405 if (rcStrict == VINF_SUCCESS)
10406 iemCtxToCtxCore(pCtxCore, pCtx);
10407 else
10408 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
10409 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
10410 return rcStrict;
10411}
10412#endif
10413
10414
10415/**
10416 * Interface for HM and EM for executing string I/O OUT (write) instructions.
10417 *
10418 * This API ASSUMES that the caller has already verified that the guest code is
10419 * allowed to access the I/O port. (The I/O port is in the DX register in the
10420 * guest state.)
10421 *
10422 * @returns Strict VBox status code.
10423 * @param pVCpu The cross context virtual CPU structure.
10424 * @param cbValue The size of the I/O port access (1, 2, or 4).
10425 * @param enmAddrMode The addressing mode.
10426 * @param fRepPrefix Indicates whether a repeat prefix is used
10427 * (doesn't matter which for this instruction).
10428 * @param cbInstr The instruction length in bytes.
10429 * @param iEffSeg The effective segment address.
10430 * @param fIoChecked Whether the access to the I/O port has been
10431 * checked or not. It's typically checked in the
10432 * HM scenario.
10433 */
10434VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10435 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
10436{
10437 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
10438 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10439
10440 /*
10441 * State init.
10442 */
10443 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10444
10445 /*
10446 * Switch orgy for getting to the right handler.
10447 */
10448 VBOXSTRICTRC rcStrict;
10449 if (fRepPrefix)
10450 {
10451 switch (enmAddrMode)
10452 {
10453 case IEMMODE_16BIT:
10454 switch (cbValue)
10455 {
10456 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10457 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10458 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10459 default:
10460 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10461 }
10462 break;
10463
10464 case IEMMODE_32BIT:
10465 switch (cbValue)
10466 {
10467 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10468 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10469 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10470 default:
10471 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10472 }
10473 break;
10474
10475 case IEMMODE_64BIT:
10476 switch (cbValue)
10477 {
10478 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10479 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10480 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10481 default:
10482 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10483 }
10484 break;
10485
10486 default:
10487 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10488 }
10489 }
10490 else
10491 {
10492 switch (enmAddrMode)
10493 {
10494 case IEMMODE_16BIT:
10495 switch (cbValue)
10496 {
10497 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10498 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10499 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10500 default:
10501 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10502 }
10503 break;
10504
10505 case IEMMODE_32BIT:
10506 switch (cbValue)
10507 {
10508 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10509 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10510 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10511 default:
10512 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10513 }
10514 break;
10515
10516 case IEMMODE_64BIT:
10517 switch (cbValue)
10518 {
10519 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10520 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10521 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
10522 default:
10523 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10524 }
10525 break;
10526
10527 default:
10528 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10529 }
10530 }
10531
10532 if (pVCpu->iem.s.cActiveMappings)
10533 iemMemRollback(pVCpu);
10534
10535 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10536}
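
/*
 * Illustrative only: a minimal sketch of how an HM exit handler might forward a
 * decoded REP OUTSB to the API above. The decoded values (cbInstr, and that no
 * segment override was in effect) are assumptions for the example, not taken
 * from a real exit handler; fIoChecked is true on the assumption that HM has
 * already consulted the I/O permission bitmap before getting here.
 */
#if 0 /* example sketch, not part of the build */
VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                             cbInstr, X86_SREG_DS /*iEffSeg, no override*/, true /*fIoChecked*/);
#endif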
10537
10538
10539/**
10540 * Interface for HM and EM for executing string I/O IN (read) instructions.
10541 *
10542 * This API ASSUMES that the caller has already verified that the guest code is
10543 * allowed to access the I/O port. (The I/O port is in the DX register in the
10544 * guest state.)
10545 *
10546 * @returns Strict VBox status code.
10547 * @param pVCpu The cross context virtual CPU structure.
10548 * @param cbValue The size of the I/O port access (1, 2, or 4).
10549 * @param enmAddrMode The addressing mode.
10550 * @param fRepPrefix Indicates whether a repeat prefix is used
10551 * (doesn't matter which for this instruction).
10552 * @param cbInstr The instruction length in bytes.
10553 * @param fIoChecked Whether the access to the I/O port has been
10554 * checked or not. It's typically checked in the
10555 * HM scenario.
10556 */
10557VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
10558 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
10559{
10560 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10561
10562 /*
10563 * State init.
10564 */
10565 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10566
10567 /*
10568     * Switch on the addressing mode and operand size to get to the right handler.
10569 */
10570 VBOXSTRICTRC rcStrict;
10571 if (fRepPrefix)
10572 {
10573 switch (enmAddrMode)
10574 {
10575 case IEMMODE_16BIT:
10576 switch (cbValue)
10577 {
10578 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10579 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10580 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10581 default:
10582 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10583 }
10584 break;
10585
10586 case IEMMODE_32BIT:
10587 switch (cbValue)
10588 {
10589 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10590 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10591 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10592 default:
10593 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10594 }
10595 break;
10596
10597 case IEMMODE_64BIT:
10598 switch (cbValue)
10599 {
10600 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10601 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10602 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10603 default:
10604 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10605 }
10606 break;
10607
10608 default:
10609 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10610 }
10611 }
10612 else
10613 {
10614 switch (enmAddrMode)
10615 {
10616 case IEMMODE_16BIT:
10617 switch (cbValue)
10618 {
10619 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
10620 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
10621 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
10622 default:
10623 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10624 }
10625 break;
10626
10627 case IEMMODE_32BIT:
10628 switch (cbValue)
10629 {
10630 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
10631 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
10632 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
10633 default:
10634 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10635 }
10636 break;
10637
10638 case IEMMODE_64BIT:
10639 switch (cbValue)
10640 {
10641 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
10642 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
10643 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
10644 default:
10645 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
10646 }
10647 break;
10648
10649 default:
10650 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
10651 }
10652 }
10653
10654 if ( pVCpu->iem.s.cActiveMappings == 0
10655 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
10656 { /* likely */ }
10657 else
10658 {
10659 AssertMsg(!IOM_SUCCESS(rcStrict), ("%#x\n", VBOXSTRICTRC_VAL(rcStrict)));
10660 iemMemRollback(pVCpu);
10661 }
10662 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10663}
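
/*
 * Illustrative only: the IN-string counterpart mirrors the string OUT case
 * above, but always stores to ES:xDI, so no effective segment is passed. The
 * decoded values used here are assumptions for the example.
 */
#if 0 /* example sketch, not part of the build */
VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu, 2 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                            cbInstr, true /*fIoChecked*/);
#endif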
10664
10665
10666/**
10667 * Interface for rawmode to execute an OUT instruction.
10668 *
10669 * @returns Strict VBox status code.
10670 * @param pVCpu The cross context virtual CPU structure.
10671 * @param cbInstr The instruction length in bytes.
10672 * @param u16Port The port to write to.
10673 * @param fImm Whether the port is specified using an immediate operand or
10674 * using the implicit DX register.
10675 * @param cbReg The register size.
10676 *
10677 * @remarks In ring-0 not all of the state needs to be synced in.
10678 */
10679VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10680{
10681 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10682 Assert(cbReg <= 4 && cbReg != 3);
10683
10684 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10685 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
10686 Assert(!pVCpu->iem.s.cActiveMappings);
10687 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10688}
10689
10690
10691/**
10692 * Interface for rawmode to execute an IN instruction.
10693 *
10694 * @returns Strict VBox status code.
10695 * @param pVCpu The cross context virtual CPU structure.
10696 * @param cbInstr The instruction length in bytes.
10697 * @param u16Port The port to read from.
10698 * @param fImm Whether the port is specified using an immediate operand or
10699 * using the implicit DX register.
10700 * @param cbReg The register size.
10701 */
10702VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
10703{
10704 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
10705 Assert(cbReg <= 4 && cbReg != 3);
10706
10707 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10708 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
10709 Assert(!pVCpu->iem.s.cActiveMappings);
10710 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10711}
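
/*
 * Illustrative only: a sketch of dispatching a decoded port I/O exit to the
 * two interfaces above. The direction flag and decoded operands (fWrite,
 * u16Port, fImm, cbReg, cbInstr) are assumptions for the example; only the
 * IEMExecDecodedOut/In calls themselves are real.
 */
#if 0 /* example sketch, not part of the build */
VBOXSTRICTRC rcStrict = fWrite
                      ? IEMExecDecodedOut(pVCpu, cbInstr, u16Port, fImm, cbReg)
                      : IEMExecDecodedIn( pVCpu, cbInstr, u16Port, fImm, cbReg);
#endif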
10712
10713
10714/**
10715 * Interface for HM and EM to write to a CRx register.
10716 *
10717 * @returns Strict VBox status code.
10718 * @param pVCpu The cross context virtual CPU structure.
10719 * @param cbInstr The instruction length in bytes.
10720 * @param iCrReg The control register number (destination).
10721 * @param iGReg The general purpose register number (source).
10722 *
10723 * @remarks In ring-0 not all of the state needs to be synced in.
10724 */
10725VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
10726{
10727 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10728 Assert(iCrReg < 16);
10729 Assert(iGReg < 16);
10730
10731 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10732 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
10733 Assert(!pVCpu->iem.s.cActiveMappings);
10734 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10735}
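
/*
 * Illustrative only: emulating a trapped 'mov cr4, rcx' (0F 22 E1, 3 bytes)
 * with the interface above would look roughly like this. X86_GREG_xCX is
 * assumed to be the usual general register index constant; if unavailable,
 * pass the raw ModR/M register number instead.
 */
#if 0 /* example sketch, not part of the build */
VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 4 /*iCrReg=CR4*/, X86_GREG_xCX);
#endif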
10736
10737
10738/**
10739 * Interface for HM and EM to read from a CRx register.
10740 *
10741 * @returns Strict VBox status code.
10742 * @param pVCpu The cross context virtual CPU structure.
10743 * @param cbInstr The instruction length in bytes.
10744 * @param iGReg The general purpose register number (destination).
10745 * @param iCrReg The control register number (source).
10746 *
10747 * @remarks In ring-0 not all of the state needs to be synced in.
10748 */
10749VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
10750{
10751 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10752 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
10753 | CPUMCTX_EXTRN_APIC_TPR);
10754 Assert(iCrReg < 16);
10755 Assert(iGReg < 16);
10756
10757 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10758 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
10759 Assert(!pVCpu->iem.s.cActiveMappings);
10760 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10761}
10762
10763
10764/**
10765 * Interface for HM and EM to clear the CR0[TS] bit.
10766 *
10767 * @returns Strict VBox status code.
10768 * @param pVCpu The cross context virtual CPU structure.
10769 * @param cbInstr The instruction length in bytes.
10770 *
10771 * @remarks In ring-0 not all of the state needs to be synced in.
10772 */
10773VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
10774{
10775 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10776
10777 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10778 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
10779 Assert(!pVCpu->iem.s.cActiveMappings);
10780 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10781}
10782
10783
10784/**
10785 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
10786 *
10787 * @returns Strict VBox status code.
10788 * @param pVCpu The cross context virtual CPU structure.
10789 * @param cbInstr The instruction length in bytes.
10790 * @param uValue The value to load into CR0.
10791 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
10792 * memory operand. Otherwise pass NIL_RTGCPTR.
10793 *
10794 * @remarks In ring-0 not all of the state needs to be synced in.
10795 */
10796VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
10797{
10798 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10799
10800 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10801 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
10802 Assert(!pVCpu->iem.s.cActiveMappings);
10803 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10804}
10805
10806
10807/**
10808 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
10809 *
10810 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
10811 *
10812 * @returns Strict VBox status code.
10813 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
10814 * @param cbInstr The instruction length in bytes.
10815 * @remarks In ring-0 not all of the state needs to be synced in.
10816 * @thread EMT(pVCpu)
10817 */
10818VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
10819{
10820 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10821
10822 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10823 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
10824 Assert(!pVCpu->iem.s.cActiveMappings);
10825 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10826}
10827
10828
10829/**
10830 * Interface for HM and EM to emulate the WBINVD instruction.
10831 *
10832 * @returns Strict VBox status code.
10833 * @param pVCpu The cross context virtual CPU structure.
10834 * @param cbInstr The instruction length in bytes.
10835 *
10836 * @remarks In ring-0 not all of the state needs to be synced in.
10837 */
10838VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10839{
10840 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10841
10842 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10843 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
10844 Assert(!pVCpu->iem.s.cActiveMappings);
10845 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10846}
10847
10848
10849/**
10850 * Interface for HM and EM to emulate the INVD instruction.
10851 *
10852 * @returns Strict VBox status code.
10853 * @param pVCpu The cross context virtual CPU structure.
10854 * @param cbInstr The instruction length in bytes.
10855 *
10856 * @remarks In ring-0 not all of the state needs to be synced in.
10857 */
10858VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
10859{
10860 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10861
10862 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10863 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
10864 Assert(!pVCpu->iem.s.cActiveMappings);
10865 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10866}
10867
10868
10869/**
10870 * Interface for HM and EM to emulate the INVLPG instruction.
10871 *
10872 * @returns Strict VBox status code.
10873 * @retval VINF_PGM_SYNC_CR3
10874 *
10875 * @param pVCpu The cross context virtual CPU structure.
10876 * @param cbInstr The instruction length in bytes.
10877 * @param GCPtrPage The effective address of the page to invalidate.
10878 *
10879 * @remarks In ring-0 not all of the state needs to be synced in.
10880 */
10881VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
10882{
10883 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
10884
10885 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10886 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
10887 Assert(!pVCpu->iem.s.cActiveMappings);
10888 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10889}
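
/*
 * Illustrative only: a sketch of consuming the documented VINF_PGM_SYNC_CR3
 * hint after emulating a trapped INVLPG. The decoded cbInstr and GCPtrPage
 * values are assumptions, and how a caller actually reacts to the hint
 * depends on its own PGM handling; it is only indicated here.
 */
#if 0 /* example sketch, not part of the build */
VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
if (rcStrict == VINF_PGM_SYNC_CR3)
{
    /* Typically the caller lets PGM resync the paging structures before resuming the guest. */
    rcStrict = VINF_SUCCESS;
}
#endif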
10890
10891
10892/**
10893 * Interface for HM and EM to emulate the INVPCID instruction.
10894 *
10895 * @returns Strict VBox status code.
10896 * @retval VINF_PGM_SYNC_CR3
10897 *
10898 * @param pVCpu The cross context virtual CPU structure.
10899 * @param cbInstr The instruction length in bytes.
10900 * @param iEffSeg The effective segment register.
10901 * @param GCPtrDesc The effective address of the INVPCID descriptor.
10902 * @param uType The invalidation type.
10903 *
10904 * @remarks In ring-0 not all of the state needs to be synced in.
10905 */
10906VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
10907 uint64_t uType)
10908{
10909 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
10910
10911 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10912 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
10913 Assert(!pVCpu->iem.s.cActiveMappings);
10914 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10915}
10916
10917
10918/**
10919 * Interface for HM and EM to emulate the CPUID instruction.
10920 *
10921 * @returns Strict VBox status code.
10922 *
10923 * @param pVCpu The cross context virtual CPU structure.
10924 * @param cbInstr The instruction length in bytes.
10925 *
10926 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX is required.
10927 */
10928VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
10929{
10930 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10931 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
10932
10933 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10934 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
10935 Assert(!pVCpu->iem.s.cActiveMappings);
10936 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10937}
10938
10939
10940/**
10941 * Interface for HM and EM to emulate the RDPMC instruction.
10942 *
10943 * @returns Strict VBox status code.
10944 *
10945 * @param pVCpu The cross context virtual CPU structure.
10946 * @param cbInstr The instruction length in bytes.
10947 *
10948 * @remarks Not all of the state needs to be synced in.
10949 */
10950VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
10951{
10952 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10953 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10954
10955 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10956 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
10957 Assert(!pVCpu->iem.s.cActiveMappings);
10958 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10959}
10960
10961
10962/**
10963 * Interface for HM and EM to emulate the RDTSC instruction.
10964 *
10965 * @returns Strict VBox status code.
10966 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10967 *
10968 * @param pVCpu The cross context virtual CPU structure.
10969 * @param cbInstr The instruction length in bytes.
10970 *
10971 * @remarks Not all of the state needs to be synced in.
10972 */
10973VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
10974{
10975 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
10976 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
10977
10978 iemInitExec(pVCpu, false /*fBypassHandlers*/);
10979 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
10980 Assert(!pVCpu->iem.s.cActiveMappings);
10981 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
10982}
10983
10984
10985/**
10986 * Interface for HM and EM to emulate the RDTSCP instruction.
10987 *
10988 * @returns Strict VBox status code.
10989 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
10990 *
10991 * @param pVCpu The cross context virtual CPU structure.
10992 * @param cbInstr The instruction length in bytes.
10993 *
10994 * @remarks Not all of the state needs to be synced in. It is recommended
10995 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
10996 */
10997VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
10998{
10999 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11000 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
11001
11002 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11003 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
11004 Assert(!pVCpu->iem.s.cActiveMappings);
11005 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11006}
11007
11008
11009/**
11010 * Interface for HM and EM to emulate the RDMSR instruction.
11011 *
11012 * @returns Strict VBox status code.
11013 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11014 *
11015 * @param pVCpu The cross context virtual CPU structure.
11016 * @param cbInstr The instruction length in bytes.
11017 *
11018 * @remarks Not all of the state needs to be synced in. Requires RCX and
11019 * (currently) all MSRs.
11020 */
11021VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11022{
11023 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11024 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
11025
11026 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11027 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
11028 Assert(!pVCpu->iem.s.cActiveMappings);
11029 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11030}
11031
11032
11033/**
11034 * Interface for HM and EM to emulate the WRMSR instruction.
11035 *
11036 * @returns Strict VBox status code.
11037 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11038 *
11039 * @param pVCpu The cross context virtual CPU structure.
11040 * @param cbInstr The instruction length in bytes.
11041 *
11042 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
11043 * and (currently) all MSRs.
11044 */
11045VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
11046{
11047 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
11048 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
11049 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
11050
11051 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11052 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
11053 Assert(!pVCpu->iem.s.cActiveMappings);
11054 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11055}
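
/*
 * Illustrative only: forwarding a WRMSR intercept to the interface above.
 * Per the remarks, the caller must have imported at least RCX, RAX, RDX and
 * the MSR state into the guest context first; that import step is not shown
 * and its exact form is an assumption about the caller's environment.
 */
#if 0 /* example sketch, not part of the build */
VBOXSTRICTRC rcStrict = IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr: 0F 30*/);
#endif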
11056
11057
11058/**
11059 * Interface for HM and EM to emulate the MONITOR instruction.
11060 *
11061 * @returns Strict VBox status code.
11062 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11063 *
11064 * @param pVCpu The cross context virtual CPU structure.
11065 * @param cbInstr The instruction length in bytes.
11066 *
11067 * @remarks Not all of the state needs to be synced in.
11068 * @remarks ASSUMES the default segment of DS and no segment override prefixes
11069 * are used.
11070 */
11071VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
11072{
11073 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11074 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
11075
11076 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11077 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
11078 Assert(!pVCpu->iem.s.cActiveMappings);
11079 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11080}
11081
11082
11083/**
11084 * Interface for HM and EM to emulate the MWAIT instruction.
11085 *
11086 * @returns Strict VBox status code.
11087 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11088 *
11089 * @param pVCpu The cross context virtual CPU structure.
11090 * @param cbInstr The instruction length in bytes.
11091 *
11092 * @remarks Not all of the state needs to be synced in.
11093 */
11094VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
11095{
11096 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
11097 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
11098
11099 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11100 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
11101 Assert(!pVCpu->iem.s.cActiveMappings);
11102 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11103}
11104
11105
11106/**
11107 * Interface for HM and EM to emulate the HLT instruction.
11108 *
11109 * @returns Strict VBox status code.
11110 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
11111 *
11112 * @param pVCpu The cross context virtual CPU structure.
11113 * @param cbInstr The instruction length in bytes.
11114 *
11115 * @remarks Not all of the state needs to be synced in.
11116 */
11117VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
11118{
11119 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
11120
11121 iemInitExec(pVCpu, false /*fBypassHandlers*/);
11122 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
11123 Assert(!pVCpu->iem.s.cActiveMappings);
11124 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
11125}
11126
11127
11128/**
11129 * Checks if IEM is in the process of delivering an event (interrupt or
11130 * exception).
11131 *
11132 * @returns true if we're in the process of raising an interrupt or exception,
11133 * false otherwise.
11134 * @param pVCpu The cross context virtual CPU structure.
11135 * @param puVector Where to store the vector associated with the
11136 * currently delivered event, optional.
11137 * @param pfFlags Where to store the event delivery flags (see
11138 * IEM_XCPT_FLAGS_XXX), optional.
11139 * @param puErr Where to store the error code associated with the
11140 * event, optional.
11141 * @param puCr2 Where to store the CR2 associated with the event,
11142 * optional.
11143 * @remarks The caller should check the flags to determine if the error code and
11144 * CR2 are valid for the event.
11145 */
11146VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
11147{
11148 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
11149 if (fRaisingXcpt)
11150 {
11151 if (puVector)
11152 *puVector = pVCpu->iem.s.uCurXcpt;
11153 if (pfFlags)
11154 *pfFlags = pVCpu->iem.s.fCurXcpt;
11155 if (puErr)
11156 *puErr = pVCpu->iem.s.uCurXcptErr;
11157 if (puCr2)
11158 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
11159 }
11160 return fRaisingXcpt;
11161}
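
/*
 * Illustrative only: querying the in-flight event and using the returned flags
 * to decide whether the error code and CR2 are meaningful, as the remarks
 * above require. IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 are taken to be the
 * relevant IEM_XCPT_FLAGS_XXX bits; treat those names as an assumption here.
 */
#if 0 /* example sketch, not part of the build */
uint8_t uVector; uint32_t fFlags, uErr; uint64_t uCr2;
if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
{
    bool const fErrValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
    bool const fCr2Valid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
    Log(("Delivering vector %#x (error code %s, CR2 %s)\n", uVector,
         fErrValid ? "valid" : "n/a", fCr2Valid ? "valid" : "n/a"));
}
#endif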
11162
11163#ifdef IN_RING3
11164
11165/**
11166 * Handles the unlikely and probably fatal merge cases.
11167 *
11168 * @returns Merged status code.
11169 * @param rcStrict Current EM status code.
11170 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11171 * with @a rcStrict.
11172 * @param iMemMap The memory mapping index. For error reporting only.
11173 * @param pVCpu The cross context virtual CPU structure of the calling
11174 * thread, for error reporting only.
11175 */
11176DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
11177 unsigned iMemMap, PVMCPUCC pVCpu)
11178{
11179 if (RT_FAILURE_NP(rcStrict))
11180 return rcStrict;
11181
11182 if (RT_FAILURE_NP(rcStrictCommit))
11183 return rcStrictCommit;
11184
11185 if (rcStrict == rcStrictCommit)
11186 return rcStrictCommit;
11187
11188 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
11189 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
11190 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
11191 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
11192 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
11193 return VERR_IOM_FF_STATUS_IPE;
11194}
11195
11196
11197/**
11198 * Helper for IOMR3ProcessForceFlag.
11199 *
11200 * @returns Merged status code.
11201 * @param rcStrict Current EM status code.
11202 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
11203 * with @a rcStrict.
11204 * @param iMemMap The memory mapping index. For error reporting only.
11205 * @param pVCpu The cross context virtual CPU structure of the calling
11206 * thread, for error reporting only.
11207 */
11208DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
11209{
11210 /* Simple. */
11211 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
11212 return rcStrictCommit;
11213
11214 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
11215 return rcStrict;
11216
11217 /* EM scheduling status codes. */
11218 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
11219 && rcStrict <= VINF_EM_LAST))
11220 {
11221 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
11222 && rcStrictCommit <= VINF_EM_LAST))
11223 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
11224 }
11225
11226 /* Unlikely */
11227 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
11228}
11229
11230
11231/**
11232 * Called by force-flag handling code when VMCPU_FF_IEM is set.
11233 *
11234 * @returns Merge between @a rcStrict and what the commit operation returned.
11235 * @param pVM The cross context VM structure.
11236 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
11237 * @param rcStrict The status code returned by ring-0 or raw-mode.
11238 */
11239VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
11240{
11241 /*
11242 * Reset the pending commit.
11243 */
11244 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
11245 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
11246 ("%#x %#x %#x\n",
11247 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11248 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
11249
11250 /*
11251 * Commit the pending bounce buffers (usually just one).
11252 */
11253 unsigned cBufs = 0;
11254 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
11255 while (iMemMap-- > 0)
11256 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
11257 {
11258 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
11259 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
11260 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
11261
11262 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
11263 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
11264 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
11265
11266 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
11267 {
11268 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
11269 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
11270 pbBuf,
11271 cbFirst,
11272 PGMACCESSORIGIN_IEM);
11273 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
11274 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
11275 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
11276 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
11277 }
11278
11279 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
11280 {
11281 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
11282 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
11283 pbBuf + cbFirst,
11284 cbSecond,
11285 PGMACCESSORIGIN_IEM);
11286 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
11287 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
11288 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
11289 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
11290 }
11291 cBufs++;
11292 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
11293 }
11294
11295 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
11296 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
11297 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
11298 pVCpu->iem.s.cActiveMappings = 0;
11299 return rcStrict;
11300}
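
/*
 * Illustrative only: a sketch of where a ring-3 run loop would pick this up.
 * The surrounding loop and variable names are assumptions; the force-flag
 * check and the call itself follow the function's contract above.
 */
#if 0 /* example sketch, not part of the build */
if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
    rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
#endif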
11301
11302#endif /* IN_RING3 */
11303