VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@ 47280

Last change on this file since 47280 was 47280, checked in by vboxsync, 12 years ago

IEM: TPR access.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 158.0 KB
1/* $Id: IEMAllCImpl.cpp.h 47280 2013-07-19 18:58:17Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 X86EFLAGS Efl;
38 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
39 if ( (pCtx->cr0 & X86_CR0_PE)
40 && ( pIemCpu->uCpl > Efl.Bits.u2IOPL
41 || Efl.Bits.u1VM) )
42 {
43 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
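/* Note: the full check reads the I/O permission bitmap of the 32-bit TSS: the 16-bit bitmap offset lives at TSS offset 0x66, and the access is allowed only if all cbOperand bits starting at bit u16Port are clear; bits beyond the TSS limit count as set and yield #GP(0). */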
44 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Implement I/O permission bitmap\n"));
45 }
46 return VINF_SUCCESS;
47}
48
49
50#if 0
51/**
52 * Calculates the parity bit.
53 *
54 * @returns true if the bit is set, false if not.
55 * @param u8Result The least significant byte of the result.
56 */
57static bool iemHlpCalcParityFlag(uint8_t u8Result)
58{
59 /*
60 * Parity is set if the number of bits in the least significant byte of
61 * the result is even.
62 */
63 uint8_t cBits;
64 cBits = u8Result & 1; /* 0 */
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1;
71 u8Result >>= 1;
72 cBits += u8Result & 1; /* 4 */
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 u8Result >>= 1;
78 cBits += u8Result & 1;
79 return !(cBits & 1);
80}
81#endif /* not used */
82
83
84/**
85 * Updates the specified flags according to an 8-bit result.
86 *
87 * @param pIemCpu The IEM state of the calling EMT.
88 * @param u8Result The result to set the flags according to.
89 * @param fToUpdate The flags to update.
90 * @param fUndefined The flags that are specified as undefined.
91 */
92static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
93{
94 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
95
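/* TEST the result against itself to recompute the arithmetic status flags, then merge back only the bits named by fToUpdate and fUndefined. */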
96 uint32_t fEFlags = pCtx->eflags.u;
97 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
98 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
99 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
100}
101
102
103/**
104 * Loads a NULL data selector into a selector register, both the hidden and
105 * visible parts, in protected mode.
106 *
107 * @param pSReg Pointer to the segment register.
108 * @param uRpl The RPL.
109 */
110static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg, RTSEL uRpl)
111{
112 /** @todo Testcase: write a testcase checking what happens when loading a NULL
113 * data selector in protected mode. */
114 pSReg->Sel = uRpl;
115 pSReg->ValidSel = uRpl;
116 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
117 pSReg->u64Base = 0;
118 pSReg->u32Limit = 0;
119 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
120}
121
122
123/**
124 * Helper used by iret and retf to adjust data segment registers for a new CPL.
125 *
126 * @param uCpl The new CPL.
127 * @param pSReg Pointer to the segment register.
128 */
129static void iemHlpAdjustSelectorForNewCpl(PIEMCPU pIemCpu, uint8_t uCpl, PCPUMSELREG pSReg)
130{
131#ifdef VBOX_WITH_RAW_MODE_NOT_R0
132 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
133 CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
134#else
135 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
136#endif
137
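/* Data and non-conforming code segments whose DPL is below the new CPL become unusable (loaded as NULL), mirroring what the CPU does to DS/ES/FS/GS when returning to an outer privilege level. */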
138 if ( uCpl > pSReg->Attr.n.u2Dpl
139 && pSReg->Attr.n.u1DescType /* code or data, not system */
140 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
141 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
142 iemHlpLoadNullDataSelectorProt(pSReg, 0);
143}
144
145
146/**
147 * Indicates that we have modified the FPU state.
148 *
149 * @param pIemCpu The IEM state of the calling EMT.
150 */
151DECLINLINE(void) iemHlpUsedFpu(PIEMCPU pIemCpu)
152{
153 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_FPU_REM);
154}
155
156/** @} */
157
158/** @name C Implementations
159 * @{
160 */
161
162/**
163 * Implements a 16-bit popa.
164 */
165IEM_CIMPL_DEF_0(iemCImpl_popa_16)
166{
167 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
168 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
169 RTGCPTR GCPtrLast = GCPtrStart + 15;
170 VBOXSTRICTRC rcStrict;
171
172 /*
173 * The docs are a bit hard to comprehend here, but it looks like we wrap
174 * around in real mode as long as none of the individual "popa" crosses the
175 * end of the stack segment. In protected mode we check the whole access
176 * in one go. For efficiency, only do the word-by-word thing if we're in
177 * danger of wrapping around.
178 */
179 /** @todo do popa boundary / wrap-around checks. */
180 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
181 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
182 {
183 /* word-by-word */
184 RTUINT64U TmpRsp;
185 TmpRsp.u = pCtx->rsp;
186 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
187 if (rcStrict == VINF_SUCCESS)
188 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
189 if (rcStrict == VINF_SUCCESS)
190 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
191 if (rcStrict == VINF_SUCCESS)
192 {
193 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
194 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
195 }
196 if (rcStrict == VINF_SUCCESS)
197 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
198 if (rcStrict == VINF_SUCCESS)
199 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
200 if (rcStrict == VINF_SUCCESS)
201 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
202 if (rcStrict == VINF_SUCCESS)
203 {
204 pCtx->rsp = TmpRsp.u;
205 iemRegAddToRip(pIemCpu, cbInstr);
206 }
207 }
208 else
209 {
210 uint16_t const *pa16Mem = NULL;
211 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
212 if (rcStrict == VINF_SUCCESS)
213 {
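/* PUSHA stores AX, CX, DX, BX, SP, BP, SI and DI at successively lower addresses, so the lowest-addressed word is DI and the highest is AX; with X86_GREG_xAX..X86_GREG_xDI being 0..7, each register's word sits at index 7 - X86_GREG_xXX. */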
214 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
215 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
216 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
217 /* skip sp */
218 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
219 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
220 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
221 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
222 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
223 if (rcStrict == VINF_SUCCESS)
224 {
225 iemRegAddToRsp(pCtx, 16);
226 iemRegAddToRip(pIemCpu, cbInstr);
227 }
228 }
229 }
230 return rcStrict;
231}
232
233
234/**
235 * Implements a 32-bit popa.
236 */
237IEM_CIMPL_DEF_0(iemCImpl_popa_32)
238{
239 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
240 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
241 RTGCPTR GCPtrLast = GCPtrStart + 31;
242 VBOXSTRICTRC rcStrict;
243
244 /*
245 * The docs are a bit hard to comprehend here, but it looks like we wrap
246 * around in real mode as long as none of the individual "popa" crosses the
247 * end of the stack segment. In protected mode we check the whole access
248 * in one go. For efficiency, only do the word-by-word thing if we're in
249 * danger of wrapping around.
250 */
251 /** @todo do popa boundary / wrap-around checks. */
252 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
253 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
254 {
255 /* word-by-word */
256 RTUINT64U TmpRsp;
257 TmpRsp.u = pCtx->rsp;
258 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
259 if (rcStrict == VINF_SUCCESS)
260 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
261 if (rcStrict == VINF_SUCCESS)
262 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
263 if (rcStrict == VINF_SUCCESS)
264 {
265 iemRegAddToRspEx(&TmpRsp, 4, pCtx); /* skip esp (4 bytes, not 2) */
266 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
267 }
268 if (rcStrict == VINF_SUCCESS)
269 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
270 if (rcStrict == VINF_SUCCESS)
271 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
272 if (rcStrict == VINF_SUCCESS)
273 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
274 if (rcStrict == VINF_SUCCESS)
275 {
276#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
277 pCtx->rdi &= UINT32_MAX;
278 pCtx->rsi &= UINT32_MAX;
279 pCtx->rbp &= UINT32_MAX;
280 pCtx->rbx &= UINT32_MAX;
281 pCtx->rdx &= UINT32_MAX;
282 pCtx->rcx &= UINT32_MAX;
283 pCtx->rax &= UINT32_MAX;
284#endif
285 pCtx->rsp = TmpRsp.u;
286 iemRegAddToRip(pIemCpu, cbInstr);
287 }
288 }
289 else
290 {
291 uint32_t const *pa32Mem;
292 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
293 if (rcStrict == VINF_SUCCESS)
294 {
295 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
296 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
297 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
298 /* skip esp */
299 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
300 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
301 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
302 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
303 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
304 if (rcStrict == VINF_SUCCESS)
305 {
306 iemRegAddToRsp(pCtx, 32);
307 iemRegAddToRip(pIemCpu, cbInstr);
308 }
309 }
310 }
311 return rcStrict;
312}
313
314
315/**
316 * Implements a 16-bit pusha.
317 */
318IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
319{
320 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
321 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
322 RTGCPTR GCPtrBottom = GCPtrTop - 15;
323 VBOXSTRICTRC rcStrict;
324
325 /*
326 * The docs are a bit hard to comprehend here, but it looks like we wrap
327 * around in real mode as long as none of the individual pushes crosses the
328 * end of the stack segment. In protected mode we check the whole access
329 * in one go. For efficiency, only do the word-by-word thing if we're in
330 * danger of wrapping around.
331 */
332 /** @todo do pusha boundary / wrap-around checks. */
333 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
334 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
335 {
336 /* word-by-word */
337 RTUINT64U TmpRsp;
338 TmpRsp.u = pCtx->rsp;
339 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
340 if (rcStrict == VINF_SUCCESS)
341 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
342 if (rcStrict == VINF_SUCCESS)
343 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
344 if (rcStrict == VINF_SUCCESS)
345 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
346 if (rcStrict == VINF_SUCCESS)
347 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
348 if (rcStrict == VINF_SUCCESS)
349 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
350 if (rcStrict == VINF_SUCCESS)
351 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
352 if (rcStrict == VINF_SUCCESS)
353 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
354 if (rcStrict == VINF_SUCCESS)
355 {
356 pCtx->rsp = TmpRsp.u;
357 iemRegAddToRip(pIemCpu, cbInstr);
358 }
359 }
360 else
361 {
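/* The eight word pushes cover the 16 bytes immediately below the current stack pointer; GCPtrBottom was GCPtrTop - 15 for the wrap check, so one more decrement yields the base address of the block to map. */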
362 GCPtrBottom--;
363 uint16_t *pa16Mem = NULL;
364 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
365 if (rcStrict == VINF_SUCCESS)
366 {
367 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
368 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
369 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
370 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
371 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
372 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
373 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
374 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
375 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
376 if (rcStrict == VINF_SUCCESS)
377 {
378 iemRegSubFromRsp(pCtx, 16);
379 iemRegAddToRip(pIemCpu, cbInstr);
380 }
381 }
382 }
383 return rcStrict;
384}
385
386
387/**
388 * Implements a 32-bit pusha.
389 */
390IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
391{
392 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
393 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
394 RTGCPTR GCPtrBottom = GCPtrTop - 31;
395 VBOXSTRICTRC rcStrict;
396
397 /*
398 * The docs are a bit hard to comprehend here, but it looks like we wrap
399 * around in real mode as long as none of the individual "pusha" crosses the
400 * end of the stack segment. In protected mode we check the whole access
401 * in one go. For efficiency, only do the word-by-word thing if we're in
402 * danger of wrapping around.
403 */
404 /** @todo do pusha boundary / wrap-around checks. */
405 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
406 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
407 {
408 /* word-by-word */
409 RTUINT64U TmpRsp;
410 TmpRsp.u = pCtx->rsp;
411 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
412 if (rcStrict == VINF_SUCCESS)
413 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
414 if (rcStrict == VINF_SUCCESS)
415 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
416 if (rcStrict == VINF_SUCCESS)
417 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
418 if (rcStrict == VINF_SUCCESS)
419 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
420 if (rcStrict == VINF_SUCCESS)
421 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
422 if (rcStrict == VINF_SUCCESS)
423 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
424 if (rcStrict == VINF_SUCCESS)
425 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
426 if (rcStrict == VINF_SUCCESS)
427 {
428 pCtx->rsp = TmpRsp.u;
429 iemRegAddToRip(pIemCpu, cbInstr);
430 }
431 }
432 else
433 {
434 GCPtrBottom--;
435 uint32_t *pa32Mem;
436 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
437 if (rcStrict == VINF_SUCCESS)
438 {
439 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
440 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
441 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
442 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
443 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
444 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
445 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
446 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
447 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
448 if (rcStrict == VINF_SUCCESS)
449 {
450 iemRegSubFromRsp(pCtx, 32);
451 iemRegAddToRip(pIemCpu, cbInstr);
452 }
453 }
454 }
455 return rcStrict;
456}
457
458
459/**
460 * Implements pushf.
461 *
462 *
463 * @param enmEffOpSize The effective operand size.
464 */
465IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
466{
467 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
468
469 /*
470 * If we're in V8086 mode some care is required (which is why we're
471 * doing this in a C implementation).
472 */
473 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
474 if ( (fEfl & X86_EFL_VM)
475 && X86_EFL_GET_IOPL(fEfl) != 3 )
476 {
477 Assert(pCtx->cr0 & X86_CR0_PE);
478 if ( enmEffOpSize != IEMMODE_16BIT
479 || !(pCtx->cr4 & X86_CR4_VME))
480 return iemRaiseGeneralProtectionFault0(pIemCpu);
481 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
482 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
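/* I.e. the pushed 16-bit image carries VIF in the IF bit position (bit 19 copied down to bit 9), as VME prescribes for PUSHF with IOPL < 3. */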
483 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
484 }
485
486 /*
487 * Ok, clear RF and VM and push the flags.
488 */
489 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
490
491 VBOXSTRICTRC rcStrict;
492 switch (enmEffOpSize)
493 {
494 case IEMMODE_16BIT:
495 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
496 break;
497 case IEMMODE_32BIT:
498 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
499 break;
500 case IEMMODE_64BIT:
501 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
502 break;
503 IEM_NOT_REACHED_DEFAULT_CASE_RET();
504 }
505 if (rcStrict != VINF_SUCCESS)
506 return rcStrict;
507
508 iemRegAddToRip(pIemCpu, cbInstr);
509 return VINF_SUCCESS;
510}
511
512
513/**
514 * Implements popf.
515 *
516 * @param enmEffOpSize The effective operand size.
517 */
518IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
519{
520 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
521 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
522 uint32_t const fEflOld = IEMMISC_GET_EFL(pIemCpu, pCtx);
523 VBOXSTRICTRC rcStrict;
524 uint32_t fEflNew;
525
526 /*
527 * V8086 is special as usual.
528 */
529 if (fEflOld & X86_EFL_VM)
530 {
531 /*
532 * Almost anything goes if IOPL is 3.
533 */
534 if (X86_EFL_GET_IOPL(fEflOld) == 3)
535 {
536 switch (enmEffOpSize)
537 {
538 case IEMMODE_16BIT:
539 {
540 uint16_t u16Value;
541 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
542 if (rcStrict != VINF_SUCCESS)
543 return rcStrict;
544 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
545 break;
546 }
547 case IEMMODE_32BIT:
548 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
549 if (rcStrict != VINF_SUCCESS)
550 return rcStrict;
551 break;
552 IEM_NOT_REACHED_DEFAULT_CASE_RET();
553 }
554
555 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
556 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
557 }
558 /*
559 * Interrupt flag virtualization with CR4.VME=1.
560 */
561 else if ( enmEffOpSize == IEMMODE_16BIT
562 && (pCtx->cr4 & X86_CR4_VME) )
563 {
564 uint16_t u16Value;
565 RTUINT64U TmpRsp;
566 TmpRsp.u = pCtx->rsp;
567 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
568 if (rcStrict != VINF_SUCCESS)
569 return rcStrict;
570
571 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
572 * or before? */
573 if ( ( (u16Value & X86_EFL_IF)
574 && (fEflOld & X86_EFL_VIP))
575 || (u16Value & X86_EFL_TF) )
576 return iemRaiseGeneralProtectionFault0(pIemCpu);
577
578 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
579 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
580 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
581 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
582
583 pCtx->rsp = TmpRsp.u;
584 }
585 else
586 return iemRaiseGeneralProtectionFault0(pIemCpu);
587
588 }
589 /*
590 * Not in V8086 mode.
591 */
592 else
593 {
594 /* Pop the flags. */
595 switch (enmEffOpSize)
596 {
597 case IEMMODE_16BIT:
598 {
599 uint16_t u16Value;
600 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
601 if (rcStrict != VINF_SUCCESS)
602 return rcStrict;
603 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
604 break;
605 }
606 case IEMMODE_32BIT:
607 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
608 if (rcStrict != VINF_SUCCESS)
609 return rcStrict;
610 break;
611 case IEMMODE_64BIT:
612 {
613 uint64_t u64Value;
614 rcStrict = iemMemStackPopU64(pIemCpu, &u64Value);
615 if (rcStrict != VINF_SUCCESS)
616 return rcStrict;
617 fEflNew = u64Value; /** @todo testcase: Check exactly what happens if high bits are set. */
618 break;
619 }
620 IEM_NOT_REACHED_DEFAULT_CASE_RET();
621 }
622
623 /* Merge them with the current flags. */
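/* POPF privilege rules: at CPL 0 both IOPL and IF may be changed; at CPL <= IOPL only IF may be changed; otherwise neither is, and the remaining bits are merged according to X86_EFL_POPF_BITS. */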
624 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
625 || pIemCpu->uCpl == 0)
626 {
627 fEflNew &= X86_EFL_POPF_BITS;
628 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
629 }
630 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
631 {
632 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
633 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
634 }
635 else
636 {
637 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
638 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
639 }
640 }
641
642 /*
643 * Commit the flags.
644 */
645 Assert(fEflNew & RT_BIT_32(1));
646 IEMMISC_SET_EFL(pIemCpu, pCtx, fEflNew);
647 iemRegAddToRip(pIemCpu, cbInstr);
648
649 return VINF_SUCCESS;
650}
651
652
653/**
654 * Implements a 16-bit indirect call.
655 *
656 * @param uNewPC The new program counter (RIP) value (loaded from the
657 * operand).
658 *
659 */
660IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
661{
662 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
663 uint16_t uOldPC = pCtx->ip + cbInstr;
664 if (uNewPC > pCtx->cs.u32Limit)
665 return iemRaiseGeneralProtectionFault0(pIemCpu);
666
667 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
668 if (rcStrict != VINF_SUCCESS)
669 return rcStrict;
670
671 pCtx->rip = uNewPC;
672 return VINF_SUCCESS;
673
674}
675
676
677/**
678 * Implements a 16-bit relative call.
679 *
680 * @param offDisp The displacement offset.
681 */
682IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
683{
684 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
685 uint16_t uOldPC = pCtx->ip + cbInstr;
686 uint16_t uNewPC = uOldPC + offDisp;
687 if (uNewPC > pCtx->cs.u32Limit)
688 return iemRaiseGeneralProtectionFault0(pIemCpu);
689
690 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
691 if (rcStrict != VINF_SUCCESS)
692 return rcStrict;
693
694 pCtx->rip = uNewPC;
695 return VINF_SUCCESS;
696}
697
698
699/**
700 * Implements a 32-bit indirect call.
701 *
702 * @param uNewPC The new program counter (RIP) value (loaded from the
703 * operand).
704 *
705 */
706IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
707{
708 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
709 uint32_t uOldPC = pCtx->eip + cbInstr;
710 if (uNewPC > pCtx->cs.u32Limit)
711 return iemRaiseGeneralProtectionFault0(pIemCpu);
712
713 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
714 if (rcStrict != VINF_SUCCESS)
715 return rcStrict;
716
717 pCtx->rip = uNewPC;
718 return VINF_SUCCESS;
719
720}
721
722
723/**
724 * Implements a 32-bit relative call.
725 *
726 * @param offDisp The displacement offset.
727 */
728IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
729{
730 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
731 uint32_t uOldPC = pCtx->eip + cbInstr;
732 uint32_t uNewPC = uOldPC + offDisp;
733 if (uNewPC > pCtx->cs.u32Limit)
734 return iemRaiseGeneralProtectionFault0(pIemCpu);
735
736 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
737 if (rcStrict != VINF_SUCCESS)
738 return rcStrict;
739
740 pCtx->rip = uNewPC;
741 return VINF_SUCCESS;
742}
743
744
745/**
746 * Implements a 64-bit indirect call.
747 *
748 * @param uNewPC The new program counter (RIP) value (loaded from the
749 * operand).
750 *
751 */
752IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
753{
754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
755 uint64_t uOldPC = pCtx->rip + cbInstr;
756 if (!IEM_IS_CANONICAL(uNewPC))
757 return iemRaiseGeneralProtectionFault0(pIemCpu);
758
759 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
760 if (rcStrict != VINF_SUCCESS)
761 return rcStrict;
762
763 pCtx->rip = uNewPC;
764 return VINF_SUCCESS;
765
766}
767
768
769/**
770 * Implements a 64-bit relative call.
771 *
772 * @param offDisp The displacement offset.
773 */
774IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
775{
776 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
777 uint64_t uOldPC = pCtx->rip + cbInstr;
778 uint64_t uNewPC = uOldPC + offDisp;
779 if (!IEM_IS_CANONICAL(uNewPC))
780 return iemRaiseNotCanonical(pIemCpu);
781
782 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
783 if (rcStrict != VINF_SUCCESS)
784 return rcStrict;
785
786 pCtx->rip = uNewPC;
787 return VINF_SUCCESS;
788}
789
790
791/**
792 * Implements far jumps and calls thru task segments (TSS).
793 *
794 * @param uSel The selector.
795 * @param enmBranch The kind of branching we're performing.
796 * @param enmEffOpSize The effective operand size.
797 * @param pDesc The descriptor corresponding to @a uSel. The type is
798 * task state segment.
799 */
800IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
801{
802 /* Call various functions to do the work. */
803 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
804}
805
806
807/**
808 * Implements far jumps and calls thru task gates.
809 *
810 * @param uSel The selector.
811 * @param enmBranch The kind of branching we're performing.
812 * @param enmEffOpSize The effective operand size.
813 * @param pDesc The descriptor corresponding to @a uSel. The type is
814 * task gate.
815 */
816IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
817{
818 /* Call various functions to do the work. */
819 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
820}
821
822
823/**
824 * Implements far jumps and calls thru call gates.
825 *
826 * @param uSel The selector.
827 * @param enmBranch The kind of branching we're performing.
828 * @param enmEffOpSize The effective operand size.
829 * @param pDesc The descriptor corresponding to @a uSel. The type is
830 * call gate.
831 */
832IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
833{
834 /* Call various functions to do the work. */
835 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
836}
837
838
839/**
840 * Implements far jumps and calls thru system selectors.
841 *
842 * @param uSel The selector.
843 * @param enmBranch The kind of branching we're performing.
844 * @param enmEffOpSize The effective operand size.
845 * @param pDesc The descriptor corresponding to @a uSel.
846 */
847IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
848{
849 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
850 Assert((uSel & X86_SEL_MASK_OFF_RPL));
851
852 if (IEM_IS_LONG_MODE(pIemCpu))
853 switch (pDesc->Legacy.Gen.u4Type)
854 {
855 case AMD64_SEL_TYPE_SYS_CALL_GATE:
856 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
857
858 default:
859 case AMD64_SEL_TYPE_SYS_LDT:
860 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
861 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
862 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
863 case AMD64_SEL_TYPE_SYS_INT_GATE:
864 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
865 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
866
867 }
868
869 switch (pDesc->Legacy.Gen.u4Type)
870 {
871 case X86_SEL_TYPE_SYS_286_CALL_GATE:
872 case X86_SEL_TYPE_SYS_386_CALL_GATE:
873 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
874
875 case X86_SEL_TYPE_SYS_TASK_GATE:
876 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
877
878 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
879 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
880 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
881
882 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
883 Log(("branch %04x -> busy 286 TSS\n", uSel));
884 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
885
886 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
887 Log(("branch %04x -> busy 386 TSS\n", uSel));
888 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
889
890 default:
891 case X86_SEL_TYPE_SYS_LDT:
892 case X86_SEL_TYPE_SYS_286_INT_GATE:
893 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
894 case X86_SEL_TYPE_SYS_386_INT_GATE:
895 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
896 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
897 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
898 }
899}
900
901
902/**
903 * Implements far jumps.
904 *
905 * @param uSel The selector.
906 * @param offSeg The segment offset.
907 * @param enmEffOpSize The effective operand size.
908 */
909IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
910{
911 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
912 NOREF(cbInstr);
913 Assert(offSeg <= UINT32_MAX);
914
915 /*
916 * Real mode and V8086 mode are easy. The only snag seems to be that
917 * CS.limit doesn't change and the limit check is done against the current
918 * limit.
919 */
920 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
921 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
922 {
923 if (offSeg > pCtx->cs.u32Limit)
924 return iemRaiseGeneralProtectionFault0(pIemCpu);
925
926 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
927 pCtx->rip = offSeg;
928 else
929 pCtx->rip = offSeg & UINT16_MAX;
930 pCtx->cs.Sel = uSel;
931 pCtx->cs.ValidSel = uSel;
932 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
933 pCtx->cs.u64Base = (uint32_t)uSel << 4;
934 return VINF_SUCCESS;
935 }
936
937 /*
938 * Protected mode. Need to parse the specified descriptor...
939 */
940 if (!(uSel & X86_SEL_MASK_OFF_RPL))
941 {
942 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
943 return iemRaiseGeneralProtectionFault0(pIemCpu);
944 }
945
946 /* Fetch the descriptor. */
947 IEMSELDESC Desc;
948 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
949 if (rcStrict != VINF_SUCCESS)
950 return rcStrict;
951
952 /* Is it there? */
953 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
954 {
955 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
956 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
957 }
958
959 /*
960 * Deal with it according to its type. We do the standard code selectors
961 * here and dispatch the system selectors to worker functions.
962 */
963 if (!Desc.Legacy.Gen.u1DescType)
964 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
965
966 /* Only code segments. */
967 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
968 {
969 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
970 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
971 }
972
973 /* L vs D. */
974 if ( Desc.Legacy.Gen.u1Long
975 && Desc.Legacy.Gen.u1DefBig
976 && IEM_IS_LONG_MODE(pIemCpu))
977 {
978 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
979 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
980 }
981
982 /* DPL/RPL/CPL check, where conforming segments make a difference. */
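/* Reminder: a conforming segment requires DPL <= CPL and ignores RPL; a non-conforming one requires DPL == CPL and RPL <= CPL. */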
983 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
984 {
985 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
986 {
987 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
988 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
989 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
990 }
991 }
992 else
993 {
994 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
995 {
996 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
997 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
998 }
999 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1000 {
1001 Log(("jmpf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1002 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1003 }
1004 }
1005
1006 /* Chop the high bits if 16-bit (Intel says so). */
1007 if (enmEffOpSize == IEMMODE_16BIT)
1008 offSeg &= UINT16_MAX;
1009
1010 /* Limit check. (Should alternatively check for non-canonical addresses
1011 here, but that is ruled out by offSeg being 32-bit, right?) */
1012 uint64_t u64Base;
1013 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1014 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1015 u64Base = 0;
1016 else
1017 {
1018 if (offSeg > cbLimit)
1019 {
1020 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1021 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1022 }
1023 u64Base = X86DESC_BASE(&Desc.Legacy);
1024 }
1025
1026 /*
1027 * Ok, everything checked out fine. Now set the accessed bit before
1028 * committing the result into CS, CSHID and RIP.
1029 */
1030 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1031 {
1032 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1033 if (rcStrict != VINF_SUCCESS)
1034 return rcStrict;
1035 /** @todo check what VT-x and AMD-V does. */
1036 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1037 }
1038
1039 /* commit */
1040 pCtx->rip = offSeg;
1041 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1042 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1043 pCtx->cs.ValidSel = pCtx->cs.Sel;
1044 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1045 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1046 pCtx->cs.u32Limit = cbLimit;
1047 pCtx->cs.u64Base = u64Base;
1048 /** @todo check if the hidden bits are loaded correctly for 64-bit
1049 * mode. */
1050 return VINF_SUCCESS;
1051}
1052
1053
1054/**
1055 * Implements far calls.
1056 *
1057 * This is very similar to iemCImpl_FarJmp.
1058 *
1059 * @param uSel The selector.
1060 * @param offSeg The segment offset.
1061 * @param enmEffOpSize The operand size (in case we need it).
1062 */
1063IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1064{
1065 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1066 VBOXSTRICTRC rcStrict;
1067 uint64_t uNewRsp;
1068 RTPTRUNION uPtrRet;
1069
1070 /*
1071 * Real mode and V8086 mode are easy. The only snag seems to be that
1072 * CS.limit doesn't change and the limit check is done against the current
1073 * limit.
1074 */
1075 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1076 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1077 {
1078 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1079
1080 /* Check stack first - may #SS(0). */
1081 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1082 &uPtrRet.pv, &uNewRsp);
1083 if (rcStrict != VINF_SUCCESS)
1084 return rcStrict;
1085
1086 /* Check the target address range. */
1087 if (offSeg > UINT32_MAX)
1088 return iemRaiseGeneralProtectionFault0(pIemCpu);
1089
1090 /* Everything is fine, push the return address. */
1091 if (enmEffOpSize == IEMMODE_16BIT)
1092 {
1093 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1094 uPtrRet.pu16[1] = pCtx->cs.Sel;
1095 }
1096 else
1097 {
1098 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1099 uPtrRet.pu16[3] = pCtx->cs.Sel;
1100 }
1101 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1102 if (rcStrict != VINF_SUCCESS)
1103 return rcStrict;
1104
1105 /* Branch. */
1106 pCtx->rip = offSeg;
1107 pCtx->cs.Sel = uSel;
1108 pCtx->cs.ValidSel = uSel;
1109 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1110 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1111 return VINF_SUCCESS;
1112 }
1113
1114 /*
1115 * Protected mode. Need to parse the specified descriptor...
1116 */
1117 if (!(uSel & X86_SEL_MASK_OFF_RPL))
1118 {
1119 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1120 return iemRaiseGeneralProtectionFault0(pIemCpu);
1121 }
1122
1123 /* Fetch the descriptor. */
1124 IEMSELDESC Desc;
1125 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1126 if (rcStrict != VINF_SUCCESS)
1127 return rcStrict;
1128
1129 /*
1130 * Deal with it according to its type. We do the standard code selectors
1131 * here and dispatch the system selectors to worker functions.
1132 */
1133 if (!Desc.Legacy.Gen.u1DescType)
1134 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1135
1136 /* Only code segments. */
1137 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1138 {
1139 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1140 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1141 }
1142
1143 /* L vs D. */
1144 if ( Desc.Legacy.Gen.u1Long
1145 && Desc.Legacy.Gen.u1DefBig
1146 && IEM_IS_LONG_MODE(pIemCpu))
1147 {
1148 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1149 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1150 }
1151
1152 /* DPL/RPL/CPL check, where conforming segments make a difference. */
1153 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1154 {
1155 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1156 {
1157 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1158 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1159 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1160 }
1161 }
1162 else
1163 {
1164 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1165 {
1166 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1167 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1168 }
1169 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1170 {
1171 Log(("callf %04x:%08RX64 -> RPL > DPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1172 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1173 }
1174 }
1175
1176 /* Is it there? */
1177 if (!Desc.Legacy.Gen.u1Present)
1178 {
1179 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1180 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1181 }
1182
1183 /* Check stack first - may #SS(0). */
1184 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1185 * 16-bit code cause a two or four byte CS to be pushed? */
1186 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1187 enmEffOpSize == IEMMODE_64BIT ? 8+8
1188 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1189 &uPtrRet.pv, &uNewRsp);
1190 if (rcStrict != VINF_SUCCESS)
1191 return rcStrict;
1192
1193 /* Chop the high bits if 16-bit (Intel says so). */
1194 if (enmEffOpSize == IEMMODE_16BIT)
1195 offSeg &= UINT16_MAX;
1196
1197 /* Limit / canonical check. */
1198 uint64_t u64Base;
1199 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1200 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1201 {
1202 if (!IEM_IS_CANONICAL(offSeg))
1203 {
1204 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1205 return iemRaiseNotCanonical(pIemCpu);
1206 }
1207 u64Base = 0;
1208 }
1209 else
1210 {
1211 if (offSeg > cbLimit)
1212 {
1213 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1214 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1215 }
1216 u64Base = X86DESC_BASE(&Desc.Legacy);
1217 }
1218
1219 /*
1220 * Now set the accessed bit before
1221 * writing the return address to the stack and committing the result into
1222 * CS, CSHID and RIP.
1223 */
1224 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1225 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1226 {
1227 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1228 if (rcStrict != VINF_SUCCESS)
1229 return rcStrict;
1230 /** @todo check what VT-x and AMD-V does. */
1231 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1232 }
1233
1234 /* stack */
1235 if (enmEffOpSize == IEMMODE_16BIT)
1236 {
1237 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1238 uPtrRet.pu16[1] = pCtx->cs.Sel;
1239 }
1240 else if (enmEffOpSize == IEMMODE_32BIT)
1241 {
1242 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1243 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1244 }
1245 else
1246 {
1247 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1248 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1249 }
1250 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1251 if (rcStrict != VINF_SUCCESS)
1252 return rcStrict;
1253
1254 /* commit */
1255 pCtx->rip = offSeg;
1256 pCtx->cs.Sel = uSel & X86_SEL_MASK_OFF_RPL;
1257 pCtx->cs.Sel |= pIemCpu->uCpl;
1258 pCtx->cs.ValidSel = pCtx->cs.Sel;
1259 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1260 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1261 pCtx->cs.u32Limit = cbLimit;
1262 pCtx->cs.u64Base = u64Base;
1263 /** @todo check if the hidden bits are loaded correctly for 64-bit
1264 * mode. */
1265 return VINF_SUCCESS;
1266}
1267
1268
1269/**
1270 * Implements retf.
1271 *
1272 * @param enmEffOpSize The effective operand size.
1273 * @param cbPop The number of bytes of arguments to pop from
1274 * the stack.
1275 */
1276IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1277{
1278 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1279 VBOXSTRICTRC rcStrict;
1280 RTCPTRUNION uPtrFrame;
1281 uint64_t uNewRsp;
1282 uint64_t uNewRip;
1283 uint16_t uNewCs;
1284 NOREF(cbInstr);
1285
1286 /*
1287 * Read the stack values first.
1288 */
1289 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1290 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1291 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1292 if (rcStrict != VINF_SUCCESS)
1293 return rcStrict;
1294 if (enmEffOpSize == IEMMODE_16BIT)
1295 {
1296 uNewRip = uPtrFrame.pu16[0];
1297 uNewCs = uPtrFrame.pu16[1];
1298 }
1299 else if (enmEffOpSize == IEMMODE_32BIT)
1300 {
1301 uNewRip = uPtrFrame.pu32[0];
1302 uNewCs = uPtrFrame.pu16[2];
1303 }
1304 else
1305 {
1306 uNewRip = uPtrFrame.pu64[0];
1307 uNewCs = uPtrFrame.pu16[4];
1308 }
1309
1310 /*
1311 * Real mode and V8086 mode are easy.
1312 */
1313 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1314 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1315 {
1316 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1317 /** @todo check how this is supposed to work if sp=0xfffe. */
1318
1319 /* Check the limit of the new EIP. */
1320 /** @todo Intel pseudo code only does the limit check for 16-bit
1321 * operands, AMD does not make any distinction. What is right? */
1322 if (uNewRip > pCtx->cs.u32Limit)
1323 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1324
1325 /* commit the operation. */
1326 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1327 if (rcStrict != VINF_SUCCESS)
1328 return rcStrict;
1329 pCtx->rip = uNewRip;
1330 pCtx->cs.Sel = uNewCs;
1331 pCtx->cs.ValidSel = uNewCs;
1332 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1333 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1334 /** @todo do we load attribs and limit as well? */
1335 if (cbPop)
1336 iemRegAddToRsp(pCtx, cbPop);
1337 return VINF_SUCCESS;
1338 }
1339
1340 /*
1341 * Protected mode is complicated, of course.
1342 */
1343 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
1344 {
1345 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1346 return iemRaiseGeneralProtectionFault0(pIemCpu);
1347 }
1348
1349 /* Fetch the descriptor. */
1350 IEMSELDESC DescCs;
1351 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1352 if (rcStrict != VINF_SUCCESS)
1353 return rcStrict;
1354
1355 /* Can only return to a code selector. */
1356 if ( !DescCs.Legacy.Gen.u1DescType
1357 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1358 {
1359 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1360 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1361 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1362 }
1363
1364 /* L vs D. */
1365 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1366 && DescCs.Legacy.Gen.u1DefBig
1367 && IEM_IS_LONG_MODE(pIemCpu))
1368 {
1369 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1370 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1371 }
1372
1373 /* DPL/RPL/CPL checks. */
1374 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1375 {
1376 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1377 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1378 }
1379
1380 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1381 {
1382 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1383 {
1384 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1385 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1386 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1387 }
1388 }
1389 else
1390 {
1391 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1392 {
1393 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1394 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1395 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1396 }
1397 }
1398
1399 /* Is it there? */
1400 if (!DescCs.Legacy.Gen.u1Present)
1401 {
1402 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1403 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1404 }
1405
1406 /*
1407 * Return to outer privilege? (We'll typically have entered via a call gate.)
1408 */
1409 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1410 {
1411 /* Read the return pointer, it comes before the parameters. */
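/* Stack layout for a return to an outer level, from the current stack pointer upwards: return IP/EIP/RIP, return CS, cbPop bytes of parameters, then the outer SP/ESP/RSP and SS. */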
1412 RTCPTRUNION uPtrStack;
1413 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1414 if (rcStrict != VINF_SUCCESS)
1415 return rcStrict;
1416 uint16_t uNewOuterSs;
1417 uint64_t uNewOuterRsp;
1418 if (enmEffOpSize == IEMMODE_16BIT)
1419 {
1420 uNewOuterRsp = uPtrFrame.pu16[0];
1421 uNewOuterSs = uPtrFrame.pu16[1];
1422 }
1423 else if (enmEffOpSize == IEMMODE_32BIT)
1424 {
1425 uNewOuterRsp = uPtrFrame.pu32[0];
1426 uNewOuterSs = uPtrFrame.pu16[2];
1427 }
1428 else
1429 {
1430 uNewOuterRsp = uPtrFrame.pu64[0];
1431 uNewOuterSs = uPtrFrame.pu16[4];
1432 }
1433
1434 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1435 and read the selector. */
1436 IEMSELDESC DescSs;
1437 if (!(uNewOuterSs & X86_SEL_MASK_OFF_RPL))
1438 {
1439 if ( !DescCs.Legacy.Gen.u1Long
1440 || (uNewOuterSs & X86_SEL_RPL) == 3)
1441 {
1442 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1443 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1444 return iemRaiseGeneralProtectionFault0(pIemCpu);
1445 }
1446 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1447 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1448 }
1449 else
1450 {
1451 /* Fetch the descriptor for the new stack segment. */
1452 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1453 if (rcStrict != VINF_SUCCESS)
1454 return rcStrict;
1455 }
1456
1457 /* Check that RPL of stack and code selectors match. */
1458 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1459 {
1460 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1461 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1462 }
1463
1464 /* Must be a writable data segment. */
1465 if ( !DescSs.Legacy.Gen.u1DescType
1466 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1467 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1468 {
1469 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1470 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1471 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1472 }
1473
1474 /* L vs D. (Not mentioned by Intel.) */
1475 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1476 && DescSs.Legacy.Gen.u1DefBig
1477 && IEM_IS_LONG_MODE(pIemCpu))
1478 {
1479 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1480 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1481 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1482 }
1483
1484 /* DPL/RPL/CPL checks. */
1485 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1486 {
1487 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1488 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1489 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1490 }
1491
1492 /* Is it there? */
1493 if (!DescSs.Legacy.Gen.u1Present)
1494 {
1495 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1496 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewOuterSs);
1497 }
1498
1499 /* Calc SS limit.*/
1500 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
1501
1502 /* Is RIP canonical or within CS.limit? */
1503 uint64_t u64Base;
1504 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1505
1506 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1507 {
1508 if (!IEM_IS_CANONICAL(uNewRip))
1509 {
1510 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1511 return iemRaiseNotCanonical(pIemCpu);
1512 }
1513 u64Base = 0;
1514 }
1515 else
1516 {
1517 if (uNewRip > cbLimitCs)
1518 {
1519 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1520 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1521 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1522 }
1523 u64Base = X86DESC_BASE(&DescCs.Legacy);
1524 }
1525
1526 /*
1527 * Now set the accessed bit before
1528 * writing the return address to the stack and committing the result into
1529 * CS, CSHID and RIP.
1530 */
1531 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1532 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1533 {
1534 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1535 if (rcStrict != VINF_SUCCESS)
1536 return rcStrict;
1537 /** @todo check what VT-x and AMD-V does. */
1538 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1539 }
1540 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1541 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1542 {
1543 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1544 if (rcStrict != VINF_SUCCESS)
1545 return rcStrict;
1546 /** @todo check what VT-x and AMD-V does. */
1547 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1548 }
1549
1550 /* commit */
1551 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1552 if (rcStrict != VINF_SUCCESS)
1553 return rcStrict;
1554 if (enmEffOpSize == IEMMODE_16BIT)
1555 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1556 else
1557 pCtx->rip = uNewRip;
1558 pCtx->cs.Sel = uNewCs;
1559 pCtx->cs.ValidSel = uNewCs;
1560 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1561 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1562 pCtx->cs.u32Limit = cbLimitCs;
1563 pCtx->cs.u64Base = u64Base;
1564 pCtx->rsp = uNewRsp;
1565 pCtx->ss.Sel = uNewOuterSs;
1566 pCtx->ss.ValidSel = uNewOuterSs;
1567 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1568 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
1569 pCtx->ss.u32Limit = cbLimitSs;
1570 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1571 pCtx->ss.u64Base = 0;
1572 else
1573 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
1574
1575 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1576 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
1577 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
1578 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
1579 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
1580
1581 /** @todo check if the hidden bits are loaded correctly for 64-bit
1582 * mode. */
1583
1584 if (cbPop)
1585 iemRegAddToRsp(pCtx, cbPop);
1586
1587 /* Done! */
1588 }
1589 /*
1590 * Return to the same privilege level
1591 */
1592 else
1593 {
1594 /* Limit / canonical check. */
1595 uint64_t u64Base;
1596 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1597
1598 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1599 {
1600 if (!IEM_IS_CANONICAL(uNewRip))
1601 {
1602 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1603 return iemRaiseNotCanonical(pIemCpu);
1604 }
1605 u64Base = 0;
1606 }
1607 else
1608 {
1609 if (uNewRip > cbLimitCs)
1610 {
1611 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1612 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1613 }
1614 u64Base = X86DESC_BASE(&DescCs.Legacy);
1615 }
1616
1617 /*
1618 * Now set the accessed bit before
1619 * writing the return address to the stack and committing the result into
1620 * CS, CSHID and RIP.
1621 */
1622 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1623 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1624 {
1625 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1626 if (rcStrict != VINF_SUCCESS)
1627 return rcStrict;
1628 /** @todo check what VT-x and AMD-V does. */
1629 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1630 }
1631
1632 /* commit */
1633 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1634 if (rcStrict != VINF_SUCCESS)
1635 return rcStrict;
1636 if (enmEffOpSize == IEMMODE_16BIT)
1637 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1638 else
1639 pCtx->rip = uNewRip;
1640 pCtx->cs.Sel = uNewCs;
1641 pCtx->cs.ValidSel = uNewCs;
1642 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1643 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1644 pCtx->cs.u32Limit = cbLimitCs;
1645 pCtx->cs.u64Base = u64Base;
1646 /** @todo check if the hidden bits are loaded correctly for 64-bit
1647 * mode. */
1648 if (cbPop)
1649 iemRegAddToRsp(pCtx, cbPop);
1650 }
1651 return VINF_SUCCESS;
1652}
1653
1654
1655/**
1656 * Implements retn.
1657 *
1658 * We're doing this in C because of the \#GP that might be raised if the popped
1659 * program counter is out of bounds.
1660 *
1661 * @param enmEffOpSize The effective operand size.
1662 * @param cbPop The number of bytes of arguments to pop from
1663 * the stack.
1664 */
1665IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1666{
1667 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1668 NOREF(cbInstr);
1669
1670 /* Fetch the RSP from the stack. */
1671 VBOXSTRICTRC rcStrict;
1672 RTUINT64U NewRip;
1673 RTUINT64U NewRsp;
1674 NewRsp.u = pCtx->rsp;
1675 switch (enmEffOpSize)
1676 {
1677 case IEMMODE_16BIT:
1678 NewRip.u = 0;
1679 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1680 break;
1681 case IEMMODE_32BIT:
1682 NewRip.u = 0;
1683 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1684 break;
1685 case IEMMODE_64BIT:
1686 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1687 break;
1688 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1689 }
1690 if (rcStrict != VINF_SUCCESS)
1691 return rcStrict;
1692
1693 /* Check the new RSP before loading it. */
1694 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1695 * of it. The canonical test is performed here and for call. */
1696 if (enmEffOpSize != IEMMODE_64BIT)
1697 {
1698 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
1699 {
1700 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
1701 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1702 }
1703 }
1704 else
1705 {
1706 if (!IEM_IS_CANONICAL(NewRip.u))
1707 {
1708 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1709 return iemRaiseNotCanonical(pIemCpu);
1710 }
1711 }
1712
1713 /* Commit it. */
1714 pCtx->rip = NewRip.u;
1715 pCtx->rsp = NewRsp.u;
1716 if (cbPop)
1717 iemRegAddToRsp(pCtx, cbPop);
1718
1719 return VINF_SUCCESS;
1720}
1721
1722
1723/**
1724 * Implements enter.
1725 *
1726 * We're doing this in C because the instruction is insane; even in the
1727 * trivial cParameters=0 case, dealing with the stack is tedious.
1728 *
1729 * @param enmEffOpSize The effective operand size.
 * @param cbFrame The size of the stack frame to allocate, in bytes.
 * @param cParameters The lexical nesting level (0..31).
1730 */
1731IEM_CIMPL_DEF_3(iemCImpl_enter, IEMMODE, enmEffOpSize, uint16_t, cbFrame, uint8_t, cParameters)
1732{
1733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1734
1735 /* Push RBP, saving the old value in TmpRbp. */
1736 RTUINT64U NewRsp; NewRsp.u = pCtx->rsp;
1737 RTUINT64U TmpRbp; TmpRbp.u = pCtx->rbp;
1738 RTUINT64U NewRbp;
1739 VBOXSTRICTRC rcStrict;
1740 if (enmEffOpSize == IEMMODE_64BIT)
1741 {
1742 rcStrict = iemMemStackPushU64Ex(pIemCpu, TmpRbp.u, &NewRsp);
1743 NewRbp = NewRsp;
1744 }
1745 else if (pCtx->ss.Attr.n.u1DefBig)
1746 {
1747 rcStrict = iemMemStackPushU32Ex(pIemCpu, TmpRbp.DWords.dw0, &NewRsp);
1748 NewRbp = NewRsp;
1749 }
1750 else
1751 {
1752 rcStrict = iemMemStackPushU16Ex(pIemCpu, TmpRbp.Words.w0, &NewRsp);
1753 NewRbp = TmpRbp;
1754 NewRbp.Words.w0 = NewRsp.Words.w0;
1755 }
1756 if (rcStrict != VINF_SUCCESS)
1757 return rcStrict;
1758
1759 /* Copy the parameters (aka nesting levels by Intel). */
1760 cParameters &= 0x1f;
1761 if (cParameters > 0)
1762 {
1763 switch (enmEffOpSize)
1764 {
1765 case IEMMODE_16BIT:
1766 if (pCtx->ss.Attr.n.u1DefBig)
1767 TmpRbp.DWords.dw0 -= 2;
1768 else
1769 TmpRbp.Words.w0 -= 2;
1770 do
1771 {
1772 uint16_t u16Tmp;
1773 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Tmp, &TmpRbp);
1774 if (rcStrict != VINF_SUCCESS)
1775 break;
1776 rcStrict = iemMemStackPushU16Ex(pIemCpu, u16Tmp, &NewRsp);
1777 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1778 break;
1779
1780 case IEMMODE_32BIT:
1781 if (pCtx->ss.Attr.n.u1DefBig)
1782 TmpRbp.DWords.dw0 -= 4;
1783 else
1784 TmpRbp.Words.w0 -= 4;
1785 do
1786 {
1787 uint32_t u32Tmp;
1788 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Tmp, &TmpRbp);
1789 if (rcStrict != VINF_SUCCESS)
1790 break;
1791 rcStrict = iemMemStackPushU32Ex(pIemCpu, u32Tmp, &NewRsp);
1792 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1793 break;
1794
1795 case IEMMODE_64BIT:
1796 TmpRbp.u -= 8;
1797 do
1798 {
1799 uint64_t u64Tmp;
1800 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Tmp, &TmpRbp);
1801 if (rcStrict != VINF_SUCCESS)
1802 break;
1803 rcStrict = iemMemStackPushU64Ex(pIemCpu, u64Tmp, &NewRsp);
1804 } while (--cParameters > 0 && rcStrict == VINF_SUCCESS);
1805 break;
1806
1807 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1808 }
1809 if (rcStrict != VINF_SUCCESS)
1810 return rcStrict;
1811
1812 /* Push the new RBP */
1813 if (enmEffOpSize == IEMMODE_64BIT)
1814 rcStrict = iemMemStackPushU64Ex(pIemCpu, NewRbp.u, &NewRsp);
1815 else if (pCtx->ss.Attr.n.u1DefBig)
1816 rcStrict = iemMemStackPushU32Ex(pIemCpu, NewRbp.DWords.dw0, &NewRsp);
1817 else
1818 rcStrict = iemMemStackPushU16Ex(pIemCpu, NewRbp.Words.w0, &NewRsp);
1819 if (rcStrict != VINF_SUCCESS)
1820 return rcStrict;
1821
1822 }
1823
1824 /* Recalc RSP. */
1825 iemRegSubFromRspEx(&NewRsp, cbFrame, pCtx);
1826
1827 /** @todo Should probe write access at the new RSP according to AMD. */
1828
1829 /* Commit it. */
1830 pCtx->rbp = NewRbp.u;
1831 pCtx->rsp = NewRsp.u;
1832 iemRegAddToRip(pIemCpu, cbInstr);
1833
1834 return VINF_SUCCESS;
1835}
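

#if 0 /* Illustration only, not part of the build. */
/**
 * A sketch of the architectural 64-bit ENTER algorithm (Intel/AMD pseudo code
 * style) that iemCImpl_enter above emulates, written against a flat qword
 * stack with no faulting, no SS attribute handling and no 16/32-bit operand
 * sizes.  The helper name and the stack model are assumptions for the example.
 */
static void enterSketch64(uint64_t *pauStack, uint64_t *puRsp, uint64_t *puRbp,
                          uint16_t cbFrame, uint8_t cLevels)
{
    uint64_t uRsp = *puRsp;
    cLevels &= 0x1f;                                /* only five bits are used, as above */

    uRsp -= 8;                                      /* push the caller's RBP */
    pauStack[uRsp / 8] = *puRbp;
    uint64_t const uFrameTmp = uRsp;                /* this becomes the new frame pointer */

    if (cLevels > 0)
    {
        for (uint8_t i = 1; i < cLevels; i++)       /* copy the enclosing frame pointers */
        {
            uRsp -= 8;
            pauStack[uRsp / 8] = pauStack[(*puRbp - 8 * i) / 8];
        }
        uRsp -= 8;                                  /* and push the new frame pointer itself */
        pauStack[uRsp / 8] = uFrameTmp;
    }

    *puRbp = uFrameTmp;
    *puRsp = uRsp - cbFrame;                        /* finally allocate the local frame */
}
#endif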
1836
1837
1838
1839/**
1840 * Implements leave.
1841 *
1842 * We're doing this in C because messing with the stack registers is annoying
1843 * since they depend on the SS attributes.
1844 *
1845 * @param enmEffOpSize The effective operand size.
1846 */
1847IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1848{
1849 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1850
1851 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1852 RTUINT64U NewRsp;
1853 if (pCtx->ss.Attr.n.u1Long)
1854 NewRsp.u = pCtx->rbp;
1855 else if (pCtx->ss.Attr.n.u1DefBig)
1856 NewRsp.u = pCtx->ebp;
1857 else
1858 {
1859 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1860 NewRsp.u = pCtx->rsp;
1861 NewRsp.Words.w0 = pCtx->bp;
1862 }
1863
1864 /* Pop RBP according to the operand size. */
1865 VBOXSTRICTRC rcStrict;
1866 RTUINT64U NewRbp;
1867 switch (enmEffOpSize)
1868 {
1869 case IEMMODE_16BIT:
1870 NewRbp.u = pCtx->rbp;
1871 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1872 break;
1873 case IEMMODE_32BIT:
1874 NewRbp.u = 0;
1875 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1876 break;
1877 case IEMMODE_64BIT:
1878 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1879 break;
1880 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1881 }
1882 if (rcStrict != VINF_SUCCESS)
1883 return rcStrict;
1884
1885
1886 /* Commit it. */
1887 pCtx->rbp = NewRbp.u;
1888 pCtx->rsp = NewRsp.u;
1889 iemRegAddToRip(pIemCpu, cbInstr);
1890
1891 return VINF_SUCCESS;
1892}
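

#if 0 /* Illustration only, not part of the build. */
/**
 * LEAVE is the inverse of ENTER: the frame pointer becomes the stack pointer
 * again and the caller's frame pointer is popped back.  A minimal flat-stack
 * sketch of the 64-bit case handled above; the helper name and stack model
 * are invented for the example.
 */
static void leaveSketch64(uint64_t const *pauStack, uint64_t *puRsp, uint64_t *puRbp)
{
    uint64_t const uRsp = *puRbp;       /* RSP := RBP, discarding the local frame */
    *puRbp = pauStack[uRsp / 8];        /* RBP := pop(), restoring the caller's frame pointer */
    *puRsp = uRsp + 8;
}
#endif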
1893
1894
1895/**
1896 * Implements int3 and int XX.
1897 *
1898 * @param u8Int The interrupt vector number.
1899 * @param fIsBpInstr Whether this is the int3 breakpoint instruction.
1900 */
1901IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1902{
1903 Assert(pIemCpu->cXcptRecursions == 0);
1904 return iemRaiseXcptOrInt(pIemCpu,
1905 cbInstr,
1906 u8Int,
1907 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1908 0,
1909 0);
1910}
1911
1912
1913/**
1914 * Implements iret for real mode and V8086 mode.
1915 *
1916 * @param enmEffOpSize The effective operand size.
1917 */
1918IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1919{
1920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1921 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1922 X86EFLAGS Efl;
1923 Efl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
1924 NOREF(cbInstr);
1925
1926 /*
1927 * In V8086 mode, iret raises #GP(0) unless CR4.VME is set.
1928 */
1929 if ( pCtx->eflags.Bits.u1VM
1930 && !(pCtx->cr4 & X86_CR4_VME))
1931 return iemRaiseGeneralProtectionFault0(pIemCpu);
1932
1933 /*
1934 * Do the stack bits, but don't commit RSP before everything checks
1935 * out right.
1936 */
1937 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1938 VBOXSTRICTRC rcStrict;
1939 RTCPTRUNION uFrame;
1940 uint16_t uNewCs;
1941 uint32_t uNewEip;
1942 uint32_t uNewFlags;
1943 uint64_t uNewRsp;
1944 if (enmEffOpSize == IEMMODE_32BIT)
1945 {
1946 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1947 if (rcStrict != VINF_SUCCESS)
1948 return rcStrict;
1949 uNewEip = uFrame.pu32[0];
1950 uNewCs = (uint16_t)uFrame.pu32[1];
1951 uNewFlags = uFrame.pu32[2];
1952 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1953 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1954 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1955 | X86_EFL_ID;
1956 uNewFlags |= Efl.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1957 }
1958 else
1959 {
1960 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1961 if (rcStrict != VINF_SUCCESS)
1962 return rcStrict;
1963 uNewEip = uFrame.pu16[0];
1964 uNewCs = uFrame.pu16[1];
1965 uNewFlags = uFrame.pu16[2];
1966 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1967 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1968 uNewFlags |= Efl.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1969 /** @todo The intel pseudo code does not indicate what happens to
1970 * reserved flags. We just ignore them. */
1971 }
1972 /** @todo Check how this is supposed to work if sp=0xfffe. */
1973
1974 /*
1975 * Check the limit of the new EIP.
1976 */
1977 /** @todo Only the AMD pseudo code checks the limit here; which
1978 * behavior is right? */
1979 if (uNewEip > pCtx->cs.u32Limit)
1980 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1981
1982 /*
1983 * V8086 checks and flag adjustments
1984 */
1985 if (Efl.Bits.u1VM)
1986 {
1987 if (Efl.Bits.u2IOPL == 3)
1988 {
1989 /* Preserve IOPL and clear RF. */
1990 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1991 uNewFlags |= Efl.u & (X86_EFL_IOPL);
1992 }
1993 else if ( enmEffOpSize == IEMMODE_16BIT
1994 && ( !(uNewFlags & X86_EFL_IF)
1995 || !Efl.Bits.u1VIP )
1996 && !(uNewFlags & X86_EFL_TF) )
1997 {
1998 /* Move IF to VIF, clear RF and preserve IF and IOPL.*/
1999 uNewFlags &= ~X86_EFL_VIF;
2000 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
2001 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
2002 uNewFlags |= Efl.u & (X86_EFL_IF | X86_EFL_IOPL);
2003 }
2004 else
2005 return iemRaiseGeneralProtectionFault0(pIemCpu);
2006 }
2007
2008 /*
2009 * Commit the operation.
2010 */
2011 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
2012 if (rcStrict != VINF_SUCCESS)
2013 return rcStrict;
2014 pCtx->rip = uNewEip;
2015 pCtx->cs.Sel = uNewCs;
2016 pCtx->cs.ValidSel = uNewCs;
2017 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2018 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
2019 /** @todo do we load attribs and limit as well? */
2020 Assert(uNewFlags & X86_EFL_1);
2021 IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
2022
2023 return VINF_SUCCESS;
2024}
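

#if 0 /* Illustration only, not part of the build. */
/**
 * The two real-mode/V8086 IRET stack frame shapes popped above, spelled out
 * as structures.  The type names are invented for the example; the emulation
 * reads the frame through iemMemStackPopBeginSpecial rather than structures.
 */
#pragma pack(1)
typedef struct IRETFRAME16
{
    uint16_t uIp;       /* uFrame.pu16[0] */
    uint16_t uCs;       /* uFrame.pu16[1] */
    uint16_t uFlags;    /* uFrame.pu16[2] */
} IRETFRAME16;          /* 6 bytes, matching iemMemStackPopBeginSpecial(..., 6, ...) */

typedef struct IRETFRAME32
{
    uint32_t uEip;      /* uFrame.pu32[0] */
    uint32_t uCs;       /* uFrame.pu32[1], only the low 16 bits are used */
    uint32_t uFlags;    /* uFrame.pu32[2] */
} IRETFRAME32;          /* 12 bytes, matching iemMemStackPopBeginSpecial(..., 12, ...) */
#pragma pack()
#endif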
2025
2026
2027/**
2028 * Loads a segment register when entering V8086 mode.
2029 *
2030 * @param pSReg The segment register.
2031 * @param uSeg The segment to load.
2032 */
2033static void iemCImplCommonV8086LoadSeg(PCPUMSELREG pSReg, uint16_t uSeg)
2034{
2035 pSReg->Sel = uSeg;
2036 pSReg->ValidSel = uSeg;
2037 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
2038 pSReg->u64Base = (uint32_t)uSeg << 4;
2039 pSReg->u32Limit = 0xffff;
2040 pSReg->Attr.u = X86_SEL_TYPE_RW_ACC | RT_BIT(4) /*!sys*/ | RT_BIT(7) /*P*/ | (3 /*DPL*/ << 5); /* VT-x wants 0xf3 */
2041 /** @todo Testcase: Check if VT-x really needs this and what it does itself when
2042 * IRET'ing to V8086. */
2043}
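

#if 0 /* Illustration only, not part of the build. */
/**
 * In real and V8086 mode the hidden base is simply the selector shifted left
 * by four, so a linear address is formed as below.  The helper name is
 * invented for the example; offsets are capped at 0xffff by the u32Limit set
 * above.
 */
static uint32_t v8086LinearAddrSketch(uint16_t uSeg, uint16_t offSeg)
{
    uint32_t const uBase = (uint32_t)uSeg << 4;     /* same as pSReg->u64Base above */
    return uBase + offSeg;
}
#endif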
2044
2045
2046/**
2047 * Implements iret for protected mode returning to V8086 mode.
2048 *
2049 * @param pCtx Pointer to the CPU context.
2050 * @param uNewEip The new EIP.
2051 * @param uNewCs The new CS.
2052 * @param uNewFlags The new EFLAGS.
2053 * @param uNewRsp The RSP after the initial IRET frame.
2054 */
2055IEM_CIMPL_DEF_5(iemCImpl_iret_prot_v8086, PCPUMCTX, pCtx, uint32_t, uNewEip, uint16_t, uNewCs,
2056 uint32_t, uNewFlags, uint64_t, uNewRsp)
2057{
2058#if 0
2059 if (!LogIs6Enabled())
2060 {
2061 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
2062 RTLogFlags(NULL, "enabled");
2063 return VERR_IEM_RESTART_INSTRUCTION;
2064 }
2065#endif
2066
2067 /*
2068 * Pop the V8086 specific frame bits off the stack.
2069 */
2070 VBOXSTRICTRC rcStrict;
2071 RTCPTRUNION uFrame;
2072 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 24, &uFrame.pv, &uNewRsp);
2073 if (rcStrict != VINF_SUCCESS)
2074 return rcStrict;
2075 uint32_t uNewEsp = uFrame.pu32[0];
2076 uint16_t uNewSs = uFrame.pu32[1];
2077 uint16_t uNewEs = uFrame.pu32[2];
2078 uint16_t uNewDs = uFrame.pu32[3];
2079 uint16_t uNewFs = uFrame.pu32[4];
2080 uint16_t uNewGs = uFrame.pu32[5];
2081 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2082 if (rcStrict != VINF_SUCCESS)
2083 return rcStrict;
2084
2085 /*
2086 * Commit the operation.
2087 */
2088 iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
2089 iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
2090 iemCImplCommonV8086LoadSeg(&pCtx->es, uNewEs);
2091 iemCImplCommonV8086LoadSeg(&pCtx->ds, uNewDs);
2092 iemCImplCommonV8086LoadSeg(&pCtx->fs, uNewFs);
2093 iemCImplCommonV8086LoadSeg(&pCtx->gs, uNewGs);
2094 pCtx->rip = uNewEip;
2095 pCtx->rsp = uNewEsp;
2096 pCtx->rflags.u = uNewFlags;
2097 pIemCpu->uCpl = 3;
2098
2099 return VINF_SUCCESS;
2100}
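

#if 0 /* Illustration only, not part of the build. */
/**
 * The tail of the IRET-to-V8086 stack frame popped above (24 bytes, six
 * dwords), following the EIP/CS/EFLAGS part already consumed by
 * iemCImpl_iret_prot.  The structure name is invented for the example.
 */
#pragma pack(1)
typedef struct IRETV86TAIL
{
    uint32_t uEsp;      /* uFrame.pu32[0] */
    uint32_t uSs;       /* uFrame.pu32[1], low 16 bits used */
    uint32_t uEs;       /* uFrame.pu32[2], low 16 bits used */
    uint32_t uDs;       /* uFrame.pu32[3], low 16 bits used */
    uint32_t uFs;       /* uFrame.pu32[4], low 16 bits used */
    uint32_t uGs;       /* uFrame.pu32[5], low 16 bits used */
} IRETV86TAIL;
#pragma pack()
#endif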
2101
2102
2103/**
2104 * Implements iret for protected mode returning via a nested task.
2105 *
2106 * @param enmEffOpSize The effective operand size.
2107 */
2108IEM_CIMPL_DEF_1(iemCImpl_iret_prot_NestedTask, IEMMODE, enmEffOpSize)
2109{
2110 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2111}
2112
2113
2114/**
2115 * Implements iret for protected mode.
2116 *
2117 * @param enmEffOpSize The effective operand size.
2118 */
2119IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
2120{
2121 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2122 NOREF(cbInstr);
2123
2124 /*
2125 * Nested task return.
2126 */
2127 if (pCtx->eflags.Bits.u1NT)
2128 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot_NestedTask, enmEffOpSize);
2129
2130 /*
2131 * Normal return.
2132 *
2133 * Do the stack bits, but don't commit RSP before everything checks
2134 * out right.
2135 */
2136 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
2137 VBOXSTRICTRC rcStrict;
2138 RTCPTRUNION uFrame;
2139 uint16_t uNewCs;
2140 uint32_t uNewEip;
2141 uint32_t uNewFlags;
2142 uint64_t uNewRsp;
2143 if (enmEffOpSize == IEMMODE_32BIT)
2144 {
2145 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
2146 if (rcStrict != VINF_SUCCESS)
2147 return rcStrict;
2148 uNewEip = uFrame.pu32[0];
2149 uNewCs = (uint16_t)uFrame.pu32[1];
2150 uNewFlags = uFrame.pu32[2];
2151 }
2152 else
2153 {
2154 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
2155 if (rcStrict != VINF_SUCCESS)
2156 return rcStrict;
2157 uNewEip = uFrame.pu16[0];
2158 uNewCs = uFrame.pu16[1];
2159 uNewFlags = uFrame.pu16[2];
2160 }
2161 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
2162 if (rcStrict != VINF_SUCCESS)
2163 return rcStrict;
2164
2165 /*
2166 * We're hopefully not returning to V8086 mode...
2167 */
2168 if ( (uNewFlags & X86_EFL_VM)
2169 && pIemCpu->uCpl == 0)
2170 {
2171 Assert(enmEffOpSize == IEMMODE_32BIT);
2172 return IEM_CIMPL_CALL_5(iemCImpl_iret_prot_v8086, pCtx, uNewEip, uNewCs, uNewFlags, uNewRsp);
2173 }
2174
2175 /*
2176 * Protected mode.
2177 */
2178 /* Read the CS descriptor. */
2179 if (!(uNewCs & X86_SEL_MASK_OFF_RPL))
2180 {
2181 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
2182 return iemRaiseGeneralProtectionFault0(pIemCpu);
2183 }
2184
2185 IEMSELDESC DescCS;
2186 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
2187 if (rcStrict != VINF_SUCCESS)
2188 {
2189 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
2190 return rcStrict;
2191 }
2192
2193 /* Must be a code descriptor. */
2194 if (!DescCS.Legacy.Gen.u1DescType)
2195 {
2196 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2197 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2198 }
2199 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
2200 {
2201 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
2202 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2203 }
2204
2205 /* Privilege checks. */
2206 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
2207 {
2208 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
2209 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2210 }
2211 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
2212 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
2213 {
2214 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
2215 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2216 }
2217
2218 /* Present? */
2219 if (!DescCS.Legacy.Gen.u1Present)
2220 {
2221 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2222 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2223 }
2224
2225 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2226
2227 /*
2228 * Return to outer level?
2229 */
2230 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2231 {
2232 uint16_t uNewSS;
2233 uint32_t uNewESP;
2234 if (enmEffOpSize == IEMMODE_32BIT)
2235 {
2236 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2237 if (rcStrict != VINF_SUCCESS)
2238 return rcStrict;
2239 uNewESP = uFrame.pu32[0];
2240 uNewSS = (uint16_t)uFrame.pu32[1];
2241 }
2242 else
2243 {
2244 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2245 if (rcStrict != VINF_SUCCESS)
2246 return rcStrict;
2247 uNewESP = uFrame.pu16[0];
2248 uNewSS = uFrame.pu16[1];
2249 }
2250 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2251 if (rcStrict != VINF_SUCCESS)
2252 return rcStrict;
2253
2254 /* Read the SS descriptor. */
2255 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
2256 {
2257 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2258 return iemRaiseGeneralProtectionFault0(pIemCpu);
2259 }
2260
2261 IEMSELDESC DescSS;
2262 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2263 if (rcStrict != VINF_SUCCESS)
2264 {
2265 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2266 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2267 return rcStrict;
2268 }
2269
2270 /* Privilege checks. */
2271 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2272 {
2273 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2274 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2275 }
2276 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2277 {
2278 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2279 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2280 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2281 }
2282
2283 /* Must be a writeable data segment descriptor. */
2284 if (!DescSS.Legacy.Gen.u1DescType)
2285 {
2286 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2287 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2288 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2289 }
2290 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2291 {
2292 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2293 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2294 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2295 }
2296
2297 /* Present? */
2298 if (!DescSS.Legacy.Gen.u1Present)
2299 {
2300 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2301 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2302 }
2303
2304 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2305
2306 /* Check EIP. */
2307 if (uNewEip > cbLimitCS)
2308 {
2309 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2310 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2311 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2312 }
2313
2314 /*
2315 * Commit the changes, marking CS and SS accessed first since
2316 * that may fail.
2317 */
2318 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2319 {
2320 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2321 if (rcStrict != VINF_SUCCESS)
2322 return rcStrict;
2323 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2324 }
2325 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2326 {
2327 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2328 if (rcStrict != VINF_SUCCESS)
2329 return rcStrict;
2330 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2331 }
2332
2333 pCtx->rip = uNewEip;
2334 pCtx->cs.Sel = uNewCs;
2335 pCtx->cs.ValidSel = uNewCs;
2336 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2337 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2338 pCtx->cs.u32Limit = cbLimitCS;
2339 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2340 pCtx->rsp = uNewESP;
2341 pCtx->ss.Sel = uNewSS;
2342 pCtx->ss.ValidSel = uNewSS;
2343 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2344 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2345 pCtx->ss.u32Limit = cbLimitSs;
2346 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2347
2348 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2349 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2350 if (enmEffOpSize != IEMMODE_16BIT)
2351 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2352 if (pIemCpu->uCpl == 0)
2353 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2354 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2355 fEFlagsMask |= X86_EFL_IF;
2356 uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
2357 fEFlagsNew &= ~fEFlagsMask;
2358 fEFlagsNew |= uNewFlags & fEFlagsMask;
2359 IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
2360
2361 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2362 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
2363 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->es);
2364 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->fs);
2365 iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->gs);
2366
2367 /* Done! */
2368
2369 }
2370 /*
2371 * Return to the same level.
2372 */
2373 else
2374 {
2375 /* Check EIP. */
2376 if (uNewEip > cbLimitCS)
2377 {
2378 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2379 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2380 }
2381
2382 /*
2383 * Commit the changes, marking CS first since it may fail.
2384 */
2385 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2386 {
2387 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2388 if (rcStrict != VINF_SUCCESS)
2389 return rcStrict;
2390 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2391 }
2392
2393 pCtx->rip = uNewEip;
2394 pCtx->cs.Sel = uNewCs;
2395 pCtx->cs.ValidSel = uNewCs;
2396 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2397 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2398 pCtx->cs.u32Limit = cbLimitCS;
2399 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2400 pCtx->rsp = uNewRsp;
2401
2402 X86EFLAGS NewEfl;
2403 NewEfl.u = IEMMISC_GET_EFL(pIemCpu, pCtx);
2404 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2405 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2406 if (enmEffOpSize != IEMMODE_16BIT)
2407 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2408 if (pIemCpu->uCpl == 0)
2409 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2410 else if (pIemCpu->uCpl <= NewEfl.Bits.u2IOPL)
2411 fEFlagsMask |= X86_EFL_IF;
2412 NewEfl.u &= ~fEFlagsMask;
2413 NewEfl.u |= fEFlagsMask & uNewFlags;
2414 IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u);
2415 /* Done! */
2416 }
2417 return VINF_SUCCESS;
2418}
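

#if 0 /* Illustration only, not part of the build. */
/**
 * How the EFLAGS restore mask used twice above is built: which flags IRET may
 * update depends on the operand size, the current CPL and IOPL, and VM is
 * never touched on these paths.  The helper name is invented for the example.
 */
static uint32_t iretProtEflMaskSketch(IEMMODE enmEffOpSize, uint8_t uCpl, uint8_t uIopl)
{
    uint32_t fMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
                   | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
    if (enmEffOpSize != IEMMODE_16BIT)
        fMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;  /* only present in 32-bit frames */
    if (uCpl == 0)
        fMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP;
    else if (uCpl <= uIopl)
        fMask |= X86_EFL_IF;                            /* IF only; IOPL stays put */
    return fMask;
}
#endif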
2419
2420
2421/**
2422 * Implements iret for long mode.
2423 *
2424 * @param enmEffOpSize The effective operand size.
2425 */
2426IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2427{
2428 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2429 //VBOXSTRICTRC rcStrict;
2430 //uint64_t uNewRsp;
2431
2432 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2433 IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
2434}
2435
2436
2437/**
2438 * Implements iret.
2439 *
2440 * @param enmEffOpSize The effective operand size.
2441 */
2442IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2443{
2444 /*
2445 * Call a mode specific worker.
2446 */
2447 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2448 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2449 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2450 if (IEM_IS_LONG_MODE(pIemCpu))
2451 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2452
2453 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2454}
2455
2456
2457/**
2458 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2459 *
2460 * @param iSegReg The segment register number (valid).
2461 * @param uSel The new selector value.
2462 */
2463IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2464{
2465 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2466 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2467 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2468
2469 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2470
2471 /*
2472 * Real mode and V8086 mode are easy.
2473 */
2474 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2475 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2476 {
2477 *pSel = uSel;
2478 pHid->u64Base = (uint32_t)uSel << 4;
2479 pHid->ValidSel = uSel;
2480 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2481#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2482 /** @todo Does the CPU actually load limits and attributes in the
2483 * real/V8086 mode segment load case? It doesn't for CS in far
2484 * jumps... Affects unreal mode. */
2485 pHid->u32Limit = 0xffff;
2486 pHid->Attr.u = 0;
2487 pHid->Attr.n.u1Present = 1;
2488 pHid->Attr.n.u1DescType = 1;
2489 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2490 ? X86_SEL_TYPE_RW
2491 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2492#endif
2493 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2494 iemRegAddToRip(pIemCpu, cbInstr);
2495 return VINF_SUCCESS;
2496 }
2497
2498 /*
2499 * Protected mode.
2500 *
2501 * Check if it's a null segment selector value first, that's OK for DS, ES,
2502 * FS and GS. If not null, then we have to load and parse the descriptor.
2503 */
2504 if (!(uSel & X86_SEL_MASK_OFF_RPL))
2505 {
2506 if (iSegReg == X86_SREG_SS)
2507 {
2508 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2509 || pIemCpu->uCpl != 0
2510 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2511 {
2512 Log(("load sreg -> invalid stack selector, #GP(0)\n", uSel));
2513 return iemRaiseGeneralProtectionFault0(pIemCpu);
2514 }
2515
2516 /* In 64-bit kernel mode, the stack can be 0 because of the way
2517 interrupts are dispatched when in kernel ctx. Just load the
2518 selector value into the register and leave the hidden bits
2519 as is. */
2520 *pSel = uSel;
2521 pHid->ValidSel = uSel;
2522 iemRegAddToRip(pIemCpu, cbInstr);
2523 return VINF_SUCCESS;
2524 }
2525
2526 *pSel = uSel; /* Not RPL, remember :-) */
2527 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2528 && iSegReg != X86_SREG_FS
2529 && iSegReg != X86_SREG_GS)
2530 {
2531 /** @todo figure out what this actually does, it works. Needs
2532 * testcase! */
2533 pHid->Attr.u = 0;
2534 pHid->Attr.n.u1Present = 1;
2535 pHid->Attr.n.u1Long = 1;
2536 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2537 pHid->Attr.n.u2Dpl = 3;
2538 pHid->u32Limit = 0;
2539 pHid->u64Base = 0;
2540 pHid->ValidSel = uSel;
2541 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2542 }
2543 else
2544 iemHlpLoadNullDataSelectorProt(pHid, uSel);
2545 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2546 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2547
2548 iemRegAddToRip(pIemCpu, cbInstr);
2549 return VINF_SUCCESS;
2550 }
2551
2552 /* Fetch the descriptor. */
2553 IEMSELDESC Desc;
2554 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2555 if (rcStrict != VINF_SUCCESS)
2556 return rcStrict;
2557
2558 /* Check GPs first. */
2559 if (!Desc.Legacy.Gen.u1DescType)
2560 {
2561 Log(("load sreg %d - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2562 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2563 }
2564 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2565 {
2566 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2567 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2568 {
2569 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2570 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2571 }
2572 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2573 {
2574 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2575 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2576 }
2577 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2578 {
2579 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2580 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2581 }
2582 }
2583 else
2584 {
2585 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2586 {
2587 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2588 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2589 }
2590 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2591 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2592 {
2593#if 0 /* this is what intel says. */
2594 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2595 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2596 {
2597 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2598 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2599 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2600 }
2601#else /* this is what makes more sense. */
2602 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2603 {
2604 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2605 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2606 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2607 }
2608 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2609 {
2610 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2611 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2612 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2613 }
2614#endif
2615 }
2616 }
2617
2618 /* Is it there? */
2619 if (!Desc.Legacy.Gen.u1Present)
2620 {
2621 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2622 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2623 }
2624
2625 /* The base and limit. */
2626 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2627 uint64_t u64Base;
2628 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2629 && iSegReg < X86_SREG_FS)
2630 u64Base = 0;
2631 else
2632 u64Base = X86DESC_BASE(&Desc.Legacy);
2633
2634 /*
2635 * Ok, everything checked out fine. Now set the accessed bit before
2636 * committing the result into the registers.
2637 */
2638 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2639 {
2640 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2641 if (rcStrict != VINF_SUCCESS)
2642 return rcStrict;
2643 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2644 }
2645
2646 /* commit */
2647 *pSel = uSel;
2648 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2649 pHid->u32Limit = cbLimit;
2650 pHid->u64Base = u64Base;
2651 pHid->ValidSel = uSel;
2652 pHid->fFlags = CPUMSELREG_FLAGS_VALID;
2653
2654 /** @todo check if the hidden bits are loaded correctly for 64-bit
2655 * mode. */
2656 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pHid));
2657
2658 CPUMSetChangedFlags(IEMCPU_TO_VMCPU(pIemCpu), CPUM_CHANGED_HIDDEN_SEL_REGS);
2659 iemRegAddToRip(pIemCpu, cbInstr);
2660 return VINF_SUCCESS;
2661}
2662
2663
2664/**
2665 * Implements 'mov SReg, r/m'.
2666 *
2667 * @param iSegReg The segment register number (valid).
2668 * @param uSel The new selector value.
2669 */
2670IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2671{
2672 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2673 if (rcStrict == VINF_SUCCESS)
2674 {
2675 if (iSegReg == X86_SREG_SS)
2676 {
2677 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2678 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2679 }
2680 }
2681 return rcStrict;
2682}
2683
2684
2685/**
2686 * Implements 'pop SReg'.
2687 *
2688 * @param iSegReg The segment register number (valid).
2689 * @param enmEffOpSize The effective operand size (valid).
2690 */
2691IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2692{
2693 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2694 VBOXSTRICTRC rcStrict;
2695
2696 /*
2697 * Read the selector off the stack and join paths with mov ss, reg.
2698 */
2699 RTUINT64U TmpRsp;
2700 TmpRsp.u = pCtx->rsp;
2701 switch (enmEffOpSize)
2702 {
2703 case IEMMODE_16BIT:
2704 {
2705 uint16_t uSel;
2706 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2707 if (rcStrict == VINF_SUCCESS)
2708 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2709 break;
2710 }
2711
2712 case IEMMODE_32BIT:
2713 {
2714 uint32_t u32Value;
2715 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2716 if (rcStrict == VINF_SUCCESS)
2717 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2718 break;
2719 }
2720
2721 case IEMMODE_64BIT:
2722 {
2723 uint64_t u64Value;
2724 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2725 if (rcStrict == VINF_SUCCESS)
2726 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2727 break;
2728 }
2729 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2730 }
2731
2732 /*
2733 * Commit the stack on success.
2734 */
2735 if (rcStrict == VINF_SUCCESS)
2736 {
2737 pCtx->rsp = TmpRsp.u;
2738 if (iSegReg == X86_SREG_SS)
2739 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2740 }
2741 return rcStrict;
2742}
2743
2744
2745/**
2746 * Implements lgs, lfs, les, lds & lss.
2747 */
2748IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2749 uint16_t, uSel,
2750 uint64_t, offSeg,
2751 uint8_t, iSegReg,
2752 uint8_t, iGReg,
2753 IEMMODE, enmEffOpSize)
2754{
2755 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2756 VBOXSTRICTRC rcStrict;
2757
2758 /*
2759 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2760 */
2761 /** @todo verify and test that mov, pop and lXs do the segment
2762 * register loading in the exact same way. */
2763 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2764 if (rcStrict == VINF_SUCCESS)
2765 {
2766 switch (enmEffOpSize)
2767 {
2768 case IEMMODE_16BIT:
2769 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2770 break;
2771 case IEMMODE_32BIT:
2772 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2773 break;
2774 case IEMMODE_64BIT:
2775 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2776 break;
2777 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2778 }
2779 }
2780
2781 return rcStrict;
2782}
2783
2784
2785/**
2786 * Implements lgdt.
2787 *
2788 * @param iEffSeg The segment of the new gdtr contents.
2789 * @param GCPtrEffSrc The address of the new gdtr contents.
2790 * @param enmEffOpSize The effective operand size.
2791 */
2792IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2793{
2794 if (pIemCpu->uCpl != 0)
2795 return iemRaiseGeneralProtectionFault0(pIemCpu);
2796 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2797
2798 /*
2799 * Fetch the limit and base address.
2800 */
2801 uint16_t cbLimit;
2802 RTGCPTR GCPtrBase;
2803 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2804 if (rcStrict == VINF_SUCCESS)
2805 {
2806 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2807 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2808 else
2809 {
2810 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2811 pCtx->gdtr.cbGdt = cbLimit;
2812 pCtx->gdtr.pGdt = GCPtrBase;
2813 }
2814 if (rcStrict == VINF_SUCCESS)
2815 iemRegAddToRip(pIemCpu, cbInstr);
2816 }
2817 return rcStrict;
2818}
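

#if 0 /* Illustration only, not part of the build. */
/**
 * The memory operand layout that iemMemFetchDataXdtr reads for LGDT (and LIDT
 * below): a 16-bit limit followed by the base address.  Structure names are
 * invented for the example; with a 16-bit operand size outside long mode only
 * 24 bits of the base are used.
 */
#pragma pack(1)
typedef struct XDTR32SKETCH
{
    uint16_t cbLimit;   /* bytes 0..1 */
    uint32_t uBase;     /* bytes 2..5 */
} XDTR32SKETCH;

typedef struct XDTR64SKETCH
{
    uint16_t cbLimit;   /* bytes 0..1 */
    uint64_t uBase;     /* bytes 2..9, long mode */
} XDTR64SKETCH;
#pragma pack()
#endif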
2819
2820
2821/**
2822 * Implements sgdt.
2823 *
2824 * @param iEffSeg The segment where to store the gdtr content.
2825 * @param GCPtrEffDst The address where to store the gdtr content.
2826 * @param enmEffOpSize The effective operand size.
2827 */
2828IEM_CIMPL_DEF_3(iemCImpl_sgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
2829{
2830 /*
2831 * Join paths with sidt.
2832 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
2833 * you really must know.
2834 */
2835 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2836 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
2837 if (rcStrict == VINF_SUCCESS)
2838 iemRegAddToRip(pIemCpu, cbInstr);
2839 return rcStrict;
2840}
2841
2842
2843/**
2844 * Implements lidt.
2845 *
2846 * @param iEffSeg The segment of the new idtr contents.
2847 * @param GCPtrEffSrc The address of the new idtr contents.
2848 * @param enmEffOpSize The effective operand size.
2849 */
2850IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2851{
2852 if (pIemCpu->uCpl != 0)
2853 return iemRaiseGeneralProtectionFault0(pIemCpu);
2854 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2855
2856 /*
2857 * Fetch the limit and base address.
2858 */
2859 uint16_t cbLimit;
2860 RTGCPTR GCPtrBase;
2861 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2862 if (rcStrict == VINF_SUCCESS)
2863 {
2864 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2865 CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2866 else
2867 {
2868 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2869 pCtx->idtr.cbIdt = cbLimit;
2870 pCtx->idtr.pIdt = GCPtrBase;
2871 }
2872 iemRegAddToRip(pIemCpu, cbInstr);
2873 }
2874 return rcStrict;
2875}
2876
2877
2878/**
2879 * Implements sidt.
2880 *
2881 * @param iEffSeg The segment where to store the idtr content.
2882 * @param GCPtrEffDst The address where to store the idtr content.
2883 * @param enmEffOpSize The effective operand size.
2884 */
2885IEM_CIMPL_DEF_3(iemCImpl_sidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst, IEMMODE, enmEffOpSize)
2886{
2887 /*
2888 * Join paths with sgdt.
2889 * Note! No CPL or V8086 checks here, it's a really sad story, ask Intel if
2890 * you really must know.
2891 */
2892 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2893 VBOXSTRICTRC rcStrict = iemMemStoreDataXdtr(pIemCpu, pCtx->idtr.cbIdt, pCtx->idtr.pIdt, iEffSeg, GCPtrEffDst, enmEffOpSize);
2894 if (rcStrict == VINF_SUCCESS)
2895 iemRegAddToRip(pIemCpu, cbInstr);
2896 return rcStrict;
2897}
2898
2899
2900/**
2901 * Implements lldt.
2902 *
2903 * @param uNewLdt The new LDT selector value.
2904 */
2905IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2906{
2907 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2908
2909 /*
2910 * Check preconditions.
2911 */
2912 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2913 {
2914 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
2915 return iemRaiseUndefinedOpcode(pIemCpu);
2916 }
2917 if (pIemCpu->uCpl != 0)
2918 {
2919 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2920 return iemRaiseGeneralProtectionFault0(pIemCpu);
2921 }
2922 if (uNewLdt & X86_SEL_LDT)
2923 {
2924 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2925 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2926 }
2927
2928 /*
2929 * Now, loading a NULL selector is easy.
2930 */
2931 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
2932 {
2933 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2934 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
2935 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt);
2936 else
2937 pCtx->ldtr.Sel = uNewLdt;
2938 pCtx->ldtr.ValidSel = uNewLdt;
2939 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2940 if (IEM_IS_GUEST_CPU_AMD(pIemCpu) && !IEM_VERIFICATION_ENABLED(pIemCpu))
2941 pCtx->ldtr.Attr.u = 0;
2942 else
2943 {
2944 pCtx->ldtr.u64Base = 0;
2945 pCtx->ldtr.u32Limit = 0;
2946 }
2947
2948 iemRegAddToRip(pIemCpu, cbInstr);
2949 return VINF_SUCCESS;
2950 }
2951
2952 /*
2953 * Read the descriptor.
2954 */
2955 IEMSELDESC Desc;
2956 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2957 if (rcStrict != VINF_SUCCESS)
2958 return rcStrict;
2959
2960 /* Check GPs first. */
2961 if (Desc.Legacy.Gen.u1DescType)
2962 {
2963 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2964 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2965 }
2966 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2967 {
2968 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2969 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2970 }
2971 uint64_t u64Base;
2972 if (!IEM_IS_LONG_MODE(pIemCpu))
2973 u64Base = X86DESC_BASE(&Desc.Legacy);
2974 else
2975 {
2976 if (Desc.Long.Gen.u5Zeros)
2977 {
2978 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2979 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2980 }
2981
2982 u64Base = X86DESC64_BASE(&Desc.Long);
2983 if (!IEM_IS_CANONICAL(u64Base))
2984 {
2985 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2986 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
2987 }
2988 }
2989
2990 /* NP */
2991 if (!Desc.Legacy.Gen.u1Present)
2992 {
2993 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2994 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2995 }
2996
2997 /*
2998 * It checks out alright, update the registers.
2999 */
3000/** @todo check if the actual value is loaded or if the RPL is dropped */
3001 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3002 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK_OFF_RPL);
3003 else
3004 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL;
3005 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL;
3006 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
3007 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3008 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3009 pCtx->ldtr.u64Base = u64Base;
3010
3011 iemRegAddToRip(pIemCpu, cbInstr);
3012 return VINF_SUCCESS;
3013}
3014
3015
3016/**
3017 * Implements ltr.
3018 *
3019 * @param uNewTr The new task register (TR) selector value.
3020 */
3021IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
3022{
3023 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3024
3025 /*
3026 * Check preconditions.
3027 */
3028 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
3029 {
3030 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
3031 return iemRaiseUndefinedOpcode(pIemCpu);
3032 }
3033 if (pIemCpu->uCpl != 0)
3034 {
3035 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
3036 return iemRaiseGeneralProtectionFault0(pIemCpu);
3037 }
3038 if (uNewTr & X86_SEL_LDT)
3039 {
3040 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
3041 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
3042 }
3043 if (!(uNewTr & X86_SEL_MASK_OFF_RPL))
3044 {
3045 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
3046 return iemRaiseGeneralProtectionFault0(pIemCpu);
3047 }
3048
3049 /*
3050 * Read the descriptor.
3051 */
3052 IEMSELDESC Desc;
3053 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
3054 if (rcStrict != VINF_SUCCESS)
3055 return rcStrict;
3056
3057 /* Check GPs first. */
3058 if (Desc.Legacy.Gen.u1DescType)
3059 {
3060 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
3061 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3062 }
3063 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
3064 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
3065 || IEM_IS_LONG_MODE(pIemCpu)) )
3066 {
3067 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
3068 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3069 }
3070 uint64_t u64Base;
3071 if (!IEM_IS_LONG_MODE(pIemCpu))
3072 u64Base = X86DESC_BASE(&Desc.Legacy);
3073 else
3074 {
3075 if (Desc.Long.Gen.u5Zeros)
3076 {
3077 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
3078 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3079 }
3080
3081 u64Base = X86DESC64_BASE(&Desc.Long);
3082 if (!IEM_IS_CANONICAL(u64Base))
3083 {
3084 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
3085 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK_OFF_RPL);
3086 }
3087 }
3088
3089 /* NP */
3090 if (!Desc.Legacy.Gen.u1Present)
3091 {
3092 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
3093 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
3094 }
3095
3096 /*
3097 * Set it busy.
3098 * Note! Intel says this should lock down the whole descriptor, but we'll
3099 * restrict ourselves to 32-bit for now due to lack of inline
3100 * assembly and such.
3101 */
3102 void *pvDesc;
3103 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK_OFF_RPL), IEM_ACCESS_DATA_RW);
3104 if (rcStrict != VINF_SUCCESS)
3105 return rcStrict;
3106 switch ((uintptr_t)pvDesc & 3)
3107 {
3108 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
3109 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
3110 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
3111 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
3112 }
3113 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
3114 if (rcStrict != VINF_SUCCESS)
3115 return rcStrict;
3116 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3117
3118 /*
3119 * It checks out alright, update the registers.
3120 */
3121/** @todo check if the actual value is loaded or if the RPL is dropped */
3122 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3123 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK_OFF_RPL);
3124 else
3125 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL;
3126 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL;
3127 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
3128 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3129 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
3130 pCtx->tr.u64Base = u64Base;
3131
3132 iemRegAddToRip(pIemCpu, cbInstr);
3133 return VINF_SUCCESS;
3134}
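

#if 0 /* Illustration only, not part of the build. */
/**
 * Where the TSS busy bit lives: the type field occupies descriptor bits
 * 40..43 and "busy" is bit 1 of that field, hence the
 * ASMAtomicBitSet(pvDesc, 40 + 1) above (adjusted for mapping alignment).
 * A non-atomic sketch with an invented name, operating on a raw descriptor.
 */
static void tssSetBusySketch(uint8_t *pabDesc /* 8-byte descriptor */)
{
    pabDesc[5] |= RT_BIT_32(1);     /* byte 5, bit 1 == descriptor bit 41 */
}
#endif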
3135
3136
3137/**
3138 * Implements mov GReg,CRx.
3139 *
3140 * @param iGReg The general register to store the CRx value in.
3141 * @param iCrReg The CRx register to read (valid).
3142 */
3143IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
3144{
3145 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3146 if (pIemCpu->uCpl != 0)
3147 return iemRaiseGeneralProtectionFault0(pIemCpu);
3148 Assert(!pCtx->eflags.Bits.u1VM);
3149
3150 /* read it */
3151 uint64_t crX;
3152 switch (iCrReg)
3153 {
3154 case 0: crX = pCtx->cr0; break;
3155 case 2: crX = pCtx->cr2; break;
3156 case 3: crX = pCtx->cr3; break;
3157 case 4: crX = pCtx->cr4; break;
3158 case 8:
3159 {
3160 uint8_t uTpr;
3161 int rc = PDMApicGetTPR(IEMCPU_TO_VMCPU(pIemCpu), &uTpr, NULL, NULL);
3162 if (RT_SUCCESS(rc))
3163 crX = uTpr >> 4;
3164 else
3165 crX = 0;
3166 break;
3167 }
3168 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3169 }
3170
3171 /* store it */
3172 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3173 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
3174 else
3175 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
3176
3177 iemRegAddToRip(pIemCpu, cbInstr);
3178 return VINF_SUCCESS;
3179}
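

#if 0 /* Illustration only, not part of the build. */
/**
 * CR8 and the APIC TPR express the same priority, but CR8 keeps it in bits
 * 0..3 while the TPR register keeps it in bits 4..7 - hence the '>> 4' above
 * and the '<< 4' in the CR8 write path of iemCImpl_load_CrX below.  Helper
 * names are invented for the example.
 */
static uint8_t cr8FromTprSketch(uint8_t uTpr) { return uTpr >> 4; }
static uint8_t tprFromCr8Sketch(uint8_t uCr8) { return (uint8_t)(uCr8 << 4); }
#endif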
3180
3181
3182/**
3183 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
3184 *
3185 * @param iCrReg The CRx register to write (valid).
3186 * @param uNewCrX The new value.
3187 */
3188IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
3189{
3190 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3191 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3192 VBOXSTRICTRC rcStrict;
3193 int rc;
3194
3195 /*
3196 * Try store it.
3197 * Unfortunately, CPUM only does a tiny bit of the work.
3198 */
3199 switch (iCrReg)
3200 {
3201 case 0:
3202 {
3203 /*
3204 * Perform checks.
3205 */
3206 uint64_t const uOldCrX = pCtx->cr0;
3207 uNewCrX |= X86_CR0_ET; /* hardcoded */
3208
3209 /* Check for reserved bits. */
3210 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
3211 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
3212 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
3213 if (uNewCrX & ~(uint64_t)fValid)
3214 {
3215 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3216 return iemRaiseGeneralProtectionFault0(pIemCpu);
3217 }
3218
3219 /* Check for invalid combinations. */
3220 if ( (uNewCrX & X86_CR0_PG)
3221 && !(uNewCrX & X86_CR0_PE) )
3222 {
3223 Log(("Trying to set CR0.PG without CR0.PE\n"));
3224 return iemRaiseGeneralProtectionFault0(pIemCpu);
3225 }
3226
3227 if ( !(uNewCrX & X86_CR0_CD)
3228 && (uNewCrX & X86_CR0_NW) )
3229 {
3230 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
3231 return iemRaiseGeneralProtectionFault0(pIemCpu);
3232 }
3233
3234 /* Long mode consistency checks. */
3235 if ( (uNewCrX & X86_CR0_PG)
3236 && !(uOldCrX & X86_CR0_PG)
3237 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3238 {
3239 if (!(pCtx->cr4 & X86_CR4_PAE))
3240 {
3241 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
3242 return iemRaiseGeneralProtectionFault0(pIemCpu);
3243 }
3244 if (pCtx->cs.Attr.n.u1Long)
3245 {
3246 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
3247 return iemRaiseGeneralProtectionFault0(pIemCpu);
3248 }
3249 }
3250
3251 /** @todo check reserved PDPTR bits as AMD states. */
3252
3253 /*
3254 * Change CR0.
3255 */
3256 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3257 CPUMSetGuestCR0(pVCpu, uNewCrX);
3258 else
3259 pCtx->cr0 = uNewCrX;
3260 Assert(pCtx->cr0 == uNewCrX);
3261
3262 /*
3263 * Change EFER.LMA if entering or leaving long mode.
3264 */
3265 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
3266 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
3267 {
3268 uint64_t NewEFER = pCtx->msrEFER;
3269 if (uNewCrX & X86_CR0_PG)
3270 NewEFER |= MSR_K6_EFER_LMA;
3271 else
3272 NewEFER &= ~MSR_K6_EFER_LMA;
3273
3274 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3275 CPUMSetGuestEFER(pVCpu, NewEFER);
3276 else
3277 pCtx->msrEFER = NewEFER;
3278 Assert(pCtx->msrEFER == NewEFER);
3279 }
3280
3281 /*
3282 * Inform PGM.
3283 */
3284 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3285 {
3286 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3287 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3288 {
3289 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3290 AssertRCReturn(rc, rc);
3291 /* ignore informational status codes */
3292 }
3293 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3294 }
3295 else
3296 rcStrict = VINF_SUCCESS;
3297
3298#ifdef IN_RC
3299 /* Return to ring-3 for rescheduling if WP or AM changes. */
3300 if ( rcStrict == VINF_SUCCESS
3301 && ( (uNewCrX & (X86_CR0_WP | X86_CR0_AM))
3302 != (uOldCrX & (X86_CR0_WP | X86_CR0_AM))) )
3303 rcStrict = VINF_EM_RESCHEDULE;
3304#endif
3305 break;
3306 }
3307
3308 /*
3309 * CR2 can be changed without any restrictions.
3310 */
3311 case 2:
3312 pCtx->cr2 = uNewCrX;
3313 rcStrict = VINF_SUCCESS;
3314 break;
3315
3316 /*
3317 * CR3 is relatively simple, although AMD and Intel have different
3318 * accounts of how setting reserved bits is handled. We take Intel's
3319 * word for the lower bits and AMD's for the high bits (63:52).
3320 */
3321 /** @todo Testcase: Setting reserved bits in CR3, especially before
3322 * enabling paging. */
3323 case 3:
3324 {
3325 /* check / mask the value. */
3326 if (uNewCrX & UINT64_C(0xfff0000000000000))
3327 {
3328 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3329 return iemRaiseGeneralProtectionFault0(pIemCpu);
3330 }
3331
3332 uint64_t fValid;
3333 if ( (pCtx->cr4 & X86_CR4_PAE)
3334 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3335 fValid = UINT64_C(0x000ffffffffff014);
3336 else if (pCtx->cr4 & X86_CR4_PAE)
3337 fValid = UINT64_C(0xfffffff4);
3338 else
3339 fValid = UINT64_C(0xfffff014);
3340 if (uNewCrX & ~fValid)
3341 {
3342 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3343 uNewCrX, uNewCrX & ~fValid));
3344 uNewCrX &= fValid;
3345 }
3346
3347 /** @todo If we're in PAE mode we should check the PDPTRs for
3348 * invalid bits. */
3349
3350 /* Make the change. */
3351 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3352 {
3353 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3354 AssertRCSuccessReturn(rc, rc);
3355 }
3356 else
3357 pCtx->cr3 = uNewCrX;
3358
3359 /* Inform PGM. */
3360 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3361 {
3362 if (pCtx->cr0 & X86_CR0_PG)
3363 {
3364 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3365 AssertRCReturn(rc, rc);
3366 /* ignore informational status codes */
3367 }
3368 }
3369 rcStrict = VINF_SUCCESS;
3370 break;
3371 }
3372
3373 /*
3374 * CR4 is a bit more tedious as there are bits which cannot be cleared
3375 * under some circumstances and such.
3376 */
3377 case 4:
3378 {
3379 uint64_t const uOldCrX = pCtx->cr4;
3380
3381 /* reserved bits */
3382 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3383 | X86_CR4_TSD | X86_CR4_DE
3384 | X86_CR4_PSE | X86_CR4_PAE
3385 | X86_CR4_MCE | X86_CR4_PGE
3386 | X86_CR4_PCE | X86_CR4_OSFSXR
3387 | X86_CR4_OSXMMEEXCPT;
3388 //if (xxx)
3389 // fValid |= X86_CR4_VMXE;
3390 //if (xxx)
3391 // fValid |= X86_CR4_OSXSAVE;
3392 if (uNewCrX & ~(uint64_t)fValid)
3393 {
3394 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3395 return iemRaiseGeneralProtectionFault0(pIemCpu);
3396 }
3397
3398 /* long mode checks. */
3399 if ( (uOldCrX & X86_CR4_PAE)
3400 && !(uNewCrX & X86_CR4_PAE)
3401 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3402 {
3403 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
3404 return iemRaiseGeneralProtectionFault0(pIemCpu);
3405 }
3406
3407
3408 /*
3409 * Change it.
3410 */
3411 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3412 {
3413 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3414 AssertRCSuccessReturn(rc, rc);
3415 }
3416 else
3417 pCtx->cr4 = uNewCrX;
3418 Assert(pCtx->cr4 == uNewCrX);
3419
3420 /*
3421 * Notify SELM and PGM.
3422 */
3423 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3424 {
3425 /* SELM - VME may change things wrt the TSS shadowing. */
3426 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3427 {
3428 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n",
3429 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) ));
3430#ifdef VBOX_WITH_RAW_MODE
3431 if (!HMIsEnabled(IEMCPU_TO_VM(pIemCpu)))
3432 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3433#endif
3434 }
3435
3436 /* PGM - flushing and mode. */
3437 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE))
3438 {
3439 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3440 AssertRCReturn(rc, rc);
3441 /* ignore informational status codes */
3442 }
3443 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3444 }
3445 else
3446 rcStrict = VINF_SUCCESS;
3447 break;
3448 }
3449
3450 /*
3451 * CR8 maps to the APIC TPR.
3452 */
3453 case 8:
3454 if (uNewCrX & ~(uint64_t)0xf)
3455 {
3456 Log(("Trying to set reserved CR8 bits (%#RX64)\n", uNewCrX));
3457 return iemRaiseGeneralProtectionFault0(pIemCpu);
3458 }
3459
3460 if (!IEM_FULL_VERIFICATION_ENABLED(pIemCpu))
3461 PDMApicSetTPR(IEMCPU_TO_VMCPU(pIemCpu), (uint8_t)uNewCrX << 4);
3462 rcStrict = VINF_SUCCESS;
3463 break;
3464
3465 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3466 }
3467
3468 /*
3469 * Advance the RIP on success.
3470 */
3471 if (RT_SUCCESS(rcStrict))
3472 {
3473 if (rcStrict != VINF_SUCCESS)
3474 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3475 iemRegAddToRip(pIemCpu, cbInstr);
3476 }
3477
3478 return rcStrict;
3479}
3480
3481
3482/**
3483 * Implements mov CRx,GReg.
3484 *
3485 * @param iCrReg The CRx register to write (valid).
3486 * @param iGReg The general register to load the DRx value from.
3487 */
3488IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3489{
3490 if (pIemCpu->uCpl != 0)
3491 return iemRaiseGeneralProtectionFault0(pIemCpu);
3492 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3493
3494 /*
3495 * Read the new value from the source register and call common worker.
3496 */
3497 uint64_t uNewCrX;
3498 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3499 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3500 else
3501 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3502 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3503}
3504
3505
3506/**
3507 * Implements 'LMSW r/m16'
3508 *
3509 * @param u16NewMsw The new value.
3510 */
3511IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3512{
3513 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3514
3515 if (pIemCpu->uCpl != 0)
3516 return iemRaiseGeneralProtectionFault0(pIemCpu);
3517 Assert(!pCtx->eflags.Bits.u1VM);
3518
3519 /*
3520 * Compose the new CR0 value and call common worker.
3521 */
3522 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3523 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3524 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3525}
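

#if 0 /* Illustration only, not part of the build. */
/**
 * The LMSW masking done above, isolated: only the low four CR0 bits can be
 * modified, and because PE is never cleared from the old value it can be set
 * but not cleared by LMSW.  The helper name is invented for the example.
 */
static uint64_t lmswNewCr0Sketch(uint64_t uOldCr0, uint16_t u16NewMsw)
{
    uint64_t uNewCr0 = uOldCr0 & ~(uint64_t)(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
    return uNewCr0;
}
#endif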
3526
3527
3528/**
3529 * Implements 'CLTS'.
3530 */
3531IEM_CIMPL_DEF_0(iemCImpl_clts)
3532{
3533 if (pIemCpu->uCpl != 0)
3534 return iemRaiseGeneralProtectionFault0(pIemCpu);
3535
3536 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3537 uint64_t uNewCr0 = pCtx->cr0;
3538 uNewCr0 &= ~X86_CR0_TS;
3539 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3540}
3541
3542
3543/**
3544 * Implements mov GReg,DRx.
3545 *
3546 * @param iGReg The general register to store the DRx value in.
3547 * @param iDrReg The DRx register to read (0-7).
3548 */
3549IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3550{
3551 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3552
3553 /*
3554 * Check preconditions.
3555 */
3556
3557 /* Raise GPs. */
3558 if (pIemCpu->uCpl != 0)
3559 return iemRaiseGeneralProtectionFault0(pIemCpu);
3560 Assert(!pCtx->eflags.Bits.u1VM);
3561
3562 if ( (iDrReg == 4 || iDrReg == 5)
3563 && (pCtx->cr4 & X86_CR4_DE) )
3564 {
3565 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3566 return iemRaiseGeneralProtectionFault0(pIemCpu);
3567 }
3568
3569 /* Raise #DB if general access detect is enabled. */
3570 if (pCtx->dr[7] & X86_DR7_GD)
3571 {
3572 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3573 return iemRaiseDebugException(pIemCpu);
3574 }
3575
3576 /*
3577 * Read the debug register and store it in the specified general register.
3578 */
3579 uint64_t drX;
3580 switch (iDrReg)
3581 {
3582 case 0: drX = pCtx->dr[0]; break;
3583 case 1: drX = pCtx->dr[1]; break;
3584 case 2: drX = pCtx->dr[2]; break;
3585 case 3: drX = pCtx->dr[3]; break;
3586 case 6:
3587 case 4:
3588 drX = pCtx->dr[6];
3589 drX &= ~RT_BIT_32(12);
3590 drX |= UINT32_C(0xffff0ff0);
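 /* DR6 reserved bits read back as fixed values: bits 31:16 and 11:4 are
  * ones and bit 12 is zero, so a DR6 with no status bits set reads as
  * 0xffff0ff0. */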
3591 break;
3592 case 7:
3593 case 5:
3594 drX = pCtx->dr[7];
3595 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3596 drX |= RT_BIT_32(10);
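 /* DR7 bit 10 always reads as one, while bits 11, 12, 14 and 15 read as
  * zero; everything else is returned as last written. */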
3597 break;
3598 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3599 }
3600
3601 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3602 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3603 else
3604 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3605
3606 iemRegAddToRip(pIemCpu, cbInstr);
3607 return VINF_SUCCESS;
3608}
3609
3610
3611/**
3612 * Implements mov DRx,GReg.
3613 *
3614 * @param iDrReg The DRx register to write (valid).
3615 * @param iGReg The general register to load the DRx value from.
3616 */
3617IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3618{
3619 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3620
3621 /*
3622 * Check preconditions.
3623 */
3624 if (pIemCpu->uCpl != 0)
3625 return iemRaiseGeneralProtectionFault0(pIemCpu);
3626 Assert(!pCtx->eflags.Bits.u1VM);
3627
3628 if ( (iDrReg == 4 || iDrReg == 5)
3629 && (pCtx->cr4 & X86_CR4_DE) )
3630 {
3631 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3632 return iemRaiseGeneralProtectionFault0(pIemCpu);
3633 }
3634
3635 /* Raise #DB if general access detect is enabled. */
3636 /** @todo Is the \#DB for DR7.GD raised before the \#GP for setting reserved
3637 * high bits in DR7/DR6? */
3638 if (pCtx->dr[7] & X86_DR7_GD)
3639 {
3640 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3641 return iemRaiseDebugException(pIemCpu);
3642 }
3643
3644 /*
3645 * Read the new value from the source register.
3646 */
3647 uint64_t uNewDrX;
3648 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3649 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3650 else
3651 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3652
3653 /*
3654 * Adjust it.
3655 */
3656 switch (iDrReg)
3657 {
3658 case 0:
3659 case 1:
3660 case 2:
3661 case 3:
3662 /* nothing to adjust */
3663 break;
3664
3665 case 6:
3666 case 4:
3667 if (uNewDrX & UINT64_C(0xffffffff00000000))
3668 {
3669 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3670 return iemRaiseGeneralProtectionFault0(pIemCpu);
3671 }
3672 uNewDrX &= ~RT_BIT_32(12);
3673 uNewDrX |= UINT32_C(0xffff0ff0);
3674 break;
3675
3676 case 7:
3677 case 5:
3678 if (uNewDrX & UINT64_C(0xffffffff00000000))
3679 {
3680 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3681 return iemRaiseGeneralProtectionFault0(pIemCpu);
3682 }
3683 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3684 uNewDrX |= RT_BIT_32(10);
3685 break;
3686
3687 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3688 }
3689
3690 /*
3691 * Do the actual setting.
3692 */
3693 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3694 {
3695 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3696 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3697 }
3698 else
3699 pCtx->dr[iDrReg] = uNewDrX;
3700
3701 iemRegAddToRip(pIemCpu, cbInstr);
3702 return VINF_SUCCESS;
3703}
3704
3705
3706/**
3707 * Implements 'INVLPG m'.
3708 *
3709 * @param GCPtrPage The effective address of the page to invalidate.
3710 * @remarks Updates the RIP.
3711 */
3712IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3713{
3714 /* ring-0 only. */
3715 if (pIemCpu->uCpl != 0)
3716 return iemRaiseGeneralProtectionFault0(pIemCpu);
3717 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3718
3719 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3720 iemRegAddToRip(pIemCpu, cbInstr);
3721
3722 if (rc == VINF_SUCCESS)
3723 return VINF_SUCCESS;
3724 if (rc == VINF_PGM_SYNC_CR3)
3725 return iemSetPassUpStatus(pIemCpu, rc);
3726
3727 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
3728 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", GCPtrPage, rc));
3729 return rc;
3730}
3731
3732
3733/**
3734 * Implements RDTSC.
3735 */
3736IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3737{
3738 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3739
3740 /*
3741 * Check preconditions.
3742 */
3743 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3744 return iemRaiseUndefinedOpcode(pIemCpu);
3745
3746 if ( (pCtx->cr4 & X86_CR4_TSD)
3747 && pIemCpu->uCpl != 0)
3748 {
3749 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3750 return iemRaiseGeneralProtectionFault0(pIemCpu);
3751 }
3752
3753 /*
3754 * Do the job.
3755 */
3756 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
3757 pCtx->rax = (uint32_t)uTicks;
3758 pCtx->rdx = uTicks >> 32;
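 /* The 64-bit TSC value is returned split across EDX:EAX; e.g. a tick count
  * of 0x0000000123456789 yields EAX=0x23456789 and EDX=0x00000001. */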
3759#ifdef IEM_VERIFICATION_MODE_FULL
3760 pIemCpu->fIgnoreRaxRdx = true;
3761#endif
3762
3763 iemRegAddToRip(pIemCpu, cbInstr);
3764 return VINF_SUCCESS;
3765}
3766
3767
3768/**
3769 * Implements RDMSR.
3770 */
3771IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3772{
3773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3774
3775 /*
3776 * Check preconditions.
3777 */
3778 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3779 return iemRaiseUndefinedOpcode(pIemCpu);
3780 if (pIemCpu->uCpl != 0)
3781 return iemRaiseGeneralProtectionFault0(pIemCpu);
3782
3783 /*
3784 * Do the job.
3785 */
3786 RTUINT64U uValue;
3787 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3788 if (rc != VINF_SUCCESS)
3789 {
3790 Log(("IEM: rdmsr(%#x) -> GP(0)\n", pCtx->ecx));
3791 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3792 return iemRaiseGeneralProtectionFault0(pIemCpu);
3793 }
3794
3795 pCtx->rax = uValue.s.Lo;
3796 pCtx->rdx = uValue.s.Hi;
3797
3798 iemRegAddToRip(pIemCpu, cbInstr);
3799 return VINF_SUCCESS;
3800}
3801
3802
3803/**
3804 * Implements WRMSR.
3805 */
3806IEM_CIMPL_DEF_0(iemCImpl_wrmsr)
3807{
3808 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3809
3810 /*
3811 * Check preconditions.
3812 */
3813 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3814 return iemRaiseUndefinedOpcode(pIemCpu);
3815 if (pIemCpu->uCpl != 0)
3816 return iemRaiseGeneralProtectionFault0(pIemCpu);
3817
3818 /*
3819 * Do the job.
3820 */
3821 RTUINT64U uValue;
3822 uValue.s.Lo = pCtx->eax;
3823 uValue.s.Hi = pCtx->edx;
3824
3825 int rc;
3826 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3827 rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
3828 else
3829 {
3830 CPUMCTX CtxTmp = *pCtx;
3831 rc = CPUMSetGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, uValue.u);
3832 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(IEMCPU_TO_VMCPU(pIemCpu));
3833 *pCtx = *pCtx2;
3834 *pCtx2 = CtxTmp;
3835 }
3836 if (rc != VINF_SUCCESS)
3837 {
3838 Log(("IEM: wrmsr(%#x,%#x`%08x) -> GP(0)\n", pCtx->ecx, uValue.s.Hi, uValue.s.Lo));
3839 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3840 return iemRaiseGeneralProtectionFault0(pIemCpu);
3841 }
3842
3843 iemRegAddToRip(pIemCpu, cbInstr);
3844 return VINF_SUCCESS;
3845}
3846
3847
3848/**
3849 * Implements 'IN eAX, port'.
3850 *
3851 * @param u16Port The source port.
3852 * @param cbReg The register size.
3853 */
3854IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3855{
3856 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3857
3858 /*
3859 * CPL check
3860 */
3861 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3862 if (rcStrict != VINF_SUCCESS)
3863 return rcStrict;
3864
3865 /*
3866 * Perform the I/O.
3867 */
3868 uint32_t u32Value;
3869 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3870 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, &u32Value, cbReg);
3871 else
3872 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3873 if (IOM_SUCCESS(rcStrict))
3874 {
3875 switch (cbReg)
3876 {
3877 case 1: pCtx->al = (uint8_t)u32Value; break;
3878 case 2: pCtx->ax = (uint16_t)u32Value; break;
3879 case 4: pCtx->rax = u32Value; break;
3880 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3881 }
3882 iemRegAddToRip(pIemCpu, cbInstr);
3883 pIemCpu->cPotentialExits++;
3884 if (rcStrict != VINF_SUCCESS)
3885 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3886 }
3887
3888 return rcStrict;
3889}
3890
3891
3892/**
3893 * Implements 'IN eAX, DX'.
3894 *
3895 * @param cbReg The register size.
3896 */
3897IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3898{
3899 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3900}
3901
3902
3903/**
3904 * Implements 'OUT port, eAX'.
3905 *
3906 * @param u16Port The destination port.
3907 * @param cbReg The register size.
3908 */
3909IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3910{
3911 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3912
3913 /*
3914 * CPL check
3915 */
3916 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3917 if (rcStrict != VINF_SUCCESS)
3918 return rcStrict;
3919
3920 /*
3921 * Perform the I/O.
3922 */
3923 uint32_t u32Value;
3924 switch (cbReg)
3925 {
3926 case 1: u32Value = pCtx->al; break;
3927 case 2: u32Value = pCtx->ax; break;
3928 case 4: u32Value = pCtx->eax; break;
3929 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3930 }
3931 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3932 rcStrict = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), IEMCPU_TO_VMCPU(pIemCpu), u16Port, u32Value, cbReg);
3933 else
3934 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3935 if (IOM_SUCCESS(rcStrict))
3936 {
3937 iemRegAddToRip(pIemCpu, cbInstr);
3938 pIemCpu->cPotentialExits++;
3939 if (rcStrict != VINF_SUCCESS)
3940 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
3941 }
3942 return rcStrict;
3943}
3944
3945
3946/**
3947 * Implements 'OUT DX, eAX'.
3948 *
3949 * @param cbReg The register size.
3950 */
3951IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3952{
3953 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3954}
3955
3956
3957/**
3958 * Implements 'CLI'.
3959 */
3960IEM_CIMPL_DEF_0(iemCImpl_cli)
3961{
3962 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3963 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
3964 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
3965 uint32_t const fEflOld = fEfl;
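 /* Protected mode: CPL <= IOPL clears IF directly; a CPL 3 task with CR4.PVI
  * set merely clears VIF (IF is left alone); anything else is a #GP. The
  * V8086 branch applies the same scheme using IOPL and CR4.VME. */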
3966 if (pCtx->cr0 & X86_CR0_PE)
3967 {
3968 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
3969 if (!(fEfl & X86_EFL_VM))
3970 {
3971 if (pIemCpu->uCpl <= uIopl)
3972 fEfl &= ~X86_EFL_IF;
3973 else if ( pIemCpu->uCpl == 3
3974 && (pCtx->cr4 & X86_CR4_PVI) )
3975 fEfl &= ~X86_EFL_VIF;
3976 else
3977 return iemRaiseGeneralProtectionFault0(pIemCpu);
3978 }
3979 /* V8086 */
3980 else if (uIopl == 3)
3981 fEfl &= ~X86_EFL_IF;
3982 else if ( uIopl < 3
3983 && (pCtx->cr4 & X86_CR4_VME) )
3984 fEfl &= ~X86_EFL_VIF;
3985 else
3986 return iemRaiseGeneralProtectionFault0(pIemCpu);
3987 }
3988 /* real mode */
3989 else
3990 fEfl &= ~X86_EFL_IF;
3991
3992 /* Commit. */
3993 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
3994 iemRegAddToRip(pIemCpu, cbInstr);
3995 Log2(("CLI: %#x -> %#x\n", fEflOld, fEfl)); NOREF(fEflOld);
3996 return VINF_SUCCESS;
3997}
3998
3999
4000/**
4001 * Implements 'STI'.
4002 */
4003IEM_CIMPL_DEF_0(iemCImpl_sti)
4004{
4005 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4006 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
4007 uint32_t fEfl = IEMMISC_GET_EFL(pIemCpu, pCtx);
4008 uint32_t const fEflOld = fEfl;
4009
4010 if (pCtx->cr0 & X86_CR0_PE)
4011 {
4012 uint8_t const uIopl = X86_EFL_GET_IOPL(fEfl);
4013 if (!(fEfl & X86_EFL_VM))
4014 {
4015 if (pIemCpu->uCpl <= uIopl)
4016 fEfl |= X86_EFL_IF;
4017 else if ( pIemCpu->uCpl == 3
4018 && (pCtx->cr4 & X86_CR4_PVI)
4019 && !(fEfl & X86_EFL_VIP) )
4020 fEfl |= X86_EFL_VIF;
4021 else
4022 return iemRaiseGeneralProtectionFault0(pIemCpu);
4023 }
4024 /* V8086 */
4025 else if (uIopl == 3)
4026 fEfl |= X86_EFL_IF;
4027 else if ( uIopl < 3
4028 && (pCtx->cr4 & X86_CR4_VME)
4029 && !(fEfl & X86_EFL_VIP) )
4030 fEfl |= X86_EFL_VIF;
4031 else
4032 return iemRaiseGeneralProtectionFault0(pIemCpu);
4033 }
4034 /* real mode */
4035 else
4036 fEfl |= X86_EFL_IF;
4037
4038 /* Commit. */
4039 IEMMISC_SET_EFL(pIemCpu, pCtx, fEfl);
4040 iemRegAddToRip(pIemCpu, cbInstr);
4041 if ((!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_VERIFICATION_ENABLED(pIemCpu))
4042 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
4043 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl));
4044 return VINF_SUCCESS;
4045}
4046
4047
4048/**
4049 * Implements 'HLT'.
4050 */
4051IEM_CIMPL_DEF_0(iemCImpl_hlt)
4052{
4053 if (pIemCpu->uCpl != 0)
4054 return iemRaiseGeneralProtectionFault0(pIemCpu);
4055 iemRegAddToRip(pIemCpu, cbInstr);
4056 return VINF_EM_HALT;
4057}
4058
4059
4060/**
4061 * Implements 'CPUID'.
4062 */
4063IEM_CIMPL_DEF_0(iemCImpl_cpuid)
4064{
4065 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4066
4067 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
4068 pCtx->rax &= UINT32_C(0xffffffff);
4069 pCtx->rbx &= UINT32_C(0xffffffff);
4070 pCtx->rcx &= UINT32_C(0xffffffff);
4071 pCtx->rdx &= UINT32_C(0xffffffff);
4072
4073 iemRegAddToRip(pIemCpu, cbInstr);
4074 return VINF_SUCCESS;
4075}
4076
4077
4078/**
4079 * Implements 'AAD'.
4080 *
4081 * @param bImm The immediate operand (the base; 0x0A for the classic AAD encoding).
4082 */
4083IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
4084{
4085 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4086
4087 uint16_t const ax = pCtx->ax;
4088 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
4089 pCtx->ax = al;
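 /* Example: with the classic base 10 encoding (D5 0A) and AX=0x0307, AL
  * becomes 3*10 + 7 = 37 (0x25) and AH is cleared, i.e. AX=0x0025. */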
4090 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
4091 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
4092 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
4093
4094 iemRegAddToRip(pIemCpu, cbInstr);
4095 return VINF_SUCCESS;
4096}
4097
4098
4099/**
4100 * Implements 'AAM'.
4101 *
4102 * @param bImm The immediate operand. Cannot be 0.
4103 */
4104IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
4105{
4106 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4107 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
4108
4109 uint16_t const ax = pCtx->ax;
4110 uint8_t const al = (uint8_t)ax % bImm;
4111 uint8_t const ah = (uint8_t)ax / bImm;
4112 pCtx->ax = (ah << 8) + al;
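 /* Example: with the classic base 10 encoding (D4 0A) and AL=0x25 (37), AH
  * becomes 3 and AL becomes 7, i.e. AX=0x0307 - the inverse of the AAD
  * example above. */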
4113 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
4114 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
4115 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
4116
4117 iemRegAddToRip(pIemCpu, cbInstr);
4118 return VINF_SUCCESS;
4119}
4120
4121
4122
4123
4124/*
4125 * Instantiate the various string operation combinations.
4126 */
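/*
 * Each OP_SIZE/ADDR_SIZE pair below expands the included template into the
 * string instruction workers (INS/OUTS/MOVS/STOS/LODS/SCAS/CMPS and their
 * REP forms) for that operand/address size combination.  There is no
 * 64-bit operand / 16-bit address instantiation because 16-bit addressing
 * cannot be encoded in 64-bit mode.
 */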
4127#define OP_SIZE 8
4128#define ADDR_SIZE 16
4129#include "IEMAllCImplStrInstr.cpp.h"
4130#define OP_SIZE 8
4131#define ADDR_SIZE 32
4132#include "IEMAllCImplStrInstr.cpp.h"
4133#define OP_SIZE 8
4134#define ADDR_SIZE 64
4135#include "IEMAllCImplStrInstr.cpp.h"
4136
4137#define OP_SIZE 16
4138#define ADDR_SIZE 16
4139#include "IEMAllCImplStrInstr.cpp.h"
4140#define OP_SIZE 16
4141#define ADDR_SIZE 32
4142#include "IEMAllCImplStrInstr.cpp.h"
4143#define OP_SIZE 16
4144#define ADDR_SIZE 64
4145#include "IEMAllCImplStrInstr.cpp.h"
4146
4147#define OP_SIZE 32
4148#define ADDR_SIZE 16
4149#include "IEMAllCImplStrInstr.cpp.h"
4150#define OP_SIZE 32
4151#define ADDR_SIZE 32
4152#include "IEMAllCImplStrInstr.cpp.h"
4153#define OP_SIZE 32
4154#define ADDR_SIZE 64
4155#include "IEMAllCImplStrInstr.cpp.h"
4156
4157#define OP_SIZE 64
4158#define ADDR_SIZE 32
4159#include "IEMAllCImplStrInstr.cpp.h"
4160#define OP_SIZE 64
4161#define ADDR_SIZE 64
4162#include "IEMAllCImplStrInstr.cpp.h"
4163
4164
4165/**
4166 * Implements 'FINIT' and 'FNINIT'.
4167 *
4168 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
4169 * not.
4170 */
4171IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
4172{
4173 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4174
4175 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4176 return iemRaiseDeviceNotAvailable(pIemCpu);
4177
4178 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
4179 if (fCheckXcpts && TODO )
4180 return iemRaiseMathFault(pIemCpu);
4181 */
4182
4183 if (iemFRegIsFxSaveFormat(pIemCpu))
4184 {
4185 pCtx->fpu.FCW = 0x37f;
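 /* 0x37f is the architectural FNINIT value for FCW: all exceptions masked,
  * precision control = 64-bit mantissa, rounding = nearest. */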
4186 pCtx->fpu.FSW = 0;
4187 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
4188 pCtx->fpu.FPUDP = 0;
4189 pCtx->fpu.DS = 0; //??
4190 pCtx->fpu.Rsrvd2= 0;
4191 pCtx->fpu.FPUIP = 0;
4192 pCtx->fpu.CS = 0; //??
4193 pCtx->fpu.Rsrvd1= 0;
4194 pCtx->fpu.FOP = 0;
4195 }
4196 else
4197 {
4198 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
4199 pFpu->FCW = 0x37f;
4200 pFpu->FSW = 0;
4201 pFpu->FTW = 0xffff; /* 11 - empty */
4202 pFpu->FPUOO = 0; //??
4203 pFpu->FPUOS = 0; //??
4204 pFpu->FPUIP = 0;
4205 pFpu->CS = 0; //??
4206 pFpu->FOP = 0;
4207 }
4208
4209 iemHlpUsedFpu(pIemCpu);
4210 iemRegAddToRip(pIemCpu, cbInstr);
4211 return VINF_SUCCESS;
4212}
4213
4214
4215/**
4216 * Implements 'FXSAVE'.
4217 *
4218 * @param iEffSeg The effective segment.
4219 * @param GCPtrEff The address of the image.
4220 * @param enmEffOpSize The operand size (only REX.W really matters).
4221 */
4222IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4223{
4224 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4225
4226 /*
4227 * Raise exceptions.
4228 */
4229 if (pCtx->cr0 & X86_CR0_EM)
4230 return iemRaiseUndefinedOpcode(pIemCpu);
4231 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4232 return iemRaiseDeviceNotAvailable(pIemCpu);
4233 if (GCPtrEff & 15)
4234 {
4235 /** @todo CPU/VM detection possible! \#AC might not be signalled for
4236 * all/any misalignment sizes; Intel says it's an implementation detail. */
4237 if ( (pCtx->cr0 & X86_CR0_AM)
4238 && pCtx->eflags.Bits.u1AC
4239 && pIemCpu->uCpl == 3)
4240 return iemRaiseAlignmentCheckException(pIemCpu);
4241 return iemRaiseGeneralProtectionFault0(pIemCpu);
4242 }
4243 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4244
4245 /*
4246 * Access the memory.
4247 */
4248 void *pvMem512;
4249 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4250 if (rcStrict != VINF_SUCCESS)
4251 return rcStrict;
4252 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
4253
4254 /*
4255 * Store the registers.
4256 */
4257 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
4258 * specific whether MXCSR and XMM0-XMM7 are saved. */
4259
4260 /* common for all formats */
4261 pDst->FCW = pCtx->fpu.FCW;
4262 pDst->FSW = pCtx->fpu.FSW;
4263 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
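 /* The FXSAVE image keeps the abridged tag word: one valid/empty bit per
  * register in the low byte instead of the full 2-bit tags used by FSAVE. */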
4264 pDst->FOP = pCtx->fpu.FOP;
4265 pDst->MXCSR = pCtx->fpu.MXCSR;
4266 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
4267 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
4268 {
4269 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
4270 * them for now... */
4271 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4272 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4273 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
4274 pDst->aRegs[i].au32[3] = 0;
4275 }
4276
4277 /* FPU IP, CS, DP and DS. */
4278 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
4279 * state information. :-/
4280 * Storing zeros now to prevent any potential leakage of host info. */
4281 pDst->FPUIP = 0;
4282 pDst->CS = 0;
4283 pDst->Rsrvd1 = 0;
4284 pDst->FPUDP = 0;
4285 pDst->DS = 0;
4286 pDst->Rsrvd2 = 0;
4287
4288 /* XMM registers. */
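 /* With EFER.FFXSR set, FXSAVE executed at CPL 0 in 64-bit mode skips saving
  * the XMM registers (AMD's fast FXSAVE/FXRSTOR optimization), hence the
  * check below. */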
4289 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4290 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4291 || pIemCpu->uCpl != 0)
4292 {
4293 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4294 for (uint32_t i = 0; i < cXmmRegs; i++)
4295 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
4296 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
4297 * right? */
4298 }
4299
4300 /*
4301 * Commit the memory.
4302 */
4303 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4304 if (rcStrict != VINF_SUCCESS)
4305 return rcStrict;
4306
4307 iemRegAddToRip(pIemCpu, cbInstr);
4308 return VINF_SUCCESS;
4309}
4310
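#if 0
/**
 * Illustrative sketch only (not compiled, hypothetical helper name): shows the
 * relation between the full FSAVE tag word and the abridged FXSAVE tag byte
 * used above.  A register is marked in use (bit set) unless its full 2-bit
 * tag is 11b (empty); this is essentially what iemFpuCompressFtw is presumed
 * to do for the environment load path further down.
 */
static uint8_t iemExampleAbridgeFtw(uint16_t u16FullFtw)
{
    uint8_t bAbridged = 0;
    for (unsigned iReg = 0; iReg < 8; iReg++)
        if (((u16FullFtw >> (iReg * 2)) & 3) != 3)
            bAbridged |= (uint8_t)RT_BIT_32(iReg);
    return bAbridged;
}
#endif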
4311
4312/**
4313 * Implements 'FXRSTOR'.
4314 *
4315 * @param GCPtrEff The address of the image.
4316 * @param enmEffOpSize The operand size (only REX.W really matters).
4317 */
4318IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
4319{
4320 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4321
4322 /*
4323 * Raise exceptions.
4324 */
4325 if (pCtx->cr0 & X86_CR0_EM)
4326 return iemRaiseUndefinedOpcode(pIemCpu);
4327 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
4328 return iemRaiseDeviceNotAvailable(pIemCpu);
4329 if (GCPtrEff & 15)
4330 {
4331 /** @todo CPU/VM detection possible! \#AC might not be signal for
4332 * all/any misalignment sizes, intel says its an implementation detail. */
4333 if ( (pCtx->cr0 & X86_CR0_AM)
4334 && pCtx->eflags.Bits.u1AC
4335 && pIemCpu->uCpl == 3)
4336 return iemRaiseAlignmentCheckException(pIemCpu);
4337 return iemRaiseGeneralProtectionFault0(pIemCpu);
4338 }
4339 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
4340
4341 /*
4342 * Access the memory.
4343 */
4344 void *pvMem512;
4345 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
4346 if (rcStrict != VINF_SUCCESS)
4347 return rcStrict;
4348 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
4349
4350 /*
4351 * Check the state for stuff which will GP(0).
4352 */
4353 uint32_t const fMXCSR = pSrc->MXCSR;
4354 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
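 /* 0xffbf is the architectural default MXCSR_MASK used when the saved mask
  * reads as zero; it differs from 0xffff only in bit 6 (DAZ), which the
  * earliest SSE implementations do not support. */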
4355 if (fMXCSR & ~fMXCSR_MASK)
4356 {
4357 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
4358 return iemRaiseGeneralProtectionFault0(pIemCpu);
4359 }
4360
4361 /*
4362 * Load the registers.
4363 */
4364 /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
4365 * specific whether MXCSR and XMM0-XMM7 are restored. */
4366
4367 /* common for all formats */
4368 pCtx->fpu.FCW = pSrc->FCW;
4369 pCtx->fpu.FSW = pSrc->FSW;
4370 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4371 pCtx->fpu.FOP = pSrc->FOP;
4372 pCtx->fpu.MXCSR = fMXCSR;
4373 /* (MXCSR_MASK is read-only) */
4374 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4375 {
4376 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4377 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4378 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4379 pCtx->fpu.aRegs[i].au32[3] = 0;
4380 }
4381
4382 /* FPU IP, CS, DP and DS. */
4383 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4384 {
4385 pCtx->fpu.FPUIP = pSrc->FPUIP;
4386 pCtx->fpu.CS = pSrc->CS;
4387 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4388 pCtx->fpu.FPUDP = pSrc->FPUDP;
4389 pCtx->fpu.DS = pSrc->DS;
4390 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4391 }
4392 else
4393 {
4394 pCtx->fpu.FPUIP = pSrc->FPUIP;
4395 pCtx->fpu.CS = pSrc->CS;
4396 pCtx->fpu.Rsrvd1 = 0;
4397 pCtx->fpu.FPUDP = pSrc->FPUDP;
4398 pCtx->fpu.DS = pSrc->DS;
4399 pCtx->fpu.Rsrvd2 = 0;
4400 }
4401
4402 /* XMM registers. */
4403 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4404 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4405 || pIemCpu->uCpl != 0)
4406 {
4407 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4408 for (uint32_t i = 0; i < cXmmRegs; i++)
4409 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4410 }
4411
4412 /*
4413 * Commit the memory.
4414 */
4415 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4416 if (rcStrict != VINF_SUCCESS)
4417 return rcStrict;
4418
4419 iemHlpUsedFpu(pIemCpu);
4420 iemRegAddToRip(pIemCpu, cbInstr);
4421 return VINF_SUCCESS;
4422}
4423
4424
4425/**
4426 * Common routine for fnstenv and fnsave.
4427 *
4428 * @param uPtr Where to store the state.
4429 * @param pCtx The CPU context.
4430 */
4431static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4432{
4433 if (enmEffOpSize == IEMMODE_16BIT)
4434 {
4435 uPtr.pu16[0] = pCtx->fpu.FCW;
4436 uPtr.pu16[1] = pCtx->fpu.FSW;
4437 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4438 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4439 {
4440 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4441 * protected mode or long mode and we save it in real mode? And vice
4442 * versa? And with 32-bit operand size? I think the CPU stores the
4443 * effective address ((CS << 4) + IP) in the offset field rather than
4444 * doing any address calculations here. */
4445 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4446 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4447 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4448 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
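 /* Example: a 20-bit linear FPUIP of 0x12345 is stored as 0x2345 in word 3,
  * with the upper nibble shifted into bits 15:12 of word 4 (0x1000) and
  * merged with FOP. */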
4449 }
4450 else
4451 {
4452 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4453 uPtr.pu16[4] = pCtx->fpu.CS;
4454 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4455 uPtr.pu16[6] = pCtx->fpu.DS;
4456 }
4457 }
4458 else
4459 {
4460 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4461 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4462 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4463 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4464 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4465 {
4466 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4467 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4468 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4469 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4470 }
4471 else
4472 {
4473 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4474 uPtr.pu16[4*2] = pCtx->fpu.CS;
4475 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
4476 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4477 uPtr.pu16[6*2] = pCtx->fpu.DS;
4478 }
4479 }
4480}
4481
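#if 0
/**
 * Illustrative sketch only (not compiled, made-up structure names): the
 * protected mode FNSTENV/FLDENV image layouts that the pu16/pu32 index
 * arithmetic above and below corresponds to.  IEM accesses the image through
 * RTPTRUNION/RTCPTRUNION instead of such structures.
 */
#pragma pack(1)
typedef struct IEMEXAMPLEFPUENV16
{
    uint16_t FCW;       /* word 0 */
    uint16_t FSW;       /* word 1 */
    uint16_t FTW;       /* word 2, full (non-abridged) tag word */
    uint16_t FPUIP;     /* word 3 */
    uint16_t CS;        /* word 4 (real/V86 mode: FOP + FPUIP[19:16] instead) */
    uint16_t FPUDP;     /* word 5 */
    uint16_t DS;        /* word 6 (real/V86 mode: FPUDP[19:16] instead) */
} IEMEXAMPLEFPUENV16;   /* 14 bytes */

typedef struct IEMEXAMPLEFPUENV32
{
    uint32_t FCW;       /* dword 0 (only the low word is defined) */
    uint32_t FSW;       /* dword 1 */
    uint32_t FTW;       /* dword 2 */
    uint32_t FPUIP;     /* dword 3 */
    uint16_t CS;        /* dword 4, low word */
    uint16_t FOP;       /* dword 4, high word (11 valid bits) */
    uint32_t FPUDP;     /* dword 5 */
    uint32_t DS;        /* dword 6 (selector in the low word) */
} IEMEXAMPLEFPUENV32;   /* 28 bytes */
#pragma pack()
#endif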
4482
4483/**
4484 * Common routine for fldenv and frstor.
4485 *
4486 * @param uPtr Where to load the state from.
4487 * @param pCtx The CPU context.
4488 */
4489static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4490{
4491 if (enmEffOpSize == IEMMODE_16BIT)
4492 {
4493 pCtx->fpu.FCW = uPtr.pu16[0];
4494 pCtx->fpu.FSW = uPtr.pu16[1];
4495 pCtx->fpu.FTW = uPtr.pu16[2];
4496 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4497 {
4498 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4499 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4500 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4501 pCtx->fpu.CS = 0;
4502 pCtx->fpu.Rsrvd1= 0;
4503 pCtx->fpu.DS = 0;
4504 pCtx->fpu.Rsrvd2= 0;
4505 }
4506 else
4507 {
4508 pCtx->fpu.FPUIP = uPtr.pu16[3];
4509 pCtx->fpu.CS = uPtr.pu16[4];
4510 pCtx->fpu.Rsrvd1= 0;
4511 pCtx->fpu.FPUDP = uPtr.pu16[5];
4512 pCtx->fpu.DS = uPtr.pu16[6];
4513 pCtx->fpu.Rsrvd2= 0;
4514 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4515 }
4516 }
4517 else
4518 {
4519 pCtx->fpu.FCW = uPtr.pu16[0*2];
4520 pCtx->fpu.FSW = uPtr.pu16[1*2];
4521 pCtx->fpu.FTW = uPtr.pu16[2*2];
4522 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4523 {
4524 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4525 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4526 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4527 pCtx->fpu.CS = 0;
4528 pCtx->fpu.Rsrvd1= 0;
4529 pCtx->fpu.DS = 0;
4530 pCtx->fpu.Rsrvd2= 0;
4531 }
4532 else
4533 {
4534 pCtx->fpu.FPUIP = uPtr.pu32[3];
4535 pCtx->fpu.CS = uPtr.pu16[4*2];
4536 pCtx->fpu.Rsrvd1= 0;
4537 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4538 pCtx->fpu.FPUDP = uPtr.pu32[5];
4539 pCtx->fpu.DS = uPtr.pu16[6*2];
4540 pCtx->fpu.Rsrvd2= 0;
4541 }
4542 }
4543
4544 /* Make adjustments. */
4545 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4546 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4547 iemFpuRecalcExceptionStatus(pCtx);
4548 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4549 * exceptions are pending after loading the saved state? */
4550}
4551
4552
4553/**
4554 * Implements 'FNSTENV'.
4555 *
4556 * @param enmEffOpSize The operand size (only REX.W really matters).
4557 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
4558 * @param GCPtrEffDst The address of the image.
4559 */
4560IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4561{
4562 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4563 RTPTRUNION uPtr;
4564 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4565 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4566 if (rcStrict != VINF_SUCCESS)
4567 return rcStrict;
4568
4569 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4570
4571 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4572 if (rcStrict != VINF_SUCCESS)
4573 return rcStrict;
4574
4575 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4576 iemRegAddToRip(pIemCpu, cbInstr);
4577 return VINF_SUCCESS;
4578}
4579
4580
4581/**
4582 * Implements 'FNSAVE'.
4583 *
4584 * @param enmEffOpSize The operand size.
 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
4585 * @param GCPtrEffDst The address of the image.
4586 */
4587IEM_CIMPL_DEF_3(iemCImpl_fnsave, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4588{
4589 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4590 RTPTRUNION uPtr;
4591 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
4592 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4593 if (rcStrict != VINF_SUCCESS)
4594 return rcStrict;
4595
4596 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4597 PRTFLOAT80U paRegs = (PRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
4598 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
4599 {
4600 paRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
4601 paRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
4602 paRegs[i].au16[4] = pCtx->fpu.aRegs[i].au16[4];
4603 }
4604
4605 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4606 if (rcStrict != VINF_SUCCESS)
4607 return rcStrict;
4608
4609 /*
4610 * Re-initialize the FPU.
4611 */
4612 pCtx->fpu.FCW = 0x37f;
4613 pCtx->fpu.FSW = 0;
4614 pCtx->fpu.FTW = 0x00; /* 0 - empty */
4615 pCtx->fpu.FPUDP = 0;
4616 pCtx->fpu.DS = 0;
4617 pCtx->fpu.Rsrvd2= 0;
4618 pCtx->fpu.FPUIP = 0;
4619 pCtx->fpu.CS = 0;
4620 pCtx->fpu.Rsrvd1= 0;
4621 pCtx->fpu.FOP = 0;
4622
4623 iemHlpUsedFpu(pIemCpu);
4624 iemRegAddToRip(pIemCpu, cbInstr);
4625 return VINF_SUCCESS;
4626}
4627
4628
4629
4630/**
4631 * Implements 'FLDENV'.
4632 *
4633 * @param enmEffOpSize The operand size (only REX.W really matters).
4634 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
4635 * @param GCPtrEffSrc The address of the image.
4636 */
4637IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4638{
4639 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4640 RTCPTRUNION uPtr;
4641 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4642 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4643 if (rcStrict != VINF_SUCCESS)
4644 return rcStrict;
4645
4646 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4647
4648 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4649 if (rcStrict != VINF_SUCCESS)
4650 return rcStrict;
4651
4652 iemHlpUsedFpu(pIemCpu);
4653 iemRegAddToRip(pIemCpu, cbInstr);
4654 return VINF_SUCCESS;
4655}
4656
4657
4658/**
4659 * Implements 'FRSTOR'.
4660 *
4661 * @param enmEffOpSize The operand size.
 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
4662 * @param GCPtrEffSrc The address of the image.
4663 */
4664IEM_CIMPL_DEF_3(iemCImpl_frstor, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4665{
4666 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4667 RTCPTRUNION uPtr;
4668 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 94 : 108,
4669 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4670 if (rcStrict != VINF_SUCCESS)
4671 return rcStrict;
4672
4673 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4674 PCRTFLOAT80U paRegs = (PCRTFLOAT80U)(uPtr.pu8 + (enmEffOpSize == IEMMODE_16BIT ? 14 : 28));
4675 for (uint32_t i = 0; i < RT_ELEMENTS(pCtx->fpu.aRegs); i++)
4676 {
4677 pCtx->fpu.aRegs[i].au32[0] = paRegs[i].au32[0];
4678 pCtx->fpu.aRegs[i].au32[1] = paRegs[i].au32[1];
4679 pCtx->fpu.aRegs[i].au32[2] = paRegs[i].au16[4];
4680 pCtx->fpu.aRegs[i].au32[3] = 0;
4681 }
4682
4683 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4684 if (rcStrict != VINF_SUCCESS)
4685 return rcStrict;
4686
4687 iemHlpUsedFpu(pIemCpu);
4688 iemRegAddToRip(pIemCpu, cbInstr);
4689 return VINF_SUCCESS;
4690}
4691
4692
4693/**
4694 * Implements 'FLDCW'.
4695 *
4696 * @param u16Fcw The new FCW.
4697 */
4698IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4699{
4700 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4701
4702 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4703 /** @todo Testcase: Try to see what happens when trying to set undefined bits
4704 * (other than 6 and 7). Currently ignoring them. */
4705 /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4706 * according to FSW. (This is what is currently implemented.) */
4707 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4708 iemFpuRecalcExceptionStatus(pCtx);
4709
4710 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4711 iemHlpUsedFpu(pIemCpu);
4712 iemRegAddToRip(pIemCpu, cbInstr);
4713 return VINF_SUCCESS;
4714}
4715
4716
4717
4718/**
4719 * Implements the underflow case of fxch.
4720 *
4721 * @param iStReg The other stack register.
4722 */
4723IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4724{
4725 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4726
4727 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4728 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4729 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4730
4731 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4732 * registers are read as QNaN and then exchanged. This could be
4733 * wrong... */
4734 if (pCtx->fpu.FCW & X86_FCW_IM)
4735 {
4736 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4737 {
4738 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4739 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4740 else
4741 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4742 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4743 }
4744 else
4745 {
4746 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4747 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4748 }
4749 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4750 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4751 }
4752 else
4753 {
4754 /* raise underflow exception, don't change anything. */
4755 pCtx->fpu.FSW &= ~(X86_FSW_C_MASK | X86_FSW_XCPT_MASK);
4756 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4757 }
4758
4759 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4760 iemHlpUsedFpu(pIemCpu);
4761 iemRegAddToRip(pIemCpu, cbInstr);
4762 return VINF_SUCCESS;
4763}
4764
4765
4766/**
4767 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
4768 *
4769 * @param iStReg The other stack register.
 * @param pfnAImpl Pointer to the assembly comparison worker.
 * @param fPop Whether to pop the stack register afterwards.
4770 */
4771IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
4772{
4773 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4774 Assert(iStReg < 8);
4775
4776 /*
4777 * Raise exceptions.
4778 */
4779 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4780 return iemRaiseDeviceNotAvailable(pIemCpu);
4781 uint16_t u16Fsw = pCtx->fpu.FSW;
4782 if (u16Fsw & X86_FSW_ES)
4783 return iemRaiseMathFault(pIemCpu);
4784
4785 /*
4786 * Check if any of the register accesses causes #SF + #IA.
4787 */
4788 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
4789 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4790 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
4791 {
4792 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
4793 pCtx->fpu.FSW &= ~X86_FSW_C1;
4794 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
4795 if ( !(u16Fsw & X86_FSW_IE)
4796 || (pCtx->fpu.FCW & X86_FCW_IM) )
4797 {
4798 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4799 pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
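 /* Architecturally FCOMI/FUCOMI encode the result as ZF=0,PF=0,CF=0 for
  * ST(0) > ST(i), CF=1 for ST(0) < ST(i), ZF=1 for equal and ZF=PF=CF=1
  * for unordered operands; the assembly worker is expected to return
  * these bits in u32Eflags. */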
4800 }
4801 }
4802 else if (pCtx->fpu.FCW & X86_FCW_IM)
4803 {
4804 /* Masked underflow. */
4805 pCtx->fpu.FSW &= ~X86_FSW_C1;
4806 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4807 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4808 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
4809 }
4810 else
4811 {
4812 /* Raise underflow - don't touch EFLAGS or TOP. */
4813 pCtx->fpu.FSW &= ~X86_FSW_C1;
4814 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4815 fPop = false;
4816 }
4817
4818 /*
4819 * Pop if necessary.
4820 */
4821 if (fPop)
4822 {
4823 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
4824 pCtx->fpu.FSW &= ~X86_FSW_TOP_MASK;
4825 pCtx->fpu.FSW |= ((iReg1 + 1) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
4826 }
4827
4828 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4829 iemHlpUsedFpu(pIemCpu);
4830 iemRegAddToRip(pIemCpu, cbInstr);
4831 return VINF_SUCCESS;
4832}
4833
4834/** @} */
4835