VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h@42407

Last change on this file since 42407 was 42407, checked in by vboxsync, 13 years ago

VMM: Further work on dealing with hidden segment registers, esp. when going stale.

1/* $Id: IEMAllCImpl.cpp.h 42407 2012-07-26 11:41:35Z vboxsync $ */
2/** @file
3 * IEM - Instruction Implementation in C/C++ (code include).
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @name Misc Helpers
20 * @{
21 */
22
23/**
24 * Checks if we are allowed to access the given I/O port, raising the
25 * appropriate exceptions if we aren't (or if the I/O bitmap is not
26 * accessible).
27 *
28 * @returns Strict VBox status code.
29 *
30 * @param pIemCpu The IEM per CPU data.
31 * @param pCtx The register context.
32 * @param u16Port The port number.
33 * @param cbOperand The operand size.
34 */
35DECLINLINE(VBOXSTRICTRC) iemHlpCheckPortIOPermission(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
36{
37 if ( (pCtx->cr0 & X86_CR0_PE)
38 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
39 || pCtx->eflags.Bits.u1VM) )
40 {
41 NOREF(u16Port); NOREF(cbOperand); /** @todo I/O port permission bitmap check */
42 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
43 }
44 return VINF_SUCCESS;
45}
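
/* Illustrative sketch (not part of the original file): roughly what the I/O
 * permission bitmap check noted in the todo above could look like.  The helper
 * name iemHlpFetchTssU16 and the direct use of pCtx->tr below are assumptions
 * made for this sketch only, not the actual IEM API. */
#if 0
static VBOXSTRICTRC iemHlpCheckIoBitmap(PIEMCPU pIemCpu, PCCPUMCTX pCtx, uint16_t u16Port, uint8_t cbOperand)
{
    /* The 16-bit offset of the I/O permission bitmap is stored at offset 0x66
       of the 32-bit TSS; the bits covering ports [u16Port, u16Port + cbOperand)
       must all be clear and must lie within the TSS limit, otherwise #GP(0). */
    uint16_t offBitmap;
    VBOXSTRICTRC rcStrict = iemHlpFetchTssU16(pIemCpu, &offBitmap, 0x66); /* hypothetical helper */
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint32_t const offFirstByte = (uint32_t)offBitmap + (u16Port >> 3);
    if (offFirstByte + 1 > pCtx->tr.u32Limit)       /* bitmap bytes outside the TSS? */
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    uint16_t bmPorts;                               /* the two bytes covering the ports */
    rcStrict = iemHlpFetchTssU16(pIemCpu, &bmPorts, offFirstByte);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    uint16_t const fMask = (uint16_t)(((1U << cbOperand) - 1U) << (u16Port & 7));
    if (bmPorts & fMask)                            /* any covered bit set -> access denied */
        return iemRaiseGeneralProtectionFault0(pIemCpu);
    return VINF_SUCCESS;
}
#endif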
46
47
48#if 0
49/**
50 * Calculates the parity bit.
51 *
52 * @returns true if the bit is set, false if not.
53 * @param u8Result The least significant byte of the result.
54 */
55static bool iemHlpCalcParityFlag(uint8_t u8Result)
56{
57 /*
58 * Parity is set if the number of bits in the least significant byte of
59 * the result is even.
60 */
61 uint8_t cBits;
62 cBits = u8Result & 1; /* 0 */
63 u8Result >>= 1;
64 cBits += u8Result & 1;
65 u8Result >>= 1;
66 cBits += u8Result & 1;
67 u8Result >>= 1;
68 cBits += u8Result & 1;
69 u8Result >>= 1;
70 cBits += u8Result & 1; /* 4 */
71 u8Result >>= 1;
72 cBits += u8Result & 1;
73 u8Result >>= 1;
74 cBits += u8Result & 1;
75 u8Result >>= 1;
76 cBits += u8Result & 1;
77 return !(cBits & 1);
78}
79#endif /* not used */
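
/* Equivalent, more compact formulation of the same (unused) computation, kept
 * here purely to illustrate the XOR-folding trick; like the function above it
 * is not built.  PF is set when the low byte has an even number of set bits. */
#if 0
static bool iemHlpCalcParityFlagAlt(uint8_t u8Result)
{
    u8Result ^= u8Result >> 4;  /* fold the high nibble into the low nibble */
    u8Result ^= u8Result >> 2;
    u8Result ^= u8Result >> 1;  /* bit 0 is now the XOR of all eight bits */
    return !(u8Result & 1);     /* even number of set bits -> PF set */
}
#endif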
80
81
82/**
83 * Updates the specified flags according to an 8-bit result.
84 *
85 * @param pIemCpu The IEM per CPU data.
86 * @param u8Result The result to set the flags according to.
87 * @param fToUpdate The flags to update.
88 * @param fUndefined The flags that are specified as undefined.
89 */
90static void iemHlpUpdateArithEFlagsU8(PIEMCPU pIemCpu, uint8_t u8Result, uint32_t fToUpdate, uint32_t fUndefined)
91{
92 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
93
94 uint32_t fEFlags = pCtx->eflags.u;
95 iemAImpl_test_u8(&u8Result, u8Result, &fEFlags);
96 pCtx->eflags.u &= ~(fToUpdate | fUndefined);
97 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags;
98}
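
/* Hypothetical usage example (caller and values made up for illustration): an
 * instruction helper that has just produced an 8-bit result and wants SF, ZF
 * and PF refreshed while declaring AF undefined (i.e. taking whatever the TEST
 * helper produced for it) could do:
 *     iemHlpUpdateArithEFlagsU8(pIemCpu, u8Result,
 *                               X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
 *                               X86_EFL_AF);
 */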
99
100
101/**
102 * Loads a NULL data selector into a selector register, both the hidden and
103 * visible parts, in protected mode.
104 *
105 * @param pSReg Pointer to the segment register.
106 */
107static void iemHlpLoadNullDataSelectorProt(PCPUMSELREG pSReg)
108{
109 /** @todo Testcase: write a testcase checking what happens when loading a NULL
110 * data selector in protected mode. */
111 pSReg->Sel = 0;
112 pSReg->ValidSel = 0;
113 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
114 pSReg->u64Base = 0;
115 pSReg->u32Limit = 0;
116 pSReg->Attr.u = 0;
117}
118
119
120/**
121 * Helper used by iret.
122 *
123 * @param uCpl The new CPL.
124 * @param pSReg Pointer to the segment register.
125 */
126static void iemHlpAdjustSelectorForNewCpl(uint8_t uCpl, PCPUMSELREG pSReg)
127{
128 if ( uCpl > pSReg->Attr.n.u2Dpl
129 && pSReg->Attr.n.u1DescType /* code or data, not system */
130 && (pSReg->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
131 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) /* not conforming code */
132 iemHlpLoadNullDataSelectorProt(pSReg);
133}
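
/* In other words: on a return that drops to a numerically larger (less
 * privileged) CPL, data and non-conforming code selectors whose DPL the new
 * CPL may no longer access are replaced by NULL selectors, mirroring what the
 * CPU does on iret/retf to an outer privilege level. */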
134
135/** @} */
136
137/** @name C Implementations
138 * @{
139 */
140
141/**
142 * Implements a 16-bit popa.
143 */
144IEM_CIMPL_DEF_0(iemCImpl_popa_16)
145{
146 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
147 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
148 RTGCPTR GCPtrLast = GCPtrStart + 15;
149 VBOXSTRICTRC rcStrict;
150
151 /*
152 * The docs are a bit hard to comprehend here, but it looks like we wrap
153 * around in real mode as long as none of the individual "popa" crosses the
154 * end of the stack segment. In protected mode we check the whole access
155 * in one go. For efficiency, only do the word-by-word thing if we're in
156 * danger of wrapping around.
157 */
158 /** @todo do popa boundary / wrap-around checks. */
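    /* Worked example of when the word-by-word path is taken (assuming a 64 KiB
       real-mode stack segment): with SP=0xFFF8, GCPtrStart is 0xFFF8 and
       GCPtrLast is 0x10007, which exceeds the 16-bit segment limit checked
       below, so the pops are done individually and SP wraps around within the
       segment. */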
159 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
160 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
161 {
162 /* word-by-word */
163 RTUINT64U TmpRsp;
164 TmpRsp.u = pCtx->rsp;
165 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->di, &TmpRsp);
166 if (rcStrict == VINF_SUCCESS)
167 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->si, &TmpRsp);
168 if (rcStrict == VINF_SUCCESS)
169 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bp, &TmpRsp);
170 if (rcStrict == VINF_SUCCESS)
171 {
172 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
173 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->bx, &TmpRsp);
174 }
175 if (rcStrict == VINF_SUCCESS)
176 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->dx, &TmpRsp);
177 if (rcStrict == VINF_SUCCESS)
178 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->cx, &TmpRsp);
179 if (rcStrict == VINF_SUCCESS)
180 rcStrict = iemMemStackPopU16Ex(pIemCpu, &pCtx->ax, &TmpRsp);
181 if (rcStrict == VINF_SUCCESS)
182 {
183 pCtx->rsp = TmpRsp.u;
184 iemRegAddToRip(pIemCpu, cbInstr);
185 }
186 }
187 else
188 {
189 uint16_t const *pa16Mem = NULL;
190 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
191 if (rcStrict == VINF_SUCCESS)
192 {
193 pCtx->di = pa16Mem[7 - X86_GREG_xDI];
194 pCtx->si = pa16Mem[7 - X86_GREG_xSI];
195 pCtx->bp = pa16Mem[7 - X86_GREG_xBP];
196 /* skip sp */
197 pCtx->bx = pa16Mem[7 - X86_GREG_xBX];
198 pCtx->dx = pa16Mem[7 - X86_GREG_xDX];
199 pCtx->cx = pa16Mem[7 - X86_GREG_xCX];
200 pCtx->ax = pa16Mem[7 - X86_GREG_xAX];
201 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_R);
202 if (rcStrict == VINF_SUCCESS)
203 {
204 iemRegAddToRsp(pCtx, 16);
205 iemRegAddToRip(pIemCpu, cbInstr);
206 }
207 }
208 }
209 return rcStrict;
210}
211
212
213/**
214 * Implements a 32-bit popa.
215 */
216IEM_CIMPL_DEF_0(iemCImpl_popa_32)
217{
218 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
219 RTGCPTR GCPtrStart = iemRegGetEffRsp(pCtx);
220 RTGCPTR GCPtrLast = GCPtrStart + 31;
221 VBOXSTRICTRC rcStrict;
222
223 /*
224 * The docs are a bit hard to comprehend here, but it looks like we wrap
225 * around in real mode as long as none of the individual "popa" crosses the
226 * end of the stack segment. In protected mode we check the whole access
227 * in one go. For efficiency, only do the word-by-word thing if we're in
228 * danger of wrapping around.
229 */
230 /** @todo do popa boundary / wrap-around checks. */
231 if (RT_UNLIKELY( IEM_IS_REAL_OR_V86_MODE(pIemCpu)
232 && (pCtx->cs.u32Limit < GCPtrLast)) ) /* ASSUMES 64-bit RTGCPTR */
233 {
234 /* word-by-word */
235 RTUINT64U TmpRsp;
236 TmpRsp.u = pCtx->rsp;
237 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edi, &TmpRsp);
238 if (rcStrict == VINF_SUCCESS)
239 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->esi, &TmpRsp);
240 if (rcStrict == VINF_SUCCESS)
241 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebp, &TmpRsp);
242 if (rcStrict == VINF_SUCCESS)
243 {
244 iemRegAddToRspEx(&TmpRsp, 2, pCtx); /* sp */
245 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ebx, &TmpRsp);
246 }
247 if (rcStrict == VINF_SUCCESS)
248 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->edx, &TmpRsp);
249 if (rcStrict == VINF_SUCCESS)
250 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->ecx, &TmpRsp);
251 if (rcStrict == VINF_SUCCESS)
252 rcStrict = iemMemStackPopU32Ex(pIemCpu, &pCtx->eax, &TmpRsp);
253 if (rcStrict == VINF_SUCCESS)
254 {
255#if 1 /** @todo what actually happens with the high bits when we're in 16-bit mode? */
256 pCtx->rdi &= UINT32_MAX;
257 pCtx->rsi &= UINT32_MAX;
258 pCtx->rbp &= UINT32_MAX;
259 pCtx->rbx &= UINT32_MAX;
260 pCtx->rdx &= UINT32_MAX;
261 pCtx->rcx &= UINT32_MAX;
262 pCtx->rax &= UINT32_MAX;
263#endif
264 pCtx->rsp = TmpRsp.u;
265 iemRegAddToRip(pIemCpu, cbInstr);
266 }
267 }
268 else
269 {
270 uint32_t const *pa32Mem;
271 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrStart, IEM_ACCESS_STACK_R);
272 if (rcStrict == VINF_SUCCESS)
273 {
274 pCtx->rdi = pa32Mem[7 - X86_GREG_xDI];
275 pCtx->rsi = pa32Mem[7 - X86_GREG_xSI];
276 pCtx->rbp = pa32Mem[7 - X86_GREG_xBP];
277 /* skip esp */
278 pCtx->rbx = pa32Mem[7 - X86_GREG_xBX];
279 pCtx->rdx = pa32Mem[7 - X86_GREG_xDX];
280 pCtx->rcx = pa32Mem[7 - X86_GREG_xCX];
281 pCtx->rax = pa32Mem[7 - X86_GREG_xAX];
282 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa32Mem, IEM_ACCESS_STACK_R);
283 if (rcStrict == VINF_SUCCESS)
284 {
285 iemRegAddToRsp(pCtx, 32);
286 iemRegAddToRip(pIemCpu, cbInstr);
287 }
288 }
289 }
290 return rcStrict;
291}
292
293
294/**
295 * Implements a 16-bit pusha.
296 */
297IEM_CIMPL_DEF_0(iemCImpl_pusha_16)
298{
299 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
300 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
301 RTGCPTR GCPtrBottom = GCPtrTop - 15;
302 VBOXSTRICTRC rcStrict;
303
304 /*
305 * The docs are a bit hard to comprehend here, but it looks like we wrap
306 * around in real mode as long as none of the individual "pusha" crosses the
307 * end of the stack segment. In protected mode we check the whole access
308 * in one go. For efficiency, only do the word-by-word thing if we're in
309 * danger of wrapping around.
310 */
311 /** @todo do pusha boundary / wrap-around checks. */
312 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
313 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
314 {
315 /* word-by-word */
316 RTUINT64U TmpRsp;
317 TmpRsp.u = pCtx->rsp;
318 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->ax, &TmpRsp);
319 if (rcStrict == VINF_SUCCESS)
320 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->cx, &TmpRsp);
321 if (rcStrict == VINF_SUCCESS)
322 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->dx, &TmpRsp);
323 if (rcStrict == VINF_SUCCESS)
324 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bx, &TmpRsp);
325 if (rcStrict == VINF_SUCCESS)
326 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->sp, &TmpRsp);
327 if (rcStrict == VINF_SUCCESS)
328 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->bp, &TmpRsp);
329 if (rcStrict == VINF_SUCCESS)
330 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->si, &TmpRsp);
331 if (rcStrict == VINF_SUCCESS)
332 rcStrict = iemMemStackPushU16Ex(pIemCpu, pCtx->di, &TmpRsp);
333 if (rcStrict == VINF_SUCCESS)
334 {
335 pCtx->rsp = TmpRsp.u;
336 iemRegAddToRip(pIemCpu, cbInstr);
337 }
338 }
339 else
340 {
341 GCPtrBottom--;
342 uint16_t *pa16Mem = NULL;
343 rcStrict = iemMemMap(pIemCpu, (void **)&pa16Mem, 16, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
344 if (rcStrict == VINF_SUCCESS)
345 {
346 pa16Mem[7 - X86_GREG_xDI] = pCtx->di;
347 pa16Mem[7 - X86_GREG_xSI] = pCtx->si;
348 pa16Mem[7 - X86_GREG_xBP] = pCtx->bp;
349 pa16Mem[7 - X86_GREG_xSP] = pCtx->sp;
350 pa16Mem[7 - X86_GREG_xBX] = pCtx->bx;
351 pa16Mem[7 - X86_GREG_xDX] = pCtx->dx;
352 pa16Mem[7 - X86_GREG_xCX] = pCtx->cx;
353 pa16Mem[7 - X86_GREG_xAX] = pCtx->ax;
354 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)pa16Mem, IEM_ACCESS_STACK_W);
355 if (rcStrict == VINF_SUCCESS)
356 {
357 iemRegSubFromRsp(pCtx, 16);
358 iemRegAddToRip(pIemCpu, cbInstr);
359 }
360 }
361 }
362 return rcStrict;
363}
364
365
366/**
367 * Implements a 32-bit pusha.
368 */
369IEM_CIMPL_DEF_0(iemCImpl_pusha_32)
370{
371 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
372 RTGCPTR GCPtrTop = iemRegGetEffRsp(pCtx);
373 RTGCPTR GCPtrBottom = GCPtrTop - 31;
374 VBOXSTRICTRC rcStrict;
375
376 /*
377 * The docs are a bit hard to comprehend here, but it looks like we wrap
378 * around in real mode as long as none of the individual "pusha" crosses the
379 * end of the stack segment. In protected mode we check the whole access
380 * in one go. For efficiency, only do the word-by-word thing if we're in
381 * danger of wrapping around.
382 */
383 /** @todo do pusha boundary / wrap-around checks. */
384 if (RT_UNLIKELY( GCPtrBottom > GCPtrTop
385 && IEM_IS_REAL_OR_V86_MODE(pIemCpu) ) )
386 {
387 /* word-by-word */
388 RTUINT64U TmpRsp;
389 TmpRsp.u = pCtx->rsp;
390 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->eax, &TmpRsp);
391 if (rcStrict == VINF_SUCCESS)
392 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ecx, &TmpRsp);
393 if (rcStrict == VINF_SUCCESS)
394 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edx, &TmpRsp);
395 if (rcStrict == VINF_SUCCESS)
396 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebx, &TmpRsp);
397 if (rcStrict == VINF_SUCCESS)
398 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esp, &TmpRsp);
399 if (rcStrict == VINF_SUCCESS)
400 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->ebp, &TmpRsp);
401 if (rcStrict == VINF_SUCCESS)
402 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->esi, &TmpRsp);
403 if (rcStrict == VINF_SUCCESS)
404 rcStrict = iemMemStackPushU32Ex(pIemCpu, pCtx->edi, &TmpRsp);
405 if (rcStrict == VINF_SUCCESS)
406 {
407 pCtx->rsp = TmpRsp.u;
408 iemRegAddToRip(pIemCpu, cbInstr);
409 }
410 }
411 else
412 {
413 GCPtrBottom--;
414 uint32_t *pa32Mem;
415 rcStrict = iemMemMap(pIemCpu, (void **)&pa32Mem, 32, X86_SREG_SS, GCPtrBottom, IEM_ACCESS_STACK_W);
416 if (rcStrict == VINF_SUCCESS)
417 {
418 pa32Mem[7 - X86_GREG_xDI] = pCtx->edi;
419 pa32Mem[7 - X86_GREG_xSI] = pCtx->esi;
420 pa32Mem[7 - X86_GREG_xBP] = pCtx->ebp;
421 pa32Mem[7 - X86_GREG_xSP] = pCtx->esp;
422 pa32Mem[7 - X86_GREG_xBX] = pCtx->ebx;
423 pa32Mem[7 - X86_GREG_xDX] = pCtx->edx;
424 pa32Mem[7 - X86_GREG_xCX] = pCtx->ecx;
425 pa32Mem[7 - X86_GREG_xAX] = pCtx->eax;
426 rcStrict = iemMemCommitAndUnmap(pIemCpu, pa32Mem, IEM_ACCESS_STACK_W);
427 if (rcStrict == VINF_SUCCESS)
428 {
429 iemRegSubFromRsp(pCtx, 32);
430 iemRegAddToRip(pIemCpu, cbInstr);
431 }
432 }
433 }
434 return rcStrict;
435}
436
437
438/**
439 * Implements pushf.
440 *
441 *
442 * @param enmEffOpSize The effective operand size.
443 */
444IEM_CIMPL_DEF_1(iemCImpl_pushf, IEMMODE, enmEffOpSize)
445{
446 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
447
448 /*
449 * If we're in V8086 mode some care is required (which is why we're
450 * doing this in a C implementation).
451 */
452 uint32_t fEfl = pCtx->eflags.u;
453 if ( (fEfl & X86_EFL_VM)
454 && X86_EFL_GET_IOPL(fEfl) != 3 )
455 {
456 Assert(pCtx->cr0 & X86_CR0_PE);
457 if ( enmEffOpSize != IEMMODE_16BIT
458 || !(pCtx->cr4 & X86_CR4_VME))
459 return iemRaiseGeneralProtectionFault0(pIemCpu);
460 fEfl &= ~X86_EFL_IF; /* (RF and VM are out of range) */
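        /* The next line copies VIF (bit 19) down into the IF position (bit 9),
           so the 16-bit image pushed below carries the virtual interrupt flag
           instead of the real IF. */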
461 fEfl |= (fEfl & X86_EFL_VIF) >> (19 - 9);
462 return iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
463 }
464
465 /*
466 * Ok, clear RF and VM and push the flags.
467 */
468 fEfl &= ~(X86_EFL_RF | X86_EFL_VM);
469
470 VBOXSTRICTRC rcStrict;
471 switch (enmEffOpSize)
472 {
473 case IEMMODE_16BIT:
474 rcStrict = iemMemStackPushU16(pIemCpu, (uint16_t)fEfl);
475 break;
476 case IEMMODE_32BIT:
477 rcStrict = iemMemStackPushU32(pIemCpu, fEfl);
478 break;
479 case IEMMODE_64BIT:
480 rcStrict = iemMemStackPushU64(pIemCpu, fEfl);
481 break;
482 IEM_NOT_REACHED_DEFAULT_CASE_RET();
483 }
484 if (rcStrict != VINF_SUCCESS)
485 return rcStrict;
486
487 iemRegAddToRip(pIemCpu, cbInstr);
488 return VINF_SUCCESS;
489}
490
491
492/**
493 * Implements popf.
494 *
495 * @param enmEffOpSize The effective operand size.
496 */
497IEM_CIMPL_DEF_1(iemCImpl_popf, IEMMODE, enmEffOpSize)
498{
499 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
500 uint32_t const fEflOld = pCtx->eflags.u;
501 VBOXSTRICTRC rcStrict;
502 uint32_t fEflNew;
503
504 /*
505 * V8086 is special as usual.
506 */
507 if (fEflOld & X86_EFL_VM)
508 {
509 /*
510 * Almost anything goes if IOPL is 3.
511 */
512 if (X86_EFL_GET_IOPL(fEflOld) == 3)
513 {
514 switch (enmEffOpSize)
515 {
516 case IEMMODE_16BIT:
517 {
518 uint16_t u16Value;
519 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
520 if (rcStrict != VINF_SUCCESS)
521 return rcStrict;
522 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
523 break;
524 }
525 case IEMMODE_32BIT:
526 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
527 if (rcStrict != VINF_SUCCESS)
528 return rcStrict;
529 break;
530 IEM_NOT_REACHED_DEFAULT_CASE_RET();
531 }
532
533 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
534 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
535 }
536 /*
537 * Interrupt flag virtualization with CR4.VME=1.
538 */
539 else if ( enmEffOpSize == IEMMODE_16BIT
540 && (pCtx->cr4 & X86_CR4_VME) )
541 {
542 uint16_t u16Value;
543 RTUINT64U TmpRsp;
544 TmpRsp.u = pCtx->rsp;
545 rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
546 if (rcStrict != VINF_SUCCESS)
547 return rcStrict;
548
549 /** @todo Is the popf VME #GP(0) delivered after updating RSP+RIP
550 * or before? */
551 if ( ( (u16Value & X86_EFL_IF)
552 && (fEflOld & X86_EFL_VIP))
553 || (u16Value & X86_EFL_TF) )
554 return iemRaiseGeneralProtectionFault0(pIemCpu);
555
556 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000) & ~X86_EFL_VIF);
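            /* The next line copies the popped IF (bit 9) up into the VIF
               position (bit 19); the real IF itself is left unchanged by the
               masking that follows. */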
557 fEflNew |= (fEflNew & X86_EFL_IF) << (19 - 9);
558 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
559 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
560
561 pCtx->rsp = TmpRsp.u;
562 }
563 else
564 return iemRaiseGeneralProtectionFault0(pIemCpu);
565
566 }
567 /*
568 * Not in V8086 mode.
569 */
570 else
571 {
572 /* Pop the flags. */
573 switch (enmEffOpSize)
574 {
575 case IEMMODE_16BIT:
576 {
577 uint16_t u16Value;
578 rcStrict = iemMemStackPopU16(pIemCpu, &u16Value);
579 if (rcStrict != VINF_SUCCESS)
580 return rcStrict;
581 fEflNew = u16Value | (fEflOld & UINT32_C(0xffff0000));
582 break;
583 }
584 case IEMMODE_32BIT:
585 case IEMMODE_64BIT:
586 rcStrict = iemMemStackPopU32(pIemCpu, &fEflNew);
587 if (rcStrict != VINF_SUCCESS)
588 return rcStrict;
589 break;
590 IEM_NOT_REACHED_DEFAULT_CASE_RET();
591 }
592
593 /* Merge them with the current flags. */
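        /* Privilege rules, as implemented below: CPL 0 may change IOPL and IF;
           CPL <= IOPL may change IF but not IOPL; otherwise neither is changed.
           (The first case also catches pops that leave IOPL and IF untouched.) */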
594 if ( (fEflNew & (X86_EFL_IOPL | X86_EFL_IF)) == (fEflOld & (X86_EFL_IOPL | X86_EFL_IF))
595 || pIemCpu->uCpl == 0)
596 {
597 fEflNew &= X86_EFL_POPF_BITS;
598 fEflNew |= ~X86_EFL_POPF_BITS & fEflOld;
599 }
600 else if (pIemCpu->uCpl <= X86_EFL_GET_IOPL(fEflOld))
601 {
602 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL);
603 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL)) & fEflOld;
604 }
605 else
606 {
607 fEflNew &= X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF);
608 fEflNew |= ~(X86_EFL_POPF_BITS & ~(X86_EFL_IOPL | X86_EFL_IF)) & fEflOld;
609 }
610 }
611
612 /*
613 * Commit the flags.
614 */
615 Assert(fEflNew & RT_BIT_32(1));
616 pCtx->eflags.u = fEflNew;
617 iemRegAddToRip(pIemCpu, cbInstr);
618
619 return VINF_SUCCESS;
620}
621
622
623/**
624 * Implements an indirect call.
625 *
626 * @param uNewPC The new program counter (RIP) value (loaded from the
627 * operand).
629 */
630IEM_CIMPL_DEF_1(iemCImpl_call_16, uint16_t, uNewPC)
631{
632 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
633 uint16_t uOldPC = pCtx->ip + cbInstr;
634 if (uNewPC > pCtx->cs.u32Limit)
635 return iemRaiseGeneralProtectionFault0(pIemCpu);
636
637 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
638 if (rcStrict != VINF_SUCCESS)
639 return rcStrict;
640
641 pCtx->rip = uNewPC;
642 return VINF_SUCCESS;
643
644}
645
646
647/**
648 * Implements a 16-bit relative call.
649 *
650 * @param offDisp The displacement offset.
651 */
652IEM_CIMPL_DEF_1(iemCImpl_call_rel_16, int16_t, offDisp)
653{
654 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
655 uint16_t uOldPC = pCtx->ip + cbInstr;
656 uint16_t uNewPC = uOldPC + offDisp;
657 if (uNewPC > pCtx->cs.u32Limit)
658 return iemRaiseGeneralProtectionFault0(pIemCpu);
659
660 VBOXSTRICTRC rcStrict = iemMemStackPushU16(pIemCpu, uOldPC);
661 if (rcStrict != VINF_SUCCESS)
662 return rcStrict;
663
664 pCtx->rip = uNewPC;
665 return VINF_SUCCESS;
666}
667
668
669/**
670 * Implements a 32-bit indirect call.
671 *
672 * @param uNewPC The new program counter (RIP) value (loaded from the
673 * operand).
675 */
676IEM_CIMPL_DEF_1(iemCImpl_call_32, uint32_t, uNewPC)
677{
678 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
679 uint32_t uOldPC = pCtx->eip + cbInstr;
680 if (uNewPC > pCtx->cs.u32Limit)
681 return iemRaiseGeneralProtectionFault0(pIemCpu);
682
683 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
684 if (rcStrict != VINF_SUCCESS)
685 return rcStrict;
686
687 pCtx->rip = uNewPC;
688 return VINF_SUCCESS;
689
690}
691
692
693/**
694 * Implements a 32-bit relative call.
695 *
696 * @param offDisp The displacement offset.
697 */
698IEM_CIMPL_DEF_1(iemCImpl_call_rel_32, int32_t, offDisp)
699{
700 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
701 uint32_t uOldPC = pCtx->eip + cbInstr;
702 uint32_t uNewPC = uOldPC + offDisp;
703 if (uNewPC > pCtx->cs.u32Limit)
704 return iemRaiseGeneralProtectionFault0(pIemCpu);
705
706 VBOXSTRICTRC rcStrict = iemMemStackPushU32(pIemCpu, uOldPC);
707 if (rcStrict != VINF_SUCCESS)
708 return rcStrict;
709
710 pCtx->rip = uNewPC;
711 return VINF_SUCCESS;
712}
713
714
715/**
716 * Implements a 64-bit indirect call.
717 *
718 * @param uNewPC The new program counter (RIP) value (loaded from the
719 * operand).
721 */
722IEM_CIMPL_DEF_1(iemCImpl_call_64, uint64_t, uNewPC)
723{
724 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
725 uint64_t uOldPC = pCtx->rip + cbInstr;
726 if (!IEM_IS_CANONICAL(uNewPC))
727 return iemRaiseGeneralProtectionFault0(pIemCpu);
728
729 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
730 if (rcStrict != VINF_SUCCESS)
731 return rcStrict;
732
733 pCtx->rip = uNewPC;
734 return VINF_SUCCESS;
735
736}
737
738
739/**
740 * Implements a 64-bit relative call.
741 *
742 * @param offDisp The displacement offset.
743 */
744IEM_CIMPL_DEF_1(iemCImpl_call_rel_64, int64_t, offDisp)
745{
746 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
747 uint64_t uOldPC = pCtx->rip + cbInstr;
748 uint64_t uNewPC = uOldPC + offDisp;
749 if (!IEM_IS_CANONICAL(uNewPC))
750 return iemRaiseNotCanonical(pIemCpu);
751
752 VBOXSTRICTRC rcStrict = iemMemStackPushU64(pIemCpu, uOldPC);
753 if (rcStrict != VINF_SUCCESS)
754 return rcStrict;
755
756 pCtx->rip = uNewPC;
757 return VINF_SUCCESS;
758}
759
760
761/**
762 * Implements far jumps and calls thru task segments (TSS).
763 *
764 * @param uSel The selector.
765 * @param enmBranch The kind of branching we're performing.
766 * @param enmEffOpSize The effective operand size.
767 * @param pDesc The descriptor corresponding to @a uSel. The type is
768 * task segment (TSS).
769 */
770IEM_CIMPL_DEF_4(iemCImpl_BranchTaskSegment, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
771{
772 /* Call various functions to do the work. */
773 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
774}
775
776
777/**
778 * Implements far jumps and calls thru task gates.
779 *
780 * @param uSel The selector.
781 * @param enmBranch The kind of branching we're performing.
782 * @param enmEffOpSize The effective operand size.
783 * @param pDesc The descriptor corresponding to @a uSel. The type is
784 * task gate.
785 */
786IEM_CIMPL_DEF_4(iemCImpl_BranchTaskGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
787{
788 /* Call various functions to do the work. */
789 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
790}
791
792
793/**
794 * Implements far jumps and calls thru call gates.
795 *
796 * @param uSel The selector.
797 * @param enmBranch The kind of branching we're performing.
798 * @param enmEffOpSize The effective operand size.
799 * @param pDesc The descriptor corresponding to @a uSel. The type is
800 * call gate.
801 */
802IEM_CIMPL_DEF_4(iemCImpl_BranchCallGate, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
803{
804 /* Call various functions to do the work. */
805 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
806}
807
808
809/**
810 * Implements far jumps and calls thru system selectors.
811 *
812 * @param uSel The selector.
813 * @param enmBranch The kind of branching we're performing.
814 * @param enmEffOpSize The effective operand size.
815 * @param pDesc The descriptor corresponding to @a uSel.
816 */
817IEM_CIMPL_DEF_4(iemCImpl_BranchSysSel, uint16_t, uSel, IEMBRANCH, enmBranch, IEMMODE, enmEffOpSize, PIEMSELDESC, pDesc)
818{
819 Assert(enmBranch == IEMBRANCH_JUMP || enmBranch == IEMBRANCH_CALL);
820 Assert((uSel & (X86_SEL_MASK | X86_SEL_LDT)));
821
822 if (IEM_IS_LONG_MODE(pIemCpu))
823 switch (pDesc->Legacy.Gen.u4Type)
824 {
825 case AMD64_SEL_TYPE_SYS_CALL_GATE:
826 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
827
828 default:
829 case AMD64_SEL_TYPE_SYS_LDT:
830 case AMD64_SEL_TYPE_SYS_TSS_BUSY:
831 case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
832 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
833 case AMD64_SEL_TYPE_SYS_INT_GATE:
834 Log(("branch %04x -> wrong sys selector (64-bit): %d\n", uSel, pDesc->Legacy.Gen.u4Type));
835 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
836
837 }
838
839 switch (pDesc->Legacy.Gen.u4Type)
840 {
841 case X86_SEL_TYPE_SYS_286_CALL_GATE:
842 case X86_SEL_TYPE_SYS_386_CALL_GATE:
843 return IEM_CIMPL_CALL_4(iemCImpl_BranchCallGate, uSel, enmBranch, enmEffOpSize, pDesc);
844
845 case X86_SEL_TYPE_SYS_TASK_GATE:
846 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskGate, uSel, enmBranch, enmEffOpSize, pDesc);
847
848 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
849 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
850 return IEM_CIMPL_CALL_4(iemCImpl_BranchTaskSegment, uSel, enmBranch, enmEffOpSize, pDesc);
851
852 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
853 Log(("branch %04x -> busy 286 TSS\n", uSel));
854 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
855
856 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
857 Log(("branch %04x -> busy 386 TSS\n", uSel));
858 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
859
860 default:
861 case X86_SEL_TYPE_SYS_LDT:
862 case X86_SEL_TYPE_SYS_286_INT_GATE:
863 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
864 case X86_SEL_TYPE_SYS_386_INT_GATE:
865 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
866 Log(("branch %04x -> wrong sys selector: %d\n", uSel, pDesc->Legacy.Gen.u4Type));
867 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
868 }
869}
870
871
872/**
873 * Implements far jumps.
874 *
875 * @param uSel The selector.
876 * @param offSeg The segment offset.
877 * @param enmEffOpSize The effective operand size.
878 */
879IEM_CIMPL_DEF_3(iemCImpl_FarJmp, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
880{
881 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
882 NOREF(cbInstr);
883 Assert(offSeg <= UINT32_MAX);
884
885 /*
886 * Real mode and V8086 mode are easy. The only snag seems to be that
887 * CS.limit doesn't change and the limit check is done against the current
888 * limit.
889 */
890 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
891 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
892 {
893 if (offSeg > pCtx->cs.u32Limit)
894 return iemRaiseGeneralProtectionFault0(pIemCpu);
895
896 if (enmEffOpSize == IEMMODE_16BIT) /** @todo WRONG, must pass this. */
897 pCtx->rip = offSeg;
898 else
899 pCtx->rip = offSeg & UINT16_MAX;
900 pCtx->cs.Sel = uSel;
901 pCtx->cs.ValidSel = uSel;
902 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
903 pCtx->cs.u64Base = (uint32_t)uSel << 4;
904 /** @todo REM resets the accessed bit (see on jmp far16 after disabling
905 * PE). Check with VT-x and AMD-V. */
906#ifdef IEM_VERIFICATION_MODE
907 pCtx->cs.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
908#endif
909 return VINF_SUCCESS;
910 }
911
912 /*
913 * Protected mode. Need to parse the specified descriptor...
914 */
915 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
916 {
917 Log(("jmpf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
918 return iemRaiseGeneralProtectionFault0(pIemCpu);
919 }
920
921 /* Fetch the descriptor. */
922 IEMSELDESC Desc;
923 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
924 if (rcStrict != VINF_SUCCESS)
925 return rcStrict;
926
927 /* Is it there? */
928 if (!Desc.Legacy.Gen.u1Present) /** @todo this is probably checked too early. Testcase! */
929 {
930 Log(("jmpf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
931 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
932 }
933
934 /*
935 * Deal with it according to its type. We do the standard code selectors
936 * here and dispatch the system selectors to worker functions.
937 */
938 if (!Desc.Legacy.Gen.u1DescType)
939 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_JUMP, enmEffOpSize, &Desc);
940
941 /* Only code segments. */
942 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
943 {
944 Log(("jmpf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
945 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
946 }
947
948 /* L vs D. */
949 if ( Desc.Legacy.Gen.u1Long
950 && Desc.Legacy.Gen.u1DefBig
951 && IEM_IS_LONG_MODE(pIemCpu))
952 {
953 Log(("jmpf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
954 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
955 }
956
957 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
958 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
959 {
960 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
961 {
962 Log(("jmpf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
963 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
964 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
965 }
966 }
967 else
968 {
969 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
970 {
971 Log(("jmpf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
972 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
973 }
974 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
975 {
976 Log(("jmpf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
977 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
978 }
979 }
980
981 /* Chop the high bits if 16-bit (Intel says so). */
982 if (enmEffOpSize == IEMMODE_16BIT)
983 offSeg &= UINT16_MAX;
984
985 /* Limit check. (Should alternatively check for non-canonical addresses
986 here, but that is ruled out by offSeg being 32-bit, right?) */
987 uint64_t u64Base;
988 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
989 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
990 u64Base = 0;
991 else
992 {
993 if (offSeg > cbLimit)
994 {
995 Log(("jmpf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
996 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
997 }
998 u64Base = X86DESC_BASE(&Desc.Legacy);
999 }
1000
1001 /*
1002 * Ok, everything checked out fine. Now set the accessed bit before
1003 * committing the result into CS, CSHID and RIP.
1004 */
1005 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1006 {
1007 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1008 if (rcStrict != VINF_SUCCESS)
1009 return rcStrict;
1010#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1011 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1012#endif
1013 }
1014
1015 /* commit */
1016 pCtx->rip = offSeg;
1017 pCtx->cs.Sel = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1018 pCtx->cs.Sel |= pIemCpu->uCpl; /** @todo is this right for conforming segs? or in general? */
1019 pCtx->cs.ValidSel = pCtx->cs.Sel;
1020 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1021 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1022 pCtx->cs.u32Limit = cbLimit;
1023 pCtx->cs.u64Base = u64Base;
1024 /** @todo check if the hidden bits are loaded correctly for 64-bit
1025 * mode. */
1026 return VINF_SUCCESS;
1027}
1028
1029
1030/**
1031 * Implements far calls.
1032 *
1033 * This is very similar to iemCImpl_FarJmp.
1034 *
1035 * @param uSel The selector.
1036 * @param offSeg The segment offset.
1037 * @param enmEffOpSize The operand size (in case we need it).
1038 */
1039IEM_CIMPL_DEF_3(iemCImpl_callf, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmEffOpSize)
1040{
1041 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1042 VBOXSTRICTRC rcStrict;
1043 uint64_t uNewRsp;
1044 RTPTRUNION uPtrRet;
1045
1046 /*
1047 * Real mode and V8086 mode are easy. The only snag seems to be that
1048 * CS.limit doesn't change and the limit check is done against the current
1049 * limit.
1050 */
1051 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1052 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1053 {
1054 Assert(enmEffOpSize == IEMMODE_16BIT || enmEffOpSize == IEMMODE_32BIT);
1055
1056 /* Check stack first - may #SS(0). */
1057 rcStrict = iemMemStackPushBeginSpecial(pIemCpu, enmEffOpSize == IEMMODE_32BIT ? 6 : 4,
1058 &uPtrRet.pv, &uNewRsp);
1059 if (rcStrict != VINF_SUCCESS)
1060 return rcStrict;
1061
1062 /* Check the target address range. */
1063 if (offSeg > UINT32_MAX)
1064 return iemRaiseGeneralProtectionFault0(pIemCpu);
1065
1066 /* Everything is fine, push the return address. */
1067 if (enmEffOpSize == IEMMODE_16BIT)
1068 {
1069 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1070 uPtrRet.pu16[1] = pCtx->cs.Sel;
1071 }
1072 else
1073 {
1074 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1075 uPtrRet.pu16[3] = pCtx->cs.Sel;
1076 }
1077 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1078 if (rcStrict != VINF_SUCCESS)
1079 return rcStrict;
1080
1081 /* Branch. */
1082 pCtx->rip = offSeg;
1083 pCtx->cs.Sel = uSel;
1084 pCtx->cs.ValidSel = uSel;
1085 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1086 pCtx->cs.u64Base = (uint32_t)uSel << 4;
1087 /** @todo Does REM reset the accessed bit here too? (See on jmp far16
1088 * after disabling PE.) Check with VT-x and AMD-V. */
1089#ifdef IEM_VERIFICATION_MODE
1090 pCtx->cs.Attr.u &= ~X86_SEL_TYPE_ACCESSED;
1091#endif
1092 return VINF_SUCCESS;
1093 }
1094
1095 /*
1096 * Protected mode. Need to parse the specified descriptor...
1097 */
1098 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
1099 {
1100 Log(("callf %04x:%08RX64 -> invalid selector, #GP(0)\n", uSel, offSeg));
1101 return iemRaiseGeneralProtectionFault0(pIemCpu);
1102 }
1103
1104 /* Fetch the descriptor. */
1105 IEMSELDESC Desc;
1106 rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
1107 if (rcStrict != VINF_SUCCESS)
1108 return rcStrict;
1109
1110 /*
1111 * Deal with it according to its type. We do the standard code selectors
1112 * here and dispatch the system selectors to worker functions.
1113 */
1114 if (!Desc.Legacy.Gen.u1DescType)
1115 return IEM_CIMPL_CALL_4(iemCImpl_BranchSysSel, uSel, IEMBRANCH_CALL, enmEffOpSize, &Desc);
1116
1117 /* Only code segments. */
1118 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1119 {
1120 Log(("callf %04x:%08RX64 -> not a code selector (u4Type=%#x).\n", uSel, offSeg, Desc.Legacy.Gen.u4Type));
1121 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1122 }
1123
1124 /* L vs D. */
1125 if ( Desc.Legacy.Gen.u1Long
1126 && Desc.Legacy.Gen.u1DefBig
1127 && IEM_IS_LONG_MODE(pIemCpu))
1128 {
1129 Log(("callf %04x:%08RX64 -> both L and D are set.\n", uSel, offSeg));
1130 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1131 }
1132
1133 /* DPL/RPL/CPL check, where conforming segments makes a difference. */
1134 if (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1135 {
1136 if (pIemCpu->uCpl < Desc.Legacy.Gen.u2Dpl)
1137 {
1138 Log(("callf %04x:%08RX64 -> DPL violation (conforming); DPL=%d CPL=%u\n",
1139 uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1140 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1141 }
1142 }
1143 else
1144 {
1145 if (pIemCpu->uCpl != Desc.Legacy.Gen.u2Dpl)
1146 {
1147 Log(("callf %04x:%08RX64 -> CPL != DPL; DPL=%d CPL=%u\n", uSel, offSeg, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
1148 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1149 }
1150 if ((uSel & X86_SEL_RPL) > pIemCpu->uCpl)
1151 {
1152 Log(("callf %04x:%08RX64 -> RPL > CPL; RPL=%d CPL=%u\n", uSel, offSeg, (uSel & X86_SEL_RPL), pIemCpu->uCpl));
1153 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1154 }
1155 }
1156
1157 /* Is it there? */
1158 if (!Desc.Legacy.Gen.u1Present)
1159 {
1160 Log(("callf %04x:%08RX64 -> segment not present\n", uSel, offSeg));
1161 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
1162 }
1163
1164 /* Check stack first - may #SS(0). */
1165 /** @todo check how operand prefix affects pushing of CS! Does callf 16:32 in
1166 * 16-bit code cause a two or four byte CS to be pushed? */
1167 rcStrict = iemMemStackPushBeginSpecial(pIemCpu,
1168 enmEffOpSize == IEMMODE_64BIT ? 8+8
1169 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 2+2,
1170 &uPtrRet.pv, &uNewRsp);
1171 if (rcStrict != VINF_SUCCESS)
1172 return rcStrict;
1173
1174 /* Chop the high bits if 16-bit (Intel says so). */
1175 if (enmEffOpSize == IEMMODE_16BIT)
1176 offSeg &= UINT16_MAX;
1177
1178 /* Limit / canonical check. */
1179 uint64_t u64Base;
1180 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
1181 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1182 {
1183 if (!IEM_IS_CANONICAL(offSeg))
1184 {
1185 Log(("callf %04x:%016RX64 - not canonical -> #GP\n", uSel, offSeg));
1186 return iemRaiseNotCanonical(pIemCpu);
1187 }
1188 u64Base = 0;
1189 }
1190 else
1191 {
1192 if (offSeg > cbLimit)
1193 {
1194 Log(("callf %04x:%08RX64 -> out of bounds (%#x)\n", uSel, offSeg, cbLimit));
1195 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
1196 }
1197 u64Base = X86DESC_BASE(&Desc.Legacy);
1198 }
1199
1200 /*
1201 * Now set the accessed bit before
1202 * writing the return address to the stack and committing the result into
1203 * CS, CSHID and RIP.
1204 */
1205 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1206 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1207 {
1208 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
1209 if (rcStrict != VINF_SUCCESS)
1210 return rcStrict;
1211#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1212 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1213#endif
1214 }
1215
1216 /* stack */
1217 if (enmEffOpSize == IEMMODE_16BIT)
1218 {
1219 uPtrRet.pu16[0] = pCtx->ip + cbInstr;
1220 uPtrRet.pu16[1] = pCtx->cs.Sel;
1221 }
1222 else if (enmEffOpSize == IEMMODE_32BIT)
1223 {
1224 uPtrRet.pu32[0] = pCtx->eip + cbInstr;
1225 uPtrRet.pu32[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high word when callf is pushing CS? */
1226 }
1227 else
1228 {
1229 uPtrRet.pu64[0] = pCtx->rip + cbInstr;
1230 uPtrRet.pu64[1] = pCtx->cs.Sel; /** @todo Testcase: What is written to the high words when callf is pushing CS? */
1231 }
1232 rcStrict = iemMemStackPushCommitSpecial(pIemCpu, uPtrRet.pv, uNewRsp);
1233 if (rcStrict != VINF_SUCCESS)
1234 return rcStrict;
1235
1236 /* commit */
1237 pCtx->rip = offSeg;
1238 pCtx->cs.Sel = uSel & (X86_SEL_MASK | X86_SEL_LDT);
1239 pCtx->cs.Sel |= pIemCpu->uCpl;
1240 pCtx->cs.ValidSel = pCtx->cs.Sel;
1241 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1242 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
1243 pCtx->cs.u32Limit = cbLimit;
1244 pCtx->cs.u64Base = u64Base;
1245 /** @todo check if the hidden bits are loaded correctly for 64-bit
1246 * mode. */
1247 return VINF_SUCCESS;
1248}
1249
1250
1251/**
1252 * Implements retf.
1253 *
1254 * @param enmEffOpSize The effective operand size.
1255 * @param cbPop The number of bytes of arguments to pop from the
1256 * stack.
1257 */
1258IEM_CIMPL_DEF_2(iemCImpl_retf, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1259{
1260 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1261 VBOXSTRICTRC rcStrict;
1262 RTCPTRUNION uPtrFrame;
1263 uint64_t uNewRsp;
1264 uint64_t uNewRip;
1265 uint16_t uNewCs;
1266 NOREF(cbInstr);
1267
1268 /*
1269 * Read the stack values first.
1270 */
1271 uint32_t cbRetPtr = enmEffOpSize == IEMMODE_16BIT ? 2+2
1272 : enmEffOpSize == IEMMODE_32BIT ? 4+4 : 8+8;
1273 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, cbRetPtr, &uPtrFrame.pv, &uNewRsp);
1274 if (rcStrict != VINF_SUCCESS)
1275 return rcStrict;
1276 if (enmEffOpSize == IEMMODE_16BIT)
1277 {
1278 uNewRip = uPtrFrame.pu16[0];
1279 uNewCs = uPtrFrame.pu16[1];
1280 }
1281 else if (enmEffOpSize == IEMMODE_32BIT)
1282 {
1283 uNewRip = uPtrFrame.pu32[0];
1284 uNewCs = uPtrFrame.pu16[2];
1285 }
1286 else
1287 {
1288 uNewRip = uPtrFrame.pu64[0];
1289 uNewCs = uPtrFrame.pu16[4];
1290 }
1291
1292 /*
1293 * Real mode and V8086 mode are easy.
1294 */
1295 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
1296 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
1297 {
1298 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1299 /** @todo check how this is supposed to work if sp=0xfffe. */
1300
1301 /* Check the limit of the new EIP. */
1302 /** @todo Intel pseudo code only does the limit check for 16-bit
1303 * operands, AMD does not make any distinction. What is right? */
1304 if (uNewRip > pCtx->cs.u32Limit)
1305 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1306
1307 /* commit the operation. */
1308 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1309 if (rcStrict != VINF_SUCCESS)
1310 return rcStrict;
1311 pCtx->rip = uNewRip;
1312 pCtx->cs.Sel = uNewCs;
1313 pCtx->cs.ValidSel = uNewCs;
1314 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1315 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1316 /** @todo do we load attribs and limit as well? */
1317 if (cbPop)
1318 iemRegAddToRsp(pCtx, cbPop);
1319 return VINF_SUCCESS;
1320 }
1321
1322 /*
1323 * Protected mode is complicated, of course.
1324 */
1325 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1326 {
1327 Log(("retf %04x:%08RX64 -> invalid selector, #GP(0)\n", uNewCs, uNewRip));
1328 return iemRaiseGeneralProtectionFault0(pIemCpu);
1329 }
1330
1331 /* Fetch the descriptor. */
1332 IEMSELDESC DescCs;
1333 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCs, uNewCs);
1334 if (rcStrict != VINF_SUCCESS)
1335 return rcStrict;
1336
1337 /* Can only return to a code selector. */
1338 if ( !DescCs.Legacy.Gen.u1DescType
1339 || !(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE) )
1340 {
1341 Log(("retf %04x:%08RX64 -> not a code selector (u1DescType=%u u4Type=%#x).\n",
1342 uNewCs, uNewRip, DescCs.Legacy.Gen.u1DescType, DescCs.Legacy.Gen.u4Type));
1343 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1344 }
1345
1346 /* L vs D. */
1347 if ( DescCs.Legacy.Gen.u1Long /** @todo Testcase: far return to a selector with both L and D set. */
1348 && DescCs.Legacy.Gen.u1DefBig
1349 && IEM_IS_LONG_MODE(pIemCpu))
1350 {
1351 Log(("retf %04x:%08RX64 -> both L & D set.\n", uNewCs, uNewRip));
1352 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1353 }
1354
1355 /* DPL/RPL/CPL checks. */
1356 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1357 {
1358 Log(("retf %04x:%08RX64 -> RPL < CPL(%d).\n", uNewCs, uNewRip, pIemCpu->uCpl));
1359 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1360 }
1361
1362 if (DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1363 {
1364 if ((uNewCs & X86_SEL_RPL) < DescCs.Legacy.Gen.u2Dpl)
1365 {
1366 Log(("retf %04x:%08RX64 -> DPL violation (conforming); DPL=%u RPL=%u\n",
1367 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1368 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1369 }
1370 }
1371 else
1372 {
1373 if ((uNewCs & X86_SEL_RPL) != DescCs.Legacy.Gen.u2Dpl)
1374 {
1375 Log(("retf %04x:%08RX64 -> RPL != DPL; DPL=%u RPL=%u\n",
1376 uNewCs, uNewRip, DescCs.Legacy.Gen.u2Dpl, (uNewCs & X86_SEL_RPL)));
1377 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1378 }
1379 }
1380
1381 /* Is it there? */
1382 if (!DescCs.Legacy.Gen.u1Present)
1383 {
1384 Log(("retf %04x:%08RX64 -> segment not present\n", uNewCs, uNewRip));
1385 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1386 }
1387
1388 /*
1389 * Return to outer privilege? (We'll typically have entered via a call gate.)
1390 */
1391 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
1392 {
1393 /* Read the return pointer, it comes before the parameters. */
1394 RTCPTRUNION uPtrStack;
1395 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, cbPop + cbRetPtr, &uPtrStack.pv, &uNewRsp);
1396 if (rcStrict != VINF_SUCCESS)
1397 return rcStrict;
1398 uint16_t uNewOuterSs;
1399 uint64_t uNewOuterRsp;
1400 if (enmEffOpSize == IEMMODE_16BIT)
1401 {
1402 uNewOuterRsp = uPtrFrame.pu16[0];
1403 uNewOuterSs = uPtrFrame.pu16[1];
1404 }
1405 else if (enmEffOpSize == IEMMODE_32BIT)
1406 {
1407 uNewOuterRsp = uPtrFrame.pu32[0];
1408 uNewOuterSs = uPtrFrame.pu16[2];
1409 }
1410 else
1411 {
1412 uNewOuterRsp = uPtrFrame.pu64[0];
1413 uNewOuterSs = uPtrFrame.pu16[4];
1414 }
1415
1416 /* Check for NULL stack selector (invalid in ring-3 and non-long mode)
1417 and read the selector. */
1418 IEMSELDESC DescSs;
1419 if (!(uNewOuterSs & (X86_SEL_MASK | X86_SEL_LDT)))
1420 {
1421 if ( !DescCs.Legacy.Gen.u1Long
1422 || (uNewOuterSs & X86_SEL_RPL) == 3)
1423 {
1424 Log(("retf %04x:%08RX64 %04x:%08RX64 -> invalid stack selector, #GP\n",
1425 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1426 return iemRaiseGeneralProtectionFault0(pIemCpu);
1427 }
1428 /** @todo Testcase: Return far to ring-1 or ring-2 with SS=0. */
1429 iemMemFakeStackSelDesc(&DescSs, (uNewOuterSs & X86_SEL_RPL));
1430 }
1431 else
1432 {
1433 /* Fetch the descriptor for the new stack segment. */
1434 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSs, uNewOuterSs);
1435 if (rcStrict != VINF_SUCCESS)
1436 return rcStrict;
1437 }
1438
1439 /* Check that RPL of stack and code selectors match. */
1440 if ((uNewCs & X86_SEL_RPL) != (uNewOuterSs & X86_SEL_RPL))
1441 {
1442 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.RPL != CS.RPL -> #GP(SS)\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1443 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1444 }
1445
1446 /* Must be a writable data segment. */
1447 if ( !DescSs.Legacy.Gen.u1DescType
1448 || (DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
1449 || !(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
1450 {
1451 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not a writable data segment (u1DescType=%u u4Type=%#x) -> #GP(SS).\n",
1452 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1453 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1454 }
1455
1456 /* L vs D. (Not mentioned by Intel.) */
1457 if ( DescSs.Legacy.Gen.u1Long /** @todo Testcase: far return to a stack selector with both L and D set. */
1458 && DescSs.Legacy.Gen.u1DefBig
1459 && IEM_IS_LONG_MODE(pIemCpu))
1460 {
1461 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS has both L & D set -> #GP(SS).\n",
1462 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u1DescType, DescSs.Legacy.Gen.u4Type));
1463 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1464 }
1465
1466 /* DPL/RPL/CPL checks. */
1467 if (DescSs.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
1468 {
1469 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS.DPL(%u) != CS.RPL (%u) -> #GP(SS).\n",
1470 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, DescSs.Legacy.Gen.u2Dpl, uNewCs & X86_SEL_RPL));
1471 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewOuterSs);
1472 }
1473
1474 /* Is it there? */
1475 if (!DescSs.Legacy.Gen.u1Present)
1476 {
1477 Log(("retf %04x:%08RX64 %04x:%08RX64 - SS not present -> #NP(SS).\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1478 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
1479 }
1480
1481 /* Calc SS limit.*/
1482 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
1483
1484 /* Is RIP canonical or within CS.limit? */
1485 uint64_t u64Base;
1486 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1487
1488 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1489 {
1490 if (!IEM_IS_CANONICAL(uNewRip))
1491 {
1492 Log(("retf %04x:%08RX64 %04x:%08RX64 - not canonical -> #GP.\n", uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp));
1493 return iemRaiseNotCanonical(pIemCpu);
1494 }
1495 u64Base = 0;
1496 }
1497 else
1498 {
1499 if (uNewRip > cbLimitCs)
1500 {
1501 Log(("retf %04x:%08RX64 %04x:%08RX64 - out of bounds (%#x)-> #GP(CS).\n",
1502 uNewCs, uNewRip, uNewOuterSs, uNewOuterRsp, cbLimitCs));
1503 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1504 }
1505 u64Base = X86DESC_BASE(&DescCs.Legacy);
1506 }
1507
1508 /*
1509 * Now set the accessed bit before
1510 * writing the return address to the stack and committing the result into
1511 * CS, CSHID and RIP.
1512 */
1513 /** @todo Testcase: Need to check WHEN exactly the CS accessed bit is set. */
1514 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1515 {
1516 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1517 if (rcStrict != VINF_SUCCESS)
1518 return rcStrict;
1519#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1520 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1521#endif
1522 }
1523 /** @todo Testcase: Need to check WHEN exactly the SS accessed bit is set. */
1524 if (!(DescSs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1525 {
1526 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewOuterSs);
1527 if (rcStrict != VINF_SUCCESS)
1528 return rcStrict;
1529#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1530 DescSs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1531#endif
1532 }
1533
1534 /* commit */
1535 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1536 if (rcStrict != VINF_SUCCESS)
1537 return rcStrict;
1538 if (enmEffOpSize == IEMMODE_16BIT)
1539 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1540 else
1541 pCtx->rip = uNewRip;
1542 pCtx->cs.Sel = uNewCs;
1543 pCtx->cs.ValidSel = uNewCs;
1544 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1545 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1546 pCtx->cs.u32Limit = cbLimitCs;
1547 pCtx->cs.u64Base = u64Base;
1548 pCtx->rsp = uNewRsp;
1549 pCtx->ss.Sel = uNewOuterSs;
1550 pCtx->ss.ValidSel = uNewOuterSs;
1551 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
1552 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
1553 pCtx->ss.u32Limit = cbLimitSs;
1554 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1555 pCtx->ss.u64Base = 0;
1556 else
1557 pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
1558
1559 pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
1560 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds);
1561 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es);
1562 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs);
1563 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs);
1564
1565 /** @todo check if the hidden bits are loaded correctly for 64-bit
1566 * mode. */
1567
1568 if (cbPop)
1569 iemRegAddToRsp(pCtx, cbPop);
1570
1571 /* Done! */
1572 }
1573 /*
1574 * Return to the same privilege level
1575 */
1576 else
1577 {
1578 /* Limit / canonical check. */
1579 uint64_t u64Base;
1580 uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
1581
1582 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
1583 {
1584 if (!IEM_IS_CANONICAL(uNewRip))
1585 {
1586 Log(("retf %04x:%08RX64 - not canonical -> #GP\n", uNewCs, uNewRip));
1587 return iemRaiseNotCanonical(pIemCpu);
1588 }
1589 u64Base = 0;
1590 }
1591 else
1592 {
1593 if (uNewRip > cbLimitCs)
1594 {
1595 Log(("retf %04x:%08RX64 -> out of bounds (%#x)\n", uNewCs, uNewRip, cbLimitCs));
1596 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1597 }
1598 u64Base = X86DESC_BASE(&DescCs.Legacy);
1599 }
1600
1601 /*
1602 * Now set the accessed bit before
1603 * writing the return address to the stack and committing the result into
1604 * CS, CSHID and RIP.
1605 */
1606 /** @todo Testcase: Need to check WHEN exactly the accessed bit is set. */
1607 if (!(DescCs.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
1608 {
1609 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
1610 if (rcStrict != VINF_SUCCESS)
1611 return rcStrict;
1612#ifdef IEM_VERIFICATION_MODE /** @todo check what VT-x and AMD-V does. */
1613 DescCs.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
1614#endif
1615 }
1616
1617 /* commit */
1618 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uPtrFrame.pv, uNewRsp);
1619 if (rcStrict != VINF_SUCCESS)
1620 return rcStrict;
1621 if (enmEffOpSize == IEMMODE_16BIT)
1622 pCtx->rip = uNewRip & UINT16_MAX; /** @todo Testcase: When exactly does this occur? With call it happens prior to the limit check according to Intel... */
1623 else
1624 pCtx->rip = uNewRip;
1625 pCtx->cs.Sel = uNewCs;
1626 pCtx->cs.ValidSel = uNewCs;
1627 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1628 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
1629 pCtx->cs.u32Limit = cbLimitCs;
1630 pCtx->cs.u64Base = u64Base;
1631 /** @todo check if the hidden bits are loaded correctly for 64-bit
1632 * mode. */
1633 if (cbPop)
1634 iemRegAddToRsp(pCtx, cbPop);
1635 }
1636 return VINF_SUCCESS;
1637}
1638
1639
1640/**
1641 * Implements retn.
1642 *
1643 * We're doing this in C because of the \#GP that might be raised if the popped
1644 * program counter is out of bounds.
1645 *
1646 * @param enmEffOpSize The effective operand size.
1647 * @param cbPop The number of bytes of arguments to pop from the
1648 * stack.
1649 */
1650IEM_CIMPL_DEF_2(iemCImpl_retn, IEMMODE, enmEffOpSize, uint16_t, cbPop)
1651{
1652 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1653 NOREF(cbInstr);
1654
1655 /* Fetch the RSP from the stack. */
1656 VBOXSTRICTRC rcStrict;
1657 RTUINT64U NewRip;
1658 RTUINT64U NewRsp;
1659 NewRsp.u = pCtx->rsp;
1660 switch (enmEffOpSize)
1661 {
1662 case IEMMODE_16BIT:
1663 NewRip.u = 0;
1664 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRip.Words.w0, &NewRsp);
1665 break;
1666 case IEMMODE_32BIT:
1667 NewRip.u = 0;
1668 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRip.DWords.dw0, &NewRsp);
1669 break;
1670 case IEMMODE_64BIT:
1671 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRip.u, &NewRsp);
1672 break;
1673 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1674 }
1675 if (rcStrict != VINF_SUCCESS)
1676 return rcStrict;
1677
1678 /* Check the new RSP before loading it. */
1679 /** @todo Should test this as the intel+amd pseudo code doesn't mention half
1680 * of it. The canonical test is performed here and for call. */
1681 if (enmEffOpSize != IEMMODE_64BIT)
1682 {
1683 if (NewRip.DWords.dw0 > pCtx->cs.u32Limit)
1684 {
1685 Log(("retn newrip=%llx - out of bounds (%x) -> #GP\n", NewRip.u, pCtx->cs.u32Limit));
1686 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1687 }
1688 }
1689 else
1690 {
1691 if (!IEM_IS_CANONICAL(NewRip.u))
1692 {
1693 Log(("retn newrip=%llx - not canonical -> #GP\n", NewRip.u));
1694 return iemRaiseNotCanonical(pIemCpu);
1695 }
1696 }
1697
1698 /* Commit it. */
1699 pCtx->rip = NewRip.u;
1700 pCtx->rsp = NewRsp.u;
1701 if (cbPop)
1702 iemRegAddToRsp(pCtx, cbPop);
1703
1704 return VINF_SUCCESS;
1705}
1706
1707
1708/**
1709 * Implements leave.
1710 *
1711 * We're doing this in C because messing with the stack registers is annoying
1712 * since they depend on SS attributes.
1713 *
1714 * @param enmEffOpSize The effective operand size.
1715 */
1716IEM_CIMPL_DEF_1(iemCImpl_leave, IEMMODE, enmEffOpSize)
1717{
1718 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1719
1720 /* Calculate the intermediate RSP from RBP and the stack attributes. */
1721 RTUINT64U NewRsp;
1722 if (pCtx->ss.Attr.n.u1Long)
1723 {
1724 /** @todo Check that LEAVE actually preserves the high EBP bits. */
1725 NewRsp.u = pCtx->rsp;
1726 NewRsp.Words.w0 = pCtx->bp;
1727 }
1728 else if (pCtx->ss.Attr.n.u1DefBig)
1729 NewRsp.u = pCtx->ebp;
1730 else
1731 NewRsp.u = pCtx->rbp;
1732
1733 /* Pop RBP according to the operand size. */
1734 VBOXSTRICTRC rcStrict;
1735 RTUINT64U NewRbp;
1736 switch (enmEffOpSize)
1737 {
1738 case IEMMODE_16BIT:
1739 NewRbp.u = pCtx->rbp;
1740 rcStrict = iemMemStackPopU16Ex(pIemCpu, &NewRbp.Words.w0, &NewRsp);
1741 break;
1742 case IEMMODE_32BIT:
1743 NewRbp.u = 0;
1744 rcStrict = iemMemStackPopU32Ex(pIemCpu, &NewRbp.DWords.dw0, &NewRsp);
1745 break;
1746 case IEMMODE_64BIT:
1747 rcStrict = iemMemStackPopU64Ex(pIemCpu, &NewRbp.u, &NewRsp);
1748 break;
1749 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1750 }
1751 if (rcStrict != VINF_SUCCESS)
1752 return rcStrict;
1753
1754
1755 /* Commit it. */
1756 pCtx->rbp = NewRbp.u;
1757 pCtx->rsp = NewRsp.u;
1758 iemRegAddToRip(pIemCpu, cbInstr);
1759
1760 return VINF_SUCCESS;
1761}
1762
1763
1764/**
1765 * Implements int3 and int XX.
1766 *
1767 * @param u8Int The interrupt vector number.
1768 * @param fIsBpInstr Whether the instruction is int3 (the breakpoint instruction).
1769 */
1770IEM_CIMPL_DEF_2(iemCImpl_int, uint8_t, u8Int, bool, fIsBpInstr)
1771{
1772 Assert(pIemCpu->cXcptRecursions == 0);
1773 return iemRaiseXcptOrInt(pIemCpu,
1774 cbInstr,
1775 u8Int,
1776 (fIsBpInstr ? IEM_XCPT_FLAGS_BP_INSTR : 0) | IEM_XCPT_FLAGS_T_SOFT_INT,
1777 0,
1778 0);
1779}
1780
1781
1782/**
1783 * Implements iret for real mode and V8086 mode.
1784 *
1785 * @param enmEffOpSize The effective operand size.
1786 */
1787IEM_CIMPL_DEF_1(iemCImpl_iret_real_v8086, IEMMODE, enmEffOpSize)
1788{
1789 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1790 NOREF(cbInstr);
1791
1792 /*
1793 * iret throws an exception if VME isn't enabled.
1794 */
1795 if ( pCtx->eflags.Bits.u1VM
1796 && !(pCtx->cr4 & X86_CR4_VME))
1797 return iemRaiseGeneralProtectionFault0(pIemCpu);
1798
1799 /*
1800 * Do the stack bits, but don't commit RSP before everything checks
1801 * out right.
1802 */
1803 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1804 VBOXSTRICTRC rcStrict;
1805 RTCPTRUNION uFrame;
1806 uint16_t uNewCs;
1807 uint32_t uNewEip;
1808 uint32_t uNewFlags;
1809 uint64_t uNewRsp;
1810 if (enmEffOpSize == IEMMODE_32BIT)
1811 {
1812 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1813 if (rcStrict != VINF_SUCCESS)
1814 return rcStrict;
1815 uNewEip = uFrame.pu32[0];
1816 uNewCs = (uint16_t)uFrame.pu32[1];
1817 uNewFlags = uFrame.pu32[2];
1818 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1819 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT
1820 | X86_EFL_RF /*| X86_EFL_VM*/ | X86_EFL_AC /*|X86_EFL_VIF*/ /*|X86_EFL_VIP*/
1821 | X86_EFL_ID;
1822 uNewFlags |= pCtx->eflags.u & (X86_EFL_VM | X86_EFL_VIF | X86_EFL_VIP | X86_EFL_1);
1823 }
1824 else
1825 {
1826 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1827 if (rcStrict != VINF_SUCCESS)
1828 return rcStrict;
1829 uNewEip = uFrame.pu16[0];
1830 uNewCs = uFrame.pu16[1];
1831 uNewFlags = uFrame.pu16[2];
1832 uNewFlags &= X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
1833 | X86_EFL_TF | X86_EFL_IF | X86_EFL_DF | X86_EFL_OF | X86_EFL_IOPL | X86_EFL_NT;
1834 uNewFlags |= pCtx->eflags.u & (UINT32_C(0xffff0000) | X86_EFL_1);
1835 /** @todo The intel pseudo code does not indicate what happens to
1836 * reserved flags. We just ignore them. */
1837 }
1838 /** @todo Check how this is supposed to work if sp=0xfffe. */
1839
1840 /*
1841 * Check the limit of the new EIP.
1842 */
1843 /** @todo Only the AMD pseudo code checks the limit here, what's
1844 * right? */
1845 if (uNewEip > pCtx->cs.u32Limit)
1846 return iemRaiseSelectorBounds(pIemCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1847
1848 /*
1849 * V8086 checks and flag adjustments
1850 */
1851 if (pCtx->eflags.Bits.u1VM)
1852 {
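 /* Three cases: with IOPL=3 the popped flags are used (IOPL preserved, RF
 cleared); otherwise, with a 16-bit operand size the popped IF is
 virtualized into VIF (provided TF is clear and there is no pending
 VIP+IF); anything else raises #GP(0). */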
1853 if (pCtx->eflags.Bits.u2IOPL == 3)
1854 {
1855 /* Preserve IOPL and clear RF. */
1856 uNewFlags &= ~(X86_EFL_IOPL | X86_EFL_RF);
1857 uNewFlags |= pCtx->eflags.u & (X86_EFL_IOPL);
1858 }
1859 else if ( enmEffOpSize == IEMMODE_16BIT
1860 && ( !(uNewFlags & X86_EFL_IF)
1861 || !pCtx->eflags.Bits.u1VIP )
1862 && !(uNewFlags & X86_EFL_TF) )
1863 {
1864 /* Move IF to VIF, clear RF and preserve IF and IOPL. */
1865 uNewFlags &= ~X86_EFL_VIF;
1866 uNewFlags |= (uNewFlags & X86_EFL_IF) << (19 - 9);
1867 uNewFlags &= ~(X86_EFL_IF | X86_EFL_IOPL | X86_EFL_RF);
1868 uNewFlags |= pCtx->eflags.u & (X86_EFL_IF | X86_EFL_IOPL);
1869 }
1870 else
1871 return iemRaiseGeneralProtectionFault0(pIemCpu);
1872 }
1873
1874 /*
1875 * Commit the operation.
1876 */
1877 rcStrict = iemMemStackPopCommitSpecial(pIemCpu, uFrame.pv, uNewRsp);
1878 if (rcStrict != VINF_SUCCESS)
1879 return rcStrict;
1880 pCtx->rip = uNewEip;
1881 pCtx->cs.Sel = uNewCs;
1882 pCtx->cs.ValidSel = uNewCs;
1883 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
1884 pCtx->cs.u64Base = (uint32_t)uNewCs << 4;
1885 /** @todo do we load attribs and limit as well? */
1886 Assert(uNewFlags & X86_EFL_1);
1887 pCtx->eflags.u = uNewFlags;
1888
1889 return VINF_SUCCESS;
1890}
1891
1892
1893/**
1894 * Implements iret for protected mode
1895 *
1896 * @param enmEffOpSize The effective operand size.
1897 */
1898IEM_CIMPL_DEF_1(iemCImpl_iret_prot, IEMMODE, enmEffOpSize)
1899{
1900 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1901 NOREF(cbInstr);
1902
1903 /*
1904 * Nested task return.
1905 */
1906 if (pCtx->eflags.Bits.u1NT)
1907 {
1908 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1909 }
1910 /*
1911 * Normal return.
1912 */
1913 else
1914 {
1915 /*
1916 * Do the stack bits, but don't commit RSP before everything checks
1917 * out right.
1918 */
1919 Assert(enmEffOpSize == IEMMODE_32BIT || enmEffOpSize == IEMMODE_16BIT);
1920 VBOXSTRICTRC rcStrict;
1921 RTCPTRUNION uFrame;
1922 uint16_t uNewCs;
1923 uint32_t uNewEip;
1924 uint32_t uNewFlags;
1925 uint64_t uNewRsp;
1926 if (enmEffOpSize == IEMMODE_32BIT)
1927 {
1928 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 12, &uFrame.pv, &uNewRsp);
1929 if (rcStrict != VINF_SUCCESS)
1930 return rcStrict;
1931 uNewEip = uFrame.pu32[0];
1932 uNewCs = (uint16_t)uFrame.pu32[1];
1933 uNewFlags = uFrame.pu32[2];
1934 }
1935 else
1936 {
1937 rcStrict = iemMemStackPopBeginSpecial(pIemCpu, 6, &uFrame.pv, &uNewRsp);
1938 if (rcStrict != VINF_SUCCESS)
1939 return rcStrict;
1940 uNewEip = uFrame.pu16[0];
1941 uNewCs = uFrame.pu16[1];
1942 uNewFlags = uFrame.pu16[2];
1943 }
1944 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R); /* don't use iemMemStackPopCommitSpecial here. */
1945 if (rcStrict != VINF_SUCCESS)
1946 return rcStrict;
1947
1948 /*
1949 * What are we returning to?
1950 */
1951 if ( (uNewFlags & X86_EFL_VM)
1952 && pIemCpu->uCpl == 0)
1953 {
1954 /* V8086 mode! */
1955 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
1956 }
1957 else
1958 {
1959 /*
1960 * Protected mode.
1961 */
1962 /* Read the CS descriptor. */
1963 if (!(uNewCs & (X86_SEL_MASK | X86_SEL_LDT)))
1964 {
1965 Log(("iret %04x:%08x -> invalid CS selector, #GP(0)\n", uNewCs, uNewEip));
1966 return iemRaiseGeneralProtectionFault0(pIemCpu);
1967 }
1968
1969 IEMSELDESC DescCS;
1970 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescCS, uNewCs);
1971 if (rcStrict != VINF_SUCCESS)
1972 {
1973 Log(("iret %04x:%08x - rcStrict=%Rrc when fetching CS\n", uNewCs, uNewEip, VBOXSTRICTRC_VAL(rcStrict)));
1974 return rcStrict;
1975 }
1976
1977 /* Must be a code descriptor. */
1978 if (!DescCS.Legacy.Gen.u1DescType)
1979 {
1980 Log(("iret %04x:%08x - CS is system segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1981 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1982 }
1983 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
1984 {
1985 Log(("iret %04x:%08x - not code segment (%#x) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u4Type));
1986 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1987 }
1988
1989 /* Privilege checks. */
1990 if ((uNewCs & X86_SEL_RPL) < pIemCpu->uCpl)
1991 {
1992 Log(("iret %04x:%08x - RPL < CPL (%d) -> #GP\n", uNewCs, uNewEip, pIemCpu->uCpl));
1993 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
1994 }
1995 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
1996 && (uNewCs & X86_SEL_RPL) < DescCS.Legacy.Gen.u2Dpl)
1997 {
1998 Log(("iret %04x:%08x - RPL < DPL (%d) -> #GP\n", uNewCs, uNewEip, DescCS.Legacy.Gen.u2Dpl));
1999 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
2000 }
2001
2002 /* Present? */
2003 if (!DescCS.Legacy.Gen.u1Present)
2004 {
2005 Log(("iret %04x:%08x - CS not present -> #NP\n", uNewCs, uNewEip));
2006 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewCs);
2007 }
2008
2009 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
2010
2011 /*
2012 * Return to outer level?
2013 */
2014 if ((uNewCs & X86_SEL_RPL) != pIemCpu->uCpl)
2015 {
2016 uint16_t uNewSS;
2017 uint32_t uNewESP;
2018 if (enmEffOpSize == IEMMODE_32BIT)
2019 {
2020 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 8, &uFrame.pv, &uNewRsp);
2021 if (rcStrict != VINF_SUCCESS)
2022 return rcStrict;
2023 uNewESP = uFrame.pu32[0];
2024 uNewSS = (uint16_t)uFrame.pu32[1];
2025 }
2026 else
2027 {
2028 rcStrict = iemMemStackPopContinueSpecial(pIemCpu, 4, &uFrame.pv, &uNewRsp);
2029 if (rcStrict != VINF_SUCCESS)
2030 return rcStrict;
2031 uNewESP = uFrame.pu16[0];
2032 uNewSS = uFrame.pu16[1];
2033 }
2034 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uFrame.pv, IEM_ACCESS_STACK_R);
2035 if (rcStrict != VINF_SUCCESS)
2036 return rcStrict;
2037
2038 /* Read the SS descriptor. */
2039 if (!(uNewSS & (X86_SEL_MASK | X86_SEL_LDT)))
2040 {
2041 Log(("iret %04x:%08x/%04x:%08x -> invalid SS selector, #GP(0)\n", uNewCs, uNewEip, uNewSS, uNewESP));
2042 return iemRaiseGeneralProtectionFault0(pIemCpu);
2043 }
2044
2045 IEMSELDESC DescSS;
2046 rcStrict = iemMemFetchSelDesc(pIemCpu, &DescSS, uNewSS);
2047 if (rcStrict != VINF_SUCCESS)
2048 {
2049 Log(("iret %04x:%08x/%04x:%08x - %Rrc when fetching SS\n",
2050 uNewCs, uNewEip, uNewSS, uNewESP, VBOXSTRICTRC_VAL(rcStrict)));
2051 return rcStrict;
2052 }
2053
2054 /* Privilege checks. */
2055 if ((uNewSS & X86_SEL_RPL) != (uNewCs & X86_SEL_RPL))
2056 {
2057 Log(("iret %04x:%08x/%04x:%08x -> SS.RPL != CS.RPL -> #GP\n", uNewCs, uNewEip, uNewSS, uNewESP));
2058 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2059 }
2060 if (DescSS.Legacy.Gen.u2Dpl != (uNewCs & X86_SEL_RPL))
2061 {
2062 Log(("iret %04x:%08x/%04x:%08x -> SS.DPL (%d) != CS.RPL -> #GP\n",
2063 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u2Dpl));
2064 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2065 }
2066
2067 /* Must be a writeable data segment descriptor. */
2068 if (!DescSS.Legacy.Gen.u1DescType)
2069 {
2070 Log(("iret %04x:%08x/%04x:%08x -> SS is system segment (%#x) -> #GP\n",
2071 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2072 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2073 }
2074 if ((DescSS.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE)
2075 {
2076 Log(("iret %04x:%08x/%04x:%08x - not writable data segment (%#x) -> #GP\n",
2077 uNewCs, uNewEip, uNewSS, uNewESP, DescSS.Legacy.Gen.u4Type));
2078 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewSS);
2079 }
2080
2081 /* Present? */
2082 if (!DescSS.Legacy.Gen.u1Present)
2083 {
2084 Log(("iret %04x:%08x/%04x:%08x -> SS not present -> #SS\n", uNewCs, uNewEip, uNewSS, uNewESP));
2085 return iemRaiseStackSelectorNotPresentBySelector(pIemCpu, uNewSS);
2086 }
2087
2088 uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
2089
2090 /* Check EIP. */
2091 if (uNewEip > cbLimitCS)
2092 {
2093 Log(("iret %04x:%08x/%04x:%08x -> EIP is out of bounds (%#x) -> #GP(0)\n",
2094 uNewCs, uNewEip, uNewSS, uNewESP, cbLimitCS));
2095 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2096 }
2097
2098 /*
2099 * Commit the changes, marking CS and SS accessed first since
2100 * that may fail.
2101 */
2102 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2103 {
2104 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2105 if (rcStrict != VINF_SUCCESS)
2106 return rcStrict;
2107 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2108 }
2109 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2110 {
2111 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewSS);
2112 if (rcStrict != VINF_SUCCESS)
2113 return rcStrict;
2114 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2115 }
2116
2117 pCtx->rip = uNewEip;
2118 pCtx->cs.Sel = uNewCs;
2119 pCtx->cs.ValidSel = uNewCs;
2120 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2121 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2122 pCtx->cs.u32Limit = cbLimitCS;
2123 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2124 pCtx->rsp = uNewESP;
2125 pCtx->ss.Sel = uNewSS;
2126 pCtx->ss.ValidSel = uNewSS;
2127 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
2128 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
2129 pCtx->ss.u32Limit = cbLimitSs;
2130 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
2131
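 /* Only the flags the current privilege level may change are taken from
 the popped EFLAGS image: IF requires CPL <= IOPL, while IOPL, VIF and
 VIP are only restored when returning from CPL 0. */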
2132 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2133 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2134 if (enmEffOpSize != IEMMODE_16BIT)
2135 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2136 if (pIemCpu->uCpl == 0)
2137 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2138 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2139 fEFlagsMask |= X86_EFL_IF;
2140 pCtx->eflags.u &= ~fEFlagsMask;
2141 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2142
2143 pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
2144 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->ds);
2145 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->es);
2146 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->fs);
2147 iemHlpAdjustSelectorForNewCpl(uNewCs & X86_SEL_RPL, &pCtx->gs);
2148
2149 /* Done! */
2150
2151 }
2152 /*
2153 * Return to the same level.
2154 */
2155 else
2156 {
2157 /* Check EIP. */
2158 if (uNewEip > cbLimitCS)
2159 {
2160 Log(("iret %04x:%08x - EIP is out of bounds (%#x) -> #GP(0)\n", uNewCs, uNewEip, cbLimitCS));
2161 return iemRaiseSelectorBoundsBySelector(pIemCpu, uNewCs);
2162 }
2163
2164 /*
2165 * Commit the changes, marking CS first since it may fail.
2166 */
2167 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2168 {
2169 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uNewCs);
2170 if (rcStrict != VINF_SUCCESS)
2171 return rcStrict;
2172 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2173 }
2174
2175 pCtx->rip = uNewEip;
2176 pCtx->cs.Sel = uNewCs;
2177 pCtx->cs.ValidSel = uNewCs;
2178 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
2179 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
2180 pCtx->cs.u32Limit = cbLimitCS;
2181 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
2182 pCtx->rsp = uNewRsp;
2183
2184 uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
2185 | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
2186 if (enmEffOpSize != IEMMODE_16BIT)
2187 fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
2188 if (pIemCpu->uCpl == 0)
2189 fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
2190 else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
2191 fEFlagsMask |= X86_EFL_IF;
2192 pCtx->eflags.u &= ~fEFlagsMask;
2193 pCtx->eflags.u |= fEFlagsMask & uNewFlags;
2194 /* Done! */
2195 }
2196 }
2197 }
2198
2199 return VINF_SUCCESS;
2200}
2201
2202
2203/**
2204 * Implements iret for long mode
2205 *
2206 * @param enmEffOpSize The effective operand size.
2207 */
2208IEM_CIMPL_DEF_1(iemCImpl_iret_long, IEMMODE, enmEffOpSize)
2209{
2210 //PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2211 //VBOXSTRICTRC rcStrict;
2212 //uint64_t uNewRsp;
2213
2214 NOREF(pIemCpu); NOREF(cbInstr); NOREF(enmEffOpSize);
2215 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
2216}
2217
2218
2219/**
2220 * Implements iret.
2221 *
2222 * @param enmEffOpSize The effective operand size.
2223 */
2224IEM_CIMPL_DEF_1(iemCImpl_iret, IEMMODE, enmEffOpSize)
2225{
2226 /*
2227 * Call a mode specific worker.
2228 */
2229 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2230 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2231 return IEM_CIMPL_CALL_1(iemCImpl_iret_real_v8086, enmEffOpSize);
2232 if (IEM_IS_LONG_MODE(pIemCpu))
2233 return IEM_CIMPL_CALL_1(iemCImpl_iret_long, enmEffOpSize);
2234
2235 return IEM_CIMPL_CALL_1(iemCImpl_iret_prot, enmEffOpSize);
2236}
2237
2238
2239/**
2240 * Common worker for 'pop SReg', 'mov SReg, GReg' and 'lXs GReg, reg/mem'.
2241 *
2242 * @param iSegReg The segment register number (valid).
2243 * @param uSel The new selector value.
2244 */
2245IEM_CIMPL_DEF_2(iemCImpl_LoadSReg, uint8_t, iSegReg, uint16_t, uSel)
2246{
2247 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2248 uint16_t *pSel = iemSRegRef(pIemCpu, iSegReg);
2249 PCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iSegReg);
2250
2251 Assert(iSegReg <= X86_SREG_GS && iSegReg != X86_SREG_CS);
2252
2253 /*
2254 * Real mode and V8086 mode are easy.
2255 */
2256 if ( pIemCpu->enmCpuMode == IEMMODE_16BIT
2257 && IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2258 {
2259 *pSel = uSel;
2260 pHid->u64Base = (uint32_t)uSel << 4;
2261#if 0 /* AMD Volume 2, chapter 4.1 - "real mode segmentation" - states that limit and attributes are untouched. */
2262 /** @todo Does the CPU actually load limits and attributes in the
2263 * real/V8086 mode segment load case? It doesn't for CS in far
2264 * jumps... Affects unreal mode. */
2265 pHid->u32Limit = 0xffff;
2266 pHid->Attr.u = 0;
2267 pHid->Attr.n.u1Present = 1;
2268 pHid->Attr.n.u1DescType = 1;
2269 pHid->Attr.n.u4Type = iSegReg != X86_SREG_CS
2270 ? X86_SEL_TYPE_RW
2271 : X86_SEL_TYPE_READ | X86_SEL_TYPE_CODE;
2272#endif
2273 iemRegAddToRip(pIemCpu, cbInstr);
2274 return VINF_SUCCESS;
2275 }
2276
2277 /*
2278 * Protected mode.
2279 *
2280 * Check if it's a null segment selector value first, that's OK for DS, ES,
2281 * FS and GS. If not null, then we have to load and parse the descriptor.
2282 */
2283 if (!(uSel & (X86_SEL_MASK | X86_SEL_LDT)))
2284 {
2285 if (iSegReg == X86_SREG_SS)
2286 {
2287 if ( pIemCpu->enmCpuMode != IEMMODE_64BIT
2288 || pIemCpu->uCpl != 0
2289 || uSel != 0) /** @todo We cannot 'mov ss, 3' in 64-bit kernel mode, can we? */
2290 {
2291 Log(("load sreg -> invalid stack selector, #GP(0)\n", uSel));
2292 return iemRaiseGeneralProtectionFault0(pIemCpu);
2293 }
2294
2295 /* In 64-bit kernel mode, the stack can be 0 because of the way
2296 interrupts are dispatched when in kernel ctx. Just load the
2297 selector value into the register and leave the hidden bits
2298 as is. */
2299 *pSel = uSel;
2300 iemRegAddToRip(pIemCpu, cbInstr);
2301 return VINF_SUCCESS;
2302 }
2303
2304 *pSel = uSel; /* Not RPL, remember :-) */
2305 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2306 && iSegReg != X86_SREG_FS
2307 && iSegReg != X86_SREG_GS)
2308 {
2309 /** @todo figure out what this actually does, it works. Needs
2310 * testcase! */
2311 pHid->Attr.u = 0;
2312 pHid->Attr.n.u1Present = 1;
2313 pHid->Attr.n.u1Long = 1;
2314 pHid->Attr.n.u4Type = X86_SEL_TYPE_RW;
2315 pHid->Attr.n.u2Dpl = 3;
2316 pHid->u32Limit = 0;
2317 pHid->u64Base = 0;
2318 }
2319 else
2320 {
2321 pHid->Attr.u = 0;
2322 pHid->u32Limit = 0;
2323 pHid->u64Base = 0;
2324 }
2325 iemRegAddToRip(pIemCpu, cbInstr);
2326 return VINF_SUCCESS;
2327 }
2328
2329 /* Fetch the descriptor. */
2330 IEMSELDESC Desc;
2331 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uSel);
2332 if (rcStrict != VINF_SUCCESS)
2333 return rcStrict;
2334
2335 /* Check GPs first. */
2336 if (!Desc.Legacy.Gen.u1DescType)
2337 {
2338 Log(("load sreg %d - system selector (%#x) -> #GP\n", iSegReg, uSel, Desc.Legacy.Gen.u4Type));
2339 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2340 }
2341 if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
2342 {
2343 if ( (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
2344 || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
2345 {
2346 Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
2347 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2348 }
2349 if ((uSel & X86_SEL_RPL) != pIemCpu->uCpl)
2350 {
2351 Log(("load sreg SS, %#x - RPL and CPL (%d) differs -> #GP\n", uSel, pIemCpu->uCpl));
2352 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2353 }
2354 if (Desc.Legacy.Gen.u2Dpl != pIemCpu->uCpl)
2355 {
2356 Log(("load sreg SS, %#x - DPL (%d) and CPL (%d) differs -> #GP\n", uSel, Desc.Legacy.Gen.u2Dpl, pIemCpu->uCpl));
2357 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2358 }
2359 }
2360 else
2361 {
2362 if ((Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
2363 {
2364 Log(("load sreg%u, %#x - execute only segment -> #GP\n", iSegReg, uSel));
2365 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2366 }
2367 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2368 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
2369 {
2370#if 0 /* this is what intel says. */
2371 if ( (uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
2372 && pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2373 {
2374 Log(("load sreg%u, %#x - both RPL (%d) and CPL (%d) are greater than DPL (%d) -> #GP\n",
2375 iSegReg, uSel, (uSel & X86_SEL_RPL), pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2376 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2377 }
2378#else /* this is what makes more sense. */
2379 if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
2380 {
2381 Log(("load sreg%u, %#x - RPL (%d) is greater than DPL (%d) -> #GP\n",
2382 iSegReg, uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl));
2383 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2384 }
2385 if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
2386 {
2387 Log(("load sreg%u, %#x - CPL (%d) is greater than DPL (%d) -> #GP\n",
2388 iSegReg, uSel, pIemCpu->uCpl, Desc.Legacy.Gen.u2Dpl));
2389 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
2390 }
2391#endif
2392 }
2393 }
2394
2395 /* Is it there? */
2396 if (!Desc.Legacy.Gen.u1Present)
2397 {
2398 Log(("load sreg%d,%#x - segment not present -> #NP\n", iSegReg, uSel));
2399 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uSel);
2400 }
2401
2402 /* The base and limit. */
2403 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
2404 uint64_t u64Base;
2405 if ( pIemCpu->enmCpuMode == IEMMODE_64BIT
2406 && iSegReg < X86_SREG_FS)
2407 u64Base = 0;
2408 else
2409 u64Base = X86DESC_BASE(&Desc.Legacy);
2410
2411 /*
2412 * Ok, everything checked out fine. Now set the accessed bit before
2413 * committing the result into the registers.
2414 */
2415 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
2416 {
2417 rcStrict = iemMemMarkSelDescAccessed(pIemCpu, uSel);
2418 if (rcStrict != VINF_SUCCESS)
2419 return rcStrict;
2420 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
2421 }
2422
2423 /* commit */
2424 *pSel = uSel;
2425 pHid->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2426 pHid->u32Limit = cbLimit;
2427 pHid->u64Base = u64Base;
2428
2429 /** @todo check if the hidden bits are loaded correctly for 64-bit
2430 * mode. */
2431
2432 iemRegAddToRip(pIemCpu, cbInstr);
2433 return VINF_SUCCESS;
2434}
2435
2436
2437/**
2438 * Implements 'mov SReg, r/m'.
2439 *
2440 * @param iSegReg The segment register number (valid).
2441 * @param uSel The new selector value.
2442 */
2443IEM_CIMPL_DEF_2(iemCImpl_load_SReg, uint8_t, iSegReg, uint16_t, uSel)
2444{
2445 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2446 if (rcStrict == VINF_SUCCESS)
2447 {
2448 if (iSegReg == X86_SREG_SS)
2449 {
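 /* A load into SS inhibits interrupts until after the next instruction
 has executed, so the following stack-pointer update cannot be
 interrupted in the middle of the stack switch. */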
2450 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2451 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2452 }
2453 }
2454 return rcStrict;
2455}
2456
2457
2458/**
2459 * Implements 'pop SReg'.
2460 *
2461 * @param iSegReg The segment register number (valid).
2462 * @param enmEffOpSize The effective operand size (valid).
2463 */
2464IEM_CIMPL_DEF_2(iemCImpl_pop_Sreg, uint8_t, iSegReg, IEMMODE, enmEffOpSize)
2465{
2466 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2467 VBOXSTRICTRC rcStrict;
2468
2469 /*
2470 * Read the selector off the stack and join paths with mov ss, reg.
2471 */
2472 RTUINT64U TmpRsp;
2473 TmpRsp.u = pCtx->rsp;
2474 switch (enmEffOpSize)
2475 {
2476 case IEMMODE_16BIT:
2477 {
2478 uint16_t uSel;
2479 rcStrict = iemMemStackPopU16Ex(pIemCpu, &uSel, &TmpRsp);
2480 if (rcStrict == VINF_SUCCESS)
2481 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2482 break;
2483 }
2484
2485 case IEMMODE_32BIT:
2486 {
2487 uint32_t u32Value;
2488 rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
2489 if (rcStrict == VINF_SUCCESS)
2490 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u32Value);
2491 break;
2492 }
2493
2494 case IEMMODE_64BIT:
2495 {
2496 uint64_t u64Value;
2497 rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
2498 if (rcStrict == VINF_SUCCESS)
2499 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, (uint16_t)u64Value);
2500 break;
2501 }
2502 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2503 }
2504
2505 /*
2506 * Commit the stack on success.
2507 */
2508 if (rcStrict == VINF_SUCCESS)
2509 {
2510 pCtx->rsp = TmpRsp.u;
2511 if (iSegReg == X86_SREG_SS)
2512 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
2513 }
2514 return rcStrict;
2515}
2516
2517
2518/**
2519 * Implements lgs, lfs, les, lds & lss.
2520 */
2521IEM_CIMPL_DEF_5(iemCImpl_load_SReg_Greg,
2522 uint16_t, uSel,
2523 uint64_t, offSeg,
2524 uint8_t, iSegReg,
2525 uint8_t, iGReg,
2526 IEMMODE, enmEffOpSize)
2527{
2528 /*PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);*/
2529 VBOXSTRICTRC rcStrict;
2530
2531 /*
2532 * Use iemCImpl_LoadSReg to do the tricky segment register loading.
2533 */
2534 /** @todo verify and test that mov, pop and lXs works the segment
2535 * register loading in the exact same way. */
2536 rcStrict = IEM_CIMPL_CALL_2(iemCImpl_LoadSReg, iSegReg, uSel);
2537 if (rcStrict == VINF_SUCCESS)
2538 {
2539 switch (enmEffOpSize)
2540 {
2541 case IEMMODE_16BIT:
2542 *(uint16_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2543 break;
2544 case IEMMODE_32BIT:
2545 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2546 break;
2547 case IEMMODE_64BIT:
2548 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = offSeg;
2549 break;
2550 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2551 }
2552 }
2553
2554 return rcStrict;
2555}
2556
2557
2558/**
2559 * Implements lgdt.
2560 *
2561 * @param iEffSeg The segment of the new gdtr contents.
2562 * @param GCPtrEffSrc The address of the new gdtr contents.
2563 * @param enmEffOpSize The effective operand size.
2564 */
2565IEM_CIMPL_DEF_3(iemCImpl_lgdt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2566{
2567 if (pIemCpu->uCpl != 0)
2568 return iemRaiseGeneralProtectionFault0(pIemCpu);
2569 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2570
2571 /*
2572 * Fetch the limit and base address.
2573 */
2574 uint16_t cbLimit;
2575 RTGCPTR GCPtrBase;
2576 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2577 if (rcStrict == VINF_SUCCESS)
2578 {
2579 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2580 rcStrict = CPUMSetGuestGDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2581 else
2582 {
2583 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2584 pCtx->gdtr.cbGdt = cbLimit;
2585 pCtx->gdtr.pGdt = GCPtrBase;
2586 }
2587 if (rcStrict == VINF_SUCCESS)
2588 iemRegAddToRip(pIemCpu, cbInstr);
2589 }
2590 return rcStrict;
2591}
2592
2593
2594/**
2595 * Implements lidt.
2596 *
2597 * @param iEffSeg The segment of the new idtr contents.
2598 * @param GCPtrEffSrc The address of the new idtr contents.
2599 * @param enmEffOpSize The effective operand size.
2600 */
2601IEM_CIMPL_DEF_3(iemCImpl_lidt, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc, IEMMODE, enmEffOpSize)
2602{
2603 if (pIemCpu->uCpl != 0)
2604 return iemRaiseGeneralProtectionFault0(pIemCpu);
2605 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
2606
2607 /*
2608 * Fetch the limit and base address.
2609 */
2610 uint16_t cbLimit;
2611 RTGCPTR GCPtrBase;
2612 VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pIemCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
2613 if (rcStrict == VINF_SUCCESS)
2614 {
2615 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2616 rcStrict = CPUMSetGuestIDTR(IEMCPU_TO_VMCPU(pIemCpu), GCPtrBase, cbLimit);
2617 else
2618 {
2619 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2620 pCtx->idtr.cbIdt = cbLimit;
2621 pCtx->idtr.pIdt = GCPtrBase;
2622 }
2623 if (rcStrict == VINF_SUCCESS)
2624 iemRegAddToRip(pIemCpu, cbInstr);
2625 }
2626 return rcStrict;
2627}
2628
2629
2630/**
2631 * Implements lldt.
2632 *
2633 * @param uNewLdt The new LDT selector value.
2634 */
2635IEM_CIMPL_DEF_1(iemCImpl_lldt, uint16_t, uNewLdt)
2636{
2637 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2638
2639 /*
2640 * Check preconditions.
2641 */
2642 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2643 {
2644 Log(("lldt %04x - real or v8086 mode -> #GP(0)\n", uNewLdt));
2645 return iemRaiseUndefinedOpcode(pIemCpu);
2646 }
2647 if (pIemCpu->uCpl != 0)
2648 {
2649 Log(("lldt %04x - CPL is %d -> #GP(0)\n", uNewLdt, pIemCpu->uCpl));
2650 return iemRaiseGeneralProtectionFault0(pIemCpu);
2651 }
2652 if (uNewLdt & X86_SEL_LDT)
2653 {
2654 Log(("lldt %04x - LDT selector -> #GP\n", uNewLdt));
2655 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewLdt);
2656 }
2657
2658 /*
2659 * Now, loading a NULL selector is easy.
2660 */
2661 if ((uNewLdt & X86_SEL_MASK) == 0)
2662 {
2663 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt));
2664 /** @todo check if the actual value is loaded or if it's always 0. */
2665 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2666 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), 0);
2667 else
2668 pCtx->ldtr.Sel = 0;
2669 pCtx->ldtr.ValidSel = 0;
2670 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2671 pCtx->ldtr.Attr.u = 0;
2672 pCtx->ldtr.u64Base = 0;
2673 pCtx->ldtr.u32Limit = 0;
2674
2675 iemRegAddToRip(pIemCpu, cbInstr);
2676 return VINF_SUCCESS;
2677 }
2678
2679 /*
2680 * Read the descriptor.
2681 */
2682 IEMSELDESC Desc;
2683 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewLdt);
2684 if (rcStrict != VINF_SUCCESS)
2685 return rcStrict;
2686
2687 /* Check GPs first. */
2688 if (Desc.Legacy.Gen.u1DescType)
2689 {
2690 Log(("lldt %#x - not system selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2691 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2692 }
2693 if (Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
2694 {
2695 Log(("lldt %#x - not LDT selector (type %x) -> #GP\n", uNewLdt, Desc.Legacy.Gen.u4Type));
2696 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2697 }
2698 uint64_t u64Base;
2699 if (!IEM_IS_LONG_MODE(pIemCpu))
2700 u64Base = X86DESC_BASE(&Desc.Legacy);
2701 else
2702 {
2703 if (Desc.Long.Gen.u5Zeros)
2704 {
2705 Log(("lldt %#x - u5Zeros=%#x -> #GP\n", uNewLdt, Desc.Long.Gen.u5Zeros));
2706 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2707 }
2708
2709 u64Base = X86DESC64_BASE(&Desc.Long);
2710 if (!IEM_IS_CANONICAL(u64Base))
2711 {
2712 Log(("lldt %#x - non-canonical base address %#llx -> #GP\n", uNewLdt, u64Base));
2713 return iemRaiseGeneralProtectionFault(pIemCpu, uNewLdt & X86_SEL_MASK);
2714 }
2715 }
2716
2717 /* NP */
2718 if (!Desc.Legacy.Gen.u1Present)
2719 {
2720 Log(("lldt %#x - segment not present -> #NP\n", uNewLdt));
2721 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewLdt);
2722 }
2723
2724 /*
2725 * It checks out alright, update the registers.
2726 */
2727/** @todo check if the actual value is loaded or if the RPL is dropped */
2728 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2729 CPUMSetGuestLDTR(IEMCPU_TO_VMCPU(pIemCpu), uNewLdt & X86_SEL_MASK);
2730 else
2731 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK;
2732 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK;
2733 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
2734 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2735 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
2736 pCtx->ldtr.u64Base = u64Base;
2737
2738 iemRegAddToRip(pIemCpu, cbInstr);
2739 return VINF_SUCCESS;
2740}
2741
2742
2743/**
2744 * Implements ltr.
2745 *
2746 * @param uNewTr The new task register (TR) selector value.
2747 */
2748IEM_CIMPL_DEF_1(iemCImpl_ltr, uint16_t, uNewTr)
2749{
2750 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2751
2752 /*
2753 * Check preconditions.
2754 */
2755 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
2756 {
2757 Log(("ltr %04x - real or v8086 mode -> #GP(0)\n", uNewTr));
2758 return iemRaiseUndefinedOpcode(pIemCpu);
2759 }
2760 if (pIemCpu->uCpl != 0)
2761 {
2762 Log(("ltr %04x - CPL is %d -> #GP(0)\n", uNewTr, pIemCpu->uCpl));
2763 return iemRaiseGeneralProtectionFault0(pIemCpu);
2764 }
2765 if (uNewTr & X86_SEL_LDT)
2766 {
2767 Log(("ltr %04x - LDT selector -> #GP\n", uNewTr));
2768 return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewTr);
2769 }
2770 if ((uNewTr & X86_SEL_MASK) == 0)
2771 {
2772 Log(("ltr %04x - NULL selector -> #GP(0)\n", uNewTr));
2773 return iemRaiseGeneralProtectionFault0(pIemCpu);
2774 }
2775
2776 /*
2777 * Read the descriptor.
2778 */
2779 IEMSELDESC Desc;
2780 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pIemCpu, &Desc, uNewTr);
2781 if (rcStrict != VINF_SUCCESS)
2782 return rcStrict;
2783
2784 /* Check GPs first. */
2785 if (Desc.Legacy.Gen.u1DescType)
2786 {
2787 Log(("ltr %#x - not system selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2788 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2789 }
2790 if ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL /* same as AMD64_SEL_TYPE_SYS_TSS_AVAIL */
2791 && ( Desc.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
2792 || IEM_IS_LONG_MODE(pIemCpu)) )
2793 {
2794 Log(("ltr %#x - not an available TSS selector (type %x) -> #GP\n", uNewTr, Desc.Legacy.Gen.u4Type));
2795 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2796 }
2797 uint64_t u64Base;
2798 if (!IEM_IS_LONG_MODE(pIemCpu))
2799 u64Base = X86DESC_BASE(&Desc.Legacy);
2800 else
2801 {
2802 if (Desc.Long.Gen.u5Zeros)
2803 {
2804 Log(("ltr %#x - u5Zeros=%#x -> #GP\n", uNewTr, Desc.Long.Gen.u5Zeros));
2805 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2806 }
2807
2808 u64Base = X86DESC64_BASE(&Desc.Long);
2809 if (!IEM_IS_CANONICAL(u64Base))
2810 {
2811 Log(("ltr %#x - non-canonical base address %#llx -> #GP\n", uNewTr, u64Base));
2812 return iemRaiseGeneralProtectionFault(pIemCpu, uNewTr & X86_SEL_MASK);
2813 }
2814 }
2815
2816 /* NP */
2817 if (!Desc.Legacy.Gen.u1Present)
2818 {
2819 Log(("ltr %#x - segment not present -> #NP\n", uNewTr));
2820 return iemRaiseSelectorNotPresentBySelector(pIemCpu, uNewTr);
2821 }
2822
2823 /*
2824 * Set it busy.
2825 * Note! Intel says this should lock down the whole descriptor, but we'll
2826 * restrict ourselves to 32-bit for now due to lack of inline
2827 * assembly and such.
2828 */
2829 void *pvDesc;
2830 rcStrict = iemMemMap(pIemCpu, &pvDesc, 8, UINT8_MAX, pCtx->gdtr.pGdt + (uNewTr & X86_SEL_MASK), IEM_ACCESS_DATA_RW);
2831 if (rcStrict != VINF_SUCCESS)
2832 return rcStrict;
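 /* ASMAtomicBitSet requires a 32-bit aligned pointer, so realign the
 pointer and adjust the bit offset so that descriptor bit 41 (the TSS
 busy bit of the type field) is the one that gets set. */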
2833 switch ((uintptr_t)pvDesc & 3)
2834 {
2835 case 0: ASMAtomicBitSet(pvDesc, 40 + 1); break;
2836 case 1: ASMAtomicBitSet((uint8_t *)pvDesc + 3, 40 + 1 - 24); break;
2837 case 2: ASMAtomicBitSet((uint8_t *)pvDesc + 2, 40 + 1 - 16); break;
2838 case 3: ASMAtomicBitSet((uint8_t *)pvDesc + 1, 40 + 1 - 8); break;
2839 }
2840 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvDesc, IEM_ACCESS_DATA_RW);
2841 if (rcStrict != VINF_SUCCESS)
2842 return rcStrict;
2843 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
2844
2845 /*
2846 * It checks out alright, update the registers.
2847 */
2848/** @todo check if the actual value is loaded or if the RPL is dropped */
2849 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2850 CPUMSetGuestTR(IEMCPU_TO_VMCPU(pIemCpu), uNewTr & X86_SEL_MASK);
2851 else
2852 pCtx->tr.Sel = uNewTr & X86_SEL_MASK;
2853 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK;
2854 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
2855 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
2856 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
2857 pCtx->tr.u64Base = u64Base;
2858
2859 iemRegAddToRip(pIemCpu, cbInstr);
2860 return VINF_SUCCESS;
2861}
2862
2863
2864/**
2865 * Implements mov GReg,CRx.
2866 *
2867 * @param iGReg The general register to store the CRx value in.
2868 * @param iCrReg The CRx register to read (valid).
2869 */
2870IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Cd, uint8_t, iGReg, uint8_t, iCrReg)
2871{
2872 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2873 if (pIemCpu->uCpl != 0)
2874 return iemRaiseGeneralProtectionFault0(pIemCpu);
2875 Assert(!pCtx->eflags.Bits.u1VM);
2876
2877 /* read it */
2878 uint64_t crX;
2879 switch (iCrReg)
2880 {
2881 case 0: crX = pCtx->cr0; break;
2882 case 2: crX = pCtx->cr2; break;
2883 case 3: crX = pCtx->cr3; break;
2884 case 4: crX = pCtx->cr4; break;
2885 case 8:
2886 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2887 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
2888 else
2889 crX = 0xff;
2890 break;
2891 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
2892 }
2893
2894 /* store it */
2895 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
2896 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = crX;
2897 else
2898 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)crX;
2899
2900 iemRegAddToRip(pIemCpu, cbInstr);
2901 return VINF_SUCCESS;
2902}
2903
2904
2905/**
2906 * Used to implement 'mov CRx,GReg' and 'lmsw r/m16'.
2907 *
2908 * @param iCrReg The CRx register to write (valid).
2909 * @param uNewCrX The new value.
2910 */
2911IEM_CIMPL_DEF_2(iemCImpl_load_CrX, uint8_t, iCrReg, uint64_t, uNewCrX)
2912{
2913 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
2914 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
2915 VBOXSTRICTRC rcStrict;
2916 int rc;
2917
2918 /*
2919 * Try to store it.
2920 * Unfortunately, CPUM only does a tiny bit of the work.
2921 */
2922 switch (iCrReg)
2923 {
2924 case 0:
2925 {
2926 /*
2927 * Perform checks.
2928 */
2929 uint64_t const uOldCrX = pCtx->cr0;
2930 uNewCrX |= X86_CR0_ET; /* hardcoded */
2931
2932 /* Check for reserved bits. */
2933 uint32_t const fValid = X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS
2934 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM
2935 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG;
2936 if (uNewCrX & ~(uint64_t)fValid)
2937 {
2938 Log(("Trying to set reserved CR0 bits: NewCR0=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
2939 return iemRaiseGeneralProtectionFault0(pIemCpu);
2940 }
2941
2942 /* Check for invalid combinations. */
2943 if ( (uNewCrX & X86_CR0_PG)
2944 && !(uNewCrX & X86_CR0_PE) )
2945 {
2946 Log(("Trying to set CR0.PG without CR0.PE\n"));
2947 return iemRaiseGeneralProtectionFault0(pIemCpu);
2948 }
2949
2950 if ( !(uNewCrX & X86_CR0_CD)
2951 && (uNewCrX & X86_CR0_NW) )
2952 {
2953 Log(("Trying to clear CR0.CD while leaving CR0.NW set\n"));
2954 return iemRaiseGeneralProtectionFault0(pIemCpu);
2955 }
2956
2957 /* Long mode consistency checks. */
2958 if ( (uNewCrX & X86_CR0_PG)
2959 && !(uOldCrX & X86_CR0_PG)
2960 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2961 {
2962 if (!(pCtx->cr4 & X86_CR4_PAE))
2963 {
2964 Log(("Trying to enabled long mode paging without CR4.PAE set\n"));
2965 return iemRaiseGeneralProtectionFault0(pIemCpu);
2966 }
2967 if (pCtx->cs.Attr.n.u1Long)
2968 {
2969 Log(("Trying to enabled long mode paging with a long CS descriptor loaded.\n"));
2970 return iemRaiseGeneralProtectionFault0(pIemCpu);
2971 }
2972 }
2973
2974 /** @todo check reserved PDPTR bits as AMD states. */
2975
2976 /*
2977 * Change CR0.
2978 */
2979 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
2980 {
2981 rc = CPUMSetGuestCR0(pVCpu, uNewCrX);
2982 AssertRCSuccessReturn(rc, RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_3);
2983 }
2984 else
2985 pCtx->cr0 = uNewCrX;
2986 Assert(pCtx->cr0 == uNewCrX);
2987
2988 /*
2989 * Change EFER.LMA if entering or leaving long mode.
2990 */
2991 if ( (uNewCrX & X86_CR0_PG) != (uOldCrX & X86_CR0_PG)
2992 && (pCtx->msrEFER & MSR_K6_EFER_LME) )
2993 {
2994 uint64_t NewEFER = pCtx->msrEFER;
2995 if (uNewCrX & X86_CR0_PG)
2996 NewEFER |= MSR_K6_EFER_LME;
2997 else
2998 NewEFER &= ~MSR_K6_EFER_LME;
2999
3000 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3001 CPUMSetGuestEFER(pVCpu, NewEFER);
3002 else
3003 pCtx->msrEFER = NewEFER;
3004 Assert(pCtx->msrEFER == NewEFER);
3005 }
3006
3007 /*
3008 * Inform PGM.
3009 */
3010 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3011 {
3012 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE))
3013 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) )
3014 {
3015 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3016 AssertRCReturn(rc, rc);
3017 /* ignore informational status codes */
3018 }
3019 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3020 /** @todo Status code management. */
3021 }
3022 else
3023 rcStrict = VINF_SUCCESS;
3024 break;
3025 }
3026
3027 /*
3028 * CR2 can be changed without any restrictions.
3029 */
3030 case 2:
3031 pCtx->cr2 = uNewCrX;
3032 rcStrict = VINF_SUCCESS;
3033 break;
3034
3035 /*
3036 * CR3 is relatively simple, although AMD and Intel have different
3037 * accounts of how setting reserved bits is handled. We take Intel's
3038 * word for the lower bits and AMD's for the high bits (63:52).
3039 */
3040 /** @todo Testcase: Setting reserved bits in CR3, especially before
3041 * enabling paging. */
3042 case 3:
3043 {
3044 /* check / mask the value. */
3045 if (uNewCrX & UINT64_C(0xfff0000000000000))
3046 {
3047 Log(("Trying to load CR3 with invalid high bits set: %#llx\n", uNewCrX));
3048 return iemRaiseGeneralProtectionFault0(pIemCpu);
3049 }
3050
3051 uint64_t fValid;
3052 if ( (pCtx->cr4 & X86_CR4_PAE)
3053 && (pCtx->msrEFER & MSR_K6_EFER_LME))
3054 fValid = UINT64_C(0x000ffffffffff014);
3055 else if (pCtx->cr4 & X86_CR4_PAE)
3056 fValid = UINT64_C(0xfffffff4);
3057 else
3058 fValid = UINT64_C(0xfffff014);
3059 if (uNewCrX & ~fValid)
3060 {
3061 Log(("Automatically clearing reserved bits in CR3 load: NewCR3=%#llx ClearedBits=%#llx\n",
3062 uNewCrX, uNewCrX & ~fValid));
3063 uNewCrX &= fValid;
3064 }
3065
3066 /** @todo If we're in PAE mode we should check the PDPTRs for
3067 * invalid bits. */
3068
3069 /* Make the change. */
3070 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3071 {
3072 rc = CPUMSetGuestCR3(pVCpu, uNewCrX);
3073 AssertRCSuccessReturn(rc, rc);
3074 }
3075 else
3076 pCtx->cr3 = uNewCrX;
3077
3078 /* Inform PGM. */
3079 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3080 {
3081 if (pCtx->cr0 & X86_CR0_PG)
3082 {
3083 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
3084 AssertRCReturn(rc, rc);
3085 /* ignore informational status codes */
3086 /** @todo status code management */
3087 }
3088 }
3089 rcStrict = VINF_SUCCESS;
3090 break;
3091 }
3092
3093 /*
3094 * CR4 is a bit more tedious as there are bits which cannot be cleared
3095 * under some circumstances and such.
3096 */
3097 case 4:
3098 {
3099 uint64_t const uOldCrX = pCtx->cr4;
3100
3101 /* reserved bits */
3102 uint32_t fValid = X86_CR4_VME | X86_CR4_PVI
3103 | X86_CR4_TSD | X86_CR4_DE
3104 | X86_CR4_PSE | X86_CR4_PAE
3105 | X86_CR4_MCE | X86_CR4_PGE
3106 | X86_CR4_PCE | X86_CR4_OSFSXR
3107 | X86_CR4_OSXMMEEXCPT;
3108 //if (xxx)
3109 // fValid |= X86_CR4_VMXE;
3110 //if (xxx)
3111 // fValid |= X86_CR4_OSXSAVE;
3112 if (uNewCrX & ~(uint64_t)fValid)
3113 {
3114 Log(("Trying to set reserved CR4 bits: NewCR4=%#llx InvalidBits=%#llx\n", uNewCrX, uNewCrX & ~(uint64_t)fValid));
3115 return iemRaiseGeneralProtectionFault0(pIemCpu);
3116 }
3117
3118 /* long mode checks. */
3119 if ( (uOldCrX & X86_CR4_PAE)
3120 && !(uNewCrX & X86_CR4_PAE)
3121 && (pCtx->msrEFER & MSR_K6_EFER_LMA) )
3122 {
3123 Log(("Trying to set clear CR4.PAE while long mode is active\n"));
3124 return iemRaiseGeneralProtectionFault0(pIemCpu);
3125 }
3126
3127
3128 /*
3129 * Change it.
3130 */
3131 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3132 {
3133 rc = CPUMSetGuestCR4(pVCpu, uNewCrX);
3134 AssertRCSuccessReturn(rc, rc);
3135 }
3136 else
3137 pCtx->cr4 = uNewCrX;
3138 Assert(pCtx->cr4 == uNewCrX);
3139
3140 /*
3141 * Notify SELM and PGM.
3142 */
3143 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3144 {
3145 /* SELM - VME may change things wrt to the TSS shadowing. */
3146 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME)
3147 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
3148
3149 /* PGM - flushing and mode. */
3150 if ( (uNewCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE))
3151 != (uOldCrX & (X86_CR4_PGE | X86_CR4_PAE | X86_CR4_PSE)) )
3152 {
3153 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */);
3154 AssertRCReturn(rc, rc);
3155 /* ignore informational status codes */
3156 }
3157 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
3158 /** @todo Status code management. */
3159 }
3160 else
3161 rcStrict = VINF_SUCCESS;
3162 break;
3163 }
3164
3165 /*
3166 * CR8 maps to the APIC TPR.
3167 */
3168 case 8:
3169 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3170 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED); /** @todo implement CR8 reading and writing. */
3171 else
3172 rcStrict = VINF_SUCCESS;
3173 break;
3174
3175 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3176 }
3177
3178 /*
3179 * Advance the RIP on success.
3180 */
3181 /** @todo Status code management. */
3182 if (rcStrict == VINF_SUCCESS)
3183 iemRegAddToRip(pIemCpu, cbInstr);
3184 return rcStrict;
3185
3186}
3187
3188
3189/**
3190 * Implements mov CRx,GReg.
3191 *
3192 * @param iCrReg The CRx register to write (valid).
3193 * @param iGReg The general register to load the CRx value from.
3194 */
3195IEM_CIMPL_DEF_2(iemCImpl_mov_Cd_Rd, uint8_t, iCrReg, uint8_t, iGReg)
3196{
3197 if (pIemCpu->uCpl != 0)
3198 return iemRaiseGeneralProtectionFault0(pIemCpu);
3199 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3200
3201 /*
3202 * Read the new value from the source register and call common worker.
3203 */
3204 uint64_t uNewCrX;
3205 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3206 uNewCrX = iemGRegFetchU64(pIemCpu, iGReg);
3207 else
3208 uNewCrX = iemGRegFetchU32(pIemCpu, iGReg);
3209 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, iCrReg, uNewCrX);
3210}
3211
3212
3213/**
3214 * Implements 'LMSW r/m16'
3215 *
3216 * @param u16NewMsw The new value.
3217 */
3218IEM_CIMPL_DEF_1(iemCImpl_lmsw, uint16_t, u16NewMsw)
3219{
3220 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3221
3222 if (pIemCpu->uCpl != 0)
3223 return iemRaiseGeneralProtectionFault0(pIemCpu);
3224 Assert(!pCtx->eflags.Bits.u1VM);
3225
3226 /*
3227 * Compose the new CR0 value and call common worker.
3228 */
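 /* Note that PE is not masked out of the old CR0 value, so LMSW can set
 PE but never clear it; only MP, EM and TS can be cleared this way. */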
3229 uint64_t uNewCr0 = pCtx->cr0 & ~(X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3230 uNewCr0 |= u16NewMsw & (X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS);
3231 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3232}
3233
3234
3235/**
3236 * Implements 'CLTS'.
3237 */
3238IEM_CIMPL_DEF_0(iemCImpl_clts)
3239{
3240 if (pIemCpu->uCpl != 0)
3241 return iemRaiseGeneralProtectionFault0(pIemCpu);
3242
3243 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3244 uint64_t uNewCr0 = pCtx->cr0;
3245 uNewCr0 &= ~X86_CR0_TS;
3246 return IEM_CIMPL_CALL_2(iemCImpl_load_CrX, /*cr*/ 0, uNewCr0);
3247}
3248
3249
3250/**
3251 * Implements mov GReg,DRx.
3252 *
3253 * @param iGReg The general register to store the DRx value in.
3254 * @param iDrReg The DRx register to read (0-7).
3255 */
3256IEM_CIMPL_DEF_2(iemCImpl_mov_Rd_Dd, uint8_t, iGReg, uint8_t, iDrReg)
3257{
3258 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3259
3260 /*
3261 * Check preconditions.
3262 */
3263
3264 /* Raise GPs. */
3265 if (pIemCpu->uCpl != 0)
3266 return iemRaiseGeneralProtectionFault0(pIemCpu);
3267 Assert(!pCtx->eflags.Bits.u1VM);
3268
3269 if ( (iDrReg == 4 || iDrReg == 5)
3270 && (pCtx->cr4 & X86_CR4_DE) )
3271 {
3272 Log(("mov r%u,dr%u: CR4.DE=1 -> #GP(0)\n", iGReg, iDrReg));
3273 return iemRaiseGeneralProtectionFault0(pIemCpu);
3274 }
3275
3276 /* Raise #DB if general access detect is enabled. */
3277 if (pCtx->dr[7] & X86_DR7_GD)
3278 {
3279 Log(("mov r%u,dr%u: DR7.GD=1 -> #DB\n", iGReg, iDrReg));
3280 return iemRaiseDebugException(pIemCpu);
3281 }
3282
3283 /*
3284 * Read the debug register and store it in the specified general register.
3285 */
3286 uint64_t drX;
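 /* DR4 and DR5 alias DR6 and DR7 here, since the CR4.DE=1 case was
 rejected above; the architecturally fixed bits of DR6/DR7 are forced
 in the value read back. */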
3287 switch (iDrReg)
3288 {
3289 case 0: drX = pCtx->dr[0]; break;
3290 case 1: drX = pCtx->dr[1]; break;
3291 case 2: drX = pCtx->dr[2]; break;
3292 case 3: drX = pCtx->dr[3]; break;
3293 case 6:
3294 case 4:
3295 drX = pCtx->dr[6];
3296 drX &= ~RT_BIT_32(12);
3297 drX |= UINT32_C(0xffff0ff0);
3298 break;
3299 case 7:
3300 case 5:
3301 drX = pCtx->dr[7];
3302 drX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3303 drX |= RT_BIT_32(10);
3304 break;
3305 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* call checks */
3306 }
3307
3308 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3309 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = drX;
3310 else
3311 *(uint64_t *)iemGRegRef(pIemCpu, iGReg) = (uint32_t)drX;
3312
3313 iemRegAddToRip(pIemCpu, cbInstr);
3314 return VINF_SUCCESS;
3315}
3316
3317
3318/**
3319 * Implements mov DRx,GReg.
3320 *
3321 * @param iDrReg The DRx register to write (valid).
3322 * @param iGReg The general register to load the DRx value from.
3323 */
3324IEM_CIMPL_DEF_2(iemCImpl_mov_Dd_Rd, uint8_t, iDrReg, uint8_t, iGReg)
3325{
3326 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3327
3328 /*
3329 * Check preconditions.
3330 */
3331 if (pIemCpu->uCpl != 0)
3332 return iemRaiseGeneralProtectionFault0(pIemCpu);
3333 Assert(!pCtx->eflags.Bits.u1VM);
3334
3335 if ( (iDrReg == 4 || iDrReg == 5)
3336 && (pCtx->cr4 & X86_CR4_DE) )
3337 {
3338 Log(("mov dr%u,r%u: CR4.DE=1 -> #GP(0)\n", iDrReg, iGReg));
3339 return iemRaiseGeneralProtectionFault0(pIemCpu);
3340 }
3341
3342 /* Raise #DB if general access detect is enabled. */
3343 /** @todo is \#DB/DR7.GD raised before any reserved high bits in DR7/DR6
3344 * \#GP? */
3345 if (pCtx->dr[7] & X86_DR7_GD)
3346 {
3347 Log(("mov dr%u,r%u: DR7.GD=1 -> #DB\n", iDrReg, iGReg));
3348 return iemRaiseDebugException(pIemCpu);
3349 }
3350
3351 /*
3352 * Read the new value from the source register.
3353 */
3354 uint64_t uNewDrX;
3355 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
3356 uNewDrX = iemGRegFetchU64(pIemCpu, iGReg);
3357 else
3358 uNewDrX = iemGRegFetchU32(pIemCpu, iGReg);
3359
3360 /*
3361 * Adjust it.
3362 */
3363 switch (iDrReg)
3364 {
3365 case 0:
3366 case 1:
3367 case 2:
3368 case 3:
3369 /* nothing to adjust */
3370 break;
3371
3372 case 6:
3373 case 4:
3374 if (uNewDrX & UINT64_C(0xffffffff00000000))
3375 {
3376 Log(("mov dr%u,%#llx: DR6 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3377 return iemRaiseGeneralProtectionFault0(pIemCpu);
3378 }
3379 uNewDrX &= ~RT_BIT_32(12);
3380 uNewDrX |= UINT32_C(0xffff0ff0);
3381 break;
3382
3383 case 7:
3384 case 5:
3385 if (uNewDrX & UINT64_C(0xffffffff00000000))
3386 {
3387 Log(("mov dr%u,%#llx: DR7 high bits are not zero -> #GP(0)\n", iDrReg, uNewDrX));
3388 return iemRaiseGeneralProtectionFault0(pIemCpu);
3389 }
3390 uNewDrX &= ~(RT_BIT_32(11) | RT_BIT_32(12) | RT_BIT_32(14) | RT_BIT_32(15));
3391 uNewDrX |= RT_BIT_32(10);
3392 break;
3393
3394 IEM_NOT_REACHED_DEFAULT_CASE_RET();
3395 }
3396
3397 /*
3398 * Do the actual setting.
3399 */
3400 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3401 {
3402 int rc = CPUMSetGuestDRx(IEMCPU_TO_VMCPU(pIemCpu), iDrReg, uNewDrX);
3403 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_INTERNAL_ERROR : rc);
3404 }
3405 else
3406 pCtx->dr[iDrReg] = uNewDrX;
3407
3408 iemRegAddToRip(pIemCpu, cbInstr);
3409 return VINF_SUCCESS;
3410}
3411
3412
3413/**
3414 * Implements 'INVLPG m'.
3415 *
3416 * @param GCPtrPage The effective address of the page to invalidate.
3417 * @remarks Updates the RIP.
3418 */
3419IEM_CIMPL_DEF_1(iemCImpl_invlpg, RTGCPTR, GCPtrPage)
3420{
3421 /* ring-0 only. */
3422 if (pIemCpu->uCpl != 0)
3423 return iemRaiseGeneralProtectionFault0(pIemCpu);
3424 Assert(!pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1VM);
3425
3426 int rc = PGMInvalidatePage(IEMCPU_TO_VMCPU(pIemCpu), GCPtrPage);
3427 iemRegAddToRip(pIemCpu, cbInstr);
3428
3429 if ( rc == VINF_SUCCESS
3430 || rc == VINF_PGM_SYNC_CR3)
3431 return VINF_SUCCESS;
3432 Log(("PGMInvalidatePage(%RGv) -> %Rrc\n", rc));
3433 return rc;
3434}
3435
3436
3437/**
3438 * Implements RDTSC.
3439 */
3440IEM_CIMPL_DEF_0(iemCImpl_rdtsc)
3441{
3442 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3443
3444 /*
3445 * Check preconditions.
3446 */
3447 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_TSC))
3448 return iemRaiseUndefinedOpcode(pIemCpu);
3449
3450 if ( (pCtx->cr4 & X86_CR4_TSD)
3451 && pIemCpu->uCpl != 0)
3452 {
3453 Log(("rdtsc: CR4.TSD and CPL=%u -> #GP(0)\n", pIemCpu->uCpl));
3454 return iemRaiseGeneralProtectionFault0(pIemCpu);
3455 }
3456
3457 /*
3458 * Do the job.
3459 */
3460 uint64_t uTicks = TMCpuTickGet(IEMCPU_TO_VMCPU(pIemCpu));
3461 pCtx->rax = (uint32_t)uTicks;
3462 pCtx->rdx = uTicks >> 32;
3463#ifdef IEM_VERIFICATION_MODE
3464 pIemCpu->fIgnoreRaxRdx = true;
3465#endif
3466
3467 iemRegAddToRip(pIemCpu, cbInstr);
3468 return VINF_SUCCESS;
3469}
3470
3471
3472/**
3473 * Implements RDMSR.
3474 */
3475IEM_CIMPL_DEF_0(iemCImpl_rdmsr)
3476{
3477 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3478
3479 /*
3480 * Check preconditions.
3481 */
3482 if (!IEM_IS_INTEL_CPUID_FEATURE_PRESENT_EDX(X86_CPUID_FEATURE_EDX_MSR))
3483 return iemRaiseUndefinedOpcode(pIemCpu);
3484 if (pIemCpu->uCpl != 0)
3485 return iemRaiseGeneralProtectionFault0(pIemCpu);
3486
3487 /*
3488 * Do the job.
3489 */
3490 RTUINT64U uValue;
3491 int rc = CPUMQueryGuestMsr(IEMCPU_TO_VMCPU(pIemCpu), pCtx->ecx, &uValue.u);
3492 if (rc != VINF_SUCCESS)
3493 {
3494 AssertMsgReturn(rc == VERR_CPUM_RAISE_GP_0, ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_STATUS);
3495 return iemRaiseGeneralProtectionFault0(pIemCpu);
3496 }
3497
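 /* The MSR value is returned in EDX:EAX; the upper halves of RAX and RDX
 are cleared by the 32-bit assignments below. */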
3498 pCtx->rax = uValue.au32[0];
3499 pCtx->rdx = uValue.au32[1];
3500
3501 iemRegAddToRip(pIemCpu, cbInstr);
3502 return VINF_SUCCESS;
3503}
3504
3505
3506/**
3507 * Implements 'IN eAX, port'.
3508 *
3509 * @param u16Port The source port.
3510 * @param cbReg The register size.
3511 */
3512IEM_CIMPL_DEF_2(iemCImpl_in, uint16_t, u16Port, uint8_t, cbReg)
3513{
3514 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3515
3516 /*
3517 * CPL check
3518 */
3519 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, cbReg);
3520 if (rcStrict != VINF_SUCCESS)
3521 return rcStrict;
3522
3523 /*
3524 * Perform the I/O.
3525 */
3526 uint32_t u32Value;
3527 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3528 rcStrict = IOMIOPortRead(IEMCPU_TO_VM(pIemCpu), u16Port, &u32Value, cbReg);
3529 else
3530 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, cbReg);
3531 if (IOM_SUCCESS(rcStrict))
3532 {
3533 switch (cbReg)
3534 {
3535 case 1: pCtx->al = (uint8_t)u32Value; break;
3536 case 2: pCtx->ax = (uint16_t)u32Value; break;
3537 case 4: pCtx->rax = u32Value; break;
3538 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3539 }
3540 iemRegAddToRip(pIemCpu, cbInstr);
3541 pIemCpu->cPotentialExits++;
3542 }
3543 /** @todo massage rcStrict. */
3544 return rcStrict;
3545}
3546
3547
3548/**
3549 * Implements 'IN eAX, DX'.
3550 *
3551 * @param cbReg The register size.
3552 */
3553IEM_CIMPL_DEF_1(iemCImpl_in_eAX_DX, uint8_t, cbReg)
3554{
3555 return IEM_CIMPL_CALL_2(iemCImpl_in, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3556}
3557
3558
3559/**
3560 * Implements 'OUT port, eAX'.
3561 *
3562 * @param u16Port The destination port.
3563 * @param cbReg The register size.
3564 */
3565IEM_CIMPL_DEF_2(iemCImpl_out, uint16_t, u16Port, uint8_t, cbReg)
3566{
3567 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3568
3569 /*
3570 * CPL check
3571 */
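 /* This duplicates the check done by iemHlpCheckPortIOPermission; the I/O
 permission bitmap lookup is not implemented yet (see the @todo below). */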
3572 if ( (pCtx->cr0 & X86_CR0_PE)
3573 && ( pIemCpu->uCpl > pCtx->eflags.Bits.u2IOPL
3574 || pCtx->eflags.Bits.u1VM) )
3575 {
3576 /** @todo I/O port permission bitmap check */
3577 AssertFailedReturn(VERR_IEM_ASPECT_NOT_IMPLEMENTED);
3578 }
3579
3580 /*
3581 * Perform the I/O.
3582 */
3583 uint32_t u32Value;
3584 switch (cbReg)
3585 {
3586 case 1: u32Value = pCtx->al; break;
3587 case 2: u32Value = pCtx->ax; break;
3588 case 4: u32Value = pCtx->eax; break;
3589 default: AssertFailedReturn(VERR_INTERNAL_ERROR_3);
3590 }
3591 VBOXSTRICTRC rc;
3592 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
3593 rc = IOMIOPortWrite(IEMCPU_TO_VM(pIemCpu), u16Port, u32Value, cbReg);
3594 else
3595 rc = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, cbReg);
3596 if (IOM_SUCCESS(rc))
3597 {
3598 iemRegAddToRip(pIemCpu, cbInstr);
3599 pIemCpu->cPotentialExits++;
3600 /** @todo massage rc. */
3601 }
3602 return rc;
3603}
3604
3605
3606/**
3607 * Implements 'OUT DX, eAX'.
3608 *
3609 * @param cbReg The register size.
3610 */
3611IEM_CIMPL_DEF_1(iemCImpl_out_DX_eAX, uint8_t, cbReg)
3612{
3613 return IEM_CIMPL_CALL_2(iemCImpl_out, pIemCpu->CTX_SUFF(pCtx)->dx, cbReg);
3614}
3615
3616
3617/**
3618 * Implements 'CLI'.
3619 */
3620IEM_CIMPL_DEF_0(iemCImpl_cli)
3621{
3622 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3623
3624 if (pCtx->cr0 & X86_CR0_PE)
3625 {
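 /* Protected mode: IF may be cleared when CPL <= IOPL; at CPL 3 with
 CR4.PVI set only the virtual interrupt flag (VIF) is cleared;
 otherwise #GP(0). */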
3626 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3627 if (!pCtx->eflags.Bits.u1VM)
3628 {
3629 if (pIemCpu->uCpl <= uIopl)
3630 pCtx->eflags.Bits.u1IF = 0;
3631 else if ( pIemCpu->uCpl == 3
3632 && (pCtx->cr4 & X86_CR4_PVI) )
3633 pCtx->eflags.Bits.u1VIF = 0;
3634 else
3635 return iemRaiseGeneralProtectionFault0(pIemCpu);
3636 }
3637 /* V8086 */
3638 else if (uIopl == 3)
3639 pCtx->eflags.Bits.u1IF = 0;
3640 else if ( uIopl < 3
3641 && (pCtx->cr4 & X86_CR4_VME) )
3642 pCtx->eflags.Bits.u1VIF = 0;
3643 else
3644 return iemRaiseGeneralProtectionFault0(pIemCpu);
3645 }
3646 /* real mode */
3647 else
3648 pCtx->eflags.Bits.u1IF = 0;
3649 iemRegAddToRip(pIemCpu, cbInstr);
3650 return VINF_SUCCESS;
3651}
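
/*
 * Editor's summary of the CLI cases handled above (derived from the code itself):
 *
 *   Mode             Condition                 Effect
 *   ---------------  ------------------------  --------
 *   real mode        always                    IF := 0
 *   protected mode   CPL <= IOPL               IF := 0
 *   protected mode   CPL == 3 && CR4.PVI       VIF := 0
 *   protected mode   otherwise                 #GP(0)
 *   V8086 mode       IOPL == 3                 IF := 0
 *   V8086 mode       IOPL < 3 && CR4.VME       VIF := 0
 *   V8086 mode       otherwise                 #GP(0)
 *
 * STI below mirrors this table, setting the bits instead and additionally requiring
 * VIP to be clear for the virtual-interrupt cases.
 */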
3652
3653
3654/**
3655 * Implements 'STI'.
3656 */
3657IEM_CIMPL_DEF_0(iemCImpl_sti)
3658{
3659 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3660
3661 if (pCtx->cr0 & X86_CR0_PE)
3662 {
3663 uint8_t const uIopl = pCtx->eflags.Bits.u2IOPL;
3664 if (!pCtx->eflags.Bits.u1VM)
3665 {
3666 if (pIemCpu->uCpl <= uIopl)
3667 pCtx->eflags.Bits.u1IF = 1;
3668 else if ( pIemCpu->uCpl == 3
3669 && (pCtx->cr4 & X86_CR4_PVI)
3670 && !pCtx->eflags.Bits.u1VIP )
3671 pCtx->eflags.Bits.u1VIF = 1;
3672 else
3673 return iemRaiseGeneralProtectionFault0(pIemCpu);
3674 }
3675 /* V8086 */
3676 else if (uIopl == 3)
3677 pCtx->eflags.Bits.u1IF = 1;
3678 else if ( uIopl < 3
3679 && (pCtx->cr4 & X86_CR4_VME)
3680 && !pCtx->eflags.Bits.u1VIP )
3681 pCtx->eflags.Bits.u1VIF = 1;
3682 else
3683 return iemRaiseGeneralProtectionFault0(pIemCpu);
3684 }
3685 /* real mode */
3686 else
3687 pCtx->eflags.Bits.u1IF = 1;
3688
3689 iemRegAddToRip(pIemCpu, cbInstr);
3690 /** @todo don't do this unconditionally... */
3691 EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
3692 return VINF_SUCCESS;
3693}
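
/*
 * Editor's sketch for the @todo above: architecturally the one-instruction interrupt
 * shadow only matters when STI actually raises IF from 0 to 1, so the call could be
 * made conditional roughly as below. fEflOld is a hypothetical copy of EFLAGS taken
 * before the update; this is a sketch, not the actual fix.
 */
#if 0
    if (   !(fEflOld & X86_EFL_IF)
        && pCtx->eflags.Bits.u1IF)
        EMSetInhibitInterruptsPC(IEMCPU_TO_VMCPU(pIemCpu), pCtx->rip);
#endif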
3694
3695
3696/**
3697 * Implements 'HLT'.
3698 */
3699IEM_CIMPL_DEF_0(iemCImpl_hlt)
3700{
3701 if (pIemCpu->uCpl != 0)
3702 return iemRaiseGeneralProtectionFault0(pIemCpu);
3703 iemRegAddToRip(pIemCpu, cbInstr);
3704 return VINF_EM_HALT;
3705}
3706
3707
3708/**
3709 * Implements 'CPUID'.
3710 */
3711IEM_CIMPL_DEF_0(iemCImpl_cpuid)
3712{
3713 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3714
3715 CPUMGetGuestCpuId(IEMCPU_TO_VMCPU(pIemCpu), pCtx->eax, &pCtx->eax, &pCtx->ebx, &pCtx->ecx, &pCtx->edx);
3716 pCtx->rax &= UINT32_C(0xffffffff);
3717 pCtx->rbx &= UINT32_C(0xffffffff);
3718 pCtx->rcx &= UINT32_C(0xffffffff);
3719 pCtx->rdx &= UINT32_C(0xffffffff);
3720
3721 iemRegAddToRip(pIemCpu, cbInstr);
3722 return VINF_SUCCESS;
3723}
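
/*
 * Editor's note: the masking above reproduces the architectural behaviour that CPUID
 * clears bits 63:32 of RAX/RBX/RCX/RDX in 64-bit mode. For reference, a guest would
 * typically query leaf 0 as below (illustration only, GCC inline assembly, not IEM code):
 */
#if 0
    uint32_t uMaxLeaf, uVendor1, uVendor2, uVendor3;
    __asm__ __volatile__("cpuid"
                         : "=a" (uMaxLeaf), "=b" (uVendor1), "=c" (uVendor3), "=d" (uVendor2)
                         : "a" (0));
    /* uVendor1,uVendor2,uVendor3 spell the vendor string, e.g. "Genu","ineI","ntel". */
#endif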
3724
3725
3726/**
3727 * Implements 'AAD'.
3728 *
3729 * @param bImm The immediate operand; the base, 10 for the standard AAD encoding.
3730 */
3731IEM_CIMPL_DEF_1(iemCImpl_aad, uint8_t, bImm)
3732{
3733 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3734
3735 uint16_t const ax = pCtx->ax;
3736 uint8_t const al = (uint8_t)ax + (uint8_t)(ax >> 8) * bImm;
3737 pCtx->ax = al;
3738 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3739 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3740 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3741
3742 iemRegAddToRip(pIemCpu, cbInstr);
3743 return VINF_SUCCESS;
3744}
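
/*
 * Editor's worked example of the AAD arithmetic above: with the standard base 10 and
 * AX=0x0205 (AH=2, AL=5), AL becomes 5 + 2*10 = 25 = 0x19 and AH is cleared, so
 * AX=0x0019; SF/ZF/PF are then taken from the 8-bit result while OF/AF/CF stay undefined.
 */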
3745
3746
3747/**
3748 * Implements 'AAM'.
3749 *
3750 * @param bImm The immediate operand. Cannot be 0.
3751 */
3752IEM_CIMPL_DEF_1(iemCImpl_aam, uint8_t, bImm)
3753{
3754 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3755 Assert(bImm != 0); /* #DE on 0 is handled in the decoder. */
3756
3757 uint16_t const ax = pCtx->ax;
3758 uint8_t const al = (uint8_t)ax % bImm;
3759 uint8_t const ah = (uint8_t)ax / bImm;
3760 pCtx->ax = (ah << 8) + al;
3761 iemHlpUpdateArithEFlagsU8(pIemCpu, al,
3762 X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF,
3763 X86_EFL_OF | X86_EFL_AF | X86_EFL_CF);
3764
3765 iemRegAddToRip(pIemCpu, cbInstr);
3766 return VINF_SUCCESS;
3767}
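
/*
 * Editor's worked example of the AAM arithmetic above: with the standard base 10 and
 * AL=0x19 (25), AH becomes 25/10 = 2 and AL becomes 25%10 = 5, so AX=0x0205 - the
 * inverse of the AAD example. A base of 0 would #DE, which the decoder handles.
 */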
3768
3769
3770
3771
3772/*
3773 * Instantiate the various string operation combinations.
3774 */
3775#define OP_SIZE 8
3776#define ADDR_SIZE 16
3777#include "IEMAllCImplStrInstr.cpp.h"
3778#define OP_SIZE 8
3779#define ADDR_SIZE 32
3780#include "IEMAllCImplStrInstr.cpp.h"
3781#define OP_SIZE 8
3782#define ADDR_SIZE 64
3783#include "IEMAllCImplStrInstr.cpp.h"
3784
3785#define OP_SIZE 16
3786#define ADDR_SIZE 16
3787#include "IEMAllCImplStrInstr.cpp.h"
3788#define OP_SIZE 16
3789#define ADDR_SIZE 32
3790#include "IEMAllCImplStrInstr.cpp.h"
3791#define OP_SIZE 16
3792#define ADDR_SIZE 64
3793#include "IEMAllCImplStrInstr.cpp.h"
3794
3795#define OP_SIZE 32
3796#define ADDR_SIZE 16
3797#include "IEMAllCImplStrInstr.cpp.h"
3798#define OP_SIZE 32
3799#define ADDR_SIZE 32
3800#include "IEMAllCImplStrInstr.cpp.h"
3801#define OP_SIZE 32
3802#define ADDR_SIZE 64
3803#include "IEMAllCImplStrInstr.cpp.h"
3804
3805#define OP_SIZE 64
3806#define ADDR_SIZE 32
3807#include "IEMAllCImplStrInstr.cpp.h"
3808#define OP_SIZE 64
3809#define ADDR_SIZE 64
3810#include "IEMAllCImplStrInstr.cpp.h"
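
/*
 * Editor's note: the block above stamps out one set of string instruction workers per
 * (operand size, address size) pair by re-including a template that consumes and
 * #undef's OP_SIZE/ADDR_SIZE. The 64-bit operand / 16-bit address pair is absent
 * because the address-size prefix in 64-bit mode selects 32-bit, never 16-bit,
 * addressing. A generic sketch of the technique ("strtmpl.h" and the names are
 * hypothetical; the real IEMAllCImplStrInstr.cpp.h differs in detail):
 */
#if 0
/* strtmpl.h would contain something along the lines of:
 *     static void RT_CONCAT4(exampleWorkerOp, OP_SIZE, Addr, ADDR_SIZE)(void)
 *     {
 *         ...body parameterized on OP_SIZE and ADDR_SIZE...
 *     }
 *     #undef OP_SIZE
 *     #undef ADDR_SIZE
 */
# define OP_SIZE   8
# define ADDR_SIZE 16
# include "strtmpl.h"   /* emits exampleWorkerOp8Addr16() */
#endif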
3811
3812
3813/**
3814 * Implements 'FINIT' and 'FNINIT'.
3815 *
3816 * @param fCheckXcpts Whether to check for unmasked pending exceptions or
3817 * not.
3818 */
3819IEM_CIMPL_DEF_1(iemCImpl_finit, bool, fCheckXcpts)
3820{
3821 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3822
3823 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
3824 return iemRaiseDeviceNotAvailable(pIemCpu);
3825
3826 NOREF(fCheckXcpts); /** @todo trigger pending exceptions:
3827 if (fCheckXcpts && TODO )
3828 return iemRaiseMathFault(pIemCpu);
3829 */
3830
3831 if (iemFRegIsFxSaveFormat(pIemCpu))
3832 {
3833 pCtx->fpu.FCW = 0x37f;
3834 pCtx->fpu.FSW = 0;
3835 pCtx->fpu.FTW = 0x00; /* 0 - empty. */
3836 pCtx->fpu.FPUDP = 0;
3837 pCtx->fpu.DS = 0; //??
3838 pCtx->fpu.FPUIP = 0;
3839 pCtx->fpu.CS = 0; //??
3840 pCtx->fpu.FOP = 0;
3841 }
3842 else
3843 {
3844 PX86FPUSTATE pFpu = (PX86FPUSTATE)&pCtx->fpu;
3845 pFpu->FCW = 0x37f;
3846 pFpu->FSW = 0;
3847 pFpu->FTW = 0xffff; /* 11 - empty */
3848 pFpu->FPUOO = 0; //??
3849 pFpu->FPUOS = 0; //??
3850 pFpu->FPUIP = 0;
3851 pFpu->CS = 0; //??
3852 pFpu->FOP = 0;
3853 }
3854
3855 iemRegAddToRip(pIemCpu, cbInstr);
3856 return VINF_SUCCESS;
3857}
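
/*
 * Editor's decode of the 0x37f reset value used above:
 *   bits 0..5   = 111111b : IM, DM, ZM, OM, UM, PM - all FPU exceptions masked
 *   bit 6       = 1       : reserved, reads as 1
 *   bits 8..9   = 11b     : precision control = double extended (64-bit significand)
 *   bits 10..11 = 00b     : rounding control = round to nearest
 * i.e. the architected FINIT/FNINIT control word.
 */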
3858
3859
3860/**
3861 * Implements 'FXSAVE'.
3862 *
3863 * @param iEffSeg The effective segment.
3864 * @param GCPtrEff The address of the image.
3865 * @param enmEffOpSize The operand size (only REX.W really matters).
3866 */
3867IEM_CIMPL_DEF_3(iemCImpl_fxsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3868{
3869 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3870
3871 /*
3872 * Raise exceptions.
3873 */
3874 if (pCtx->cr0 & X86_CR0_EM)
3875 return iemRaiseUndefinedOpcode(pIemCpu);
3876 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3877 return iemRaiseDeviceNotAvailable(pIemCpu);
3878 if (GCPtrEff & 15)
3879 {
3880        /** @todo CPU/VM detection possible! \#AC might not be signalled for
3881         * all/any misalignment sizes, Intel says it's an implementation detail. */
3882 if ( (pCtx->cr0 & X86_CR0_AM)
3883 && pCtx->eflags.Bits.u1AC
3884 && pIemCpu->uCpl == 3)
3885 return iemRaiseAlignmentCheckException(pIemCpu);
3886 return iemRaiseGeneralProtectionFault0(pIemCpu);
3887 }
3888 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3889
3890 /*
3891 * Access the memory.
3892 */
3893 void *pvMem512;
3894 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3895 if (rcStrict != VINF_SUCCESS)
3896 return rcStrict;
3897 PX86FXSTATE pDst = (PX86FXSTATE)pvMem512;
3898
3899 /*
3900 * Store the registers.
3901 */
3902    /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
3903     * specific whether MXCSR and XMM0-XMM7 are saved. */
3904
3905 /* common for all formats */
3906 pDst->FCW = pCtx->fpu.FCW;
3907 pDst->FSW = pCtx->fpu.FSW;
3908 pDst->FTW = pCtx->fpu.FTW & UINT16_C(0xff);
3909 pDst->FOP = pCtx->fpu.FOP;
3910 pDst->MXCSR = pCtx->fpu.MXCSR;
3911 pDst->MXCSR_MASK = pCtx->fpu.MXCSR_MASK;
3912 for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
3913 {
3914 /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
3915 * them for now... */
3916 pDst->aRegs[i].au32[0] = pCtx->fpu.aRegs[i].au32[0];
3917 pDst->aRegs[i].au32[1] = pCtx->fpu.aRegs[i].au32[1];
3918 pDst->aRegs[i].au32[2] = pCtx->fpu.aRegs[i].au32[2] & UINT32_C(0xffff);
3919 pDst->aRegs[i].au32[3] = 0;
3920 }
3921
3922 /* FPU IP, CS, DP and DS. */
3923 /** @todo FPU IP, CS, DP and DS cannot be implemented correctly without extra
3924 * state information. :-/
3925 * Storing zeros now to prevent any potential leakage of host info. */
3926 pDst->FPUIP = 0;
3927 pDst->CS = 0;
3928 pDst->Rsrvd1 = 0;
3929 pDst->FPUDP = 0;
3930 pDst->DS = 0;
3931 pDst->Rsrvd2 = 0;
3932
3933 /* XMM registers. */
3934 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
3935 || pIemCpu->enmCpuMode != IEMMODE_64BIT
3936 || pIemCpu->uCpl != 0)
3937 {
3938 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
3939 for (uint32_t i = 0; i < cXmmRegs; i++)
3940 pDst->aXMM[i] = pCtx->fpu.aXMM[i];
3941 /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
3942 * right? */
3943 }
3944
3945 /*
3946 * Commit the memory.
3947 */
3948 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
3949 if (rcStrict != VINF_SUCCESS)
3950 return rcStrict;
3951
3952 iemRegAddToRip(pIemCpu, cbInstr);
3953 return VINF_SUCCESS;
3954}
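
/*
 * Editor's reference for the 512-byte image written above (32-bit FXSAVE layout; the
 * REX.W form stores a 64-bit IP/DP instead):
 *   0x000  FCW(2) FSW(2) abridged-FTW(1) rsvd(1) FOP(2)
 *   0x008  FPU IP(4) CS(2) rsvd(2)
 *   0x010  FPU DP(4) DS(2) rsvd(2)
 *   0x018  MXCSR(4) MXCSR_MASK(4)
 *   0x020  ST0/MM0..ST7/MM7, 8 x 16 bytes (10 bytes of data + 6 reserved each)
 *   0x0a0  XMM0..XMM15, 16 x 16 bytes (only XMM0..XMM7 outside 64-bit mode)
 *   0x1a0  reserved / available to software
 */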
3955
3956
3957/**
3958 * Implements 'FXRSTOR'.
3959 *
 * @param iEffSeg The effective segment.
3960 * @param GCPtrEff The address of the image.
3961 * @param enmEffOpSize The operand size (only REX.W really matters).
3962 */
3963IEM_CIMPL_DEF_3(iemCImpl_fxrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
3964{
3965 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
3966
3967 /*
3968 * Raise exceptions.
3969 */
3970 if (pCtx->cr0 & X86_CR0_EM)
3971 return iemRaiseUndefinedOpcode(pIemCpu);
3972 if (pCtx->cr0 & (X86_CR0_TS | X86_CR0_EM))
3973 return iemRaiseDeviceNotAvailable(pIemCpu);
3974 if (GCPtrEff & 15)
3975 {
3976        /** @todo CPU/VM detection possible! \#AC might not be signalled for
3977         * all/any misalignment sizes, Intel says it's an implementation detail. */
3978 if ( (pCtx->cr0 & X86_CR0_AM)
3979 && pCtx->eflags.Bits.u1AC
3980 && pIemCpu->uCpl == 3)
3981 return iemRaiseAlignmentCheckException(pIemCpu);
3982 return iemRaiseGeneralProtectionFault0(pIemCpu);
3983 }
3984 AssertReturn(iemFRegIsFxSaveFormat(pIemCpu), VERR_IEM_IPE_2);
3985
3986 /*
3987 * Access the memory.
3988 */
3989 void *pvMem512;
3990 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
3991 if (rcStrict != VINF_SUCCESS)
3992 return rcStrict;
3993 PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
3994
3995 /*
3996 * Check the state for stuff which will GP(0).
3997 */
3998 uint32_t const fMXCSR = pSrc->MXCSR;
3999 uint32_t const fMXCSR_MASK = pCtx->fpu.MXCSR_MASK ? pCtx->fpu.MXCSR_MASK : UINT32_C(0xffbf);
4000 if (fMXCSR & ~fMXCSR_MASK)
4001 {
4002 Log(("fxrstor: MXCSR=%#x (MXCSR_MASK=%#x) -> #GP(0)\n", fMXCSR, fMXCSR_MASK));
4003 return iemRaiseGeneralProtectionFault0(pIemCpu);
4004 }
4005
4006 /*
4007 * Load the registers.
4008 */
4009    /** @todo CPU/VM detection possible! If CR4.OSFXSR=0, it's implementation
4010     * specific whether MXCSR and XMM0-XMM7 are restored. */
4011
4012 /* common for all formats */
4013 pCtx->fpu.FCW = pSrc->FCW;
4014 pCtx->fpu.FSW = pSrc->FSW;
4015 pCtx->fpu.FTW = pSrc->FTW & UINT16_C(0xff);
4016 pCtx->fpu.FOP = pSrc->FOP;
4017 pCtx->fpu.MXCSR = fMXCSR;
4018 /* (MXCSR_MASK is read-only) */
4019 for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
4020 {
4021 pCtx->fpu.aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
4022 pCtx->fpu.aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
4023 pCtx->fpu.aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
4024 pCtx->fpu.aRegs[i].au32[3] = 0;
4025 }
4026
4027 /* FPU IP, CS, DP and DS. */
4028 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
4029 {
4030 pCtx->fpu.FPUIP = pSrc->FPUIP;
4031 pCtx->fpu.CS = pSrc->CS;
4032 pCtx->fpu.Rsrvd1 = pSrc->Rsrvd1;
4033 pCtx->fpu.FPUDP = pSrc->FPUDP;
4034 pCtx->fpu.DS = pSrc->DS;
4035 pCtx->fpu.Rsrvd2 = pSrc->Rsrvd2;
4036 }
4037 else
4038 {
4039 pCtx->fpu.FPUIP = pSrc->FPUIP;
4040 pCtx->fpu.CS = pSrc->CS;
4041 pCtx->fpu.Rsrvd1 = 0;
4042 pCtx->fpu.FPUDP = pSrc->FPUDP;
4043 pCtx->fpu.DS = pSrc->DS;
4044 pCtx->fpu.Rsrvd2 = 0;
4045 }
4046
4047 /* XMM registers. */
4048 if ( !(pCtx->msrEFER & MSR_K6_EFER_FFXSR)
4049 || pIemCpu->enmCpuMode != IEMMODE_64BIT
4050 || pIemCpu->uCpl != 0)
4051 {
4052 uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
4053 for (uint32_t i = 0; i < cXmmRegs; i++)
4054 pCtx->fpu.aXMM[i] = pSrc->aXMM[i];
4055 }
4056
4057 /*
4058 * Commit the memory.
4059 */
4060 rcStrict = iemMemCommitAndUnmap(pIemCpu, pvMem512, IEM_ACCESS_DATA_R);
4061 if (rcStrict != VINF_SUCCESS)
4062 return rcStrict;
4063
4064 iemRegAddToRip(pIemCpu, cbInstr);
4065 return VINF_SUCCESS;
4066}
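
/*
 * Editor's worked example of the reserved-bit check above: with the default mask of
 * 0xffbf, bit 6 (DAZ) and bits 16..31 are reserved. Restoring the power-on default
 * MXCSR of 0x1f80 passes (0x1f80 & ~0xffbf == 0), whereas 0x00011f80 has bit 16 set
 * (0x00011f80 & ~0xffbf == 0x00010000) and therefore raises #GP(0).
 */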
4067
4068
4069/**
4070 * Common routine for fnstenv and fnsave.
4071 *
4072 * @param uPtr Where to store the state.
4073 * @param pCtx The CPU context.
4074 */
4075static void iemCImplCommonFpuStoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTPTRUNION uPtr, PCCPUMCTX pCtx)
4076{
4077 if (enmEffOpSize == IEMMODE_16BIT)
4078 {
4079 uPtr.pu16[0] = pCtx->fpu.FCW;
4080 uPtr.pu16[1] = pCtx->fpu.FSW;
4081 uPtr.pu16[2] = iemFpuCalcFullFtw(pCtx);
4082 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4083 {
4084 /** @todo Testcase: How does this work when the FPUIP/CS was saved in
4085 * protected mode or long mode and we save it in real mode? And vice
4086             * versa? And with 32-bit operand size? I think the CPU stores the
4087             * effective address ((CS << 4) + IP) in the offset field and does
4088             * not do any further address calculation here. */
4089 uPtr.pu16[3] = (uint16_t)pCtx->fpu.FPUIP;
4090 uPtr.pu16[4] = ((pCtx->fpu.FPUIP >> 4) & UINT16_C(0xf000)) | pCtx->fpu.FOP;
4091 uPtr.pu16[5] = (uint16_t)pCtx->fpu.FPUDP;
4092 uPtr.pu16[6] = (pCtx->fpu.FPUDP >> 4) & UINT16_C(0xf000);
4093 }
4094 else
4095 {
4096 uPtr.pu16[3] = pCtx->fpu.FPUIP;
4097 uPtr.pu16[4] = pCtx->fpu.CS;
4098 uPtr.pu16[5] = pCtx->fpu.FPUDP;
4099 uPtr.pu16[6] = pCtx->fpu.DS;
4100 }
4101 }
4102 else
4103 {
4104 /** @todo Testcase: what is stored in the "gray" areas? (figure 8-9 and 8-10) */
4105 uPtr.pu16[0*2] = pCtx->fpu.FCW;
4106 uPtr.pu16[1*2] = pCtx->fpu.FSW;
4107 uPtr.pu16[2*2] = iemFpuCalcFullFtw(pCtx);
4108 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4109 {
4110 uPtr.pu16[3*2] = (uint16_t)pCtx->fpu.FPUIP;
4111 uPtr.pu32[4] = ((pCtx->fpu.FPUIP & UINT32_C(0xffff0000)) >> 4) | pCtx->fpu.FOP;
4112 uPtr.pu16[5*2] = (uint16_t)pCtx->fpu.FPUDP;
4113 uPtr.pu32[6] = (pCtx->fpu.FPUDP & UINT32_C(0xffff0000)) >> 4;
4114 }
4115 else
4116 {
4117 uPtr.pu32[3] = pCtx->fpu.FPUIP;
4118 uPtr.pu16[4*2] = pCtx->fpu.CS;
4119 uPtr.pu16[4*2+1]= pCtx->fpu.FOP;
4120 uPtr.pu32[5] = pCtx->fpu.FPUDP;
4121 uPtr.pu16[6*2] = pCtx->fpu.DS;
4122 }
4123 }
4124}
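
/*
 * Editor's worked example of the 16-bit real/V86 layout above: a linear FPUIP of
 * 0x12345 with FOP 0x234 is stored as word 3 = 0x2345 (IP bits 15:0) and word 4 =
 * ((0x12345 >> 4) & 0xf000) | 0x234 = 0x1000 | 0x234 = 0x1234, i.e. linear address
 * bits 19:16 land in the top nibble alongside the 11-bit opcode. FPUDP is packed the
 * same way, just without an opcode in the low bits.
 */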
4125
4126
4127/**
4128 * Common routine for fldenv and frstor.
4129 *
4130 * @param uPtr Where to load the state from.
4131 * @param pCtx The CPU context.
4132 */
4133static void iemCImplCommonFpuRestoreEnv(PIEMCPU pIemCpu, IEMMODE enmEffOpSize, RTCPTRUNION uPtr, PCPUMCTX pCtx)
4134{
4135 if (enmEffOpSize == IEMMODE_16BIT)
4136 {
4137 pCtx->fpu.FCW = uPtr.pu16[0];
4138 pCtx->fpu.FSW = uPtr.pu16[1];
4139 pCtx->fpu.FTW = uPtr.pu16[2];
4140 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4141 {
4142 pCtx->fpu.FPUIP = uPtr.pu16[3] | ((uint32_t)(uPtr.pu16[4] & UINT16_C(0xf000)) << 4);
4143 pCtx->fpu.FPUDP = uPtr.pu16[5] | ((uint32_t)(uPtr.pu16[6] & UINT16_C(0xf000)) << 4);
4144 pCtx->fpu.FOP = uPtr.pu16[4] & UINT16_C(0x07ff);
4145 pCtx->fpu.CS = 0;
4146 pCtx->fpu.DS = 0;
4147 }
4148 else
4149 {
4150 pCtx->fpu.FPUIP = uPtr.pu16[3];
4151 pCtx->fpu.CS = uPtr.pu16[4];
4152 pCtx->fpu.FPUDP = uPtr.pu16[5];
4153 pCtx->fpu.DS = uPtr.pu16[6];
4154 /** @todo Testcase: Is FOP cleared when doing 16-bit protected mode fldenv? */
4155 }
4156 }
4157 else
4158 {
4159 pCtx->fpu.FCW = uPtr.pu16[0*2];
4160 pCtx->fpu.FSW = uPtr.pu16[1*2];
4161 pCtx->fpu.FTW = uPtr.pu16[2*2];
4162 if (IEM_IS_REAL_OR_V86_MODE(pIemCpu))
4163 {
4164 pCtx->fpu.FPUIP = uPtr.pu16[3*2] | ((uPtr.pu32[4] & UINT32_C(0x0ffff000)) << 4);
4165 pCtx->fpu.FOP = uPtr.pu32[4] & UINT16_C(0x07ff);
4166 pCtx->fpu.FPUDP = uPtr.pu16[5*2] | ((uPtr.pu32[6] & UINT32_C(0x0ffff000)) << 4);
4167 pCtx->fpu.CS = 0;
4168 pCtx->fpu.DS = 0;
4169 }
4170 else
4171 {
4172 pCtx->fpu.FPUIP = uPtr.pu32[3];
4173 pCtx->fpu.CS = uPtr.pu16[4*2];
4174 pCtx->fpu.FOP = uPtr.pu16[4*2+1];
4175 pCtx->fpu.FPUDP = uPtr.pu32[5];
4176 pCtx->fpu.DS = uPtr.pu16[6*2];
4177 }
4178 }
4179
4180 /* Make adjustments. */
4181 pCtx->fpu.FTW = iemFpuCompressFtw(pCtx->fpu.FTW);
4182 pCtx->fpu.FCW &= ~X86_FCW_ZERO_MASK;
4183 iemFpuRecalcExceptionStatus(pCtx);
4184 /** @todo Testcase: Check if ES and/or B are automatically cleared if no
4185 * exceptions are pending after loading the saved state? */
4186}
4187
4188
4189/**
4190 * Implements 'FNSTENV'.
4191 *
4192 * @param enmEffOpSize The operand size (selects the 14 or 28 byte image).
4193 * @param iEffSeg The effective segment register for @a GCPtrEffDst.
4194 * @param GCPtrEffDst The address of the image.
4195 */
4196IEM_CIMPL_DEF_3(iemCImpl_fnstenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffDst)
4197{
4198 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4199 RTPTRUNION uPtr;
4200 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, &uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4201 iEffSeg, GCPtrEffDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4202 if (rcStrict != VINF_SUCCESS)
4203 return rcStrict;
4204
4205 iemCImplCommonFpuStoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4206
4207 rcStrict = iemMemCommitAndUnmap(pIemCpu, uPtr.pv, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
4208 if (rcStrict != VINF_SUCCESS)
4209 return rcStrict;
4210
4211 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4212 iemRegAddToRip(pIemCpu, cbInstr);
4213 return VINF_SUCCESS;
4214}
4215
4216
4217/**
4218 * Implements 'FLDENV'.
4219 *
4220 * @param enmEffOpSize The operand size (selects the 14 or 28 byte image).
4221 * @param iEffSeg The effective segment register for @a GCPtrEffSrc.
4222 * @param GCPtrEffSrc The address of the image.
4223 */
4224IEM_CIMPL_DEF_3(iemCImpl_fldenv, IEMMODE, enmEffOpSize, uint8_t, iEffSeg, RTGCPTR, GCPtrEffSrc)
4225{
4226 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4227 RTCPTRUNION uPtr;
4228 VBOXSTRICTRC rcStrict = iemMemMap(pIemCpu, (void **)&uPtr.pv, enmEffOpSize == IEMMODE_16BIT ? 14 : 28,
4229 iEffSeg, GCPtrEffSrc, IEM_ACCESS_DATA_R);
4230 if (rcStrict != VINF_SUCCESS)
4231 return rcStrict;
4232
4233 iemCImplCommonFpuRestoreEnv(pIemCpu, enmEffOpSize, uPtr, pCtx);
4234
4235 rcStrict = iemMemCommitAndUnmap(pIemCpu, (void *)uPtr.pv, IEM_ACCESS_DATA_R);
4236 if (rcStrict != VINF_SUCCESS)
4237 return rcStrict;
4238
4239 iemRegAddToRip(pIemCpu, cbInstr);
4240 return VINF_SUCCESS;
4241}
4242
4243
4244/**
4245 * Implements 'FLDCW'.
4246 *
4247 * @param u16Fcw The new FCW.
4248 */
4249IEM_CIMPL_DEF_1(iemCImpl_fldcw, uint16_t, u16Fcw)
4250{
4251 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4252
4253 /** @todo Testcase: Check what happens when trying to load X86_FCW_PC_RSVD. */
4254    /** @todo Testcase: Try to see what happens when trying to set undefined bits
4255     * (other than 6 and 7). Currently ignoring them. */
4256    /** @todo Testcase: Test that it raises and lowers the FPU exception bits
4257     * according to FSW. (This is what is currently implemented.) */
4258 pCtx->fpu.FCW = u16Fcw & ~X86_FCW_ZERO_MASK;
4259 iemFpuRecalcExceptionStatus(pCtx);
4260
4261 /* Note: C0, C1, C2 and C3 are documented as undefined, we leave them untouched! */
4262 iemRegAddToRip(pIemCpu, cbInstr);
4263 return VINF_SUCCESS;
4264}
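
/*
 * Editor's sketch of what iemFpuRecalcExceptionStatus() presumably boils down to
 * (the real helper lives elsewhere; treat this as an assumption, not its code):
 * ES and B are set whenever an exception flag is pending in FSW whose mask bit in
 * FCW is clear, and cleared otherwise.
 */
#if 0
static void sketchRecalcFpuEs(PCPUMCTX pCtx)
{
    uint16_t const fUnmasked = pCtx->fpu.FSW & ~pCtx->fpu.FCW & UINT16_C(0x003f); /* IE..PE vs IM..PM */
    if (fUnmasked)
        pCtx->fpu.FSW |= X86_FSW_ES | X86_FSW_B;
    else
        pCtx->fpu.FSW &= ~(uint16_t)(X86_FSW_ES | X86_FSW_B);
}
#endif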
4265
4266
4267
4268/**
4269 * Implements the underflow case of fxch.
4270 *
4271 * @param iStReg The other stack register.
4272 */
4273IEM_CIMPL_DEF_1(iemCImpl_fxch_underflow, uint8_t, iStReg)
4274{
4275 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4276
4277 unsigned const iReg1 = X86_FSW_TOP_GET(pCtx->fpu.FSW);
4278 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4279 Assert(!(RT_BIT(iReg1) & pCtx->fpu.FTW) || !(RT_BIT(iReg2) & pCtx->fpu.FTW));
4280
4281 /** @todo Testcase: fxch underflow. Making assumptions that underflowed
4282 * registers are read as QNaN and then exchanged. This could be
4283 * wrong... */
4284 if (pCtx->fpu.FCW & X86_FCW_IM)
4285 {
4286 if (RT_BIT(iReg1) & pCtx->fpu.FTW)
4287 {
4288 if (RT_BIT(iReg2) & pCtx->fpu.FTW)
4289 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4290 else
4291 pCtx->fpu.aRegs[0].r80 = pCtx->fpu.aRegs[iStReg].r80;
4292 iemFpuStoreQNan(&pCtx->fpu.aRegs[iStReg].r80);
4293 }
4294 else
4295 {
4296 pCtx->fpu.aRegs[iStReg].r80 = pCtx->fpu.aRegs[0].r80;
4297 iemFpuStoreQNan(&pCtx->fpu.aRegs[0].r80);
4298 }
4299 pCtx->fpu.FSW &= ~X86_FSW_C_MASK;
4300 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
4301 }
4302 else
4303 {
4304 /* raise underflow exception, don't change anything. */
4305 pCtx->fpu.FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_XCPT_MASK);
4306 pCtx->fpu.FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4307 }
4308 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4309
4310 iemRegAddToRip(pIemCpu, cbInstr);
4311 return VINF_SUCCESS;
4312}
4313
4314
4315/**
4316 * Implements 'FCOMI', 'FCOMIP', 'FUCOMI', and 'FUCOMIP'.
4317 *
4318 * @param iStReg The other stack register (ST(i)).
 * @param pfnAImpl The FPU comparison worker to invoke.
 * @param fPop Whether to pop ST(0) afterwards (FCOMIP/FUCOMIP).
4319 */
4320IEM_CIMPL_DEF_3(iemCImpl_fcomi_fucomi, uint8_t, iStReg, PFNIEMAIMPLFPUR80EFL, pfnAImpl, bool, fPop)
4321{
4322 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
4323 Assert(iStReg < 8);
4324
4325 /*
4326 * Raise exceptions.
4327 */
4328 if (pCtx->cr0 & (X86_CR0_EM | X86_CR0_TS))
4329 return iemRaiseDeviceNotAvailable(pIemCpu);
4330 uint16_t u16Fsw = pCtx->fpu.FSW;
4331 if (u16Fsw & X86_FSW_ES)
4332 return iemRaiseMathFault(pIemCpu);
4333
4334 /*
4335 * Check if any of the register accesses causes #SF + #IA.
4336 */
4337 unsigned const iReg1 = X86_FSW_TOP_GET(u16Fsw);
4338 unsigned const iReg2 = (iReg1 + iStReg) & X86_FSW_TOP_SMASK;
4339 if ((pCtx->fpu.FTW & (RT_BIT(iReg1) | RT_BIT(iReg2))) == (RT_BIT(iReg1) | RT_BIT(iReg2)))
4340 {
4341 uint32_t u32Eflags = pfnAImpl(&pCtx->fpu, &u16Fsw, &pCtx->fpu.aRegs[0].r80, &pCtx->fpu.aRegs[iStReg].r80);
4342 pCtx->fpu.FSW &= ~X86_FSW_C1;
4343 pCtx->fpu.FSW |= u16Fsw & ~X86_FSW_TOP_MASK;
4344 if ( !(u16Fsw & X86_FSW_IE)
4345 || (pCtx->fpu.FCW & X86_FCW_IM) )
4346 {
4347 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4348            pCtx->eflags.u |= u32Eflags & (X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4349 }
4350 }
4351 else if (pCtx->fpu.FCW & X86_FCW_IM)
4352 {
4353 /* Masked underflow. */
4354 pCtx->fpu.FSW &= ~X86_FSW_C1;
4355 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF;
4356 pCtx->eflags.u &= ~(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF);
4357 pCtx->eflags.u |= X86_EFL_ZF | X86_EFL_PF | X86_EFL_CF;
4358 }
4359 else
4360 {
4361 /* Raise underflow - don't touch EFLAGS or TOP. */
4362 pCtx->fpu.FSW &= ~X86_FSW_C1;
4363 pCtx->fpu.FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
4364 fPop = false;
4365 }
4366
4367 /*
4368 * Pop if necessary.
4369 */
4370 if (fPop)
4371 {
4372 pCtx->fpu.FTW &= ~RT_BIT(iReg1);
4373 pCtx->fpu.FSW &= X86_FSW_TOP_MASK;
4374 pCtx->fpu.FSW |= ((iReg1 + 7) & X86_FSW_TOP_SMASK) << X86_FSW_TOP_SHIFT;
4375 }
4376
4377 iemFpuUpdateOpcodeAndIpWorker(pIemCpu, pCtx);
4378 iemRegAddToRip(pIemCpu, cbInstr);
4379 return VINF_SUCCESS;
4380}
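
/*
 * Editor's reference for the EFLAGS mapping produced by the comparison worker above:
 *   ST(0) >  ST(i)  : ZF=0 PF=0 CF=0
 *   ST(0) <  ST(i)  : ZF=0 PF=0 CF=1
 *   ST(0) == ST(i)  : ZF=1 PF=0 CF=0
 *   unordered       : ZF=1 PF=1 CF=1
 * which is also why the masked-underflow path above forces ZF|PF|CF (the unordered
 * result) into EFLAGS.
 */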
4381
4382/** @} */
4383