VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h@ 61395

Last change on this file since 61395 was 61395, checked in by vboxsync, 9 years ago

IEM: Enabled the three SSE instructions that were !VBOX_WITH_REM-only until now.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 605.6 KB
Line 
1/* $Id: IEMAllInstructions.cpp.h 61395 2016-06-02 01:03:49Z vboxsync $ */
2/** @file
3 * IEM - Instruction Decoding and Emulation.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Global Variables *
21*******************************************************************************/
22extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
23
24
/**
 * Common worker for instructions like ADD, AND, OR, ++ with a byte
 * memory/register as the destination (Eb,Gb forms).
 *
 * Fetches the ModR/M byte, then emits either the register-destination or the
 * memory-destination microcode variant.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_r8, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* A LOCK prefix with a register destination is invalid. */
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* CMP and TEST have no locked variant (pfnLockedU8 == NULL) and only
           read the destination, so map it read-only in that case. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R; /* CMP,TEST */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,  pu8Dst,           0);
        IEM_MC_ARG(uint8_t,    u8Src,            1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_EFLAGS(EFlags);
        /* Dispatch to the locked worker when a LOCK prefix is present. */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
85
86
/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with
 * memory/register as the destination (Ev,Gv forms).
 *
 * Fetches the ModR/M byte and dispatches on the effective operand size
 * (16/32/64-bit) for both the register- and memory-destination paths.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rm_rv, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* A LOCK prefix with a register destination is invalid. */
        IEMOP_HLP_NO_LOCK_PREFIX();

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst,  0);
                IEM_MC_ARG(uint16_t,   u16Src,   1);
                IEM_MC_ARG(uint32_t *, pEFlags,  2);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst,  0);
                IEM_MC_ARG(uint32_t,   u32Src,   1);
                IEM_MC_ARG(uint32_t *, pEFlags,  2);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                /* TEST does not write the destination, so skip the 32-bit
                   write side effect of clearing the high dword. */
                if (pImpl != &g_iemAImpl_test)
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst,  0);
                IEM_MC_ARG(uint64_t,   u64Src,   1);
                IEM_MC_ARG(uint32_t *, pEFlags,  2);

                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory.
         * Note! We're putting the eflags on the stack here so we can commit them
         *       after the memory.
         */
        /* NOTE(review): pfnLockedU8 is used as the "has locked variants"
           indicator for all operand sizes — presumably the locked U8/U16/U32/U64
           pointers are all NULL together for CMP/TEST; confirm against the
           IEMOPBINSIZES tables. */
        uint32_t const fAccess = pImpl->pfnLockedU8 ? IEM_ACCESS_DATA_RW : IEM_ACCESS_DATA_R /* CMP,TEST */;
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst,          0);
                IEM_MC_ARG(uint16_t,   u16Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst,          0);
                IEM_MC_ARG(uint32_t,   u32Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst,          0);
                IEM_MC_ARG(uint64_t,   u64Src,           1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
233
234
/**
 * Common worker for byte instructions like ADD, AND, OR, ++ with a register as
 * the destination (Gb,Eb forms).
 *
 * The destination is always a register, so no locked variant is needed and a
 * LOCK prefix is rejected up front.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_r8_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        /* Note the swapped operand roles vs. the rm_r8 worker: source is
           r/m, destination is the reg field. */
        IEM_MC_FETCH_GREG_U8(u8Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory (read-only source, no mapping needed).
         */
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint8_t *,  pu8Dst,  0);
        IEM_MC_ARG(uint8_t,    u8Src,   1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U8(u8Src, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U8(pu8Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
286
287
/**
 * Common worker for word/dword/qword instructions like ADD, AND, OR, ++ with a
 * register as the destination (Gv,Ev forms).
 *
 * The destination is always a register, so no locked variant is needed and a
 * LOCK prefix is rejected up front.  Dispatches on the effective operand size.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rv_rm, PCIEMOPBINSIZES, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst,  0);
                IEM_MC_ARG(uint16_t,   u16Src,   1);
                IEM_MC_ARG(uint32_t *, pEFlags,  2);

                IEM_MC_FETCH_GREG_U16(u16Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst,  0);
                IEM_MC_ARG(uint32_t,   u32Src,   1);
                IEM_MC_ARG(uint32_t *, pEFlags,  2);

                IEM_MC_FETCH_GREG_U32(u32Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                /* NOTE(review): unlike the rm_rv worker, no TEST exemption here —
                   presumably TEST never decodes through this Gv,Ev path; confirm. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst,  0);
                IEM_MC_ARG(uint64_t,   u64Src,   1);
                IEM_MC_ARG(uint32_t *, pEFlags,  2);

                IEM_MC_FETCH_GREG_U64(u64Src, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're accessing memory (read-only source, no mapping needed).
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst,  0);
                IEM_MC_ARG(uint16_t,   u16Src,   1);
                IEM_MC_ARG(uint32_t *, pEFlags,  2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *, pu32Dst,  0);
                IEM_MC_ARG(uint32_t,   u32Src,   1);
                IEM_MC_ARG(uint32_t *, pEFlags,  2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst,  0);
                IEM_MC_ARG(uint64_t,   u64Src,   1);
                IEM_MC_ARG(uint32_t *, pEFlags,  2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
415
416
/**
 * Common worker for instructions like ADD, AND, OR, ++ working on AL with
 * a byte immediate (AL,Ib forms).
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_AL_Ib, PCIEMOPBINSIZES, pImpl)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    /* Register destination: LOCK prefix is invalid. */
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
    IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/ u8Imm,  1);
    IEM_MC_ARG(uint32_t *,      pEFlags,            2);

    IEM_MC_REF_GREG_U8(pu8Dst, X86_GREG_xAX);
    IEM_MC_REF_EFLAGS(pEFlags);
    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
441
442
/**
 * Common worker for instructions like ADD, AND, OR, ++ working on
 * AX/EAX/RAX with a word/dword immediate (rAX,Iz forms).
 *
 * In 64-bit operand size the immediate is a sign-extended dword
 * (IEM_OPCODE_GET_NEXT_S32_SX_U64), per the Iz encoding.
 *
 * @param   pImpl       Pointer to the instruction implementation (assembly).
 */
FNIEMOP_DEF_1(iemOpHlpBinaryOperator_rAX_Iz, PCIEMOPBINSIZES, pImpl)
{
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
            IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/ u16Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U16(pu16Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
            IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/ u32Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);

            /* TEST does not write the destination, so skip clearing the high dword. */
            if (pImpl != &g_iemAImpl_test)
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();

            IEM_MC_BEGIN(3, 0);
            IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
            IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/ u64Imm,    1);
            IEM_MC_ARG(uint32_t *,      pEFlags,                2);

            IEM_MC_REF_GREG_U64(pu64Dst, X86_GREG_xAX);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
515
516
/** Opcodes 0xf1, 0xd6 - shared handler for invalid opcodes, raises \#UD. */
FNIEMOP_DEF(iemOp_Invalid)
{
    IEMOP_MNEMONIC("Invalid");
    return IEMOP_RAISE_INVALID_OPCODE();
}
523
524
525
526/** @name ..... opcodes.
527 *
528 * @{
529 */
530
531/** @} */
532
533
534/** @name Two byte opcodes (first byte 0x0f).
535 *
536 * @{
537 */
538
/** Opcode 0x0f 0x00 /0 - SLDT: store the LDT register selector. */
FNIEMOP_DEF_1(iemOp_Grp6_sldt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sldt Rv/Mw");
    IEMOP_HLP_MIN_286();
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: width follows the effective operand size. */
        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Ldtr);
                IEM_MC_FETCH_LDTR_U16(u16Ldtr);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Ldtr);
                IEM_MC_FETCH_LDTR_U32(u32Ldtr);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Ldtr);
                IEM_MC_FETCH_LDTR_U64(u64Ldtr);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Ldtr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination: always a 16-bit store regardless of operand size. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Ldtr);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_SLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        IEM_MC_FETCH_LDTR_U16(u16Ldtr);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Ldtr);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
595
596
/** Opcode 0x0f 0x00 /1 - STR: store the task register selector. */
FNIEMOP_DEF_1(iemOp_Grp6_str, uint8_t, bRm)
{
    IEMOP_MNEMONIC("str Rv/Mw");
    IEMOP_HLP_MIN_286();
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: width follows the effective operand size. */
        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Tr);
                IEM_MC_FETCH_TR_U16(u16Tr);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Tr);
                IEM_MC_FETCH_TR_U32(u32Tr);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tr);
                IEM_MC_FETCH_TR_U64(u64Tr);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tr);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination: always a 16-bit store regardless of operand size. */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t, u16Tr);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_STR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        IEM_MC_FETCH_TR_U16(u16Tr);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tr);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
653
654
/** Opcode 0x0f 0x00 /2 - LLDT: load the LDT register from a selector. */
FNIEMOP_DEF_1(iemOp_Grp6_lldt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("lldt Ew");
    IEMOP_HLP_MIN_286();
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source. */
        IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_REG, OP_PARM_Ew, DISOPTYPE_DANGEROUS);
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    else
    {
        /* Memory source: fetch the selector, then defer to the C implementation. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DECODED_NL_1(OP_LLDT, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS);
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lldt, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
685
686
/** Opcode 0x0f 0x00 /3 - LTR: load the task register from a selector. */
FNIEMOP_DEF_1(iemOp_Grp6_ltr, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ltr Ew");
    IEMOP_HLP_MIN_286();
    IEMOP_HLP_NO_REAL_OR_V86_MODE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source. */
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    else
    {
        /* Memory source: fetch the selector, then defer to the C implementation. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Sel, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO(); /** @todo test order */
        IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_CALL_CIMPL_1(iemCImpl_ltr, u16Sel);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
717
718
719/** Opcode 0x0f 0x00 /3. */
720FNIEMOP_DEF_2(iemOpCommonGrp6VerX, uint8_t, bRm, bool, fWrite)
721{
722 IEMOP_HLP_MIN_286();
723 IEMOP_HLP_NO_REAL_OR_V86_MODE();
724
725 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
726 {
727 IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
728 IEM_MC_BEGIN(2, 0);
729 IEM_MC_ARG(uint16_t, u16Sel, 0);
730 IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1);
731 IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
732 IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg);
733 IEM_MC_END();
734 }
735 else
736 {
737 IEM_MC_BEGIN(2, 1);
738 IEM_MC_ARG(uint16_t, u16Sel, 0);
739 IEM_MC_ARG_CONST(bool, fWriteArg, fWrite, 1);
740 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
741 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
742 IEMOP_HLP_DECODED_NL_1(fWrite ? OP_VERW : OP_VERR, IEMOPFORM_M_MEM, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
743 IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
744 IEM_MC_CALL_CIMPL_2(iemCImpl_VerX, u16Sel, fWriteArg);
745 IEM_MC_END();
746 }
747 return VINF_SUCCESS;
748}
749
750
/** Opcode 0x0f 0x00 /4 - VERR: verify a segment for reading. */
FNIEMOP_DEF_1(iemOp_Grp6_verr, uint8_t, bRm)
{
    IEMOP_MNEMONIC("verr Ew");
    IEMOP_HLP_MIN_286();
    /* fWrite=false selects the VERR (read-access) check. */
    return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, false);
}
758
759
760/** Opcode 0x0f 0x00 /5. */
761FNIEMOP_DEF_1(iemOp_Grp6_verw, uint8_t, bRm)
762{
763 IEMOP_MNEMONIC("verr Ew");
764 IEMOP_HLP_MIN_286();
765 return FNIEMOP_CALL_2(iemOpCommonGrp6VerX, bRm, true);
766}
767
768
/** Opcode 0x0f 0x00 - group 6 dispatcher (SLDT/STR/LLDT/LTR/VERR/VERW). */
FNIEMOP_DEF(iemOp_Grp6)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The ModR/M reg field selects the sub-instruction; /6 and /7 are undefined. */
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: return FNIEMOP_CALL_1(iemOp_Grp6_sldt, bRm);
        case 1: return FNIEMOP_CALL_1(iemOp_Grp6_str,  bRm);
        case 2: return FNIEMOP_CALL_1(iemOp_Grp6_lldt, bRm);
        case 3: return FNIEMOP_CALL_1(iemOp_Grp6_ltr,  bRm);
        case 4: return FNIEMOP_CALL_1(iemOp_Grp6_verr, bRm);
        case 5: return FNIEMOP_CALL_1(iemOp_Grp6_verw, bRm);
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        case 7: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

}
787
788
/** Opcode 0x0f 0x01 /0 - SGDT: store the GDT register to memory. */
FNIEMOP_DEF_1(iemOp_Grp7_sgdt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sgdt Ms");
    IEMOP_HLP_MIN_286();
    IEMOP_HLP_64BIT_OP_SIZE();
    IEM_MC_BEGIN(2, 1);
    IEM_MC_ARG(uint8_t,  iEffSeg,     0);
    IEM_MC_ARG(RTGCPTR,  GCPtrEffSrc, 1);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
    IEM_MC_CALL_CIMPL_2(iemCImpl_sgdt, iEffSeg, GCPtrEffSrc);
    IEM_MC_END();
    return VINF_SUCCESS;
}
805
806
/** Opcode 0x0f 0x01 0xc1 - VMCALL (VT-x).  Not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmcall)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
813
814
/** Opcode 0x0f 0x01 0xc2 - VMLAUNCH (VT-x).  Not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmlaunch)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
821
822
/** Opcode 0x0f 0x01 0xc3 - VMRESUME (VT-x).  Not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmresume)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
829
830
/** Opcode 0x0f 0x01 0xc4 - VMXOFF (VT-x).  Not implemented; raises \#UD. */
FNIEMOP_DEF(iemOp_Grp7_vmxoff)
{
    IEMOP_BITCH_ABOUT_STUB();
    return IEMOP_RAISE_INVALID_OPCODE();
}
837
838
/** Opcode 0x0f 0x01 /1 - SIDT: store the IDT register to memory. */
FNIEMOP_DEF_1(iemOp_Grp7_sidt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sidt Ms");
    IEMOP_HLP_MIN_286();
    IEMOP_HLP_64BIT_OP_SIZE();
    IEM_MC_BEGIN(2, 1);
    IEM_MC_ARG(uint8_t,  iEffSeg,     0);
    IEM_MC_ARG(RTGCPTR,  GCPtrEffSrc, 1);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
    IEM_MC_CALL_CIMPL_2(iemCImpl_sidt, iEffSeg, GCPtrEffSrc);
    IEM_MC_END();
    return VINF_SUCCESS;
}
855
856
/** Opcode 0x0f 0x01 0xc8 - MONITOR.  Deferred to the C implementation. */
FNIEMOP_DEF(iemOp_Grp7_monitor)
{
    IEMOP_MNEMONIC("monitor");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /** @todo Verify that monitor is allergic to lock prefixes. */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_monitor, pIemCpu->iEffSeg);
}
864
865
/** Opcode 0x0f 0x01 0xc9 - MWAIT.  Deferred to the C implementation. */
FNIEMOP_DEF(iemOp_Grp7_mwait)
{
    IEMOP_MNEMONIC("mwait"); /** @todo Verify that mwait is allergic to lock prefixes. */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_mwait);
}
873
874
/** Opcode 0x0f 0x01 /2 - LGDT: load the GDT register from memory. */
FNIEMOP_DEF_1(iemOp_Grp7_lgdt, uint8_t, bRm)
{
    IEMOP_MNEMONIC("lgdt");
    IEMOP_HLP_64BIT_OP_SIZE();
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG(uint8_t,         iEffSeg,                                    0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                                1);
    /* Operand size matters: it selects 24- vs 32-bit base handling in the C impl. */
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/pIemCpu->enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lgdt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
891
892
/** Opcode 0x0f 0x01 0xd0 - XGETBV.  Raises \#UD unless XSAVE is exposed to the guest. */
FNIEMOP_DEF(iemOp_Grp7_xgetbv)
{
    IEMOP_MNEMONIC("xgetbv");
    if (IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fXSaveRstor)
    {
        IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES();
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_xgetbv);
    }
    return IEMOP_RAISE_INVALID_OPCODE();
}
904
905
/** Opcode 0x0f 0x01 0xd1 - XSETBV.  Raises \#UD unless XSAVE is exposed to the guest. */
FNIEMOP_DEF(iemOp_Grp7_xsetbv)
{
    IEMOP_MNEMONIC("xsetbv");
    if (IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fXSaveRstor)
    {
        IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES();
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_xsetbv);
    }
    return IEMOP_RAISE_INVALID_OPCODE();
}
917
918
/** Opcode 0x0f 0x01 /3 - LIDT: load the IDT register from memory. */
FNIEMOP_DEF_1(iemOp_Grp7_lidt, uint8_t, bRm)
{
    /* In 64-bit mode the operand size is forced to 64-bit. */
    IEMMODE enmEffOpSize = pIemCpu->enmCpuMode == IEMMODE_64BIT
                         ? IEMMODE_64BIT
                         : pIemCpu->enmEffOpSize;
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG(uint8_t,         iEffSeg,                        0);
    IEM_MC_ARG(RTGCPTR,         GCPtrEffSrc,                    1);
    IEM_MC_ARG_CONST(IEMMODE,   enmEffOpSizeArg,/*=*/enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
    IEM_MC_CALL_CIMPL_3(iemCImpl_lidt, iEffSeg, GCPtrEffSrc, enmEffOpSizeArg);
    IEM_MC_END();
    return VINF_SUCCESS;
}
936
937
/* AMD-V (SVM) instructions: not implemented, declared as FNIEMOP_UD_STUB. */

/** Opcode 0x0f 0x01 0xd8 - VMRUN. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmrun);

/** Opcode 0x0f 0x01 0xd9 - VMMCALL. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmmcall);

/** Opcode 0x0f 0x01 0xda - VMLOAD. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmload);

/** Opcode 0x0f 0x01 0xdb - VMSAVE. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_vmsave);

/** Opcode 0x0f 0x01 0xdc - STGI. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_stgi);

/** Opcode 0x0f 0x01 0xdd - CLGI. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_clgi);

/** Opcode 0x0f 0x01 0xde - SKINIT. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_skinit);

/** Opcode 0x0f 0x01 0xdf - INVLPGA. */
FNIEMOP_UD_STUB(iemOp_Grp7_Amd_invlpga);
961
962/** Opcode 0x0f 0x01 /4. */
963FNIEMOP_DEF_1(iemOp_Grp7_smsw, uint8_t, bRm)
964{
965 IEMOP_MNEMONIC("smsw");
966 IEMOP_HLP_MIN_286();
967 IEMOP_HLP_NO_LOCK_PREFIX();
968 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
969 {
970 switch (pIemCpu->enmEffOpSize)
971 {
972 case IEMMODE_16BIT:
973 IEM_MC_BEGIN(0, 1);
974 IEM_MC_LOCAL(uint16_t, u16Tmp);
975 IEM_MC_FETCH_CR0_U16(u16Tmp);
976 if (IEM_GET_TARGET_CPU(pIemCpu) > IEMTARGETCPU_386)
977 { /* likely */ }
978 else if (IEM_GET_TARGET_CPU(pIemCpu) >= IEMTARGETCPU_386)
979 IEM_MC_OR_LOCAL_U16(u16Tmp, 0xffe0);
980 else
981 IEM_MC_OR_LOCAL_U16(u16Tmp, 0xfff0);
982 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Tmp);
983 IEM_MC_ADVANCE_RIP();
984 IEM_MC_END();
985 return VINF_SUCCESS;
986
987 case IEMMODE_32BIT:
988 IEM_MC_BEGIN(0, 1);
989 IEM_MC_LOCAL(uint32_t, u32Tmp);
990 IEM_MC_FETCH_CR0_U32(u32Tmp);
991 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
992 IEM_MC_ADVANCE_RIP();
993 IEM_MC_END();
994 return VINF_SUCCESS;
995
996 case IEMMODE_64BIT:
997 IEM_MC_BEGIN(0, 1);
998 IEM_MC_LOCAL(uint64_t, u64Tmp);
999 IEM_MC_FETCH_CR0_U64(u64Tmp);
1000 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
1001 IEM_MC_ADVANCE_RIP();
1002 IEM_MC_END();
1003 return VINF_SUCCESS;
1004
1005 IEM_NOT_REACHED_DEFAULT_CASE_RET();
1006 }
1007 }
1008 else
1009 {
1010 /* Ignore operand size here, memory refs are always 16-bit. */
1011 IEM_MC_BEGIN(0, 2);
1012 IEM_MC_LOCAL(uint16_t, u16Tmp);
1013 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
1014 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
1015 IEM_MC_FETCH_CR0_U16(u16Tmp);
1016 if (IEM_GET_TARGET_CPU(pIemCpu) > IEMTARGETCPU_386)
1017 { /* likely */ }
1018 else if (pIemCpu->uTargetCpu >= IEMTARGETCPU_386)
1019 IEM_MC_OR_LOCAL_U16(u16Tmp, 0xffe0);
1020 else
1021 IEM_MC_OR_LOCAL_U16(u16Tmp, 0xfff0);
1022 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
1023 IEM_MC_ADVANCE_RIP();
1024 IEM_MC_END();
1025 return VINF_SUCCESS;
1026 }
1027}
1028
1029
/** Opcode 0x0f 0x01 /6. */
FNIEMOP_DEF_1(iemOp_Grp7_lmsw, uint8_t, bRm)
{
    /* The operand size is effectively ignored, all is 16-bit and only the
       lower 3-bits are used. */
    /* LMSW - load machine status word into CR0; the heavy lifting (privilege
       and bit handling) is done by iemCImpl_lmsw. */
    IEMOP_MNEMONIC("lmsw");
    IEMOP_HLP_MIN_286();
    IEMOP_HLP_NO_LOCK_PREFIX();
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source. */
        IEM_MC_BEGIN(1, 0);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    else
    {
        /* Memory source - always a 16-bit read. */
        IEM_MC_BEGIN(1, 1);
        IEM_MC_ARG(uint16_t, u16Tmp, 0);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_1(iemCImpl_lmsw, u16Tmp);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1058
1059
/** Opcode 0x0f 0x01 /7. */
FNIEMOP_DEF_1(iemOp_Grp7_invlpg, uint8_t, bRm)
{
    /* INVLPG m - invalidate the TLB entry for the effective address.  Only
       called for memory encodings; the mod=3 forms are dispatched to
       swapgs/rdtscp by iemOp_Grp7. */
    IEMOP_MNEMONIC("invlpg");
    IEMOP_HLP_MIN_486();
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(1, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 0);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEM_MC_CALL_CIMPL_1(iemCImpl_invlpg, GCPtrEffDst);
    IEM_MC_END();
    return VINF_SUCCESS;
}
1073
1074
/** Opcode 0x0f 0x01 /7, mod=3, rm=0 - swapgs (64-bit only). */
FNIEMOP_DEF(iemOp_Grp7_swapgs)
{
    IEMOP_MNEMONIC("swapgs");
    IEMOP_HLP_ONLY_64BIT();
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_swapgs);
}
1083
1084
/** Opcode 0x0f 0x01 /7, mod=3, rm=1 - rdtscp.  Not implemented yet. */
FNIEMOP_DEF(iemOp_Grp7_rdtscp)
{
    NOREF(pIemCpu);
    IEMOP_BITCH_ABOUT_STUB();
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
}
1092
1093
/** Opcode 0x0f 0x01. */
FNIEMOP_DEF(iemOp_Grp7)
{
    /*
     * Group 7: dispatch on the ModR/M reg field.  For /0, /1, /2, /3 and /7
     * the register (mod=3) encodings select special instructions via the rm
     * field instead of the normal memory-operand instruction.
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: /* sgdt, or VMX instructions when mod=3. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 1: return FNIEMOP_CALL(iemOp_Grp7_vmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_vmlaunch);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_vmresume);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_vmxoff);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 1: /* sidt, or monitor/mwait when mod=3. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_sidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_monitor);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_mwait);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 2: /* lgdt, or xgetbv/xsetbv when mod=3. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lgdt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_xgetbv);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_xsetbv);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        case 3: /* lidt, or AMD SVM instructions when mod=3 (all rm values used). */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_lidt, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmrun);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmmcall);
                case 2: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmload);
                case 3: return FNIEMOP_CALL(iemOp_Grp7_Amd_vmsave);
                case 4: return FNIEMOP_CALL(iemOp_Grp7_Amd_stgi);
                case 5: return FNIEMOP_CALL(iemOp_Grp7_Amd_clgi);
                case 6: return FNIEMOP_CALL(iemOp_Grp7_Amd_skinit);
                case 7: return FNIEMOP_CALL(iemOp_Grp7_Amd_invlpga);
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }

        case 4: /* smsw - both register and memory forms. */
            return FNIEMOP_CALL_1(iemOp_Grp7_smsw, bRm);

        case 5: /* Unassigned. */
            return IEMOP_RAISE_INVALID_OPCODE();

        case 6: /* lmsw - both register and memory forms. */
            return FNIEMOP_CALL_1(iemOp_Grp7_lmsw, bRm);

        case 7: /* invlpg, or swapgs/rdtscp when mod=3. */
            if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
                return FNIEMOP_CALL_1(iemOp_Grp7_invlpg, bRm);
            switch (bRm & X86_MODRM_RM_MASK)
            {
                case 0: return FNIEMOP_CALL(iemOp_Grp7_swapgs);
                case 1: return FNIEMOP_CALL(iemOp_Grp7_rdtscp);
            }
            return IEMOP_RAISE_INVALID_OPCODE();

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
1170
/** Common worker for LAR (0x0f 0x02) and LSL (0x0f 0x03), Gv,Ew forms.
 * @param fIsLar  true for LAR, false for LSL; passed on to the
 *                iemCImpl_LarLsl_u16/u64 workers. */
FNIEMOP_DEF_1(iemOpCommonLarLsl_Gv_Ew, bool, fIsLar)
{
    IEMOP_HLP_NO_REAL_OR_V86_MODE();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source. */
        IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_REG, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);

                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            /* 32-bit and 64-bit share the 64-bit worker; it references the
               destination as a 64-bit register. */
            case IEMMODE_32BIT:
            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);

                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory source - the selector is read as a 16-bit value. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);

                IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint16_t, u16Sel, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
/** @todo testcase: make sure it's a 16-bit read. */

                IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
1272
1273
1274
/** Opcode 0x0f 0x02 - lar Gv,Ew (load access rights). */
FNIEMOP_DEF(iemOp_lar_Gv_Ew)
{
    IEMOP_MNEMONIC("lar Gv,Ew");
    return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, true);
}
1281
1282
/** Opcode 0x0f 0x03 - lsl Gv,Ew (load segment limit). */
FNIEMOP_DEF(iemOp_lsl_Gv_Ew)
{
    IEMOP_MNEMONIC("lsl Gv,Ew");
    return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, false);
}
1289
1290
/** Opcode 0x0f 0x05 - syscall; deferred to iemCImpl_syscall. */
FNIEMOP_DEF(iemOp_syscall)
{
    IEMOP_MNEMONIC("syscall"); /** @todo 286 LOADALL */
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_syscall);
}
1298
1299
/** Opcode 0x0f 0x06 - clts; deferred to iemCImpl_clts. */
FNIEMOP_DEF(iemOp_clts)
{
    IEMOP_MNEMONIC("clts");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_clts);
}
1307
1308
/** Opcode 0x0f 0x07 - sysret; deferred to iemCImpl_sysret. */
FNIEMOP_DEF(iemOp_sysret)
{
    IEMOP_MNEMONIC("sysret"); /** @todo 386 LOADALL */
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sysret);
}
1316
1317
/** Opcode 0x0f 0x08 - invd.  Unimplemented stub; an implementation should
 *  gate on IEMOP_HLP_MIN_486() (see the reminder below). */
FNIEMOP_STUB(iemOp_invd);
// IEMOP_HLP_MIN_486();
1321
1322
/** Opcode 0x0f 0x09 - wbinvd.  Only the CPL-0 check is emulated; the cache
 *  write-back/invalidate itself is a no-op here. */
FNIEMOP_DEF(iemOp_wbinvd)
{
    IEMOP_MNEMONIC("wbinvd");
    IEMOP_HLP_MIN_486();
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS; /* ignore for now */
}
1335
1336
/** Opcode 0x0f 0x0b - ud2: the architecturally defined invalid opcode. */
FNIEMOP_DEF(iemOp_ud2)
{
    IEMOP_MNEMONIC("ud2");
    return IEMOP_RAISE_INVALID_OPCODE();
}
1343
/** Opcode 0x0f 0x0d - AMD prefetch group P. */
FNIEMOP_DEF(iemOp_nop_Ev_GrpP)
{
    /* AMD prefetch group, Intel implements this as NOP Ev (and so do we). */
    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->f3DNowPrefetch)
    {
        IEMOP_MNEMONIC("GrpP");
        return IEMOP_RAISE_INVALID_OPCODE();
    }

    /* Register (mod=3) encodings are invalid. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_MNEMONIC("GrpP");
        return IEMOP_RAISE_INVALID_OPCODE();
    }

    IEMOP_HLP_NO_LOCK_PREFIX();
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 2: /* Aliased to /0 for the time being. */
        case 4: /* Aliased to /0 for the time being. */
        case 5: /* Aliased to /0 for the time being. */
        case 6: /* Aliased to /0 for the time being. */
        case 7: /* Aliased to /0 for the time being. */
        case 0: IEMOP_MNEMONIC("prefetch"); break;
        case 1: IEMOP_MNEMONIC("prefetchw"); break;
        case 3: IEMOP_MNEMONIC("prefetchw"); break; /* NOTE(review): same mnemonic as /1 - confirm against the AMD opcode map. */
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

    /* Decode the effective address (consuming SIB/displacement bytes), but
       perform no memory access - the prefetch hint is currently a NOP. */
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    /* Currently a NOP. */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
1383
1384
/** Opcode 0x0f 0x0e - femms.  Unimplemented stub. */
FNIEMOP_STUB(iemOp_femms);


/*
 * 3DNow! instructions: opcode 0x0f 0x0f with a trailing sub-opcode byte.
 * All unimplemented stubs, dispatched from iemOp_3Dnow.
 */

/** Opcode 0x0f 0x0f 0x0c. */
FNIEMOP_STUB(iemOp_3Dnow_pi2fw_Pq_Qq);

/** Opcode 0x0f 0x0f 0x0d. */
FNIEMOP_STUB(iemOp_3Dnow_pi2fd_Pq_Qq);

/** Opcode 0x0f 0x0f 0x1c. */
FNIEMOP_STUB(iemOp_3Dnow_pf2fw_Pq_Qq);

/** Opcode 0x0f 0x0f 0x1d. */
FNIEMOP_STUB(iemOp_3Dnow_pf2fd_Pq_Qq);

/** Opcode 0x0f 0x0f 0x8a. */
FNIEMOP_STUB(iemOp_3Dnow_pfnacc_Pq_Qq);

/** Opcode 0x0f 0x0f 0x8e. */
FNIEMOP_STUB(iemOp_3Dnow_pfpnacc_Pq_Qq);

/** Opcode 0x0f 0x0f 0x90. */
FNIEMOP_STUB(iemOp_3Dnow_pfcmpge_Pq_Qq);

/** Opcode 0x0f 0x0f 0x94. */
FNIEMOP_STUB(iemOp_3Dnow_pfmin_Pq_Qq);

/** Opcode 0x0f 0x0f 0x96. */
FNIEMOP_STUB(iemOp_3Dnow_pfrcp_Pq_Qq);

/** Opcode 0x0f 0x0f 0x97. */
FNIEMOP_STUB(iemOp_3Dnow_pfrsqrt_Pq_Qq);

/** Opcode 0x0f 0x0f 0x9a. */
FNIEMOP_STUB(iemOp_3Dnow_pfsub_Pq_Qq);

/** Opcode 0x0f 0x0f 0x9e. */
FNIEMOP_STUB(iemOp_3Dnow_pfadd_PQ_Qq);

/** Opcode 0x0f 0x0f 0xa0. */
FNIEMOP_STUB(iemOp_3Dnow_pfcmpgt_Pq_Qq);

/** Opcode 0x0f 0x0f 0xa4. */
FNIEMOP_STUB(iemOp_3Dnow_pfmax_Pq_Qq);

/** Opcode 0x0f 0x0f 0xa6. */
FNIEMOP_STUB(iemOp_3Dnow_pfrcpit1_Pq_Qq);

/** Opcode 0x0f 0x0f 0xa7. */
FNIEMOP_STUB(iemOp_3Dnow_pfrsqit1_Pq_Qq);

/** Opcode 0x0f 0x0f 0xaa. */
FNIEMOP_STUB(iemOp_3Dnow_pfsubr_Pq_Qq);

/** Opcode 0x0f 0x0f 0xae. */
FNIEMOP_STUB(iemOp_3Dnow_pfacc_PQ_Qq);

/** Opcode 0x0f 0x0f 0xb0. */
FNIEMOP_STUB(iemOp_3Dnow_pfcmpeq_Pq_Qq);

/** Opcode 0x0f 0x0f 0xb4. */
FNIEMOP_STUB(iemOp_3Dnow_pfmul_Pq_Qq);

/** Opcode 0x0f 0x0f 0xb6. */
FNIEMOP_STUB(iemOp_3Dnow_pfrcpit2_Pq_Qq);

/** Opcode 0x0f 0x0f 0xb7. */
FNIEMOP_STUB(iemOp_3Dnow_pmulhrw_Pq_Qq);

/** Opcode 0x0f 0x0f 0xbb. */
FNIEMOP_STUB(iemOp_3Dnow_pswapd_Pq_Qq);

/** Opcode 0x0f 0x0f 0xbf. */
FNIEMOP_STUB(iemOp_3Dnow_pavgusb_PQ_Qq);
1460
1461
/** Opcode 0x0f 0x0f - 3DNow! escape; dispatches on the trailing sub-opcode
 *  byte.  #UD when the guest CPU doesn't report 3DNow!. */
FNIEMOP_DEF(iemOp_3Dnow)
{
    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->f3DNow)
    {
        IEMOP_MNEMONIC("3Dnow");
        return IEMOP_RAISE_INVALID_OPCODE();
    }

    /* This is pretty sparse, use switch instead of table. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    switch (b)
    {
        case 0x0c: return FNIEMOP_CALL(iemOp_3Dnow_pi2fw_Pq_Qq);
        case 0x0d: return FNIEMOP_CALL(iemOp_3Dnow_pi2fd_Pq_Qq);
        case 0x1c: return FNIEMOP_CALL(iemOp_3Dnow_pf2fw_Pq_Qq);
        case 0x1d: return FNIEMOP_CALL(iemOp_3Dnow_pf2fd_Pq_Qq);
        case 0x8a: return FNIEMOP_CALL(iemOp_3Dnow_pfnacc_Pq_Qq);
        case 0x8e: return FNIEMOP_CALL(iemOp_3Dnow_pfpnacc_Pq_Qq);
        case 0x90: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpge_Pq_Qq);
        case 0x94: return FNIEMOP_CALL(iemOp_3Dnow_pfmin_Pq_Qq);
        case 0x96: return FNIEMOP_CALL(iemOp_3Dnow_pfrcp_Pq_Qq);
        case 0x97: return FNIEMOP_CALL(iemOp_3Dnow_pfrsqrt_Pq_Qq);
        case 0x9a: return FNIEMOP_CALL(iemOp_3Dnow_pfsub_Pq_Qq);
        case 0x9e: return FNIEMOP_CALL(iemOp_3Dnow_pfadd_PQ_Qq);
        case 0xa0: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpgt_Pq_Qq);
        case 0xa4: return FNIEMOP_CALL(iemOp_3Dnow_pfmax_Pq_Qq);
        case 0xa6: return FNIEMOP_CALL(iemOp_3Dnow_pfrcpit1_Pq_Qq);
        case 0xa7: return FNIEMOP_CALL(iemOp_3Dnow_pfrsqit1_Pq_Qq);
        case 0xaa: return FNIEMOP_CALL(iemOp_3Dnow_pfsubr_Pq_Qq);
        case 0xae: return FNIEMOP_CALL(iemOp_3Dnow_pfacc_PQ_Qq);
        case 0xb0: return FNIEMOP_CALL(iemOp_3Dnow_pfcmpeq_Pq_Qq);
        case 0xb4: return FNIEMOP_CALL(iemOp_3Dnow_pfmul_Pq_Qq);
        case 0xb6: return FNIEMOP_CALL(iemOp_3Dnow_pfrcpit2_Pq_Qq);
        case 0xb7: return FNIEMOP_CALL(iemOp_3Dnow_pmulhrw_Pq_Qq);
        case 0xbb: return FNIEMOP_CALL(iemOp_3Dnow_pswapd_Pq_Qq);
        case 0xbf: return FNIEMOP_CALL(iemOp_3Dnow_pavgusb_PQ_Qq);
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
1503
1504
/*
 * SSE/SSE2 move instructions 0x0f 0x10..0x17 - all unimplemented stubs.
 * The long names enumerate the forms presumably selected by the 0x66/0xf3/0xf2
 * prefixes ("NEXT" marks the ones queued for implementation).
 */

/** Opcode 0x0f 0x10. */
FNIEMOP_STUB(iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x11. */
FNIEMOP_STUB(iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd);
/** Opcode 0x0f 0x12. */
FNIEMOP_STUB(iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq); //NEXT
/** Opcode 0x0f 0x13. */
FNIEMOP_STUB(iemOp_movlps_Mq_Vq__movlpd_Mq_Vq); //NEXT
/** Opcode 0x0f 0x14. */
FNIEMOP_STUB(iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq);
/** Opcode 0x0f 0x15. */
FNIEMOP_STUB(iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq);
/** Opcode 0x0f 0x16. */
FNIEMOP_STUB(iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq); //NEXT
/** Opcode 0x0f 0x17. */
FNIEMOP_STUB(iemOp_movhps_Mq_Vq__movhpd_Mq_Vq); //NEXT
1521
1522
/** Opcode 0x0f 0x18 - group 16 prefetch hints.  Memory encodings only;
 *  register (mod=3) encodings are #UD. */
FNIEMOP_DEF(iemOp_prefetch_Grp16)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 4: /* Aliased to /0 for the time being according to AMD. */
            case 5: /* Aliased to /0 for the time being according to AMD. */
            case 6: /* Aliased to /0 for the time being according to AMD. */
            case 7: /* Aliased to /0 for the time being according to AMD. */
            case 0: IEMOP_MNEMONIC("prefetchNTA m8"); break;
            case 1: IEMOP_MNEMONIC("prefetchT0  m8"); break;
            case 2: IEMOP_MNEMONIC("prefetchT1  m8"); break;
            case 3: IEMOP_MNEMONIC("prefetchT2  m8"); break;
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }

        /* Decode the operand, but perform no access - prefetch is a NOP here. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        /* Currently a NOP. */
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }

    return IEMOP_RAISE_INVALID_OPCODE();
}
1554
1555
/** Opcode 0x0f 0x19..0x1f - multi-byte NOP Ev.  The ModR/M operand is decoded
 *  (consuming any SIB/displacement bytes) but never accessed. */
FNIEMOP_DEF(iemOp_nop_Ev)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEM_MC_BEGIN(0, 0);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        /* Currently a NOP. */
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1578
1579
/** Opcode 0x0f 0x20 - mov Rd,Cd (read control register). */
FNIEMOP_DEF(iemOp_mov_Rd_Cd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Rd,Cd");
    IEMOP_HLP_MIN_386();
    /* The operand size is forced: 64-bit in long mode, 32-bit otherwise. */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMovCr8In32Bit)
            return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 are accessible; the rest are #UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_HLP_DONE_DECODING();

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Cd, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB, iCrReg);
}
1611
1612
/** Opcode 0x0f 0x21 - mov Rd,Dd (read debug register).  REX.R is invalid as
 *  there are no debug registers above DR7. */
FNIEMOP_DEF(iemOp_mov_Rd_Dd)
{
    IEMOP_MNEMONIC("mov Rd,Dd");
    IEMOP_HLP_MIN_386();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* NOTE(review): uses IEMOP_HLP_NO_LOCK_PREFIX while iemOp_mov_Dd_Rd uses
       the DONE_DECODING variant - confirm whether this is intentional. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Rd_Dd,
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK));
}
1626
1627
/** Opcode 0x0f 0x22 - mov Cd,Rd (write control register). */
FNIEMOP_DEF(iemOp_mov_Cd_Rd)
{
    /* mod is ignored, as is operand size overrides. */
    IEMOP_MNEMONIC("mov Cd,Rd");
    IEMOP_HLP_MIN_386();
    /* The operand size is forced: 64-bit in long mode, 32-bit otherwise. */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_64BIT;
    else
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_32BIT;

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    uint8_t iCrReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;
    if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
    {
        /* The lock prefix can be used to encode CR8 accesses on some CPUs. */
        if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fMovCr8In32Bit)
            return IEMOP_RAISE_INVALID_OPCODE(); /* #UD takes precedence over #GP(), see test. */
        iCrReg |= 8;
    }
    /* Only CR0, CR2, CR3, CR4 and CR8 are accessible; the rest are #UD. */
    switch (iCrReg)
    {
        case 0: case 2: case 3: case 4: case 8:
            break;
        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    IEMOP_HLP_DONE_DECODING();

    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Cd_Rd, iCrReg, (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1659
1660
/** Opcode 0x0f 0x23 - mov Dd,Rd (write debug register).  REX.R is invalid as
 *  there are no debug registers above DR7. */
FNIEMOP_DEF(iemOp_mov_Dd_Rd)
{
    IEMOP_MNEMONIC("mov Dd,Rd");
    IEMOP_HLP_MIN_386();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_R)
        return IEMOP_RAISE_INVALID_OPCODE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_mov_Dd_Rd,
                                   ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK),
                                   (X86_MODRM_RM_MASK & bRm) | pIemCpu->uRexB);
}
1674
1675
/** Opcode 0x0f 0x24 - mov Rd,Td (read test register): #UD on the CPUs we
 *  currently emulate. */
FNIEMOP_DEF(iemOp_mov_Rd_Td)
{
    IEMOP_MNEMONIC("mov Rd,Td");
    /** @todo works on 386 and 486. */
    /* The RM byte is not considered, see testcase. */
    return IEMOP_RAISE_INVALID_OPCODE();
}
1684
1685
/** Opcode 0x0f 0x26 - mov Td,Rd (write test register): #UD on the CPUs we
 *  currently emulate. */
FNIEMOP_DEF(iemOp_mov_Td_Rd)
{
    IEMOP_MNEMONIC("mov Td,Rd");
    /** @todo works on 386 and 486. */
    /* The RM byte is not considered, see testcase. */
    return IEMOP_RAISE_INVALID_OPCODE();
}
1694
1695
/** Opcode 0x0f 0x28 - movaps/movapd Vps,Wps: aligned 128-bit load into an XMM
 *  register (movapd when the 0x66 operand-size prefix is present). */
FNIEMOP_DEF(iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd)
{
    IEMOP_MNEMONIC(!(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? "movaps r,mr" : "movapd r,mr");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * Register, register.
         */
        IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES();
        IEM_MC_BEGIN(0, 0);
        /* movaps needs SSE, movapd needs SSE2. */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP))
            IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
        else
            IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
        IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
        IEM_MC_COPY_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg,
                              (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * Register, memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint128_t, uSrc); /** @todo optimize this one day... */
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES(); /** @todo check if this is delayed this long for REPZ/NZ */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP))
            IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
        else
            IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
        IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();

        IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
        IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uSrc);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1743
1744
/** Opcode 0x0f 0x29 - movaps/movapd Wps,Vps: aligned 128-bit store from an
 *  XMM register (movapd when the 0x66 operand-size prefix is present). */
FNIEMOP_DEF(iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd)
{
    IEMOP_MNEMONIC(!(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? "movaps mr,r" : "movapd mr,r");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * Register, register.
         */
        IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES();
        IEM_MC_BEGIN(0, 0);
        /* movaps needs SSE, movapd needs SSE2. */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP))
            IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
        else
            IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
        IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
        IEM_MC_COPY_XREG_U128((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB,
                              ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * Memory, register.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint128_t, uSrc); /** @todo optimize this one day... */
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES(); /** @todo check if this is delayed this long for REPZ/NZ */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP))
            IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
        else
            IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
        IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ(); /* Only reading the XMM state. */

        IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_MEM_U128_ALIGN_SSE(pIemCpu->iEffSeg, GCPtrEffSrc, uSrc);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
1792
1793
/** Opcode 0x0f 0x2a.  Unimplemented stub. */
FNIEMOP_STUB(iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey); //NEXT
1796
1797
1798/** Opcode 0x0f 0x2b. */
1799FNIEMOP_DEF(iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd)
1800{
1801 IEMOP_MNEMONIC(!(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? "movntps mr,r" : "movntpd mr,r");
1802 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
1803 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
1804 {
1805 /*
1806 * Register, memory.
1807 */
1808 IEM_MC_BEGIN(0, 2);
1809 IEM_MC_LOCAL(uint128_t, uSrc); /** @todo optimize this one day... */
1810 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
1811
1812 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
1813 IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES(); /** @todo check if this is delayed this long for REPZ/NZ */
1814 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP))
1815 IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
1816 else
1817 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
1818 IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
1819
1820 IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
1821 IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uSrc);
1822
1823 IEM_MC_ADVANCE_RIP();
1824 IEM_MC_END();
1825 }
1826 /* The register, register encoding is invalid. */
1827 else
1828 return IEMOP_RAISE_INVALID_OPCODE();
1829 return VINF_SUCCESS;
1830}
1831
1832
/* SSE/SSE2 conversion and compare instructions 0x0f 0x2c..0x2f - stubs. */

/** Opcode 0x0f 0x2c. */
FNIEMOP_STUB(iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd); //NEXT
/** Opcode 0x0f 0x2d. */
FNIEMOP_STUB(iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd);
/** Opcode 0x0f 0x2e. */
FNIEMOP_STUB(iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd); //NEXT
/** Opcode 0x0f 0x2f. */
FNIEMOP_STUB(iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd);
1841
1842
/** Opcode 0x0f 0x30 - wrmsr; deferred to iemCImpl_wrmsr. */
FNIEMOP_DEF(iemOp_wrmsr)
{
    IEMOP_MNEMONIC("wrmsr");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_wrmsr);
}
1850
1851
/** Opcode 0x0f 0x31 - rdtsc; deferred to iemCImpl_rdtsc. */
FNIEMOP_DEF(iemOp_rdtsc)
{
    IEMOP_MNEMONIC("rdtsc");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdtsc);
}
1859
1860
/** Opcode 0x0f 0x32 - rdmsr; deferred to iemCImpl_rdmsr.
 *  (The comment previously said 0x0f 0x33, but RDMSR is 0F 32.) */
FNIEMOP_DEF(iemOp_rdmsr)
{
    IEMOP_MNEMONIC("rdmsr");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_rdmsr);
}
1868
1869
/** Opcode 0x0f 0x33 - rdpmc (the comment previously said 0x34). */
FNIEMOP_STUB(iemOp_rdpmc);
/** Opcode 0x0f 0x34. */
FNIEMOP_STUB(iemOp_sysenter);
/** Opcode 0x0f 0x35. */
FNIEMOP_STUB(iemOp_sysexit);
/** Opcode 0x0f 0x37. */
FNIEMOP_STUB(iemOp_getsec);
/** Opcode 0x0f 0x38. */
FNIEMOP_UD_STUB(iemOp_3byte_Esc_A4); /* Here there be dragons... */
/** Opcode 0x0f 0x3a. */
FNIEMOP_UD_STUB(iemOp_3byte_Esc_A5); /* Here there be dragons... */
1882
1883
/**
 * Implements a conditional move.
 *
 * Wish there was an obvious way to do this where we could share and reduce
 * code bloat.
 *
 * Expands to the full CMOVcc Gv,Ev body: register and memory source forms
 * for the 16/32/64-bit operand sizes.  Note that in the 32-bit cases the
 * destination's high qword bits are cleared even when the condition is
 * false (the IEM_MC_ELSE branches), and that the memory forms read the
 * source operand unconditionally before testing the condition.
 *
 * @param a_Cnd The conditional "microcode" operation.
 */
#define CMOV_X(a_Cnd) \
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm); \
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)) \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 1); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                a_Cnd { \
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB); \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } \
    else \
    { \
        switch (pIemCpu->enmEffOpSize) \
        { \
            case IEMMODE_16BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint16_t, u16Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_32BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint32_t, u32Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp); \
                } IEM_MC_ELSE() { \
                    IEM_MC_CLEAR_HIGH_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            case IEMMODE_64BIT: \
                IEM_MC_BEGIN(0, 2); \
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc); \
                IEM_MC_LOCAL(uint64_t, u64Tmp); \
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0); \
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc); \
                a_Cnd { \
                    IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp); \
                } IEM_MC_ENDIF(); \
                IEM_MC_ADVANCE_RIP(); \
                IEM_MC_END(); \
                return VINF_SUCCESS; \
    \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
    } do {} while (0)
1984
1985
1986
/** Opcode 0x0f 0x40 - cmovo Gv,Ev: move if OF set. */
FNIEMOP_DEF(iemOp_cmovo_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovo Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF));
}
1993
1994
/** Opcode 0x0f 0x41 - cmovno Gv,Ev: move if OF clear. */
FNIEMOP_DEF(iemOp_cmovno_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovno Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_OF));
}
2001
2002
/** Opcode 0x0f 0x42 - cmovc Gv,Ev: move if CF set. */
FNIEMOP_DEF(iemOp_cmovc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF));
}
2009
2010
/** Opcode 0x0f 0x43 - cmovnc Gv,Ev: move if CF clear. */
FNIEMOP_DEF(iemOp_cmovnc_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnc Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF));
}
2017
2018
/** Opcode 0x0f 0x44. */
FNIEMOP_DEF(iemOp_cmove_Gv_Ev)
{
    IEMOP_MNEMONIC("cmove Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)); /* Move if equal (ZF=1); aka cmovz. */
}
2025
2026
/** Opcode 0x0f 0x45. */
FNIEMOP_DEF(iemOp_cmovne_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovne Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF)); /* Move if not equal (ZF=0); aka cmovnz. */
}
2033
2034
/** Opcode 0x0f 0x46. */
FNIEMOP_DEF(iemOp_cmovbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF)); /* Move if below or equal (CF=1 or ZF=1). */
}
2041
2042
/** Opcode 0x0f 0x47. */
FNIEMOP_DEF(iemOp_cmovnbe_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnbe Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF)); /* Move if above (CF=0 and ZF=0); aka cmova. */
}
2049
2050
/** Opcode 0x0f 0x48. */
FNIEMOP_DEF(iemOp_cmovs_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovs Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF)); /* Move if sign (SF=1). */
}
2057
2058
/** Opcode 0x0f 0x49. */
FNIEMOP_DEF(iemOp_cmovns_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovns Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_SF)); /* Move if not sign (SF=0). */
}
2065
2066
/** Opcode 0x0f 0x4a. */
FNIEMOP_DEF(iemOp_cmovp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF)); /* Move if parity even (PF=1); aka cmovpe. */
}
2073
2074
/** Opcode 0x0f 0x4b. */
FNIEMOP_DEF(iemOp_cmovnp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnp Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF)); /* Move if parity odd (PF=0); aka cmovpo. */
}
2081
2082
/** Opcode 0x0f 0x4c. */
FNIEMOP_DEF(iemOp_cmovl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF)); /* Move if less (SF != OF); aka cmovnge. */
}
2089
2090
/** Opcode 0x0f 0x4d. */
FNIEMOP_DEF(iemOp_cmovnl_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnl Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BITS_EQ(X86_EFL_SF, X86_EFL_OF)); /* Move if not less (SF == OF); aka cmovge. */
}
2097
2098
/** Opcode 0x0f 0x4e. */
FNIEMOP_DEF(iemOp_cmovle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF)); /* Move if less or equal (ZF=1 or SF != OF); aka cmovng. */
}
2105
2106
/** Opcode 0x0f 0x4f. */
FNIEMOP_DEF(iemOp_cmovnle_Gv_Ev)
{
    IEMOP_MNEMONIC("cmovnle Gv,Ev");
    CMOV_X(IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF)); /* Move if greater (ZF=0 and SF == OF); aka cmovg. */
}
2113
2114#undef CMOV_X
2115
/*
 * 0x0f 0x50 thru 0x5f: SSE/SSE2 floating-point move-mask, arithmetic and
 * conversion opcodes.  All of these are still stubs (FNIEMOP_STUB); the
 * '//NEXT' markers appear to tag the next candidates for implementation.
 */
/** Opcode 0x0f 0x50. */
FNIEMOP_STUB(iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd);
/** Opcode 0x0f 0x51. */
FNIEMOP_STUB(iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd);
/** Opcode 0x0f 0x52. */
FNIEMOP_STUB(iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss);
/** Opcode 0x0f 0x53. */
FNIEMOP_STUB(iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss);
/** Opcode 0x0f 0x54. */
FNIEMOP_STUB(iemOp_andps_Vps_Wps__andpd_Wpd_Vpd);
/** Opcode 0x0f 0x55. */
FNIEMOP_STUB(iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd);
/** Opcode 0x0f 0x56. */
FNIEMOP_STUB(iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd);
/** Opcode 0x0f 0x57. */
FNIEMOP_STUB(iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd);
/** Opcode 0x0f 0x58. */
FNIEMOP_STUB(iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd); //NEXT
/** Opcode 0x0f 0x59. */
FNIEMOP_STUB(iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd);//NEXT
/** Opcode 0x0f 0x5a. */
FNIEMOP_STUB(iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd);
/** Opcode 0x0f 0x5b. */
FNIEMOP_STUB(iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps);
/** Opcode 0x0f 0x5c. */
FNIEMOP_STUB(iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd);
/** Opcode 0x0f 0x5d. */
FNIEMOP_STUB(iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd);
/** Opcode 0x0f 0x5e. */
FNIEMOP_STUB(iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd);
/** Opcode 0x0f 0x5f. */
FNIEMOP_STUB(iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd);
2148
2149
/**
 * Common worker for SSE2 and MMX instructions on the forms:
 *      pxxxx xmm1, xmm2/mem128
 *      pxxxx mm1, mm2/mem32
 *
 * The 2nd operand is the first half of a register, which in the memory case
 * means a 32-bit memory access for MMX and a 128-bit aligned 64-bit memory
 * access for SSE.
 *
 * Exceptions type 4.
 */
FNIEMOP_DEF_1(iemOpCommonMmxSse_LowLow_To_Full, PCIEMOPMEDIAF1L1, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The mandatory prefix selects the instruction form: 0x66 -> SSE, none -> MMX. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.  Source is only the low qword of the XMM register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *,         pDst, 0);
                IEM_MC_ARG(uint64_t const *,    pSrc, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_PREPARE_SSE_USAGE();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U64_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.  64-bit fetch with 128-bit alignment check.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *,                 pDst,       0);
                IEM_MC_LOCAL(uint64_t,                  uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *,  pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64_ALIGN_U128(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_PREPARE_SSE_USAGE();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if (!pImpl->pfnU64)
                return IEMOP_RAISE_INVALID_OPCODE(); /* No MMX form for this instruction. */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.  Source is only the low dword of the MMX register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *,          pDst, 0);
                IEM_MC_ARG(uint32_t const *,    pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_PREPARE_FPU_USAGE();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U32_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.  Only a 32-bit fetch for the MMX form.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *,                  pDst,       0);
                IEM_MC_LOCAL(uint32_t,                  uSrc);
                IEM_MC_ARG_LOCAL_REF(uint32_t const *,  pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U32(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_PREPARE_FPU_USAGE();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE(); /* F3h/F2h prefixed forms are undefined. */
    }
}
2260
2261
/** Opcode 0x0f 0x60. */
FNIEMOP_DEF(iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklbw");
    /* Interleave the low bytes of the two operands; MMX and SSE forms via common worker. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklbw);
}
2268
2269
/** Opcode 0x0f 0x61. */
FNIEMOP_DEF(iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklwd"); /** @todo AMD mark the MMX version as 3DNow!. Intel says MMX CPUID req. */
    /* Interleave the low words of the two operands; MMX and SSE forms via common worker. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklwd);
}
2276
2277
/** Opcode 0x0f 0x62. */
FNIEMOP_DEF(iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckldq");
    /* Interleave the low dwords of the two operands; MMX and SSE forms via common worker. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpckldq);
}
2284
2285
/* 0x0f 0x63 thru 0x67: pack / compare-greater-than opcodes - still stubs. */
/** Opcode 0x0f 0x63. */
FNIEMOP_STUB(iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq);
/** Opcode 0x0f 0x64. */
FNIEMOP_STUB(iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq);
/** Opcode 0x0f 0x65. */
FNIEMOP_STUB(iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq);
/** Opcode 0x0f 0x66. */
FNIEMOP_STUB(iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq);
/** Opcode 0x0f 0x67. */
FNIEMOP_STUB(iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq);
2296
2297
/**
 * Common worker for SSE2 and MMX instructions on the forms:
 *      pxxxx xmm1, xmm2/mem128
 *      pxxxx mm1, mm2/mem64
 *
 * The 2nd operand is the second half of a register, which in the memory case
 * means a 64-bit memory access for MMX, and for SSE a 128-bit aligned access
 * where it may read the full 128 bits or only the upper 64 bits.
 *
 * Exceptions type 4.
 */
FNIEMOP_DEF_1(iemOpCommonMmxSse_HighHigh_To_Full, PCIEMOPMEDIAF1H1, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* The mandatory prefix selects the instruction form: 0x66 -> SSE, none -> MMX. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *,         pDst, 0);
                IEM_MC_ARG(uint128_t const *,   pSrc, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_PREPARE_SSE_USAGE();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *,                 pDst,       0);
                IEM_MC_LOCAL(uint128_t,                 uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc); /* Most CPUs probably only read the high qword. */

                IEM_MC_PREPARE_SSE_USAGE();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if (!pImpl->pfnU64)
                return IEMOP_RAISE_INVALID_OPCODE(); /* No MMX form for this instruction. */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *,          pDst, 0);
                IEM_MC_ARG(uint64_t const *,    pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_PREPARE_FPU_USAGE();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.  Full 64-bit fetch for the MMX form.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *,                  pDst,       0);
                IEM_MC_LOCAL(uint64_t,                  uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *,  pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_PREPARE_FPU_USAGE();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE(); /* F3h/F2h prefixed forms are undefined. */
    }
}
2408
2409
/** Opcode 0x0f 0x68. */
FNIEMOP_DEF(iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhbw");
    /* Interleave the high bytes of the two operands; MMX and SSE forms via common worker. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhbw);
}
2416
2417
/** Opcode 0x0f 0x69. */
FNIEMOP_DEF(iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhwd");
    /* Interleave the high words of the two operands; MMX and SSE forms via common worker. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhwd);
}
2424
2425
/** Opcode 0x0f 0x6a. */
FNIEMOP_DEF(iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhdq");
    /* Interleave the high dwords of the two operands; MMX and SSE forms via common worker. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhdq);
}
2432
/** Opcode 0x0f 0x6b - packssdw; still a stub. */
FNIEMOP_STUB(iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq);
2435
2436
/** Opcode 0x0f 0x6c. */
FNIEMOP_DEF(iemOp_punpcklqdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpcklqdq");
    /* SSE2 only; the worker raises invalid opcode for the (non-existent) MMX form
       since g_iemAImpl_punpcklqdq presumably has no pfnU64. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_LowLow_To_Full, &g_iemAImpl_punpcklqdq);
}
2443
2444
/** Opcode 0x0f 0x6d. */
FNIEMOP_DEF(iemOp_punpckhqdq_Vdq_Wdq)
{
    IEMOP_MNEMONIC("punpckhqdq");
    /* SSE2 only; the worker raises invalid opcode for the (non-existent) MMX form
       since g_iemAImpl_punpckhqdq presumably has no pfnU64. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse_HighHigh_To_Full, &g_iemAImpl_punpckhqdq);
}
2451
2452
/** Opcode 0x0f 0x6e. */
FNIEMOP_DEF(iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey)
{
    /*
     * movd/movq from a general register or memory into an XMM (66h prefix)
     * or MMX (no prefix) register.  REX.W selects the 64-bit (movq) form.
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            IEMOP_MNEMONIC("movd/q Wd/q,Ed/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* XMM, greg*/
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    /* 64-bit source; destination high qword is zeroed. */
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                    IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
                }
                else
                {
                    /* 32-bit source; destination upper 96 bits are zeroed. */
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                    IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* XMM, [mem] */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT(); /** @todo order */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_XREG_U64_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_XREG_U32_ZX_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq/d Pd/q,Ed/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* MMX, greg */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                    IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                else
                    IEM_MC_FETCH_GREG_U32_ZX_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* MMX, [mem] */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                    IEM_MC_STORE_MREG_U32_ZX_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE(); /* F3h/F2h prefixed forms are undefined. */
    }
}
2557
2558
/** Opcode 0x0f 0x6f. */
FNIEMOP_DEF(iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq)
{
    /*
     * movq (MMX, no prefix), movdqa (SSE, 66h) or movdqu (SSE, F3h) load.
     * Only movdqa enforces 16-byte alignment of the memory operand.
     */
    bool fAligned = false;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE aligned */
            fAligned = true;
            /* fall thru - aligned and unaligned forms share the code below. */
        case IEM_OP_PRF_REPZ: /* SSE unaligned */
            if (fAligned)
                IEMOP_MNEMONIC("movdqa Vdq,Wdq");
            else
                IEMOP_MNEMONIC("movdqu Vdq,Wdq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.  No alignment concern here.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 0);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
                IEM_MC_COPY_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg,
                                      (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_LOCAL(RTGCPTR,   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
                if (fAligned)
                    IEM_MC_FETCH_MEM_U128_ALIGN_SSE(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                else
                    IEM_MC_FETCH_MEM_U128(u128Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_STORE_XREG_U128(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u128Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq Pq,Qq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
                IEM_MC_FETCH_MREG_U64(u64Tmp, bRm & X86_MODRM_RM_MASK);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_STORE_MREG_U64((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE(); /* F2h prefixed form is undefined. */
    }
}
2655
2656
/** Opcode 0x0f 0x70. The immediate here is evil! */
FNIEMOP_DEF(iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib)
{
    /*
     * pshufw (MMX ext, no prefix), pshufd (66h), pshuflw (F2h), pshufhw (F3h).
     * The shuffle-order immediate follows the ModR/M operand bytes, so it is
     * fetched only after the effective address has been calculated ("evil").
     */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
        case IEM_OP_PRF_REPNZ: /* SSE */
        case IEM_OP_PRF_REPZ: /* SSE */
        {
            /* Pick the 128-bit worker matching the mandatory prefix. */
            PFNIEMAIMPLMEDIAPSHUF pfnAImpl;
            switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
            {
                case IEM_OP_PRF_SIZE_OP:
                    IEMOP_MNEMONIC("pshufd Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshufd;
                    break;
                case IEM_OP_PRF_REPNZ:
                    IEMOP_MNEMONIC("pshuflw Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshuflw;
                    break;
                case IEM_OP_PRF_REPZ:
                    IEMOP_MNEMONIC("pshufhw Vdq,Wdq,Ib");
                    pfnAImpl = iemAImpl_pshufhw;
                    break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.  Immediate directly follows the ModR/M byte.
                 */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint128_t *,         pDst, 0);
                IEM_MC_ARG(uint128_t const *,   pSrc, 1);
                IEM_MC_ARG_CONST(uint8_t,       bEvilArg, /*=*/ bEvil, 2);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_PREPARE_SSE_USAGE();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.  The immediate comes after any SIB/displacement bytes.
                 */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint128_t *,                 pDst,       0);
                IEM_MC_LOCAL(uint128_t,                 uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEM_MC_ARG_CONST(uint8_t,               bEvilArg, /*=*/ bEvil, 2);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();

                IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_PREPARE_SSE_USAGE();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_3(pfnAImpl, pDst, pSrc, bEvilArg);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
        }

        case 0: /* MMX Extension */
            IEMOP_MNEMONIC("pshufw Pq,Qq,Ib");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.  Immediate directly follows the ModR/M byte.
                 */
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,          pDst, 0);
                IEM_MC_ARG(uint64_t const *,    pSrc, 1);
                IEM_MC_ARG_CONST(uint8_t,       bEvilArg, /*=*/ bEvil, 2);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();
                IEM_MC_PREPARE_FPU_USAGE();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.  The immediate comes after any SIB/displacement bytes.
                 */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,                  pDst,       0);
                IEM_MC_LOCAL(uint64_t,                  uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *,  pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR,                   GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                uint8_t bEvil; IEM_OPCODE_GET_NEXT_U8(&bEvil);
                IEM_MC_ARG_CONST(uint8_t,               bEvilArg, /*=*/ bEvil, 2);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();

                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_PREPARE_FPU_USAGE();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_3(iemAImpl_pshufw, pDst, pSrc, bEvilArg);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
2785
2786
/* Group 12 (0x0f 0x71) word-shift-by-immediate workers - register forms only; all stubs. */
/** Opcode 0x0f 0x71 11/2. */
FNIEMOP_STUB_1(iemOp_Grp12_psrlw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/2. */
FNIEMOP_STUB_1(iemOp_Grp12_psrlw_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x71 11/4. */
FNIEMOP_STUB_1(iemOp_Grp12_psraw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/4. */
FNIEMOP_STUB_1(iemOp_Grp12_psraw_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x71 11/6. */
FNIEMOP_STUB_1(iemOp_Grp12_psllw_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x71 11/6. */
FNIEMOP_STUB_1(iemOp_Grp12_psllw_Udq_Ib, uint8_t, bRm);
2804
2805
2806/** Opcode 0x0f 0x71. */
2807FNIEMOP_DEF(iemOp_Grp12)
2808{
2809 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2810 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
2811 return IEMOP_RAISE_INVALID_OPCODE();
2812 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
2813 {
2814 case 0: case 1: case 3: case 5: case 7:
2815 return IEMOP_RAISE_INVALID_OPCODE();
2816 case 2:
2817 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
2818 {
2819 case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psrlw_Nq_Ib, bRm);
2820 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psrlw_Udq_Ib, bRm);
2821 default: return IEMOP_RAISE_INVALID_OPCODE();
2822 }
2823 case 4:
2824 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
2825 {
2826 case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psraw_Nq_Ib, bRm);
2827 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psraw_Udq_Ib, bRm);
2828 default: return IEMOP_RAISE_INVALID_OPCODE();
2829 }
2830 case 6:
2831 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
2832 {
2833 case 0: return FNIEMOP_CALL_1(iemOp_Grp12_psllw_Nq_Ib, bRm);
2834 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp12_psllw_Udq_Ib, bRm);
2835 default: return IEMOP_RAISE_INVALID_OPCODE();
2836 }
2837 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2838 }
2839}
2840
2841
/* Group 13 (0x0f 0x72) dword-shift-by-immediate workers - register forms only; all stubs. */
/** Opcode 0x0f 0x72 11/2. */
FNIEMOP_STUB_1(iemOp_Grp13_psrld_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/2. */
FNIEMOP_STUB_1(iemOp_Grp13_psrld_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x72 11/4. */
FNIEMOP_STUB_1(iemOp_Grp13_psrad_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/4. */
FNIEMOP_STUB_1(iemOp_Grp13_psrad_Udq_Ib, uint8_t, bRm);

/** Opcode 0x0f 0x72 11/6. */
FNIEMOP_STUB_1(iemOp_Grp13_pslld_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x72 11/6. */
FNIEMOP_STUB_1(iemOp_Grp13_pslld_Udq_Ib, uint8_t, bRm);
2859
2860
2861/** Opcode 0x0f 0x72. */
2862FNIEMOP_DEF(iemOp_Grp13)
2863{
2864 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2865 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
2866 return IEMOP_RAISE_INVALID_OPCODE();
2867 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
2868 {
2869 case 0: case 1: case 3: case 5: case 7:
2870 return IEMOP_RAISE_INVALID_OPCODE();
2871 case 2:
2872 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
2873 {
2874 case 0: return FNIEMOP_CALL_1(iemOp_Grp13_psrld_Nq_Ib, bRm);
2875 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_psrld_Udq_Ib, bRm);
2876 default: return IEMOP_RAISE_INVALID_OPCODE();
2877 }
2878 case 4:
2879 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
2880 {
2881 case 0: return FNIEMOP_CALL_1(iemOp_Grp13_psrad_Nq_Ib, bRm);
2882 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_psrad_Udq_Ib, bRm);
2883 default: return IEMOP_RAISE_INVALID_OPCODE();
2884 }
2885 case 6:
2886 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
2887 {
2888 case 0: return FNIEMOP_CALL_1(iemOp_Grp13_pslld_Nq_Ib, bRm);
2889 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp13_pslld_Udq_Ib, bRm);
2890 default: return IEMOP_RAISE_INVALID_OPCODE();
2891 }
2892 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2893 }
2894}
2895
2896
/* Group 14 (0x0f 0x73) qword/dqword-shift-by-immediate workers - register forms only; all stubs. */
/** Opcode 0x0f 0x73 11/2. */
FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/2. */
FNIEMOP_STUB_1(iemOp_Grp14_psrlq_Udq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/3. */
FNIEMOP_STUB_1(iemOp_Grp14_psrldq_Udq_Ib, uint8_t, bRm); //NEXT

/** Opcode 0x0f 0x73 11/6. */
FNIEMOP_STUB_1(iemOp_Grp14_psllq_Nq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/6. */
FNIEMOP_STUB_1(iemOp_Grp14_psllq_Udq_Ib, uint8_t, bRm);

/** Opcode 0x66 0x0f 0x73 11/7. */
FNIEMOP_STUB_1(iemOp_Grp14_pslldq_Udq_Ib, uint8_t, bRm); //NEXT
2914
2915
2916/** Opcode 0x0f 0x73. */
2917FNIEMOP_DEF(iemOp_Grp14)
2918{
2919 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
2920 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
2921 return IEMOP_RAISE_INVALID_OPCODE();
2922 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
2923 {
2924 case 0: case 1: case 4: case 5:
2925 return IEMOP_RAISE_INVALID_OPCODE();
2926 case 2:
2927 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
2928 {
2929 case 0: return FNIEMOP_CALL_1(iemOp_Grp14_psrlq_Nq_Ib, bRm);
2930 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psrlq_Udq_Ib, bRm);
2931 default: return IEMOP_RAISE_INVALID_OPCODE();
2932 }
2933 case 3:
2934 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
2935 {
2936 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psrldq_Udq_Ib, bRm);
2937 default: return IEMOP_RAISE_INVALID_OPCODE();
2938 }
2939 case 6:
2940 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
2941 {
2942 case 0: return FNIEMOP_CALL_1(iemOp_Grp14_psllq_Nq_Ib, bRm);
2943 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_psllq_Udq_Ib, bRm);
2944 default: return IEMOP_RAISE_INVALID_OPCODE();
2945 }
2946 case 7:
2947 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
2948 {
2949 case IEM_OP_PRF_SIZE_OP: return FNIEMOP_CALL_1(iemOp_Grp14_pslldq_Udq_Ib, bRm);
2950 default: return IEMOP_RAISE_INVALID_OPCODE();
2951 }
2952 IEM_NOT_REACHED_DEFAULT_CASE_RET();
2953 }
2954}
2955
2956
2957/**
2958 * Common worker for SSE2 and MMX instructions on the forms:
2959 * pxxx mm1, mm2/mem64
2960 * pxxx xmm1, xmm2/mem128
2961 *
2962 * Proper alignment of the 128-bit operand is enforced.
2963 * Exceptions type 4. SSE2 and MMX cpuid checks.
2964 */
FNIEMOP_DEF_1(iemOpCommonMmxSse2_FullFull_To_Full, PCIEMOPMEDIAF2, pImpl)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Operand-size prefix (0x66) selects the SSE2/XMM form, no prefix the
       MMX form; the F2/F3 (REPNZ/REPZ) encodings are invalid here. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_ARG(uint128_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_PREPARE_SSE_USAGE();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint128_t *, pDst, 0);
                IEM_MC_LOCAL(uint128_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint128_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                /* Effective address must be decoded before "done decoding";
                   the aligned fetch enforces the 16-byte alignment (#GP). */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U128_ALIGN_SSE(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_PREPARE_SSE_USAGE();
                IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(2, 0);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_ARG(uint64_t const *, pSrc, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_PREPARE_FPU_USAGE();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(2, 2);
                IEM_MC_ARG(uint64_t *, pDst, 0);
                IEM_MC_LOCAL(uint64_t, uSrc);
                IEM_MC_ARG_LOCAL_REF(uint64_t const *, pSrc, uSrc, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                /* No alignment restriction on the 64-bit MMX memory operand. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_FETCH_MEM_U64(uSrc, pIemCpu->iEffSeg, GCPtrEffSrc);

                IEM_MC_PREPARE_FPU_USAGE();
                IEM_MC_REF_MREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_CALL_MMX_AIMPL_2(pImpl->pfnU64, pDst, pSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
3062
3063
3064/** Opcode 0x0f 0x74. */
FNIEMOP_DEF(iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqb");
    /* Packed byte equality compare; MMX vs. SSE2 form is selected by the prefix in the common worker. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqb);
}
3070
3071
3072/** Opcode 0x0f 0x75. */
FNIEMOP_DEF(iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqw");
    /* Packed word equality compare; MMX vs. SSE2 form is selected by the prefix in the common worker. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqw);
}
3078
3079
3080/** Opcode 0x0f 0x76. */
/* NOTE(review): the identifier below is misspelled ('pcmped' instead of 'pcmpeqd');
   renaming would require touching the opcode dispatch table elsewhere, so left as-is. */
FNIEMOP_DEF(iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pcmpeqd");
    /* Packed dword equality compare; MMX vs. SSE2 form is selected by the prefix in the common worker. */
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pcmpeqd);
}
3086
3087
/* Not yet implemented opcodes: FNIEMOP_STUB asserts/stubs, FNIEMOP_UD_STUB raises #UD. */
/** Opcode 0x0f 0x77. */
FNIEMOP_STUB(iemOp_emms);
/** Opcode 0x0f 0x78. */
FNIEMOP_UD_STUB(iemOp_vmread_AmdGrp17);
/** Opcode 0x0f 0x79. */
FNIEMOP_UD_STUB(iemOp_vmwrite);
/** Opcode 0x0f 0x7c. */
FNIEMOP_STUB(iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps);
/** Opcode 0x0f 0x7d. */
FNIEMOP_STUB(iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps);
3098
3099
3100/** Opcode 0x0f 0x7e. */
FNIEMOP_DEF(iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* 0x66 prefix = SSE (XMM source), no prefix = MMX source;
       REX.W selects the 64-bit (movq) vs. 32-bit (movd) transfer in both forms. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE */
            IEMOP_MNEMONIC("movd/q Ed/q,Wd/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* greg, XMM */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* [mem], XMM */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_XREG_U64(u64Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_XREG_U32(u32Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                    IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq/d Ed/q,Pd/q");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* greg, MMX */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* [mem], MMX */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 1);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
                if (pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_REX_W)
                {
                    IEM_MC_LOCAL(uint64_t, u64Tmp);
                    IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);
                }
                else
                {
                    IEM_MC_LOCAL(uint32_t, u32Tmp);
                    IEM_MC_FETCH_MREG_U32(u32Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                    IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffSrc, u32Tmp);
                }
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
3210
3211
3212/** Opcode 0x0f 0x7f. */
FNIEMOP_DEF(iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    bool fAligned = false;
    /* 0x66 = movdqa (aligned store), F3 = movdqu (unaligned), no prefix = MMX movq. */
    switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        case IEM_OP_PRF_SIZE_OP: /* SSE aligned */
            fAligned = true;
            /* fall thru - aligned and unaligned forms share the code below. */
        case IEM_OP_PRF_REPZ: /* SSE unaligned */
            if (fAligned)
                IEMOP_MNEMONIC("movdqa Wdq,Vdq");
            else
                IEMOP_MNEMONIC("movdqu Wdq,Vdq");
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 0);
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE();
                IEM_MC_COPY_XREG_U128((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB,
                                      ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint128_t, u128Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();

                IEM_MC_FETCH_XREG_U128(u128Tmp, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                /* movdqa enforces 16-byte alignment of the memory destination; movdqu does not. */
                if (fAligned)
                    IEM_MC_STORE_MEM_U128_ALIGN_SSE(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);
                else
                    IEM_MC_STORE_MEM_U128(pIemCpu->iEffSeg, GCPtrEffSrc, u128Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case 0: /* MMX */
            IEMOP_MNEMONIC("movq Qq,Pq");

            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /*
                 * Register, register.
                 */
                /** @todo testcase: REX.B / REX.R and MMX register indexing. Ignored? */
                /** @todo testcase: REX.B / REX.R and segment register indexing. Ignored? */
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
                IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_STORE_MREG_U64(bRm & X86_MODRM_RM_MASK, u64Tmp);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /*
                 * Register, memory.
                 */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Tmp);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();

                IEM_MC_FETCH_MREG_U64(u64Tmp, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        default:
            return IEMOP_RAISE_INVALID_OPCODE();
    }
}
3311
3312
3313
3314/** Opcode 0x0f 0x80. */
FNIEMOP_DEF(iemOp_jo_Jv)
{
    IEMOP_MNEMONIC("jo Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Branch taken when OF is set; displacement width follows effective operand size. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3348
3349
3350/** Opcode 0x0f 0x81. */
FNIEMOP_DEF(iemOp_jno_Jv)
{
    IEMOP_MNEMONIC("jno Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Inverted condition: branch taken when OF is clear (taken path in the ELSE arm). */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3384
3385
3386/** Opcode 0x0f 0x82. */
FNIEMOP_DEF(iemOp_jc_Jv)
{
    IEMOP_MNEMONIC("jc/jb/jnae Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Branch taken when CF is set. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3420
3421
3422/** Opcode 0x0f 0x83. */
FNIEMOP_DEF(iemOp_jnc_Jv)
{
    IEMOP_MNEMONIC("jnc/jnb/jae Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Inverted condition: branch taken when CF is clear (taken path in the ELSE arm). */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3456
3457
3458/** Opcode 0x0f 0x84. */
FNIEMOP_DEF(iemOp_je_Jv)
{
    IEMOP_MNEMONIC("je/jz Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Branch taken when ZF is set. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3492
3493
3494/** Opcode 0x0f 0x85. */
FNIEMOP_DEF(iemOp_jne_Jv)
{
    IEMOP_MNEMONIC("jne/jnz Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Inverted condition: branch taken when ZF is clear (taken path in the ELSE arm). */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3528
3529
3530/** Opcode 0x0f 0x86. */
FNIEMOP_DEF(iemOp_jbe_Jv)
{
    IEMOP_MNEMONIC("jbe/jna Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Branch taken when CF or ZF is set (unsigned below-or-equal). */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3564
3565
3566/** Opcode 0x0f 0x87. */
FNIEMOP_DEF(iemOp_jnbe_Jv)
{
    IEMOP_MNEMONIC("jnbe/ja Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Inverted condition: branch taken when both CF and ZF are clear (unsigned above). */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3600
3601
3602/** Opcode 0x0f 0x88. */
FNIEMOP_DEF(iemOp_js_Jv)
{
    IEMOP_MNEMONIC("js Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Branch taken when SF is set. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3636
3637
3638/** Opcode 0x0f 0x89. */
FNIEMOP_DEF(iemOp_jns_Jv)
{
    IEMOP_MNEMONIC("jns Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Inverted condition: branch taken when SF is clear (taken path in the ELSE arm). */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3672
3673
3674/** Opcode 0x0f 0x8a. */
FNIEMOP_DEF(iemOp_jp_Jv)
{
    IEMOP_MNEMONIC("jp Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Branch taken when PF is set. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3708
3709
3710/** Opcode 0x0f 0x8b. */
3711FNIEMOP_DEF(iemOp_jnp_Jv)
3712{
3713 IEMOP_MNEMONIC("jo Jv");
3714 IEMOP_HLP_MIN_386();
3715 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
3716 if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
3717 {
3718 int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
3719 IEMOP_HLP_NO_LOCK_PREFIX();
3720
3721 IEM_MC_BEGIN(0, 0);
3722 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
3723 IEM_MC_ADVANCE_RIP();
3724 } IEM_MC_ELSE() {
3725 IEM_MC_REL_JMP_S16(i16Imm);
3726 } IEM_MC_ENDIF();
3727 IEM_MC_END();
3728 }
3729 else
3730 {
3731 int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
3732 IEMOP_HLP_NO_LOCK_PREFIX();
3733
3734 IEM_MC_BEGIN(0, 0);
3735 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
3736 IEM_MC_ADVANCE_RIP();
3737 } IEM_MC_ELSE() {
3738 IEM_MC_REL_JMP_S32(i32Imm);
3739 } IEM_MC_ENDIF();
3740 IEM_MC_END();
3741 }
3742 return VINF_SUCCESS;
3743}
3744
3745
3746/** Opcode 0x0f 0x8c. */
FNIEMOP_DEF(iemOp_jl_Jv)
{
    IEMOP_MNEMONIC("jl/jnge Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Branch taken when SF != OF (signed less-than). */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3780
3781
3782/** Opcode 0x0f 0x8d. */
FNIEMOP_DEF(iemOp_jnl_Jv)
{
    IEMOP_MNEMONIC("jnl/jge Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Inverted condition: branch taken when SF == OF (signed greater-or-equal). */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3816
3817
3818/** Opcode 0x0f 0x8e. */
FNIEMOP_DEF(iemOp_jle_Jv)
{
    IEMOP_MNEMONIC("jle/jng Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Branch taken when ZF is set or SF != OF (signed less-or-equal). */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ELSE() {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3852
3853
3854/** Opcode 0x0f 0x8f. */
FNIEMOP_DEF(iemOp_jnle_Jv)
{
    IEMOP_MNEMONIC("jnle/jg Jv");
    IEMOP_HLP_MIN_386();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    /* Inverted condition: branch taken when ZF is clear and SF == OF (signed greater). */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
    {
        int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S16(i16Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    else
    {
        int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_REL_JMP_S32(i32Imm);
        } IEM_MC_ENDIF();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3888
3889
3890/** Opcode 0x0f 0x90. */
FNIEMOP_DEF(iemOp_seto_Eb)
{
    IEMOP_MNEMONIC("seto Eb");
    IEMOP_HLP_MIN_386();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /* Sets the byte operand to 1 if OF is set, 0 otherwise. */
    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3929
3930
3931/** Opcode 0x0f 0x91. */
FNIEMOP_DEF(iemOp_setno_Eb)
{
    IEMOP_MNEMONIC("setno Eb");
    IEMOP_HLP_MIN_386();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /* Sets the byte operand to 1 if OF is clear, 0 otherwise (inverted store values). */
    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
3970
3971
3972/** Opcode 0x0f 0x92. */
FNIEMOP_DEF(iemOp_setc_Eb)
{
    IEMOP_MNEMONIC("setc Eb");
    IEMOP_HLP_MIN_386();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /* Sets the byte operand to 1 if CF is set, 0 otherwise. */
    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4011
4012
4013/** Opcode 0x0f 0x93. */
FNIEMOP_DEF(iemOp_setnc_Eb)
{
    IEMOP_MNEMONIC("setnc Eb");
    IEMOP_HLP_MIN_386();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /* Sets the byte operand to 1 if CF is clear, 0 otherwise (inverted store values). */
    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4052
4053
4054/** Opcode 0x0f 0x94. */
FNIEMOP_DEF(iemOp_sete_Eb)
{
    IEMOP_MNEMONIC("sete Eb");
    IEMOP_HLP_MIN_386();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /* Sets the byte operand to 1 if ZF is set, 0 otherwise. */
    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4093
4094
4095/** Opcode 0x0f 0x95. */
FNIEMOP_DEF(iemOp_setne_Eb)
{
    IEMOP_MNEMONIC("setne Eb");
    IEMOP_HLP_MIN_386();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /* Sets the byte operand to 1 if ZF is clear, 0 otherwise (inverted store values). */
    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     *        any way. AMD says it's "unused", whatever that means.  We're
     *        ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4134
4135
/** Opcode 0x0f 0x96 - setbe/setna Eb: set the byte operand to 1 if CF=1 or
 *  ZF=1 (below or equal, unsigned), otherwise to 0. */
FNIEMOP_DEF(iemOp_setbe_Eb)
{
    IEMOP_MNEMONIC("setbe Eb");
    IEMOP_HLP_MIN_386(); /* SETcc requires at least a 386. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4175
4176
/** Opcode 0x0f 0x97 - setnbe/seta Eb: set the byte operand to 1 if CF=0 and
 *  ZF=0 (above, unsigned), otherwise to 0 (inverse of setbe). */
FNIEMOP_DEF(iemOp_setnbe_Eb)
{
    IEMOP_MNEMONIC("setnbe Eb");
    IEMOP_HLP_MIN_386(); /* SETcc requires at least a 386. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4216
4217
/** Opcode 0x0f 0x98 - sets Eb: set the byte operand to 1 if SF=1, otherwise
 *  to 0. */
FNIEMOP_DEF(iemOp_sets_Eb)
{
    IEMOP_MNEMONIC("sets Eb");
    IEMOP_HLP_MIN_386(); /* SETcc requires at least a 386. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4257
4258
/** Opcode 0x0f 0x99 - setns Eb: set the byte operand to 1 if SF=0, otherwise
 *  to 0 (inverse of sets). */
FNIEMOP_DEF(iemOp_setns_Eb)
{
    IEMOP_MNEMONIC("setns Eb");
    IEMOP_HLP_MIN_386(); /* SETcc requires at least a 386. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4298
4299
4300/** Opcode 0x0f 0x9a. */
4301FNIEMOP_DEF(iemOp_setp_Eb)
4302{
4303 IEMOP_MNEMONIC("setnp Eb");
4304 IEMOP_HLP_MIN_386();
4305 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4306 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4307
4308 /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
4309 * any way. AMD says it's "unused", whatever that means. We're
4310 * ignoring for now. */
4311 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4312 {
4313 /* register target */
4314 IEM_MC_BEGIN(0, 0);
4315 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
4316 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
4317 } IEM_MC_ELSE() {
4318 IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
4319 } IEM_MC_ENDIF();
4320 IEM_MC_ADVANCE_RIP();
4321 IEM_MC_END();
4322 }
4323 else
4324 {
4325 /* memory target */
4326 IEM_MC_BEGIN(0, 1);
4327 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4328 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4329 IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
4330 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
4331 } IEM_MC_ELSE() {
4332 IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
4333 } IEM_MC_ENDIF();
4334 IEM_MC_ADVANCE_RIP();
4335 IEM_MC_END();
4336 }
4337 return VINF_SUCCESS;
4338}
4339
4340
/** Opcode 0x0f 0x9b - setnp/setpo Eb: set the byte operand to 1 if PF=0,
 *  otherwise to 0 (inverse of setp). */
FNIEMOP_DEF(iemOp_setnp_Eb)
{
    IEMOP_MNEMONIC("setnp Eb");
    IEMOP_HLP_MIN_386(); /* SETcc requires at least a 386. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4380
4381
/** Opcode 0x0f 0x9c - setl/setnge Eb: set the byte operand to 1 if SF!=OF
 *  (less, signed), otherwise to 0. */
FNIEMOP_DEF(iemOp_setl_Eb)
{
    IEMOP_MNEMONIC("setl Eb");
    IEMOP_HLP_MIN_386(); /* SETcc requires at least a 386. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4421
4422
/** Opcode 0x0f 0x9d - setnl/setge Eb: set the byte operand to 1 if SF==OF
 *  (greater or equal, signed), otherwise to 0 (inverse of setl). */
FNIEMOP_DEF(iemOp_setnl_Eb)
{
    IEMOP_MNEMONIC("setnl Eb");
    IEMOP_HLP_MIN_386(); /* SETcc requires at least a 386. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4462
4463
/** Opcode 0x0f 0x9e - setle/setng Eb: set the byte operand to 1 if ZF=1 or
 *  SF!=OF (less or equal, signed), otherwise to 0. */
FNIEMOP_DEF(iemOp_setle_Eb)
{
    IEMOP_MNEMONIC("setle Eb");
    IEMOP_HLP_MIN_386(); /* SETcc requires at least a 386. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4503
4504
/** Opcode 0x0f 0x9f - setnle/setg Eb: set the byte operand to 1 if ZF=0 and
 *  SF==OF (greater, signed), otherwise to 0 (inverse of setle). */
FNIEMOP_DEF(iemOp_setnle_Eb)
{
    IEMOP_MNEMONIC("setnle Eb");
    IEMOP_HLP_MIN_386(); /* SETcc requires at least a 386. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */

    /** @todo Encoding test: Check if the 'reg' field is ignored or decoded in
     * any way. AMD says it's "unused", whatever that means. We're
     * ignoring for now. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 0);
        } IEM_MC_ELSE() {
            IEM_MC_STORE_MEM_U8_CONST(pIemCpu->iEffSeg, GCPtrEffDst, 1);
        } IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
4544
4545
4546/**
4547 * Common 'push segment-register' helper.
4548 */
4549FNIEMOP_DEF_1(iemOpCommonPushSReg, uint8_t, iReg)
4550{
4551 IEMOP_HLP_NO_LOCK_PREFIX();
4552 if (iReg < X86_SREG_FS)
4553 IEMOP_HLP_NO_64BIT();
4554 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
4555
4556 switch (pIemCpu->enmEffOpSize)
4557 {
4558 case IEMMODE_16BIT:
4559 IEM_MC_BEGIN(0, 1);
4560 IEM_MC_LOCAL(uint16_t, u16Value);
4561 IEM_MC_FETCH_SREG_U16(u16Value, iReg);
4562 IEM_MC_PUSH_U16(u16Value);
4563 IEM_MC_ADVANCE_RIP();
4564 IEM_MC_END();
4565 break;
4566
4567 case IEMMODE_32BIT:
4568 IEM_MC_BEGIN(0, 1);
4569 IEM_MC_LOCAL(uint32_t, u32Value);
4570 IEM_MC_FETCH_SREG_ZX_U32(u32Value, iReg);
4571 IEM_MC_PUSH_U32_SREG(u32Value);
4572 IEM_MC_ADVANCE_RIP();
4573 IEM_MC_END();
4574 break;
4575
4576 case IEMMODE_64BIT:
4577 IEM_MC_BEGIN(0, 1);
4578 IEM_MC_LOCAL(uint64_t, u64Value);
4579 IEM_MC_FETCH_SREG_ZX_U64(u64Value, iReg);
4580 IEM_MC_PUSH_U64(u64Value);
4581 IEM_MC_ADVANCE_RIP();
4582 IEM_MC_END();
4583 break;
4584 }
4585
4586 return VINF_SUCCESS;
4587}
4588
4589
/** Opcode 0x0f 0xa0 - push fs. Defers to the common segment push helper. */
FNIEMOP_DEF(iemOp_push_fs)
{
    IEMOP_MNEMONIC("push fs");
    IEMOP_HLP_MIN_386(); /* FS/GS exist only from the 386 onwards. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_FS);
}
4598
4599
/** Opcode 0x0f 0xa1 - pop fs. Deferred to a C implementation since popping a
 *  segment register involves descriptor loading and possible faults. */
FNIEMOP_DEF(iemOp_pop_fs)
{
    IEMOP_MNEMONIC("pop fs");
    IEMOP_HLP_MIN_386(); /* FS/GS exist only from the 386 onwards. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_FS, pIemCpu->enmEffOpSize);
}
4608
4609
/** Opcode 0x0f 0xa2 - cpuid. Deferred to a C implementation. */
FNIEMOP_DEF(iemOp_cpuid)
{
    IEMOP_MNEMONIC("cpuid");
    IEMOP_HLP_MIN_486(); /* not all 486es. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cpuid);
}
4618
4619
4620/**
4621 * Common worker for iemOp_bt_Ev_Gv, iemOp_btc_Ev_Gv, iemOp_btr_Ev_Gv and
4622 * iemOp_bts_Ev_Gv.
4623 */
4624FNIEMOP_DEF_1(iemOpCommonBit_Ev_Gv, PCIEMOPBINSIZES, pImpl)
4625{
4626 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4627 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
4628
4629 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4630 {
4631 /* register destination. */
4632 IEMOP_HLP_NO_LOCK_PREFIX();
4633 switch (pIemCpu->enmEffOpSize)
4634 {
4635 case IEMMODE_16BIT:
4636 IEM_MC_BEGIN(3, 0);
4637 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4638 IEM_MC_ARG(uint16_t, u16Src, 1);
4639 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4640
4641 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4642 IEM_MC_AND_LOCAL_U16(u16Src, 0xf);
4643 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4644 IEM_MC_REF_EFLAGS(pEFlags);
4645 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
4646
4647 IEM_MC_ADVANCE_RIP();
4648 IEM_MC_END();
4649 return VINF_SUCCESS;
4650
4651 case IEMMODE_32BIT:
4652 IEM_MC_BEGIN(3, 0);
4653 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4654 IEM_MC_ARG(uint32_t, u32Src, 1);
4655 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4656
4657 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4658 IEM_MC_AND_LOCAL_U32(u32Src, 0x1f);
4659 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4660 IEM_MC_REF_EFLAGS(pEFlags);
4661 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
4662
4663 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
4664 IEM_MC_ADVANCE_RIP();
4665 IEM_MC_END();
4666 return VINF_SUCCESS;
4667
4668 case IEMMODE_64BIT:
4669 IEM_MC_BEGIN(3, 0);
4670 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4671 IEM_MC_ARG(uint64_t, u64Src, 1);
4672 IEM_MC_ARG(uint32_t *, pEFlags, 2);
4673
4674 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4675 IEM_MC_AND_LOCAL_U64(u64Src, 0x3f);
4676 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4677 IEM_MC_REF_EFLAGS(pEFlags);
4678 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
4679
4680 IEM_MC_ADVANCE_RIP();
4681 IEM_MC_END();
4682 return VINF_SUCCESS;
4683
4684 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4685 }
4686 }
4687 else
4688 {
4689 /* memory destination. */
4690
4691 uint32_t fAccess;
4692 if (pImpl->pfnLockedU16)
4693 fAccess = IEM_ACCESS_DATA_RW;
4694 else /* BT */
4695 {
4696 IEMOP_HLP_NO_LOCK_PREFIX();
4697 fAccess = IEM_ACCESS_DATA_R;
4698 }
4699
4700 NOREF(fAccess);
4701
4702 /** @todo test negative bit offsets! */
4703 switch (pIemCpu->enmEffOpSize)
4704 {
4705 case IEMMODE_16BIT:
4706 IEM_MC_BEGIN(3, 2);
4707 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4708 IEM_MC_ARG(uint16_t, u16Src, 1);
4709 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4710 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4711 IEM_MC_LOCAL(int16_t, i16AddrAdj);
4712
4713 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4714 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4715 IEM_MC_ASSIGN(i16AddrAdj, u16Src);
4716 IEM_MC_AND_ARG_U16(u16Src, 0x0f);
4717 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 4);
4718 IEM_MC_SAR_LOCAL_S16(i16AddrAdj, 1);
4719 IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(GCPtrEffDst, i16AddrAdj);
4720 IEM_MC_FETCH_EFLAGS(EFlags);
4721
4722 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4723 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4724 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
4725 else
4726 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
4727 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
4728
4729 IEM_MC_COMMIT_EFLAGS(EFlags);
4730 IEM_MC_ADVANCE_RIP();
4731 IEM_MC_END();
4732 return VINF_SUCCESS;
4733
4734 case IEMMODE_32BIT:
4735 IEM_MC_BEGIN(3, 2);
4736 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4737 IEM_MC_ARG(uint32_t, u32Src, 1);
4738 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4739 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4740 IEM_MC_LOCAL(int32_t, i32AddrAdj);
4741
4742 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4743 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4744 IEM_MC_ASSIGN(i32AddrAdj, u32Src);
4745 IEM_MC_AND_ARG_U32(u32Src, 0x1f);
4746 IEM_MC_SAR_LOCAL_S32(i32AddrAdj, 5);
4747 IEM_MC_SHL_LOCAL_S32(i32AddrAdj, 2);
4748 IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(GCPtrEffDst, i32AddrAdj);
4749 IEM_MC_FETCH_EFLAGS(EFlags);
4750
4751 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4752 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4753 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
4754 else
4755 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
4756 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
4757
4758 IEM_MC_COMMIT_EFLAGS(EFlags);
4759 IEM_MC_ADVANCE_RIP();
4760 IEM_MC_END();
4761 return VINF_SUCCESS;
4762
4763 case IEMMODE_64BIT:
4764 IEM_MC_BEGIN(3, 2);
4765 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4766 IEM_MC_ARG(uint64_t, u64Src, 1);
4767 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
4768 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4769 IEM_MC_LOCAL(int64_t, i64AddrAdj);
4770
4771 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
4772 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4773 IEM_MC_ASSIGN(i64AddrAdj, u64Src);
4774 IEM_MC_AND_ARG_U64(u64Src, 0x3f);
4775 IEM_MC_SAR_LOCAL_S64(i64AddrAdj, 6);
4776 IEM_MC_SHL_LOCAL_S64(i64AddrAdj, 3);
4777 IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(GCPtrEffDst, i64AddrAdj);
4778 IEM_MC_FETCH_EFLAGS(EFlags);
4779
4780 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4781 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
4782 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
4783 else
4784 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
4785 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
4786
4787 IEM_MC_COMMIT_EFLAGS(EFlags);
4788 IEM_MC_ADVANCE_RIP();
4789 IEM_MC_END();
4790 return VINF_SUCCESS;
4791
4792 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4793 }
4794 }
4795}
4796
4797
/** Opcode 0x0f 0xa3 - bt Ev,Gv. Defers to the common bit-op worker with the
 *  BT implementation table (read-only, no locked form). */
FNIEMOP_DEF(iemOp_bt_Ev_Gv)
{
    /* NOTE(review): the mnemonic text says "bt Gv,Gv" although the operands
       are Ev,Gv - debug-output text only; confirm before changing. */
    IEMOP_MNEMONIC("bt Gv,Gv");
    IEMOP_HLP_MIN_386();
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bt);
}
4805
4806
4807/**
4808 * Common worker for iemOp_shrd_Ev_Gv_Ib and iemOp_shld_Ev_Gv_Ib.
4809 */
4810FNIEMOP_DEF_1(iemOpCommonShldShrd_Ib, PCIEMOPSHIFTDBLSIZES, pImpl)
4811{
4812 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4813 IEMOP_HLP_NO_LOCK_PREFIX();
4814 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
4815
4816 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4817 {
4818 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
4819 IEMOP_HLP_NO_LOCK_PREFIX();
4820
4821 switch (pIemCpu->enmEffOpSize)
4822 {
4823 case IEMMODE_16BIT:
4824 IEM_MC_BEGIN(4, 0);
4825 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4826 IEM_MC_ARG(uint16_t, u16Src, 1);
4827 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
4828 IEM_MC_ARG(uint32_t *, pEFlags, 3);
4829
4830 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4831 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4832 IEM_MC_REF_EFLAGS(pEFlags);
4833 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
4834
4835 IEM_MC_ADVANCE_RIP();
4836 IEM_MC_END();
4837 return VINF_SUCCESS;
4838
4839 case IEMMODE_32BIT:
4840 IEM_MC_BEGIN(4, 0);
4841 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4842 IEM_MC_ARG(uint32_t, u32Src, 1);
4843 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
4844 IEM_MC_ARG(uint32_t *, pEFlags, 3);
4845
4846 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4847 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4848 IEM_MC_REF_EFLAGS(pEFlags);
4849 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
4850
4851 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
4852 IEM_MC_ADVANCE_RIP();
4853 IEM_MC_END();
4854 return VINF_SUCCESS;
4855
4856 case IEMMODE_64BIT:
4857 IEM_MC_BEGIN(4, 0);
4858 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4859 IEM_MC_ARG(uint64_t, u64Src, 1);
4860 IEM_MC_ARG_CONST(uint8_t, cShiftArg, /*=*/cShift, 2);
4861 IEM_MC_ARG(uint32_t *, pEFlags, 3);
4862
4863 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4864 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4865 IEM_MC_REF_EFLAGS(pEFlags);
4866 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
4867
4868 IEM_MC_ADVANCE_RIP();
4869 IEM_MC_END();
4870 return VINF_SUCCESS;
4871
4872 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4873 }
4874 }
4875 else
4876 {
4877 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
4878
4879 switch (pIemCpu->enmEffOpSize)
4880 {
4881 case IEMMODE_16BIT:
4882 IEM_MC_BEGIN(4, 2);
4883 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4884 IEM_MC_ARG(uint16_t, u16Src, 1);
4885 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4886 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
4887 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4888
4889 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
4890 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
4891 IEM_MC_ASSIGN(cShiftArg, cShift);
4892 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4893 IEM_MC_FETCH_EFLAGS(EFlags);
4894 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4895 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
4896
4897 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
4898 IEM_MC_COMMIT_EFLAGS(EFlags);
4899 IEM_MC_ADVANCE_RIP();
4900 IEM_MC_END();
4901 return VINF_SUCCESS;
4902
4903 case IEMMODE_32BIT:
4904 IEM_MC_BEGIN(4, 2);
4905 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4906 IEM_MC_ARG(uint32_t, u32Src, 1);
4907 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4908 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
4909 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4910
4911 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
4912 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
4913 IEM_MC_ASSIGN(cShiftArg, cShift);
4914 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4915 IEM_MC_FETCH_EFLAGS(EFlags);
4916 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4917 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
4918
4919 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
4920 IEM_MC_COMMIT_EFLAGS(EFlags);
4921 IEM_MC_ADVANCE_RIP();
4922 IEM_MC_END();
4923 return VINF_SUCCESS;
4924
4925 case IEMMODE_64BIT:
4926 IEM_MC_BEGIN(4, 2);
4927 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
4928 IEM_MC_ARG(uint64_t, u64Src, 1);
4929 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4930 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
4931 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
4932
4933 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
4934 uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
4935 IEM_MC_ASSIGN(cShiftArg, cShift);
4936 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4937 IEM_MC_FETCH_EFLAGS(EFlags);
4938 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
4939 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
4940
4941 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
4942 IEM_MC_COMMIT_EFLAGS(EFlags);
4943 IEM_MC_ADVANCE_RIP();
4944 IEM_MC_END();
4945 return VINF_SUCCESS;
4946
4947 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4948 }
4949 }
4950}
4951
4952
4953/**
4954 * Common worker for iemOp_shrd_Ev_Gv_CL and iemOp_shld_Ev_Gv_CL.
4955 */
4956FNIEMOP_DEF_1(iemOpCommonShldShrd_CL, PCIEMOPSHIFTDBLSIZES, pImpl)
4957{
4958 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
4959 IEMOP_HLP_NO_LOCK_PREFIX();
4960 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF | X86_EFL_OF);
4961
4962 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
4963 {
4964 IEMOP_HLP_NO_LOCK_PREFIX();
4965
4966 switch (pIemCpu->enmEffOpSize)
4967 {
4968 case IEMMODE_16BIT:
4969 IEM_MC_BEGIN(4, 0);
4970 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
4971 IEM_MC_ARG(uint16_t, u16Src, 1);
4972 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4973 IEM_MC_ARG(uint32_t *, pEFlags, 3);
4974
4975 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4976 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4977 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
4978 IEM_MC_REF_EFLAGS(pEFlags);
4979 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
4980
4981 IEM_MC_ADVANCE_RIP();
4982 IEM_MC_END();
4983 return VINF_SUCCESS;
4984
4985 case IEMMODE_32BIT:
4986 IEM_MC_BEGIN(4, 0);
4987 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
4988 IEM_MC_ARG(uint32_t, u32Src, 1);
4989 IEM_MC_ARG(uint8_t, cShiftArg, 2);
4990 IEM_MC_ARG(uint32_t *, pEFlags, 3);
4991
4992 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
4993 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
4994 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
4995 IEM_MC_REF_EFLAGS(pEFlags);
4996 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
4997
4998 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
4999 IEM_MC_ADVANCE_RIP();
5000 IEM_MC_END();
5001 return VINF_SUCCESS;
5002
5003 case IEMMODE_64BIT:
5004 IEM_MC_BEGIN(4, 0);
5005 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5006 IEM_MC_ARG(uint64_t, u64Src, 1);
5007 IEM_MC_ARG(uint8_t, cShiftArg, 2);
5008 IEM_MC_ARG(uint32_t *, pEFlags, 3);
5009
5010 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
5011 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5012 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
5013 IEM_MC_REF_EFLAGS(pEFlags);
5014 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
5015
5016 IEM_MC_ADVANCE_RIP();
5017 IEM_MC_END();
5018 return VINF_SUCCESS;
5019
5020 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5021 }
5022 }
5023 else
5024 {
5025 IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo too early? */
5026
5027 switch (pIemCpu->enmEffOpSize)
5028 {
5029 case IEMMODE_16BIT:
5030 IEM_MC_BEGIN(4, 2);
5031 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5032 IEM_MC_ARG(uint16_t, u16Src, 1);
5033 IEM_MC_ARG(uint8_t, cShiftArg, 2);
5034 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
5035 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5036
5037 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
5038 IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
5039 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
5040 IEM_MC_FETCH_EFLAGS(EFlags);
5041 IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
5042 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU16, pu16Dst, u16Src, cShiftArg, pEFlags);
5043
5044 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
5045 IEM_MC_COMMIT_EFLAGS(EFlags);
5046 IEM_MC_ADVANCE_RIP();
5047 IEM_MC_END();
5048 return VINF_SUCCESS;
5049
5050 case IEMMODE_32BIT:
5051 IEM_MC_BEGIN(4, 2);
5052 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5053 IEM_MC_ARG(uint32_t, u32Src, 1);
5054 IEM_MC_ARG(uint8_t, cShiftArg, 2);
5055 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
5056 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5057
5058 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
5059 IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
5060 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
5061 IEM_MC_FETCH_EFLAGS(EFlags);
5062 IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
5063 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU32, pu32Dst, u32Src, cShiftArg, pEFlags);
5064
5065 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
5066 IEM_MC_COMMIT_EFLAGS(EFlags);
5067 IEM_MC_ADVANCE_RIP();
5068 IEM_MC_END();
5069 return VINF_SUCCESS;
5070
5071 case IEMMODE_64BIT:
5072 IEM_MC_BEGIN(4, 2);
5073 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5074 IEM_MC_ARG(uint64_t, u64Src, 1);
5075 IEM_MC_ARG(uint8_t, cShiftArg, 2);
5076 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
5077 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5078
5079 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
5080 IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
5081 IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
5082 IEM_MC_FETCH_EFLAGS(EFlags);
5083 IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
5084 IEM_MC_CALL_VOID_AIMPL_4(pImpl->pfnNormalU64, pu64Dst, u64Src, cShiftArg, pEFlags);
5085
5086 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
5087 IEM_MC_COMMIT_EFLAGS(EFlags);
5088 IEM_MC_ADVANCE_RIP();
5089 IEM_MC_END();
5090 return VINF_SUCCESS;
5091
5092 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5093 }
5094 }
5095}
5096
5097
5098
/** Opcode 0x0f 0xa4. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_Ib)
{
    IEMOP_MNEMONIC("shld Ev,Gv,Ib");
    IEMOP_HLP_MIN_386(); /* SHLD is a 386+ instruction. */
    /* Shift count comes from an immediate byte; defer to the common SHLD/SHRD Ib worker. */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shld);
}
5106
5107
/** Opcode 0x0f 0xa5. */
FNIEMOP_DEF(iemOp_shld_Ev_Gv_CL)
{
    IEMOP_MNEMONIC("shld Ev,Gv,CL");
    IEMOP_HLP_MIN_386(); /* SHLD is a 386+ instruction. */
    /* Shift count comes from CL; defer to the common SHLD/SHRD CL worker. */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shld);
}
5115
5116
/** Opcode 0x0f 0xa8. */
FNIEMOP_DEF(iemOp_push_gs)
{
    IEMOP_MNEMONIC("push gs");
    IEMOP_HLP_MIN_386(); /* GS only exists on 386+. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Shares the generic segment-register push worker with the other sregs. */
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_GS);
}
5125
5126
/** Opcode 0x0f 0xa9. */
FNIEMOP_DEF(iemOp_pop_gs)
{
    IEMOP_MNEMONIC("pop gs");
    IEMOP_HLP_MIN_386(); /* GS only exists on 386+. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* Loading a segment register may fault/switch context, so this is a C-impl deferral. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_GS, pIemCpu->enmEffOpSize);
}
5135
5136
/** Opcode 0x0f 0xaa. */
/* RSM (resume from system management mode) is not implemented yet. */
FNIEMOP_STUB(iemOp_rsm);
//IEMOP_HLP_MIN_386();
5140
5141
/** Opcode 0x0f 0xab. */
FNIEMOP_DEF(iemOp_bts_Ev_Gv)
{
    IEMOP_MNEMONIC("bts Ev,Gv");
    IEMOP_HLP_MIN_386(); /* BTS is a 386+ instruction. */
    /* Defer to the common bit-test worker with the BTS implementation table. */
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_bts);
}
5149
5150
/** Opcode 0x0f 0xac. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_Ib)
{
    IEMOP_MNEMONIC("shrd Ev,Gv,Ib");
    IEMOP_HLP_MIN_386(); /* SHRD is a 386+ instruction. */
    /* Shift count comes from an immediate byte; defer to the common SHLD/SHRD Ib worker. */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_Ib, &g_iemAImpl_shrd);
}
5158
5159
/** Opcode 0x0f 0xad. */
FNIEMOP_DEF(iemOp_shrd_Ev_Gv_CL)
{
    IEMOP_MNEMONIC("shrd Ev,Gv,CL");
    IEMOP_HLP_MIN_386(); /* SHRD is a 386+ instruction. */
    /* Shift count comes from CL; defer to the common SHLD/SHRD CL worker. */
    return FNIEMOP_CALL_1(iemOpCommonShldShrd_CL, &g_iemAImpl_shrd);
}
5167
5168
/** Opcode 0x0f 0xae mem/0. */
FNIEMOP_DEF_1(iemOp_Grp15_fxsave, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fxsave m512");
    /* #UD if the guest CPU profile doesn't have FXSAVE/FXRSTOR support. */
    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fFxSaveRstor)
        return IEMOP_RAISE_INVALID_OPCODE();

    /* Calculate the effective address, then hand off to the C implementation
       which does the actual 512-byte state save. */
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG(uint8_t, iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
    IEM_MC_CALL_CIMPL_3(iemCImpl_fxsave, iEffSeg, GCPtrEff, enmEffOpSize);
    IEM_MC_END();
    return VINF_SUCCESS;
}
5187
5188
/** Opcode 0x0f 0xae mem/1. */
FNIEMOP_DEF_1(iemOp_Grp15_fxrstor, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fxrstor m512");
    /* #UD if the guest CPU profile doesn't have FXSAVE/FXRSTOR support. */
    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fFxSaveRstor)
        return IEMOP_RAISE_INVALID_OPCODE();

    /* Calculate the effective address, then hand off to the C implementation
       which does the actual 512-byte state restore. */
    IEM_MC_BEGIN(3, 1);
    IEM_MC_ARG(uint8_t, iEffSeg, 0);
    IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
    IEM_MC_CALL_CIMPL_3(iemCImpl_fxrstor, iEffSeg, GCPtrEff, enmEffOpSize);
    IEM_MC_END();
    return VINF_SUCCESS;
}
5207
5208
/** Opcode 0x0f 0xae mem/2. */
/* LDMXCSR - not implemented yet. */
FNIEMOP_STUB_1(iemOp_Grp15_ldmxcsr, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/3. */
/* STMXCSR - not implemented yet. */
FNIEMOP_STUB_1(iemOp_Grp15_stmxcsr, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/4. */
/* XSAVE - decoded as #UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xsave, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/5. */
/* XRSTOR - decoded as #UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xrstor, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/6. */
/* XSAVEOPT - decoded as #UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_xsaveopt, uint8_t, bRm);

/** Opcode 0x0f 0xae mem/7. */
/* CLFLUSH - not implemented yet. */
FNIEMOP_STUB_1(iemOp_Grp15_clflush, uint8_t, bRm);
5226
5227
/** Opcode 0x0f 0xae 11b/5. */
FNIEMOP_DEF_1(iemOp_Grp15_lfence, uint8_t, bRm)
{
    IEMOP_MNEMONIC("lfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* LFENCE requires SSE2 on the guest CPU profile. */
    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    /* Use the real LFENCE when the host supports it, otherwise fall back to
       an alternative memory fence implementation. */
    if (IEM_GET_HOST_CPU_FEATURES(pIemCpu)->fSse2)
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_lfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5245
5246
/** Opcode 0x0f 0xae 11b/6. */
FNIEMOP_DEF_1(iemOp_Grp15_mfence, uint8_t, bRm)
{
    IEMOP_MNEMONIC("mfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* MFENCE requires SSE2 on the guest CPU profile. */
    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    /* Use the real MFENCE when the host supports it, otherwise fall back to
       an alternative memory fence implementation. */
    if (IEM_GET_HOST_CPU_FEATURES(pIemCpu)->fSse2)
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_mfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5264
5265
/** Opcode 0x0f 0xae 11b/7. */
FNIEMOP_DEF_1(iemOp_Grp15_sfence, uint8_t, bRm)
{
    IEMOP_MNEMONIC("sfence");
    IEMOP_HLP_NO_LOCK_PREFIX();
    /* SFENCE is checked against SSE2 here like the other fences. NOTE(review):
       on real CPUs SFENCE arrived with SSE; confirm whether fSse would be the
       more accurate gate. */
    if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)
        return IEMOP_RAISE_INVALID_OPCODE();

    IEM_MC_BEGIN(0, 0);
    /* Use the real SFENCE when the host supports it, otherwise fall back to
       an alternative memory fence implementation. */
    if (IEM_GET_HOST_CPU_FEATURES(pIemCpu)->fSse2)
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_sfence);
    else
        IEM_MC_CALL_VOID_AIMPL_0(iemAImpl_alt_mem_fence);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
5283
5284
/** Opcode 0xf3 0x0f 0xae 11b/0. */
/* RDFSBASE - decoded as #UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_rdfsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/1. */
/* RDGSBASE - decoded as #UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_rdgsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/2. */
/* WRFSBASE - decoded as #UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_wrfsbase, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xae 11b/3. */
/* WRGSBASE - decoded as #UD for now. */
FNIEMOP_UD_STUB_1(iemOp_Grp15_wrgsbase, uint8_t, bRm);
5296
5297
/** Opcode 0x0f 0xae. */
FNIEMOP_DEF(iemOp_Grp15)
{
    IEMOP_HLP_MIN_586(); /* Not entirely accurate nor needed, but useful for debugging 286 code. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Memory forms: the reg field of ModR/M selects the instruction. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_Grp15_fxsave, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_Grp15_fxrstor, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_Grp15_ldmxcsr, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_Grp15_stmxcsr, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_Grp15_xsave, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_Grp15_xrstor, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_Grp15_xsaveopt,bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_Grp15_clflush, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Register forms: the meaning depends on which prefixes are active. */
        switch (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_LOCK))
        {
            case 0:
                /* No relevant prefix: only the fence instructions (/5../7) are valid. */
                switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
                {
                    case 0: return IEMOP_RAISE_INVALID_OPCODE();
                    case 1: return IEMOP_RAISE_INVALID_OPCODE();
                    case 2: return IEMOP_RAISE_INVALID_OPCODE();
                    case 3: return IEMOP_RAISE_INVALID_OPCODE();
                    case 4: return IEMOP_RAISE_INVALID_OPCODE();
                    case 5: return FNIEMOP_CALL_1(iemOp_Grp15_lfence, bRm);
                    case 6: return FNIEMOP_CALL_1(iemOp_Grp15_mfence, bRm);
                    case 7: return FNIEMOP_CALL_1(iemOp_Grp15_sfence, bRm);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;

            case IEM_OP_PRF_REPZ:
                /* F3 prefix: RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE in /0../3. */
                switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
                {
                    case 0: return FNIEMOP_CALL_1(iemOp_Grp15_rdfsbase, bRm);
                    case 1: return FNIEMOP_CALL_1(iemOp_Grp15_rdgsbase, bRm);
                    case 2: return FNIEMOP_CALL_1(iemOp_Grp15_wrfsbase, bRm);
                    case 3: return FNIEMOP_CALL_1(iemOp_Grp15_wrgsbase, bRm);
                    case 4: return IEMOP_RAISE_INVALID_OPCODE();
                    case 5: return IEMOP_RAISE_INVALID_OPCODE();
                    case 6: return IEMOP_RAISE_INVALID_OPCODE();
                    case 7: return IEMOP_RAISE_INVALID_OPCODE();
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;

            default:
                /* Any other prefix combination is undefined. */
                return IEMOP_RAISE_INVALID_OPCODE();
        }
    }
}
5357
5358
/** Opcode 0x0f 0xaf. */
FNIEMOP_DEF(iemOp_imul_Gv_Ev)
{
    IEMOP_MNEMONIC("imul Gv,Ev");
    IEMOP_HLP_MIN_386(); /* The two-operand IMUL form is 386+. */
    /* SF/ZF/AF/PF are undefined after IMUL; tell the verifier not to compare them. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_imul_two);
}
5367
5368
/** Opcode 0x0f 0xb0. */
FNIEMOP_DEF(iemOp_cmpxchg_Eb_Gb)
{
    IEMOP_MNEMONIC("cmpxchg Eb,Gb");
    IEMOP_HLP_MIN_486(); /* CMPXCHG is a 486+ instruction. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination: reference destination, AL and EFLAGS directly. */
        IEMOP_HLP_DONE_DECODING();
        IEM_MC_BEGIN(4, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t *, pu8Al, 1);
        IEM_MC_ARG(uint8_t, u8Src, 2);
        IEM_MC_ARG(uint32_t *, pEFlags, 3);

        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Al, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        /* The LOCK prefix selects the locked assembly worker. */
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8, pu8Dst, pu8Al, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8_locked, pu8Dst, pu8Al, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* Memory destination: map the byte read/write, keep AL in a local so the
           (possibly updated) value can be written back after the compare-exchange. */
        IEM_MC_BEGIN(4, 3);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t *, pu8Al, 1);
        IEM_MC_ARG(uint8_t, u8Src, 2);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_LOCAL(uint8_t, u8Al);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DONE_DECODING();
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
        IEM_MC_FETCH_GREG_U8(u8Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_GREG_U8(u8Al, X86_GREG_xAX);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_REF_LOCAL(pu8Al, u8Al);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8, pu8Dst, pu8Al, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u8_locked, pu8Dst, pu8Al, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Al); /* AL is stored back unconditionally. */
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
5427
/** Opcode 0x0f 0xb1. */
FNIEMOP_DEF(iemOp_cmpxchg_Ev_Gv)
{
    IEMOP_MNEMONIC("cmpxchg Ev,Gv");
    IEMOP_HLP_MIN_486(); /* CMPXCHG is a 486+ instruction. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register destination, one case per effective operand size. */
        IEMOP_HLP_DONE_DECODING();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t *, pu16Ax, 1);
                IEM_MC_ARG(uint16_t, u16Src, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Ax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
                /* The LOCK prefix selects the locked assembly worker. */
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16, pu16Dst, pu16Ax, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16_locked, pu16Dst, pu16Ax, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t *, pu32Eax, 1);
                IEM_MC_ARG(uint32_t, u32Src, 2);
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Eax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32, pu32Dst, pu32Eax, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32_locked, pu32Dst, pu32Eax, u32Src, pEFlags);

                /* 32-bit register writes zero the high halves in 64-bit mode. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Eax);
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t *, pu64Rax, 1);
#ifdef RT_ARCH_X86
                /* On 32-bit hosts the 64-bit source is passed by reference. */
                IEM_MC_ARG(uint64_t *, pu64Src, 2);
#else
                IEM_MC_ARG(uint64_t, u64Src, 2);
#endif
                IEM_MC_ARG(uint32_t *, pEFlags, 3);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Rax, X86_GREG_xAX);
                IEM_MC_REF_EFLAGS(pEFlags);
#ifdef RT_ARCH_X86
                IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, pu64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, pu64Src, pEFlags);
#else
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, u64Src, pEFlags);
#endif

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory destination: map it read/write, keep the accumulator in a
           local so it can be written back after the compare-exchange. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t *, pu16Ax, 1);
                IEM_MC_ARG(uint16_t, u16Src, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(uint16_t, u16Ax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U16(u16Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U16(u16Ax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu16Ax, u16Ax);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16, pu16Dst, pu16Ax, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u16_locked, pu16Dst, pu16Ax, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Ax); /* AX is stored back unconditionally. */
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t *, pu32Eax, 1);
                IEM_MC_ARG(uint32_t, u32Src, 2);
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(uint32_t, u32Eax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U32(u32Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_FETCH_GREG_U32(u32Eax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu32Eax, u32Eax);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32, pu32Dst, pu32Eax, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u32_locked, pu32Dst, pu32Eax, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Eax); /* EAX is stored back unconditionally. */
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(4, 3);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t *, pu64Rax, 1);
#ifdef RT_ARCH_X86
                /* On 32-bit hosts the 64-bit source is passed by reference. */
                IEM_MC_ARG(uint64_t *, pu64Src, 2);
#else
                IEM_MC_ARG(uint64_t, u64Src, 2);
#endif
                IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 3);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_LOCAL(uint64_t, u64Rax);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING();
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0);
                IEM_MC_FETCH_GREG_U64(u64Rax, X86_GREG_xAX);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_REF_LOCAL(pu64Rax, u64Rax);
#ifdef RT_ARCH_X86
                IEM_MC_REF_GREG_U64(pu64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, pu64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, pu64Src, pEFlags);
#else
                IEM_MC_FETCH_GREG_U64(u64Src, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64, pu64Dst, pu64Rax, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg_u64_locked, pu64Dst, pu64Rax, u64Src, pEFlags);
#endif

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Rax); /* RAX is stored back unconditionally. */
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5621
5622
/**
 * Common worker for LSS/LFS/LGS: loads a far pointer (offset + selector) from
 * memory into the given general register and segment register.
 *
 * @param   iSegReg     The segment register to load (X86_SREG_SS/FS/GS).
 * @param   bRm         The ModR/M byte (must be a memory form).
 */
FNIEMOP_DEF_2(iemOpCommonLoadSRegAndGreg, uint8_t, iSegReg, uint8_t, bRm)
{
    Assert((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)); /* Caller checks this */
    uint8_t const iGReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg;

    /* The offset is read at the effective address and the 16-bit selector
       immediately after it; the actual load is done by a C implementation
       since it can fault. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint16_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 2); /* selector follows the 16-bit offset */
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint32_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 4); /* selector follows the 32-bit offset */
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(5, 1);
            IEM_MC_ARG(uint16_t, uSel, 0);
            IEM_MC_ARG(uint64_t, offSeg, 1);
            IEM_MC_ARG_CONST(uint8_t, iSegRegArg,/*=*/iSegReg, 2);
            IEM_MC_ARG_CONST(uint8_t, iGRegArg, /*=*/iGReg, 3);
            IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pIemCpu->enmEffOpSize, 4);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEff);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
            IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
            if (IEM_IS_GUEST_CPU_AMD(pIemCpu)) /** @todo testcase: rev 3.15 of the amd manuals claims it only loads a 32-bit greg. */
                IEM_MC_FETCH_MEM_U32_SX_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            else
                IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEff);
            IEM_MC_FETCH_MEM_U16_DISP(uSel, pIemCpu->iEffSeg, GCPtrEff, 8); /* selector follows the 64-bit offset */
            IEM_MC_CALL_CIMPL_5(iemCImpl_load_SReg_Greg, uSel, offSeg, iSegRegArg, iGRegArg, enmEffOpSize);
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
5684
5685
/** Opcode 0x0f 0xb2. */
FNIEMOP_DEF(iemOp_lss_Gv_Mp)
{
    IEMOP_MNEMONIC("lss Gv,Mp");
    IEMOP_HLP_MIN_386(); /* LSS is a 386+ instruction. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Register forms are invalid; the operand must be a memory far pointer. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_SS, bRm);
}
5696
5697
5698/** Opcode 0x0f 0xb3. */
5699FNIEMOP_DEF(iemOp_btr_Ev_Gv)
5700{
5701 IEMOP_MNEMONIC("btr Ev,Gv");
5702 return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btr);
5703}
5704
5705
/** Opcode 0x0f 0xb4. */
FNIEMOP_DEF(iemOp_lfs_Gv_Mp)
{
    IEMOP_MNEMONIC("lfs Gv,Mp");
    IEMOP_HLP_MIN_386(); /* LFS is a 386+ instruction. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Register forms are invalid; the operand must be a memory far pointer. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_FS, bRm);
}
5716
5717
/** Opcode 0x0f 0xb5. */
FNIEMOP_DEF(iemOp_lgs_Gv_Mp)
{
    IEMOP_MNEMONIC("lgs Gv,Mp");
    IEMOP_HLP_MIN_386(); /* LGS is a 386+ instruction. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    /* Register forms are invalid; the operand must be a memory far pointer. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE();
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_GS, bRm);
}
5728
5729
/** Opcode 0x0f 0xb6. */
FNIEMOP_DEF(iemOp_movzx_Gv_Eb)
{
    IEMOP_MNEMONIC("movzx Gv,Eb");
    IEMOP_HLP_MIN_386(); /* MOVZX is a 386+ instruction. */

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source: zero-extend the byte register into the
           destination at the current effective operand size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U8_ZX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
5820
5821
/** Opcode 0x0f 0xb7. */
FNIEMOP_DEF(iemOp_movzx_Gv_Ew)
{
    IEMOP_MNEMONIC("movzx Gv,Ew");
    IEMOP_HLP_MIN_386(); /* MOVZX is a 386+ instruction. */

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register source: only two destination widths matter since the
           16-bit and 32-bit cases both store a 32-bit zero-extension. */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_ZX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_ZX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_ZX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_ZX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
5888
5889
/** Opcode 0x0f 0xb8. */
/* POPCNT (F3 prefix) / JMPE - not implemented yet. */
FNIEMOP_STUB(iemOp_popcnt_Gv_Ev_jmpe);
5892
5893
/** Opcode 0x0f 0xb9. */
FNIEMOP_DEF(iemOp_Grp10)
{
    /* Group 10 (UD1) is a deliberate invalid-opcode encoding; log and raise #UD. */
    Log(("iemOp_Grp10 -> #UD\n"));
    return IEMOP_RAISE_INVALID_OPCODE();
}
5900
5901
5902/** Opcode 0x0f 0xba. */
5903FNIEMOP_DEF(iemOp_Grp8)
5904{
5905 IEMOP_HLP_MIN_386();
5906 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
5907 PCIEMOPBINSIZES pImpl;
5908 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
5909 {
5910 case 0: case 1: case 2: case 3:
5911 return IEMOP_RAISE_INVALID_OPCODE();
5912 case 4: pImpl = &g_iemAImpl_bt; IEMOP_MNEMONIC("bt Ev,Ib"); break;
5913 case 5: pImpl = &g_iemAImpl_bts; IEMOP_MNEMONIC("bts Ev,Ib"); break;
5914 case 6: pImpl = &g_iemAImpl_btr; IEMOP_MNEMONIC("btr Ev,Ib"); break;
5915 case 7: pImpl = &g_iemAImpl_btc; IEMOP_MNEMONIC("btc Ev,Ib"); break;
5916 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5917 }
5918 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
5919
5920 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
5921 {
5922 /* register destination. */
5923 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
5924 IEMOP_HLP_NO_LOCK_PREFIX();
5925
5926 switch (pIemCpu->enmEffOpSize)
5927 {
5928 case IEMMODE_16BIT:
5929 IEM_MC_BEGIN(3, 0);
5930 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5931 IEM_MC_ARG_CONST(uint16_t, u16Src, /*=*/ u8Bit & 0x0f, 1);
5932 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5933
5934 IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5935 IEM_MC_REF_EFLAGS(pEFlags);
5936 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
5937
5938 IEM_MC_ADVANCE_RIP();
5939 IEM_MC_END();
5940 return VINF_SUCCESS;
5941
5942 case IEMMODE_32BIT:
5943 IEM_MC_BEGIN(3, 0);
5944 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
5945 IEM_MC_ARG_CONST(uint32_t, u32Src, /*=*/ u8Bit & 0x1f, 1);
5946 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5947
5948 IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5949 IEM_MC_REF_EFLAGS(pEFlags);
5950 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
5951
5952 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
5953 IEM_MC_ADVANCE_RIP();
5954 IEM_MC_END();
5955 return VINF_SUCCESS;
5956
5957 case IEMMODE_64BIT:
5958 IEM_MC_BEGIN(3, 0);
5959 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
5960 IEM_MC_ARG_CONST(uint64_t, u64Src, /*=*/ u8Bit & 0x3f, 1);
5961 IEM_MC_ARG(uint32_t *, pEFlags, 2);
5962
5963 IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
5964 IEM_MC_REF_EFLAGS(pEFlags);
5965 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
5966
5967 IEM_MC_ADVANCE_RIP();
5968 IEM_MC_END();
5969 return VINF_SUCCESS;
5970
5971 IEM_NOT_REACHED_DEFAULT_CASE_RET();
5972 }
5973 }
5974 else
5975 {
5976 /* memory destination. */
5977
5978 uint32_t fAccess;
5979 if (pImpl->pfnLockedU16)
5980 fAccess = IEM_ACCESS_DATA_RW;
5981 else /* BT */
5982 {
5983 IEMOP_HLP_NO_LOCK_PREFIX();
5984 fAccess = IEM_ACCESS_DATA_R;
5985 }
5986
5987 /** @todo test negative bit offsets! */
5988 switch (pIemCpu->enmEffOpSize)
5989 {
5990 case IEMMODE_16BIT:
5991 IEM_MC_BEGIN(3, 1);
5992 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
5993 IEM_MC_ARG(uint16_t, u16Src, 1);
5994 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
5995 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
5996
5997 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
5998 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
5999 IEM_MC_ASSIGN(u16Src, u8Bit & 0x0f);
6000 IEM_MC_FETCH_EFLAGS(EFlags);
6001 IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
6002 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6003 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
6004 else
6005 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);
6006 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
6007
6008 IEM_MC_COMMIT_EFLAGS(EFlags);
6009 IEM_MC_ADVANCE_RIP();
6010 IEM_MC_END();
6011 return VINF_SUCCESS;
6012
6013 case IEMMODE_32BIT:
6014 IEM_MC_BEGIN(3, 1);
6015 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
6016 IEM_MC_ARG(uint32_t, u32Src, 1);
6017 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6018 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6019
6020 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
6021 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
6022 IEM_MC_ASSIGN(u32Src, u8Bit & 0x1f);
6023 IEM_MC_FETCH_EFLAGS(EFlags);
6024 IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
6025 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6026 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
6027 else
6028 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);
6029 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
6030
6031 IEM_MC_COMMIT_EFLAGS(EFlags);
6032 IEM_MC_ADVANCE_RIP();
6033 IEM_MC_END();
6034 return VINF_SUCCESS;
6035
6036 case IEMMODE_64BIT:
6037 IEM_MC_BEGIN(3, 1);
6038 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
6039 IEM_MC_ARG(uint64_t, u64Src, 1);
6040 IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 2);
6041 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
6042
6043 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
6044 uint8_t u8Bit; IEM_OPCODE_GET_NEXT_U8(&u8Bit);
6045 IEM_MC_ASSIGN(u64Src, u8Bit & 0x3f);
6046 IEM_MC_FETCH_EFLAGS(EFlags);
6047 IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0);
6048 if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
6049 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
6050 else
6051 IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);
6052 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
6053
6054 IEM_MC_COMMIT_EFLAGS(EFlags);
6055 IEM_MC_ADVANCE_RIP();
6056 IEM_MC_END();
6057 return VINF_SUCCESS;
6058
6059 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6060 }
6061 }
6062
6063}
6064
6065
/** Opcode 0x0f 0xbb. */
/**
 * BTC Ev,Gv - bit test and complement.  Delegates all decoding and emulation
 * to the common Ev,Gv bit-operation worker, passing the BTC assembly
 * implementation table.
 */
FNIEMOP_DEF(iemOp_btc_Ev_Gv)
{
    IEMOP_MNEMONIC("btc Ev,Gv");
    IEMOP_HLP_MIN_386();    /* 0x0f 0xbb first appeared on the 386. */
    return FNIEMOP_CALL_1(iemOpCommonBit_Ev_Gv, &g_iemAImpl_btc);
}
6073
6074
/** Opcode 0x0f 0xbc. */
/**
 * BSF Gv,Ev - bit scan forward.  Uses the common rv,rm binary-operator
 * worker with the BSF implementation table.
 */
FNIEMOP_DEF(iemOp_bsf_Gv_Ev)
{
    IEMOP_MNEMONIC("bsf Gv,Ev");
    IEMOP_HLP_MIN_386();
    /* Per the docs only ZF is defined for BSF; tell the verifier the rest is undefined. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsf);
}
6083
6084
/** Opcode 0x0f 0xbd. */
/**
 * BSR Gv,Ev - bit scan reverse.  Uses the common rv,rm binary-operator
 * worker with the BSR implementation table.
 */
FNIEMOP_DEF(iemOp_bsr_Gv_Ev)
{
    IEMOP_MNEMONIC("bsr Gv,Ev");
    IEMOP_HLP_MIN_386();
    /* Per the docs only ZF is defined for BSR; tell the verifier the rest is undefined. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_SF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_bsr);
}
6093
6094
/** Opcode 0x0f 0xbe. */
/**
 * MOVSX Gv,Eb - sign-extend a byte register or memory operand into the
 * 16/32/64-bit general register selected by ModRM.reg (extended by REX.R).
 * Three copies of the same pattern, one per effective operand size.
 */
FNIEMOP_DEF(iemOp_movsx_Gv_Eb)
{
    IEMOP_MNEMONIC("movsx Gv,Eb");
    IEMOP_HLP_MIN_386();

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                /* Fetch the byte register (ModRM.rm + REX.B) sign-extended to 16 bits. */
                IEM_MC_FETCH_GREG_U8_SX_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U8_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                /* The U32 store clears the high dword in 64-bit mode, as the architecture requires. */
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U8_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        /* NOTE(review): the effective address is a load *source* despite the
           GCPtrEffDst name; kept as-is to match surrounding code. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U8_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6185
6186
/** Opcode 0x0f 0xbf. */
/**
 * MOVSX Gv,Ew - sign-extend a word register or memory operand into the
 * 32/64-bit general register selected by ModRM.reg (extended by REX.R).
 * With a 16-bit effective operand size the 32-bit path is taken, i.e. the
 * operand-size prefix is treated as a no-op here (see todo below).
 */
FNIEMOP_DEF(iemOp_movsx_Gv_Ew)
{
    IEMOP_MNEMONIC("movsx Gv,Ew");
    IEMOP_HLP_MIN_386();

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /** @todo Not entirely sure how the operand size prefix is handled here,
     *        assuming that it will be ignored. Would be nice to have a few
     *        test for this. */
    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U16_SX_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U16_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        /* NOTE(review): GCPtrEffDst is actually a load source address; name kept for consistency. */
        if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_SX_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
        else
        {
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_FETCH_MEM_U16_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
        }
    }
    return VINF_SUCCESS;
}
6253
6254
/** Opcode 0x0f 0xc0. */
/**
 * XADD Eb,Gb - exchange and add, byte form.  The destination receives
 * dst+src, the source register receives the old destination value.
 * The memory form honours the LOCK prefix by dispatching to the locked
 * assembly worker.
 */
FNIEMOP_DEF(iemOp_xadd_Eb_Gb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_MIN_486();    /* XADD was introduced with the 486. */
    IEMOP_MNEMONIC("xadd Eb,Gb");

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();   /* LOCK is only valid with a memory destination. */

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t *, pu8Reg, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);

        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        /* Falls through to the common return below. */
    }
    else
    {
        /*
         * We're accessing memory.
         */
        IEM_MC_BEGIN(3, 3);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t *, pu8Reg, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        /* The source register is worked on via a local copy so the guest
           register is only written once, after the memory commit. */
        IEM_MC_LOCAL(uint8_t, u8RegCopy);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U8(u8RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_REF_LOCAL(pu8Reg, u8RegCopy);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8, pu8Dst, pu8Reg, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u8_locked, pu8Dst, pu8Reg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8RegCopy);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
        return VINF_SUCCESS;
    }
    return VINF_SUCCESS;
}
6313
6314
/** Opcode 0x0f 0xc1. */
/**
 * XADD Ev,Gv - exchange and add for 16/32/64-bit operands.  Destination
 * becomes dst+src; the source register receives the old destination value.
 * Register and memory forms are expanded per effective operand size; the
 * memory forms honour the LOCK prefix via the locked assembly workers.
 */
FNIEMOP_DEF(iemOp_xadd_Ev_Gv)
{
    IEMOP_MNEMONIC("xadd Ev,Gv");
    IEMOP_HLP_MIN_486();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();   /* LOCK is only valid with a memory destination. */

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t *, pu16Reg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t *, pu32Reg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);

                /* Both operands are 32-bit GPR writes, so clear both high dwords (64-bit mode). */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t *, pu64Reg, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /*
         * We're accessing memory.
         */
        /* Pattern per size: map destination RW, snapshot the source register
           into a local, run the (possibly locked) worker, commit memory and
           flags, then write the old destination back to the source register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint16_t *, pu16Reg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint16_t, u16RegCopy);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U16(u16RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu16Reg, u16RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16, pu16Dst, pu16Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u16_locked, pu16Dst, pu16Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint32_t *, pu32Reg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint32_t, u32RegCopy);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U32(u32RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu32Reg, u32RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32, pu32Dst, pu32Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u32_locked, pu32Dst, pu32Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 3);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint64_t *, pu64Reg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(uint64_t, u64RegCopy);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_GREG_U64(u64RegCopy, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_REF_LOCAL(pu64Reg, u64RegCopy);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64, pu64Dst, pu64Reg, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_xadd_u64_locked, pu64Dst, pu64Reg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64RegCopy);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
6467
/** Opcode 0x0f 0xc2. */
/* Not implemented yet; FNIEMOP_STUB expands to a failing placeholder decoder. */
FNIEMOP_STUB(iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib);
6470
6471
/** Opcode 0x0f 0xc3. */
/**
 * MOVNTI My,Gy - non-temporal store of a 32/64-bit general register to
 * memory.  IEM emulates it as a plain store (the non-temporal hint has no
 * functional effect here).  Register-destination encodings raise \#UD, as
 * does the absence of SSE2.
 */
FNIEMOP_DEF(iemOp_movnti_My_Gy)
{
    IEMOP_MNEMONIC("movnti My,Gy");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    /* Only the register -> memory form makes sense, assuming #UD for the other form. */
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* Decoding is completed before the SSE2 feature check. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)
                    return IEMOP_RAISE_INVALID_OPCODE();

                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                if (!IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fSse2)
                    return IEMOP_RAISE_INVALID_OPCODE();

                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_16BIT:
                /** @todo check this form. */
                return IEMOP_RAISE_INVALID_OPCODE();
        }
    }
    else
        return IEMOP_RAISE_INVALID_OPCODE();
    return VINF_SUCCESS;
}
6525
6526
/* PINSRW, PEXTRW and SHUFPS/SHUFPD are not implemented yet; FNIEMOP_STUB
   expands to failing placeholder decoders. */
/** Opcode 0x0f 0xc4. */
FNIEMOP_STUB(iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib);

/** Opcode 0x0f 0xc5. */
FNIEMOP_STUB(iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib);

/** Opcode 0x0f 0xc6. */
FNIEMOP_STUB(iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib);
6535
6536
/** Opcode 0x0f 0xc7 !11/1. */
/**
 * CMPXCHG8B Mq - compare EDX:EAX with the 64-bit memory operand; if equal,
 * store ECX:EBX to memory and set ZF, otherwise load the memory value into
 * EDX:EAX and clear ZF.  The register pairs are packed into RTUINT64U
 * locals for the assembly worker; the LOCK prefix selects the locked
 * variant.
 */
FNIEMOP_DEF_1(iemOp_Grp9_cmpxchg8b_Mq, uint8_t, bRm)
{
    IEMOP_MNEMONIC("cmpxchg8b Mq");

    IEM_MC_BEGIN(4, 3);
    IEM_MC_ARG(uint64_t *, pu64MemDst, 0);
    IEM_MC_ARG(PRTUINT64U, pu64EaxEdx, 1);
    IEM_MC_ARG(PRTUINT64U, pu64EbxEcx, 2);
    IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 3);
    IEM_MC_LOCAL(RTUINT64U, u64EaxEdx);
    IEM_MC_LOCAL(RTUINT64U, u64EbxEcx);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING();
    /* The destination is always mapped RW since the worker writes it on success. */
    IEM_MC_MEM_MAP(pu64MemDst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);

    /* Pack the EDX:EAX comparand. */
    IEM_MC_FETCH_GREG_U32(u64EaxEdx.s.Lo, X86_GREG_xAX);
    IEM_MC_FETCH_GREG_U32(u64EaxEdx.s.Hi, X86_GREG_xDX);
    IEM_MC_REF_LOCAL(pu64EaxEdx, u64EaxEdx);

    /* Pack the ECX:EBX replacement value. */
    IEM_MC_FETCH_GREG_U32(u64EbxEcx.s.Lo, X86_GREG_xBX);
    IEM_MC_FETCH_GREG_U32(u64EbxEcx.s.Hi, X86_GREG_xCX);
    IEM_MC_REF_LOCAL(pu64EbxEcx, u64EbxEcx);

    IEM_MC_FETCH_EFLAGS(EFlags);
    if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
        IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg8b, pu64MemDst, pu64EaxEdx, pu64EbxEcx, pEFlags);
    else
        IEM_MC_CALL_VOID_AIMPL_4(iemAImpl_cmpxchg8b_locked, pu64MemDst, pu64EaxEdx, pu64EbxEcx, pEFlags);

    IEM_MC_MEM_COMMIT_AND_UNMAP(pu64MemDst, IEM_ACCESS_DATA_RW);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    /* On comparison failure (ZF clear) the old memory value goes to EDX:EAX. */
    IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF)
        /** @todo Testcase: Check effect of cmpxchg8b on bits 63:32 in rax and rdx. */
        IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u64EaxEdx.s.Lo);
        IEM_MC_STORE_GREG_U32(X86_GREG_xDX, u64EaxEdx.s.Hi);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
6581
6582
/* The remaining group 9 members are unimplemented.  FNIEMOP_UD_STUB_1
   presumably decodes to an invalid-opcode (#UD) placeholder - confirm
   against the macro definition. */
/** Opcode REX.W 0x0f 0xc7 !11/1. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_cmpxchg16b_Mdq, uint8_t, bRm);

/** Opcode 0x0f 0xc7 11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_rdrand_Rv, uint8_t, bRm);

/** Opcode 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmptrld_Mq, uint8_t, bRm);

/** Opcode 0x66 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmclear_Mq, uint8_t, bRm);

/** Opcode 0xf3 0x0f 0xc7 !11/6. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmxon_Mq, uint8_t, bRm);

/** Opcode [0xf3] 0x0f 0xc7 !11/7. */
FNIEMOP_UD_STUB_1(iemOp_Grp9_vmptrst_Mq, uint8_t, bRm);
6600
6601
6602/** Opcode 0x0f 0xc7. */
6603FNIEMOP_DEF(iemOp_Grp9)
6604{
6605 /** @todo Testcase: Check mixing 0x66 and 0xf3. Check the effect of 0xf2. */
6606 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6607 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
6608 {
6609 case 0: case 2: case 3: case 4: case 5:
6610 return IEMOP_RAISE_INVALID_OPCODE();
6611 case 1:
6612 /** @todo Testcase: Check prefix effects on cmpxchg8b/16b. */
6613 if ( (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT)
6614 || (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))) /** @todo Testcase: AMD seems to express a different idea here wrt prefixes. */
6615 return IEMOP_RAISE_INVALID_OPCODE();
6616 if (bRm & IEM_OP_PRF_SIZE_REX_W)
6617 return FNIEMOP_CALL_1(iemOp_Grp9_cmpxchg16b_Mdq, bRm);
6618 return FNIEMOP_CALL_1(iemOp_Grp9_cmpxchg8b_Mq, bRm);
6619 case 6:
6620 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
6621 return FNIEMOP_CALL_1(iemOp_Grp9_rdrand_Rv, bRm);
6622 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))
6623 {
6624 case 0:
6625 return FNIEMOP_CALL_1(iemOp_Grp9_vmptrld_Mq, bRm);
6626 case IEM_OP_PRF_SIZE_OP:
6627 return FNIEMOP_CALL_1(iemOp_Grp9_vmclear_Mq, bRm);
6628 case IEM_OP_PRF_REPZ:
6629 return FNIEMOP_CALL_1(iemOp_Grp9_vmxon_Mq, bRm);
6630 default:
6631 return IEMOP_RAISE_INVALID_OPCODE();
6632 }
6633 case 7:
6634 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ))
6635 {
6636 case 0:
6637 case IEM_OP_PRF_REPZ:
6638 return FNIEMOP_CALL_1(iemOp_Grp9_vmptrst_Mq, bRm);
6639 default:
6640 return IEMOP_RAISE_INVALID_OPCODE();
6641 }
6642 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6643 }
6644}
6645
6646
/**
 * Common 'bswap register' helper.
 *
 * Swaps the byte order of the general register @a iReg according to the
 * effective operand size.  Note the 16-bit case deliberately operates on a
 * 32-bit reference without clearing the high dword; the architectural
 * result of a 16-bit BSWAP is undefined, and the behaviour is left to the
 * iemAImpl_bswap_u16 worker.
 */
FNIEMOP_DEF_1(iemOpCommonBswapGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg); /* Don't clear the high dword! */
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u16, pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            /* 32-bit GPR write: clear bits 63:32 in 64-bit mode. */
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u32, pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(1, 0);
            IEM_MC_ARG(uint64_t *, pu64Dst, 0);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_CALL_VOID_AIMPL_1(iemAImpl_bswap_u64, pu64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
6686
6687
/** Opcode 0x0f 0xc8. */
/** BSWAP rAX/r8 - byte-swap via the common helper, register picked by REX.B. */
FNIEMOP_DEF(iemOp_bswap_rAX_r8)
{
    IEMOP_MNEMONIC("bswap rAX/r8");
    /* Note! Intel manuals states that R8-R15 can be accessed by using a REX.X
             prefix.  REX.B is the correct prefix it appears.  For a parallel
             case, see iemOp_mov_AL_Ib and iemOp_mov_eAX_Iv. */
    IEMOP_HLP_MIN_486();    /* BSWAP was introduced with the 486. */
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xAX | pIemCpu->uRexB);
}
6698
6699
/** Opcode 0x0f 0xc9. */
/** BSWAP rCX/r9 - byte-swap via the common helper, register picked by REX.B. */
FNIEMOP_DEF(iemOp_bswap_rCX_r9)
{
    IEMOP_MNEMONIC("bswap rCX/r9");
    IEMOP_HLP_MIN_486();
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xCX | pIemCpu->uRexB);
}
6707
6708
6709/** Opcode 0x0f 0xca. */
6710FNIEMOP_DEF(iemOp_bswap_rDX_r10)
6711{
6712 IEMOP_MNEMONIC("bswap rDX/r9");
6713 IEMOP_HLP_MIN_486();
6714 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDX | pIemCpu->uRexB);
6715}
6716
6717
6718/** Opcode 0x0f 0xcb. */
6719FNIEMOP_DEF(iemOp_bswap_rBX_r11)
6720{
6721 IEMOP_MNEMONIC("bswap rBX/r9");
6722 IEMOP_HLP_MIN_486();
6723 return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBX | pIemCpu->uRexB);
6724}
6725
6726
/** Opcode 0x0f 0xcc. */
/** BSWAP rSP/r12 - byte-swap via the common helper, register picked by REX.B. */
FNIEMOP_DEF(iemOp_bswap_rSP_r12)
{
    IEMOP_MNEMONIC("bswap rSP/r12");
    IEMOP_HLP_MIN_486();
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSP | pIemCpu->uRexB);
}
6734
6735
/** Opcode 0x0f 0xcd. */
/** BSWAP rBP/r13 - byte-swap via the common helper, register picked by REX.B. */
FNIEMOP_DEF(iemOp_bswap_rBP_r13)
{
    IEMOP_MNEMONIC("bswap rBP/r13");
    IEMOP_HLP_MIN_486();
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xBP | pIemCpu->uRexB);
}
6743
6744
/** Opcode 0x0f 0xce. */
/** BSWAP rSI/r14 - byte-swap via the common helper, register picked by REX.B. */
FNIEMOP_DEF(iemOp_bswap_rSI_r14)
{
    IEMOP_MNEMONIC("bswap rSI/r14");
    IEMOP_HLP_MIN_486();
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xSI | pIemCpu->uRexB);
}
6752
6753
/** Opcode 0x0f 0xcf. */
/** BSWAP rDI/r15 - byte-swap via the common helper, register picked by REX.B. */
FNIEMOP_DEF(iemOp_bswap_rDI_r15)
{
    IEMOP_MNEMONIC("bswap rDI/r15");
    IEMOP_HLP_MIN_486();
    return FNIEMOP_CALL_1(iemOpCommonBswapGReg, X86_GREG_xDI | pIemCpu->uRexB);
}
6761
6762
6763
/* Unimplemented MMX/SSE instructions 0x0f 0xd0..0xd6; FNIEMOP_STUB expands
   to failing placeholder decoders. */
/** Opcode 0x0f 0xd0. */
FNIEMOP_STUB(iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps);
/** Opcode 0x0f 0xd1. */
FNIEMOP_STUB(iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq);
/** Opcode 0x0f 0xd2. */
FNIEMOP_STUB(iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq);
/** Opcode 0x0f 0xd3. */
FNIEMOP_STUB(iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq);
/** Opcode 0x0f 0xd4. */
FNIEMOP_STUB(iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq);
/** Opcode 0x0f 0xd5. */
FNIEMOP_STUB(iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq);
/** Opcode 0x0f 0xd6. */
FNIEMOP_STUB(iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq);
6778
6779
6780/** Opcode 0x0f 0xd7. */
6781FNIEMOP_DEF(iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq)
6782{
6783 /* Docs says register only. */
6784 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
6785 if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT)) /** @todo test that this is registers only. */
6786 return IEMOP_RAISE_INVALID_OPCODE();
6787
6788 /* Note! Taking the lazy approch here wrt the high 32-bits of the GREG. */
6789 /** @todo testcase: Check that the instruction implicitly clears the high
6790 * bits in 64-bit mode. The REX.W is first necessary when VLMAX > 256
6791 * and opcode modifications are made to work with the whole width (not
6792 * just 128). */
6793 switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
6794 {
6795 case IEM_OP_PRF_SIZE_OP: /* SSE */
6796 IEMOP_MNEMONIC("pmovmskb Gd,Nq");
6797 IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_SSE | DISOPTYPE_HARMLESS);
6798 IEM_MC_BEGIN(2, 0);
6799 IEM_MC_ARG(uint64_t *, pDst, 0);
6800 IEM_MC_ARG(uint128_t const *, pSrc, 1);
6801 IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
6802 IEM_MC_PREPARE_SSE_USAGE();
6803 IEM_MC_REF_GREG_U64(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
6804 IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
6805 IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_pmovmskb_u128, pDst, pSrc);
6806 IEM_MC_ADVANCE_RIP();
6807 IEM_MC_END();
6808 return VINF_SUCCESS;
6809
6810 case 0: /* MMX */
6811 IEMOP_MNEMONIC("pmovmskb Gd,Udq");
6812 IEMOP_HLP_DECODED_NL_2(OP_PMOVMSKB, IEMOPFORM_RM_REG, OP_PARM_Gd, OP_PARM_Vdq, DISOPTYPE_MMX | DISOPTYPE_HARMLESS);
6813 IEM_MC_BEGIN(2, 0);
6814 IEM_MC_ARG(uint64_t *, pDst, 0);
6815 IEM_MC_ARG(uint64_t const *, pSrc, 1);
6816 IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT();
6817 IEM_MC_PREPARE_FPU_USAGE();
6818 IEM_MC_REF_GREG_U64(pDst, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
6819 IEM_MC_REF_MREG_U64_CONST(pSrc, bRm & X86_MODRM_RM_MASK);
6820 IEM_MC_CALL_MMX_AIMPL_2(iemAImpl_pmovmskb_u64, pDst, pSrc);
6821 IEM_MC_ADVANCE_RIP();
6822 IEM_MC_END();
6823 return VINF_SUCCESS;
6824
6825 default:
6826 return IEMOP_RAISE_INVALID_OPCODE();
6827 }
6828}
6829
6830
/* Unimplemented MMX/SSE instructions 0x0f 0xd8..0xe6; FNIEMOP_STUB expands
   to failing placeholder decoders. */
/** Opcode 0x0f 0xd8. */
FNIEMOP_STUB(iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq);
/** Opcode 0x0f 0xd9. */
FNIEMOP_STUB(iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq);
/** Opcode 0x0f 0xda. */
FNIEMOP_STUB(iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq);
/** Opcode 0x0f 0xdb. */
FNIEMOP_STUB(iemOp_pand_Pq_Qq__pand_Vdq_Wdq);
/** Opcode 0x0f 0xdc. */
FNIEMOP_STUB(iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq);
/** Opcode 0x0f 0xdd. */
FNIEMOP_STUB(iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq);
/** Opcode 0x0f 0xde. */
FNIEMOP_STUB(iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq);
/** Opcode 0x0f 0xdf. */
FNIEMOP_STUB(iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq);
/** Opcode 0x0f 0xe0. */
FNIEMOP_STUB(iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq);
/** Opcode 0x0f 0xe1. */
FNIEMOP_STUB(iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq);
/** Opcode 0x0f 0xe2. */
FNIEMOP_STUB(iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq);
/** Opcode 0x0f 0xe3. */
FNIEMOP_STUB(iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq);
/** Opcode 0x0f 0xe4. */
FNIEMOP_STUB(iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq);
/** Opcode 0x0f 0xe5. */
FNIEMOP_STUB(iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq);
/** Opcode 0x0f 0xe6. */
FNIEMOP_STUB(iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd);
6861
6862
/** Opcode 0x0f 0xe7. */
/**
 * MOVNTQ Mq,Pq / MOVNTDQ Mdq,Vdq - non-temporal store of an MMX (no prefix)
 * or SSE (0x66 prefix) register to memory.  Emulated as a plain store.
 * The register,register encoding raises \#UD.
 */
FNIEMOP_DEF(iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq)
{
    IEMOP_MNEMONIC(!(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? "movntq mr,r" : "movntdq mr,r");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * Register, memory.
         */
/** @todo check when the REPNZ/Z bits kick in. Same as lock, probably... */
        switch (pIemCpu->fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
        {

            case IEM_OP_PRF_SIZE_OP: /* SSE */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint128_t, uSrc);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
                IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();

                /* MOVNTDQ requires a 16-byte aligned destination. */
                IEM_MC_FETCH_XREG_U128(uSrc, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U128_ALIGN_SSE(pIemCpu->iEffSeg, GCPtrEffSrc, uSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case 0: /* MMX */
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, uSrc);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
                IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();

                IEM_MC_FETCH_MREG_U64(uSrc, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffSrc, uSrc);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            default:
                return IEMOP_RAISE_INVALID_OPCODE();
        }
    }
    /* The register, register encoding is invalid. */
    else
        return IEMOP_RAISE_INVALID_OPCODE();
    return VINF_SUCCESS;
}
6920
6921
/** Opcode 0x0f 0xe8 - psubsb (stub, not implemented). */
FNIEMOP_STUB(iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq);
/** Opcode 0x0f 0xe9 - psubsw (stub, not implemented). */
FNIEMOP_STUB(iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq);
/** Opcode 0x0f 0xea - pminsw (stub, not implemented). */
FNIEMOP_STUB(iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq);
/** Opcode 0x0f 0xeb - por (stub, not implemented). */
FNIEMOP_STUB(iemOp_por_Pq_Qq__por_Vdq_Wdq);
/** Opcode 0x0f 0xec - paddsb (stub, not implemented). */
FNIEMOP_STUB(iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq);
/** Opcode 0x0f 0xed - paddsw (stub, not implemented). */
FNIEMOP_STUB(iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq);
/** Opcode 0x0f 0xee - pmaxsw (stub, not implemented). */
FNIEMOP_STUB(iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq);
6936
6937
/**
 * Opcode 0x0f 0xef - pxor Pq,Qq (MMX) / pxor Vdq,Wdq (SSE2).
 * Bitwise XOR; decoding is handled by the shared MMX/SSE2
 * full-register-to-full-register helper.
 */
FNIEMOP_DEF(iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq)
{
    IEMOP_MNEMONIC("pxor");
    return FNIEMOP_CALL_1(iemOpCommonMmxSse2_FullFull_To_Full, &g_iemAImpl_pxor);
}
6944
6945
/** Opcode 0x0f 0xf0 - lddqu (stub, not implemented). */
FNIEMOP_STUB(iemOp_lddqu_Vdq_Mdq);
/** Opcode 0x0f 0xf1 - psllw (stub, not implemented). */
FNIEMOP_STUB(iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq);
/** Opcode 0x0f 0xf2 - pslld (stub, not implemented). */
FNIEMOP_STUB(iemOp_psld_Pq_Qq__pslld_Vdq_Wdq);
/** Opcode 0x0f 0xf3 - psllq (stub, not implemented). */
FNIEMOP_STUB(iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq);
/** Opcode 0x0f 0xf4 - pmuludq (stub, not implemented). */
FNIEMOP_STUB(iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq);
/** Opcode 0x0f 0xf5 - pmaddwd (stub, not implemented). */
FNIEMOP_STUB(iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq);
/** Opcode 0x0f 0xf6 - psadbw (stub, not implemented). */
FNIEMOP_STUB(iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq);
/** Opcode 0x0f 0xf7 - maskmovq / maskmovdqu (stub, not implemented). */
FNIEMOP_STUB(iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq);
/** Opcode 0x0f 0xf8 - psubb (stub, not implemented). */
FNIEMOP_STUB(iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq); //NEXT
/** Opcode 0x0f 0xf9 - psubw (stub, not implemented). */
FNIEMOP_STUB(iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq);
/** Opcode 0x0f 0xfa - psubd (stub, not implemented). */
FNIEMOP_STUB(iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq);
/** Opcode 0x0f 0xfb - psubq (stub, not implemented). */
FNIEMOP_STUB(iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq);
/** Opcode 0x0f 0xfc - paddb (stub, not implemented). */
FNIEMOP_STUB(iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq);
/** Opcode 0x0f 0xfd - paddw (stub, not implemented). */
FNIEMOP_STUB(iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq);
/** Opcode 0x0f 0xfe - paddd (stub, not implemented). */
FNIEMOP_STUB(iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq);
6976
6977
/**
 * The two byte opcode map (0x0f escape), indexed by the byte following 0x0f
 * (see iemOp_2byteEscape).
 */
const PFNIEMOP g_apfnTwoByteMap[256] =
{
    /* 0x00 */ iemOp_Grp6,
    /* 0x01 */ iemOp_Grp7,
    /* 0x02 */ iemOp_lar_Gv_Ew,
    /* 0x03 */ iemOp_lsl_Gv_Ew,
    /* 0x04 */ iemOp_Invalid,
    /* 0x05 */ iemOp_syscall,
    /* 0x06 */ iemOp_clts,
    /* 0x07 */ iemOp_sysret,
    /* 0x08 */ iemOp_invd,
    /* 0x09 */ iemOp_wbinvd,
    /* 0x0a */ iemOp_Invalid,
    /* 0x0b */ iemOp_ud2,
    /* 0x0c */ iemOp_Invalid,
    /* 0x0d */ iemOp_nop_Ev_GrpP,
    /* 0x0e */ iemOp_femms,
    /* 0x0f */ iemOp_3Dnow,
    /* 0x10 */ iemOp_movups_Vps_Wps__movupd_Vpd_Wpd__movss_Vss_Wss__movsd_Vsd_Wsd,
    /* 0x11 */ iemOp_movups_Wps_Vps__movupd_Wpd_Vpd__movss_Wss_Vss__movsd_Vsd_Wsd,
    /* 0x12 */ iemOp_movlps_Vq_Mq__movhlps_Vq_Uq__movlpd_Vq_Mq__movsldup_Vq_Wq__movddup_Vq_Wq,
    /* 0x13 */ iemOp_movlps_Mq_Vq__movlpd_Mq_Vq,
    /* 0x14 */ iemOp_unpckhlps_Vps_Wq__unpcklpd_Vpd_Wq,
    /* 0x15 */ iemOp_unpckhps_Vps_Wq__unpckhpd_Vpd_Wq,
    /* 0x16 */ iemOp_movhps_Vq_Mq__movlhps_Vq_Uq__movhpd_Vq_Mq__movshdup_Vq_Wq,
    /* 0x17 */ iemOp_movhps_Mq_Vq__movhpd_Mq_Vq,
    /* 0x18 */ iemOp_prefetch_Grp16,
    /* 0x19 */ iemOp_nop_Ev,
    /* 0x1a */ iemOp_nop_Ev,
    /* 0x1b */ iemOp_nop_Ev,
    /* 0x1c */ iemOp_nop_Ev,
    /* 0x1d */ iemOp_nop_Ev,
    /* 0x1e */ iemOp_nop_Ev,
    /* 0x1f */ iemOp_nop_Ev,
    /* 0x20 */ iemOp_mov_Rd_Cd,
    /* 0x21 */ iemOp_mov_Rd_Dd,
    /* 0x22 */ iemOp_mov_Cd_Rd,
    /* 0x23 */ iemOp_mov_Dd_Rd,
    /* 0x24 */ iemOp_mov_Rd_Td,
    /* 0x25 */ iemOp_Invalid,
    /* 0x26 */ iemOp_mov_Td_Rd,
    /* 0x27 */ iemOp_Invalid,
    /* 0x28 */ iemOp_movaps_Vps_Wps__movapd_Vpd_Wpd,
    /* 0x29 */ iemOp_movaps_Wps_Vps__movapd_Wpd_Vpd,
    /* 0x2a */ iemOp_cvtpi2ps_Vps_Qpi__cvtpi2pd_Vpd_Qpi__cvtsi2ss_Vss_Ey__cvtsi2sd_Vsd_Ey,
    /* 0x2b */ iemOp_movntps_Mps_Vps__movntpd_Mpd_Vpd,
    /* 0x2c */ iemOp_cvttps2pi_Ppi_Wps__cvttpd2pi_Ppi_Wpd__cvttss2si_Gy_Wss__cvttsd2si_Yu_Wsd,
    /* 0x2d */ iemOp_cvtps2pi_Ppi_Wps__cvtpd2pi_QpiWpd__cvtss2si_Gy_Wss__cvtsd2si_Gy_Wsd,
    /* 0x2e */ iemOp_ucomiss_Vss_Wss__ucomisd_Vsd_Wsd,
    /* 0x2f */ iemOp_comiss_Vss_Wss__comisd_Vsd_Wsd,
    /* 0x30 */ iemOp_wrmsr,
    /* 0x31 */ iemOp_rdtsc,
    /* 0x32 */ iemOp_rdmsr,
    /* 0x33 */ iemOp_rdpmc,
    /* 0x34 */ iemOp_sysenter,
    /* 0x35 */ iemOp_sysexit,
    /* 0x36 */ iemOp_Invalid,
    /* 0x37 */ iemOp_getsec,
    /* 0x38 */ iemOp_3byte_Esc_A4,
    /* 0x39 */ iemOp_Invalid,
    /* 0x3a */ iemOp_3byte_Esc_A5,
    /* 0x3b */ iemOp_Invalid,
    /* 0x3c */ iemOp_Invalid,
    /* 0x3d */ iemOp_Invalid,
    /* 0x3e */ iemOp_Invalid,
    /* 0x3f */ iemOp_Invalid,
    /* 0x40 */ iemOp_cmovo_Gv_Ev,
    /* 0x41 */ iemOp_cmovno_Gv_Ev,
    /* 0x42 */ iemOp_cmovc_Gv_Ev,
    /* 0x43 */ iemOp_cmovnc_Gv_Ev,
    /* 0x44 */ iemOp_cmove_Gv_Ev,
    /* 0x45 */ iemOp_cmovne_Gv_Ev,
    /* 0x46 */ iemOp_cmovbe_Gv_Ev,
    /* 0x47 */ iemOp_cmovnbe_Gv_Ev,
    /* 0x48 */ iemOp_cmovs_Gv_Ev,
    /* 0x49 */ iemOp_cmovns_Gv_Ev,
    /* 0x4a */ iemOp_cmovp_Gv_Ev,
    /* 0x4b */ iemOp_cmovnp_Gv_Ev,
    /* 0x4c */ iemOp_cmovl_Gv_Ev,
    /* 0x4d */ iemOp_cmovnl_Gv_Ev,
    /* 0x4e */ iemOp_cmovle_Gv_Ev,
    /* 0x4f */ iemOp_cmovnle_Gv_Ev,
    /* 0x50 */ iemOp_movmskps_Gy_Ups__movmskpd_Gy_Upd,
    /* 0x51 */ iemOp_sqrtps_Wps_Vps__sqrtpd_Wpd_Vpd__sqrtss_Vss_Wss__sqrtsd_Vsd_Wsd,
    /* 0x52 */ iemOp_rsqrtps_Wps_Vps__rsqrtss_Vss_Wss,
    /* 0x53 */ iemOp_rcpps_Wps_Vps__rcpss_Vs_Wss,
    /* 0x54 */ iemOp_andps_Vps_Wps__andpd_Wpd_Vpd,
    /* 0x55 */ iemOp_andnps_Vps_Wps__andnpd_Wpd_Vpd,
    /* 0x56 */ iemOp_orps_Wpd_Vpd__orpd_Wpd_Vpd,
    /* 0x57 */ iemOp_xorps_Vps_Wps__xorpd_Wpd_Vpd,
    /* 0x58 */ iemOp_addps_Vps_Wps__addpd_Vpd_Wpd__addss_Vss_Wss__addsd_Vsd_Wsd,
    /* 0x59 */ iemOp_mulps_Vps_Wps__mulpd_Vpd_Wpd__mulss_Vss__Wss__mulsd_Vsd_Wsd,
    /* 0x5a */ iemOp_cvtps2pd_Vpd_Wps__cvtpd2ps_Vps_Wpd__cvtss2sd_Vsd_Wss__cvtsd2ss_Vss_Wsd,
    /* 0x5b */ iemOp_cvtdq2ps_Vps_Wdq__cvtps2dq_Vdq_Wps__cvtps2dq_Vdq_Wps,
    /* 0x5c */ iemOp_subps_Vps_Wps__subpd_Vps_Wdp__subss_Vss_Wss__subsd_Vsd_Wsd,
    /* 0x5d */ iemOp_minps_Vps_Wps__minpd_Vpd_Wpd__minss_Vss_Wss__minsd_Vsd_Wsd,
    /* 0x5e */ iemOp_divps_Vps_Wps__divpd_Vpd_Wpd__divss_Vss_Wss__divsd_Vsd_Wsd,
    /* 0x5f */ iemOp_maxps_Vps_Wps__maxpd_Vpd_Wpd__maxss_Vss_Wss__maxsd_Vsd_Wsd,
    /* 0x60 */ iemOp_punpcklbw_Pq_Qd__punpcklbw_Vdq_Wdq,
    /* 0x61 */ iemOp_punpcklwd_Pq_Qd__punpcklwd_Vdq_Wdq,
    /* 0x62 */ iemOp_punpckldq_Pq_Qd__punpckldq_Vdq_Wdq,
    /* 0x63 */ iemOp_packsswb_Pq_Qq__packsswb_Vdq_Wdq,
    /* 0x64 */ iemOp_pcmpgtb_Pq_Qq__pcmpgtb_Vdq_Wdq,
    /* 0x65 */ iemOp_pcmpgtw_Pq_Qq__pcmpgtw_Vdq_Wdq,
    /* 0x66 */ iemOp_pcmpgtd_Pq_Qq__pcmpgtd_Vdq_Wdq,
    /* 0x67 */ iemOp_packuswb_Pq_Qq__packuswb_Vdq_Wdq,
    /* 0x68 */ iemOp_punpckhbw_Pq_Qq__punpckhbw_Vdq_Wdq,
    /* 0x69 */ iemOp_punpckhwd_Pq_Qd__punpckhwd_Vdq_Wdq,
    /* 0x6a */ iemOp_punpckhdq_Pq_Qd__punpckhdq_Vdq_Wdq,
    /* 0x6b */ iemOp_packssdw_Pq_Qd__packssdq_Vdq_Wdq,
    /* 0x6c */ iemOp_punpcklqdq_Vdq_Wdq,
    /* 0x6d */ iemOp_punpckhqdq_Vdq_Wdq,
    /* 0x6e */ iemOp_movd_q_Pd_Ey__movd_q_Vy_Ey,
    /* 0x6f */ iemOp_movq_Pq_Qq__movdqa_Vdq_Wdq__movdqu_Vdq_Wdq,
    /* 0x70 */ iemOp_pshufw_Pq_Qq_Ib__pshufd_Vdq_Wdq_Ib__pshufhw_Vdq_Wdq_Ib__pshuflq_Vdq_Wdq_Ib,
    /* 0x71 */ iemOp_Grp12,
    /* 0x72 */ iemOp_Grp13,
    /* 0x73 */ iemOp_Grp14,
    /* 0x74 */ iemOp_pcmpeqb_Pq_Qq__pcmpeqb_Vdq_Wdq,
    /* 0x75 */ iemOp_pcmpeqw_Pq_Qq__pcmpeqw_Vdq_Wdq,
    /* 0x76 */ iemOp_pcmped_Pq_Qq__pcmpeqd_Vdq_Wdq,
    /* 0x77 */ iemOp_emms,
    /* 0x78 */ iemOp_vmread_AmdGrp17,
    /* 0x79 */ iemOp_vmwrite,
    /* 0x7a */ iemOp_Invalid,
    /* 0x7b */ iemOp_Invalid,
    /* 0x7c */ iemOp_haddpd_Vdp_Wpd__haddps_Vps_Wps,
    /* 0x7d */ iemOp_hsubpd_Vpd_Wpd__hsubps_Vps_Wps,
    /* 0x7e */ iemOp_movd_q_Ey_Pd__movd_q_Ey_Vy__movq_Vq_Wq,
    /* 0x7f */ iemOp_movq_Qq_Pq__movq_movdqa_Wdq_Vdq__movdqu_Wdq_Vdq,
    /* 0x80 */ iemOp_jo_Jv,
    /* 0x81 */ iemOp_jno_Jv,
    /* 0x82 */ iemOp_jc_Jv,
    /* 0x83 */ iemOp_jnc_Jv,
    /* 0x84 */ iemOp_je_Jv,
    /* 0x85 */ iemOp_jne_Jv,
    /* 0x86 */ iemOp_jbe_Jv,
    /* 0x87 */ iemOp_jnbe_Jv,
    /* 0x88 */ iemOp_js_Jv,
    /* 0x89 */ iemOp_jns_Jv,
    /* 0x8a */ iemOp_jp_Jv,
    /* 0x8b */ iemOp_jnp_Jv,
    /* 0x8c */ iemOp_jl_Jv,
    /* 0x8d */ iemOp_jnl_Jv,
    /* 0x8e */ iemOp_jle_Jv,
    /* 0x8f */ iemOp_jnle_Jv,
    /* 0x90 */ iemOp_seto_Eb,
    /* 0x91 */ iemOp_setno_Eb,
    /* 0x92 */ iemOp_setc_Eb,
    /* 0x93 */ iemOp_setnc_Eb,
    /* 0x94 */ iemOp_sete_Eb,
    /* 0x95 */ iemOp_setne_Eb,
    /* 0x96 */ iemOp_setbe_Eb,
    /* 0x97 */ iemOp_setnbe_Eb,
    /* 0x98 */ iemOp_sets_Eb,
    /* 0x99 */ iemOp_setns_Eb,
    /* 0x9a */ iemOp_setp_Eb,
    /* 0x9b */ iemOp_setnp_Eb,
    /* 0x9c */ iemOp_setl_Eb,
    /* 0x9d */ iemOp_setnl_Eb,
    /* 0x9e */ iemOp_setle_Eb,
    /* 0x9f */ iemOp_setnle_Eb,
    /* 0xa0 */ iemOp_push_fs,
    /* 0xa1 */ iemOp_pop_fs,
    /* 0xa2 */ iemOp_cpuid,
    /* 0xa3 */ iemOp_bt_Ev_Gv,
    /* 0xa4 */ iemOp_shld_Ev_Gv_Ib,
    /* 0xa5 */ iemOp_shld_Ev_Gv_CL,
    /* 0xa6 */ iemOp_Invalid,
    /* 0xa7 */ iemOp_Invalid,
    /* 0xa8 */ iemOp_push_gs,
    /* 0xa9 */ iemOp_pop_gs,
    /* 0xaa */ iemOp_rsm,
    /* 0xab */ iemOp_bts_Ev_Gv,
    /* 0xac */ iemOp_shrd_Ev_Gv_Ib,
    /* 0xad */ iemOp_shrd_Ev_Gv_CL,
    /* 0xae */ iemOp_Grp15,
    /* 0xaf */ iemOp_imul_Gv_Ev,
    /* 0xb0 */ iemOp_cmpxchg_Eb_Gb,
    /* 0xb1 */ iemOp_cmpxchg_Ev_Gv,
    /* 0xb2 */ iemOp_lss_Gv_Mp,
    /* 0xb3 */ iemOp_btr_Ev_Gv,
    /* 0xb4 */ iemOp_lfs_Gv_Mp,
    /* 0xb5 */ iemOp_lgs_Gv_Mp,
    /* 0xb6 */ iemOp_movzx_Gv_Eb,
    /* 0xb7 */ iemOp_movzx_Gv_Ew,
    /* 0xb8 */ iemOp_popcnt_Gv_Ev_jmpe,
    /* 0xb9 */ iemOp_Grp10,
    /* 0xba */ iemOp_Grp8,
    /* 0xbb */ iemOp_btc_Ev_Gv,
    /* 0xbc */ iemOp_bsf_Gv_Ev,
    /* 0xbd */ iemOp_bsr_Gv_Ev,
    /* 0xbe */ iemOp_movsx_Gv_Eb,
    /* 0xbf */ iemOp_movsx_Gv_Ew,
    /* 0xc0 */ iemOp_xadd_Eb_Gb,
    /* 0xc1 */ iemOp_xadd_Ev_Gv,
    /* 0xc2 */ iemOp_cmpps_Vps_Wps_Ib__cmppd_Vpd_Wpd_Ib__cmpss_Vss_Wss_Ib__cmpsd_Vsd_Wsd_Ib,
    /* 0xc3 */ iemOp_movnti_My_Gy,
    /* 0xc4 */ iemOp_pinsrw_Pq_Ry_Mw_Ib__pinsrw_Vdq_Ry_Mw_Ib,
    /* 0xc5 */ iemOp_pextrw_Gd_Nq_Ib__pextrw_Gd_Udq_Ib,
    /* 0xc6 */ iemOp_shufps_Vps_Wps_Ib__shufdp_Vpd_Wpd_Ib,
    /* 0xc7 */ iemOp_Grp9,
    /* 0xc8 */ iemOp_bswap_rAX_r8,
    /* 0xc9 */ iemOp_bswap_rCX_r9,
    /* 0xca */ iemOp_bswap_rDX_r10,
    /* 0xcb */ iemOp_bswap_rBX_r11,
    /* 0xcc */ iemOp_bswap_rSP_r12,
    /* 0xcd */ iemOp_bswap_rBP_r13,
    /* 0xce */ iemOp_bswap_rSI_r14,
    /* 0xcf */ iemOp_bswap_rDI_r15,
    /* 0xd0 */ iemOp_addsubpd_Vpd_Wpd__addsubps_Vps_Wps,
    /* 0xd1 */ iemOp_psrlw_Pp_Qp__psrlw_Vdp_Wdq,
    /* 0xd2 */ iemOp_psrld_Pq_Qq__psrld_Vdq_Wdq,
    /* 0xd3 */ iemOp_psrlq_Pq_Qq__psrlq_Vdq_Wdq,
    /* 0xd4 */ iemOp_paddq_Pq_Qq__paddq_Vdq_Wdq,
    /* 0xd5 */ iemOp_pmulq_Pq_Qq__pmullw_Vdq_Wdq,
    /* 0xd6 */ iemOp_movq_Wq_Vq__movq2dq_Vdq_Nq__movdq2q_Pq_Uq,
    /* 0xd7 */ iemOp_pmovmskb_Gd_Nq__pmovmskb_Gd_Udq,
    /* 0xd8 */ iemOp_psubusb_Pq_Qq__psubusb_Vdq_Wdq,
    /* 0xd9 */ iemOp_psubusw_Pq_Qq__psubusw_Vdq_Wdq,
    /* 0xda */ iemOp_pminub_Pq_Qq__pminub_Vdq_Wdq,
    /* 0xdb */ iemOp_pand_Pq_Qq__pand_Vdq_Wdq,
    /* 0xdc */ iemOp_paddusb_Pq_Qq__paddusb_Vdq_Wdq,
    /* 0xdd */ iemOp_paddusw_Pq_Qq__paddusw_Vdq_Wdq,
    /* 0xde */ iemOp_pmaxub_Pq_Qq__pamxub_Vdq_Wdq,
    /* 0xdf */ iemOp_pandn_Pq_Qq__pandn_Vdq_Wdq,
    /* 0xe0 */ iemOp_pavgb_Pq_Qq__pavgb_Vdq_Wdq,
    /* 0xe1 */ iemOp_psraw_Pq_Qq__psraw_Vdq_Wdq,
    /* 0xe2 */ iemOp_psrad_Pq_Qq__psrad_Vdq_Wdq,
    /* 0xe3 */ iemOp_pavgw_Pq_Qq__pavgw_Vdq_Wdq,
    /* 0xe4 */ iemOp_pmulhuw_Pq_Qq__pmulhuw_Vdq_Wdq,
    /* 0xe5 */ iemOp_pmulhw_Pq_Qq__pmulhw_Vdq_Wdq,
    /* 0xe6 */ iemOp_cvttpd2dq_Vdq_Wdp__cvtdq2pd_Vdq_Wpd__cvtpd2dq_Vdq_Wpd,
    /* 0xe7 */ iemOp_movntq_Mq_Pq__movntdq_Mdq_Vdq,
    /* 0xe8 */ iemOp_psubsb_Pq_Qq__psubsb_Vdq_Wdq,
    /* 0xe9 */ iemOp_psubsw_Pq_Qq__psubsw_Vdq_Wdq,
    /* 0xea */ iemOp_pminsw_Pq_Qq__pminsw_Vdq_Wdq,
    /* 0xeb */ iemOp_por_Pq_Qq__por_Vdq_Wdq,
    /* 0xec */ iemOp_paddsb_Pq_Qq__paddsb_Vdq_Wdq,
    /* 0xed */ iemOp_paddsw_Pq_Qq__paddsw_Vdq_Wdq,
    /* 0xee */ iemOp_pmaxsw_Pq_Qq__pmaxsw_Vdq_Wdq,
    /* 0xef */ iemOp_pxor_Pq_Qq__pxor_Vdq_Wdq,
    /* 0xf0 */ iemOp_lddqu_Vdq_Mdq,
    /* 0xf1 */ iemOp_psllw_Pq_Qq__pslw_Vdq_Wdq,
    /* 0xf2 */ iemOp_psld_Pq_Qq__pslld_Vdq_Wdq,
    /* 0xf3 */ iemOp_psllq_Pq_Qq__pslq_Vdq_Wdq,
    /* 0xf4 */ iemOp_pmuludq_Pq_Qq__pmuludq_Vdq_Wdq,
    /* 0xf5 */ iemOp_pmaddwd_Pq_Qq__pmaddwd_Vdq_Wdq,
    /* 0xf6 */ iemOp_psadbw_Pq_Qq__psadbw_Vdq_Wdq,
    /* 0xf7 */ iemOp_maskmovq_Pq_Nq__maskmovdqu_Vdq_Udq,
    /* 0xf8 */ iemOp_psubb_Pq_Qq_psubb_Vdq_Wdq,
    /* 0xf9 */ iemOp_psubw_Pq_Qq__psubw_Vdq_Wdq,
    /* 0xfa */ iemOp_psubd_Pq_Qq__psubd_Vdq_Wdq,
    /* 0xfb */ iemOp_psubq_Pq_Qq__psbuq_Vdq_Wdq,
    /* 0xfc */ iemOp_paddb_Pq_Qq__paddb_Vdq_Wdq,
    /* 0xfd */ iemOp_paddw_Pq_Qq__paddw_Vdq_Wdq,
    /* 0xfe */ iemOp_paddd_Pq_Qq__paddd_Vdq_Wdq,
    /* 0xff */ iemOp_Invalid
};
7237
7238/** @} */
7239
7240
7241/** @name One byte opcodes.
7242 *
7243 * @{
7244 */
7245
/** Opcode 0x00 - add Eb,Gb. */
FNIEMOP_DEF(iemOp_add_Eb_Gb)
{
    IEMOP_MNEMONIC("add Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_add);
}


/** Opcode 0x01 - add Ev,Gv. */
FNIEMOP_DEF(iemOp_add_Ev_Gv)
{
    IEMOP_MNEMONIC("add Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_add);
}


/** Opcode 0x02 - add Gb,Eb. */
FNIEMOP_DEF(iemOp_add_Gb_Eb)
{
    IEMOP_MNEMONIC("add Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_add);
}


/** Opcode 0x03 - add Gv,Ev. */
FNIEMOP_DEF(iemOp_add_Gv_Ev)
{
    IEMOP_MNEMONIC("add Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_add);
}


/** Opcode 0x04 - add AL,Ib. */
FNIEMOP_DEF(iemOp_add_Al_Ib)
{
    IEMOP_MNEMONIC("add al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_add);
}


/** Opcode 0x05 - add rAX,Iz. */
FNIEMOP_DEF(iemOp_add_eAX_Iz)
{
    IEMOP_MNEMONIC("add rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_add);
}
7292
7293
/** Opcode 0x06 - push es. */
FNIEMOP_DEF(iemOp_push_ES)
{
    IEMOP_MNEMONIC("push es");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_ES);
}


/** Opcode 0x07 - pop es (not valid in 64-bit mode). */
FNIEMOP_DEF(iemOp_pop_ES)
{
    IEMOP_MNEMONIC("pop es");
    IEMOP_HLP_NO_64BIT();
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_ES, pIemCpu->enmEffOpSize);
}
7310
7311
7312/** Opcode 0x08. */
7313FNIEMOP_DEF(iemOp_or_Eb_Gb)
7314{
7315 IEMOP_MNEMONIC("or Eb,Gb");
7316 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
7317 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_or);
7318}
7319
7320
7321/** Opcode 0x09. */
7322FNIEMOP_DEF(iemOp_or_Ev_Gv)
7323{
7324 IEMOP_MNEMONIC("or Ev,Gv ");
7325 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
7326 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_or);
7327}
7328
7329
7330/** Opcode 0x0a. */
7331FNIEMOP_DEF(iemOp_or_Gb_Eb)
7332{
7333 IEMOP_MNEMONIC("or Gb,Eb");
7334 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
7335 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_or);
7336}
7337
7338
7339/** Opcode 0x0b. */
7340FNIEMOP_DEF(iemOp_or_Gv_Ev)
7341{
7342 IEMOP_MNEMONIC("or Gv,Ev");
7343 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
7344 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_or);
7345}
7346
7347
7348/** Opcode 0x0c. */
7349FNIEMOP_DEF(iemOp_or_Al_Ib)
7350{
7351 IEMOP_MNEMONIC("or al,Ib");
7352 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
7353 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_or);
7354}
7355
7356
7357/** Opcode 0x0d. */
7358FNIEMOP_DEF(iemOp_or_eAX_Iz)
7359{
7360 IEMOP_MNEMONIC("or rAX,Iz");
7361 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
7362 return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_or);
7363}
7364
7365
/** Opcode 0x0e - push cs. */
FNIEMOP_DEF(iemOp_push_CS)
{
    IEMOP_MNEMONIC("push cs");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_CS);
}


/** Opcode 0x0f - escape into the two-byte opcode map (g_apfnTwoByteMap). */
FNIEMOP_DEF(iemOp_2byteEscape)
{
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    /** @todo PUSH CS on 8086, undefined on 80186. */
    IEMOP_HLP_MIN_286();
    return FNIEMOP_CALL(g_apfnTwoByteMap[b]);
}
7382
/** Opcode 0x10 - adc Eb,Gb. */
FNIEMOP_DEF(iemOp_adc_Eb_Gb)
{
    IEMOP_MNEMONIC("adc Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_adc);
}


/** Opcode 0x11 - adc Ev,Gv. */
FNIEMOP_DEF(iemOp_adc_Ev_Gv)
{
    IEMOP_MNEMONIC("adc Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_adc);
}


/** Opcode 0x12 - adc Gb,Eb. */
FNIEMOP_DEF(iemOp_adc_Gb_Eb)
{
    IEMOP_MNEMONIC("adc Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_adc);
}


/** Opcode 0x13 - adc Gv,Ev. */
FNIEMOP_DEF(iemOp_adc_Gv_Ev)
{
    IEMOP_MNEMONIC("adc Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_adc);
}


/** Opcode 0x14 - adc AL,Ib. */
FNIEMOP_DEF(iemOp_adc_Al_Ib)
{
    IEMOP_MNEMONIC("adc al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_adc);
}


/** Opcode 0x15 - adc rAX,Iz. */
FNIEMOP_DEF(iemOp_adc_eAX_Iz)
{
    IEMOP_MNEMONIC("adc rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_adc);
}
7429
7430
/** Opcode 0x16 - push ss. */
FNIEMOP_DEF(iemOp_push_SS)
{
    IEMOP_MNEMONIC("push ss");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_SS);
}


/** Opcode 0x17 - pop ss (not valid in 64-bit mode). */
FNIEMOP_DEF(iemOp_pop_SS)
{
    IEMOP_MNEMONIC("pop ss"); /** @todo implies instruction fusing? */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_SS, pIemCpu->enmEffOpSize);
}
7447
7448
/** Opcode 0x18 - sbb Eb,Gb. */
FNIEMOP_DEF(iemOp_sbb_Eb_Gb)
{
    IEMOP_MNEMONIC("sbb Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sbb);
}


/** Opcode 0x19 - sbb Ev,Gv. */
FNIEMOP_DEF(iemOp_sbb_Ev_Gv)
{
    IEMOP_MNEMONIC("sbb Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sbb);
}


/** Opcode 0x1a - sbb Gb,Eb. */
FNIEMOP_DEF(iemOp_sbb_Gb_Eb)
{
    IEMOP_MNEMONIC("sbb Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sbb);
}


/** Opcode 0x1b - sbb Gv,Ev. */
FNIEMOP_DEF(iemOp_sbb_Gv_Ev)
{
    IEMOP_MNEMONIC("sbb Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sbb);
}


/** Opcode 0x1c - sbb AL,Ib. */
FNIEMOP_DEF(iemOp_sbb_Al_Ib)
{
    IEMOP_MNEMONIC("sbb al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sbb);
}


/** Opcode 0x1d - sbb rAX,Iz. */
FNIEMOP_DEF(iemOp_sbb_eAX_Iz)
{
    IEMOP_MNEMONIC("sbb rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sbb);
}
7495
7496
/** Opcode 0x1e - push ds. */
FNIEMOP_DEF(iemOp_push_DS)
{
    IEMOP_MNEMONIC("push ds");
    return FNIEMOP_CALL_1(iemOpCommonPushSReg, X86_SREG_DS);
}


/** Opcode 0x1f - pop ds (not valid in 64-bit mode). */
FNIEMOP_DEF(iemOp_pop_DS)
{
    IEMOP_MNEMONIC("pop ds");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_pop_Sreg, X86_SREG_DS, pIemCpu->enmEffOpSize);
}
7513
7514
/** Opcode 0x20 - and Eb,Gb.  AF is undefined after AND. */
FNIEMOP_DEF(iemOp_and_Eb_Gb)
{
    IEMOP_MNEMONIC("and Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_and);
}


/** Opcode 0x21 - and Ev,Gv. */
FNIEMOP_DEF(iemOp_and_Ev_Gv)
{
    IEMOP_MNEMONIC("and Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_and);
}


/** Opcode 0x22 - and Gb,Eb. */
FNIEMOP_DEF(iemOp_and_Gb_Eb)
{
    IEMOP_MNEMONIC("and Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_and);
}


/** Opcode 0x23 - and Gv,Ev. */
FNIEMOP_DEF(iemOp_and_Gv_Ev)
{
    IEMOP_MNEMONIC("and Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_and);
}


/** Opcode 0x24 - and AL,Ib. */
FNIEMOP_DEF(iemOp_and_Al_Ib)
{
    IEMOP_MNEMONIC("and al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_and);
}


/** Opcode 0x25 - and rAX,Iz. */
FNIEMOP_DEF(iemOp_and_eAX_Iz)
{
    IEMOP_MNEMONIC("and rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_and);
}
7567
7568
/**
 * Opcode 0x26 - ES segment-override prefix.
 * Records the prefix and effective segment, then decodes the next opcode byte
 * via the one-byte map.
 */
FNIEMOP_DEF(iemOp_seg_ES)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg es");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_ES;
    pIemCpu->iEffSeg = X86_SREG_ES;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x27 - daa (not valid in 64-bit mode); OF is undefined. */
FNIEMOP_DEF(iemOp_daa)
{
    IEMOP_MNEMONIC("daa AL");
    IEMOP_HLP_NO_64BIT();
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_daa);
}
7590
7591
/** Opcode 0x28 - sub Eb,Gb. */
FNIEMOP_DEF(iemOp_sub_Eb_Gb)
{
    IEMOP_MNEMONIC("sub Eb,Gb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_sub);
}


/** Opcode 0x29 - sub Ev,Gv. */
FNIEMOP_DEF(iemOp_sub_Ev_Gv)
{
    IEMOP_MNEMONIC("sub Ev,Gv");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_sub);
}


/** Opcode 0x2a - sub Gb,Eb. */
FNIEMOP_DEF(iemOp_sub_Gb_Eb)
{
    IEMOP_MNEMONIC("sub Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_sub);
}


/** Opcode 0x2b - sub Gv,Ev. */
FNIEMOP_DEF(iemOp_sub_Gv_Ev)
{
    IEMOP_MNEMONIC("sub Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_sub);
}


/** Opcode 0x2c - sub AL,Ib. */
FNIEMOP_DEF(iemOp_sub_Al_Ib)
{
    IEMOP_MNEMONIC("sub al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_sub);
}


/** Opcode 0x2d - sub rAX,Iz. */
FNIEMOP_DEF(iemOp_sub_eAX_Iz)
{
    IEMOP_MNEMONIC("sub rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_sub);
}
7638
7639
/**
 * Opcode 0x2e - CS segment-override prefix.
 * Records the prefix and effective segment, then decodes the next opcode byte
 * via the one-byte map.
 */
FNIEMOP_DEF(iemOp_seg_CS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg cs");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_CS;
    pIemCpu->iEffSeg = X86_SREG_CS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x2f - das (not valid in 64-bit mode); OF is undefined. */
FNIEMOP_DEF(iemOp_das)
{
    IEMOP_MNEMONIC("das AL");
    IEMOP_HLP_NO_64BIT();
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_das);
}
7661
7662
/** Opcode 0x30 - xor Eb,Gb.  AF is undefined after XOR. */
FNIEMOP_DEF(iemOp_xor_Eb_Gb)
{
    IEMOP_MNEMONIC("xor Eb,Gb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_xor);
}


/** Opcode 0x31 - xor Ev,Gv. */
FNIEMOP_DEF(iemOp_xor_Ev_Gv)
{
    IEMOP_MNEMONIC("xor Ev,Gv");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_xor);
}


/** Opcode 0x32 - xor Gb,Eb. */
FNIEMOP_DEF(iemOp_xor_Gb_Eb)
{
    IEMOP_MNEMONIC("xor Gb,Eb");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_xor);
}


/** Opcode 0x33 - xor Gv,Ev. */
FNIEMOP_DEF(iemOp_xor_Gv_Ev)
{
    IEMOP_MNEMONIC("xor Gv,Ev");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_xor);
}


/** Opcode 0x34 - xor AL,Ib. */
FNIEMOP_DEF(iemOp_xor_Al_Ib)
{
    IEMOP_MNEMONIC("xor al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_xor);
}


/** Opcode 0x35 - xor rAX,Iz. */
FNIEMOP_DEF(iemOp_xor_eAX_Iz)
{
    IEMOP_MNEMONIC("xor rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_xor);
}
7715
7716
/**
 * Opcode 0x36 - SS segment-override prefix.
 * Records the prefix and effective segment, then decodes the next opcode byte
 * via the one-byte map.
 */
FNIEMOP_DEF(iemOp_seg_SS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ss");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_SS;
    pIemCpu->iEffSeg = X86_SREG_SS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x37 - aaa (stub, not implemented). */
FNIEMOP_STUB(iemOp_aaa);
7731
7732
/** Opcode 0x38 - cmp Eb,Gb. */
FNIEMOP_DEF(iemOp_cmp_Eb_Gb)
{
    IEMOP_MNEMONIC("cmp Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_cmp);
}


/** Opcode 0x39 - cmp Ev,Gv. */
FNIEMOP_DEF(iemOp_cmp_Ev_Gv)
{
    IEMOP_MNEMONIC("cmp Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_cmp);
}


/** Opcode 0x3a - cmp Gb,Eb. */
FNIEMOP_DEF(iemOp_cmp_Gb_Eb)
{
    IEMOP_MNEMONIC("cmp Gb,Eb");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_r8_rm, &g_iemAImpl_cmp);
}


/** Opcode 0x3b - cmp Gv,Ev. */
FNIEMOP_DEF(iemOp_cmp_Gv_Ev)
{
    IEMOP_MNEMONIC("cmp Gv,Ev");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rv_rm, &g_iemAImpl_cmp);
}


/** Opcode 0x3c - cmp AL,Ib. */
FNIEMOP_DEF(iemOp_cmp_Al_Ib)
{
    IEMOP_MNEMONIC("cmp al,Ib");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_cmp);
}


/** Opcode 0x3d - cmp rAX,Iz. */
FNIEMOP_DEF(iemOp_cmp_eAX_Iz)
{
    IEMOP_MNEMONIC("cmp rAX,Iz");
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_cmp);
}
7781
7782
/**
 * Opcode 0x3e - DS segment-override prefix.
 * Records the prefix and effective segment, then decodes the next opcode byte
 * via the one-byte map.
 */
FNIEMOP_DEF(iemOp_seg_DS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg ds");
    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_DS;
    pIemCpu->iEffSeg = X86_SREG_DS;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}


/** Opcode 0x3f - aas (stub, not implemented). */
FNIEMOP_STUB(iemOp_aas);
7797
/**
 * Common 'inc/dec/not/neg register' helper.
 *
 * Dispatches on the effective operand size and applies the supplied unary
 * worker to the given general purpose register in place, updating EFLAGS.
 *
 * @param   pImpl   The instruction's 16/32/64-bit worker function table.
 * @param   iReg    The index of the general purpose register to modify.
 */
FNIEMOP_DEF_2(iemOpCommonUnaryGReg, PCIEMOPUNARYSIZES, pImpl, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint16_t *, pu16Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint32_t *, pu32Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
            /* 32-bit register writes zero the upper half of the 64-bit register. */
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_ARG(uint64_t *, pu64Dst, 0);
            IEM_MC_ARG(uint32_t *, pEFlags, 1);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_REF_EFLAGS(pEFlags);
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    /* Not reachable for valid IEMMODE values; keeps the compiler quiet. */
    return VINF_SUCCESS;
}
7842
7843
7844/** Opcode 0x40. */
7845FNIEMOP_DEF(iemOp_inc_eAX)
7846{
7847 /*
7848 * This is a REX prefix in 64-bit mode.
7849 */
7850 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
7851 {
7852 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex");
7853 pIemCpu->fPrefixes |= IEM_OP_PRF_REX;
7854
7855 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
7856 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
7857 }
7858
7859 IEMOP_MNEMONIC("inc eAX");
7860 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xAX);
7861}
7862
7863
7864/** Opcode 0x41. */
7865FNIEMOP_DEF(iemOp_inc_eCX)
7866{
7867 /*
7868 * This is a REX prefix in 64-bit mode.
7869 */
7870 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
7871 {
7872 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.b");
7873 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B;
7874 pIemCpu->uRexB = 1 << 3;
7875
7876 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
7877 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
7878 }
7879
7880 IEMOP_MNEMONIC("inc eCX");
7881 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xCX);
7882}
7883
7884
7885/** Opcode 0x42. */
7886FNIEMOP_DEF(iemOp_inc_eDX)
7887{
7888 /*
7889 * This is a REX prefix in 64-bit mode.
7890 */
7891 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
7892 {
7893 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.x");
7894 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X;
7895 pIemCpu->uRexIndex = 1 << 3;
7896
7897 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
7898 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
7899 }
7900
7901 IEMOP_MNEMONIC("inc eDX");
7902 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDX);
7903}
7904
7905
7906
7907/** Opcode 0x43. */
7908FNIEMOP_DEF(iemOp_inc_eBX)
7909{
7910 /*
7911 * This is a REX prefix in 64-bit mode.
7912 */
7913 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
7914 {
7915 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bx");
7916 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
7917 pIemCpu->uRexB = 1 << 3;
7918 pIemCpu->uRexIndex = 1 << 3;
7919
7920 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
7921 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
7922 }
7923
7924 IEMOP_MNEMONIC("inc eBX");
7925 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBX);
7926}
7927
7928
7929/** Opcode 0x44. */
7930FNIEMOP_DEF(iemOp_inc_eSP)
7931{
7932 /*
7933 * This is a REX prefix in 64-bit mode.
7934 */
7935 if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
7936 {
7937 IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.r");
7938 pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R;
7939 pIemCpu->uRexReg = 1 << 3;
7940
7941 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
7942 return FNIEMOP_CALL(g_apfnOneByteMap[b]);
7943 }
7944
7945 IEMOP_MNEMONIC("inc eSP");
7946 return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSP);
7947}
7948
7949
/** Opcode 0x45 - 'inc eBP' outside 64-bit mode; the REX.RB prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rb");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B;
        pIemCpu->uRexReg = 1 << 3; /* REX.R extends the ModR/M reg field. */
        pIemCpu->uRexB = 1 << 3;   /* REX.B extends r/m, SIB base and opcode reg fields. */

        /* Fetch the opcode the prefix applies to and continue decoding. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xBP);
}
7970
7971
/** Opcode 0x46 - 'inc eSI' outside 64-bit mode; the REX.RX prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rx");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg = 1 << 3;   /* REX.R extends the ModR/M reg field. */
        pIemCpu->uRexIndex = 1 << 3; /* REX.X extends the SIB index register field. */

        /* Fetch the opcode the prefix applies to and continue decoding. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xSI);
}
7992
7993
/** Opcode 0x47 - 'inc eDI' outside 64-bit mode; the REX.RBX prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_inc_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbx");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X;
        pIemCpu->uRexReg = 1 << 3;   /* REX.R extends the ModR/M reg field. */
        pIemCpu->uRexB = 1 << 3;     /* REX.B extends r/m, SIB base and opcode reg fields. */
        pIemCpu->uRexIndex = 1 << 3; /* REX.X extends the SIB index register field. */

        /* Fetch the opcode the prefix applies to and continue decoding. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("inc eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_inc, X86_GREG_xDI);
}
8015
8016
/** Opcode 0x48 - 'dec eAX' outside 64-bit mode; the REX.W prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eAX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.w");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_SIZE_REX_W;
        iemRecalEffOpSize(pIemCpu); /* REX.W changes the effective operand size. */

        /* Fetch the opcode the prefix applies to and continue decoding. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eAX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xAX);
}
8036
8037
/** Opcode 0x49 - 'dec eCX' outside 64-bit mode; the REX.BW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eCX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB = 1 << 3;    /* REX.B extends r/m, SIB base and opcode reg fields. */
        iemRecalEffOpSize(pIemCpu); /* REX.W changes the effective operand size. */

        /* Fetch the opcode the prefix applies to and continue decoding. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eCX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xCX);
}
8058
8059
/** Opcode 0x4a - 'dec eDX' outside 64-bit mode; the REX.XW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eDX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.xw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexIndex = 1 << 3; /* REX.X extends the SIB index register field. */
        iemRecalEffOpSize(pIemCpu);  /* REX.W changes the effective operand size. */

        /* Fetch the opcode the prefix applies to and continue decoding. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDX);
}
8080
8081
/** Opcode 0x4b - 'dec eBX' outside 64-bit mode; the REX.BXW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eBX)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.bxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexB = 1 << 3;     /* REX.B extends r/m, SIB base and opcode reg fields. */
        pIemCpu->uRexIndex = 1 << 3; /* REX.X extends the SIB index register field. */
        iemRecalEffOpSize(pIemCpu);  /* REX.W changes the effective operand size. */

        /* Fetch the opcode the prefix applies to and continue decoding. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBX");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBX);
}
8103
8104
/** Opcode 0x4c - 'dec eSP' outside 64-bit mode; the REX.RW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eSP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg = 1 << 3;  /* REX.R extends the ModR/M reg field. */
        iemRecalEffOpSize(pIemCpu); /* REX.W changes the effective operand size. */

        /* Fetch the opcode the prefix applies to and continue decoding. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSP);
}
8125
8126
/** Opcode 0x4d - 'dec eBP' outside 64-bit mode; the REX.RBW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eBP)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg = 1 << 3;  /* REX.R extends the ModR/M reg field. */
        pIemCpu->uRexB = 1 << 3;    /* REX.B extends r/m, SIB base and opcode reg fields. */
        iemRecalEffOpSize(pIemCpu); /* REX.W changes the effective operand size. */

        /* Fetch the opcode the prefix applies to and continue decoding. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eBP");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xBP);
}
8148
8149
/** Opcode 0x4e - 'dec eSI' outside 64-bit mode; the REX.RXW prefix in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eSI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg = 1 << 3;   /* REX.R extends the ModR/M reg field. */
        pIemCpu->uRexIndex = 1 << 3; /* REX.X extends the SIB index register field. */
        iemRecalEffOpSize(pIemCpu);  /* REX.W changes the effective operand size. */

        /* Fetch the opcode the prefix applies to and continue decoding. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eSI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xSI);
}
8171
8172
/** Opcode 0x4f - 'dec eDI' outside 64-bit mode; the REX.RBXW prefix (all bits set) in 64-bit mode. */
FNIEMOP_DEF(iemOp_dec_eDI)
{
    /*
     * This is a REX prefix in 64-bit mode.
     */
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("rex.rbxw");
        pIemCpu->fPrefixes |= IEM_OP_PRF_REX | IEM_OP_PRF_REX_R | IEM_OP_PRF_REX_B | IEM_OP_PRF_REX_X | IEM_OP_PRF_SIZE_REX_W;
        pIemCpu->uRexReg = 1 << 3;   /* REX.R extends the ModR/M reg field. */
        pIemCpu->uRexB = 1 << 3;     /* REX.B extends r/m, SIB base and opcode reg fields. */
        pIemCpu->uRexIndex = 1 << 3; /* REX.X extends the SIB index register field. */
        iemRecalEffOpSize(pIemCpu);  /* REX.W changes the effective operand size. */

        /* Fetch the opcode the prefix applies to and continue decoding. */
        uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
        return FNIEMOP_CALL(g_apfnOneByteMap[b]);
    }

    IEMOP_MNEMONIC("dec eDI");
    return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, &g_iemAImpl_dec, X86_GREG_xDI);
}
8195
8196
/**
 * Common 'push register' helper.
 *
 * Pushes the general register @a iReg at the current effective operand size.
 *
 * @param   iReg    The base register index (X86_GREG_XXX); extended by REX.B
 *                  in 64-bit mode.
 */
FNIEMOP_DEF_1(iemOpCommonPushGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB; /* REX.B selects r8..r15. */
        /* Stack ops default to 64-bit operand size; 0x66 selects 16-bit, there is no 32-bit push. */
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Value);
            IEM_MC_FETCH_GREG_U16(u16Value, iReg);
            IEM_MC_PUSH_U16(u16Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Value);
            IEM_MC_FETCH_GREG_U32(u32Value, iReg);
            IEM_MC_PUSH_U32(u32Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Value);
            IEM_MC_FETCH_GREG_U64(u64Value, iReg);
            IEM_MC_PUSH_U64(u64Value);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
8242
8243
/** Opcode 0x50 - 'push rAX' (or r8 with REX.B). */
FNIEMOP_DEF(iemOp_push_eAX)
{
    IEMOP_MNEMONIC("push rAX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xAX);
}
8250
8251
/** Opcode 0x51 - 'push rCX' (or r9 with REX.B). */
FNIEMOP_DEF(iemOp_push_eCX)
{
    IEMOP_MNEMONIC("push rCX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xCX);
}
8258
8259
/** Opcode 0x52 - 'push rDX' (or r10 with REX.B). */
FNIEMOP_DEF(iemOp_push_eDX)
{
    IEMOP_MNEMONIC("push rDX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDX);
}
8266
8267
/** Opcode 0x53 - 'push rBX' (or r11 with REX.B). */
FNIEMOP_DEF(iemOp_push_eBX)
{
    IEMOP_MNEMONIC("push rBX");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBX);
}
8274
8275
/** Opcode 0x54 - 'push rSP' (or r12 with REX.B).
 * Special cased for 8086/8088, which pushes the already decremented SP
 * (i.e. SP-2), unlike 186 and later CPUs that push the original value. */
FNIEMOP_DEF(iemOp_push_eSP)
{
    IEMOP_MNEMONIC("push rSP");
    if (IEM_GET_TARGET_CPU(pIemCpu) == IEMTARGETCPU_8086)
    {
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint16_t, u16Value);
        IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xSP);
        IEM_MC_SUB_LOCAL_U16(u16Value, 2); /* 8086 quirk: the pushed value is SP-2. */
        IEM_MC_PUSH_U16(u16Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    /* All other target CPUs take the common path. */
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSP);
}
8292
8293
/** Opcode 0x55 - 'push rBP' (or r13 with REX.B). */
FNIEMOP_DEF(iemOp_push_eBP)
{
    IEMOP_MNEMONIC("push rBP");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xBP);
}
8300
8301
/** Opcode 0x56 - 'push rSI' (or r14 with REX.B). */
FNIEMOP_DEF(iemOp_push_eSI)
{
    IEMOP_MNEMONIC("push rSI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xSI);
}
8308
8309
/** Opcode 0x57 - 'push rDI' (or r15 with REX.B). */
FNIEMOP_DEF(iemOp_push_eDI)
{
    IEMOP_MNEMONIC("push rDI");
    return FNIEMOP_CALL_1(iemOpCommonPushGReg, X86_GREG_xDI);
}
8316
8317
/**
 * Common 'pop register' helper.
 *
 * Pops the top of stack into the general register @a iReg at the current
 * effective operand size.  POP (r/e)SP itself is special cased by the caller
 * (iemOp_pop_eSP), so this only sees it via the REX.B r12 encoding.
 *
 * @param   iReg    The base register index (X86_GREG_XXX); extended by REX.B
 *                  in 64-bit mode.
 */
FNIEMOP_DEF_1(iemOpCommonPopGReg, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        iReg |= pIemCpu->uRexB; /* REX.B selects r8..r15. */
        /* Stack ops default to 64-bit operand size; 0x66 selects 16-bit, there is no 32-bit pop. */
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t *, pu16Dst);
            IEM_MC_REF_GREG_U16(pu16Dst, iReg);
            IEM_MC_POP_U16(pu16Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t *, pu32Dst);
            IEM_MC_REF_GREG_U32(pu32Dst, iReg);
            IEM_MC_POP_U32(pu32Dst);
            IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst); /** @todo testcase*/
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t *, pu64Dst);
            IEM_MC_REF_GREG_U64(pu64Dst, iReg);
            IEM_MC_POP_U64(pu64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
8364
8365
/** Opcode 0x58 - 'pop rAX' (or r8 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eAX)
{
    IEMOP_MNEMONIC("pop rAX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xAX);
}
8372
8373
/** Opcode 0x59 - 'pop rCX' (or r9 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eCX)
{
    IEMOP_MNEMONIC("pop rCX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xCX);
}
8380
8381
/** Opcode 0x5a - 'pop rDX' (or r10 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eDX)
{
    IEMOP_MNEMONIC("pop rDX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDX);
}
8388
8389
/** Opcode 0x5b - 'pop rBX' (or r11 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eBX)
{
    IEMOP_MNEMONIC("pop rBX");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBX);
}
8396
8397
/** Opcode 0x5c - 'pop rSP' (or r12 with REX.B).
 * Special cased because popping into (r/e)SP stores the value read from the
 * stack rather than updating SP via the usual register reference, avoiding
 * interference with the stack-pointer adjustment done by the pop itself. */
FNIEMOP_DEF(iemOp_pop_eSP)
{
    IEMOP_MNEMONIC("pop rSP");
    if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
    {
        if (pIemCpu->uRexB)
            return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSP); /* REX.B: really 'pop r12'. */
        /* Stack ops default to 64-bit operand size; 0x66 selects 16-bit, there is no 32-bit pop. */
        pIemCpu->enmDefOpSize = IEMMODE_64BIT;
        pIemCpu->enmEffOpSize = !(pIemCpu->fPrefixes & IEM_OP_PRF_SIZE_OP) ? IEMMODE_64BIT : IEMMODE_16BIT;
    }

    IEMOP_HLP_DECODED_NL_1(OP_POP, IEMOPFORM_FIXED, OP_PARM_REG_ESP,
                           DISOPTYPE_HARMLESS | DISOPTYPE_DEFAULT_64_OP_SIZE | DISOPTYPE_REXB_EXTENDS_OPREG);
    /** @todo add testcase for this instruction. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint16_t, u16Dst);
            IEM_MC_POP_U16(&u16Dst); /** @todo not correct MC, fix later. */
            IEM_MC_STORE_GREG_U16(X86_GREG_xSP, u16Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint32_t, u32Dst);
            IEM_MC_POP_U32(&u32Dst);
            IEM_MC_STORE_GREG_U32(X86_GREG_xSP, u32Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(uint64_t, u64Dst);
            IEM_MC_POP_U64(&u64Dst);
            IEM_MC_STORE_GREG_U64(X86_GREG_xSP, u64Dst);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            break;
    }

    return VINF_SUCCESS;
}
8445
8446
/** Opcode 0x5d - 'pop rBP' (or r13 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eBP)
{
    IEMOP_MNEMONIC("pop rBP");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xBP);
}
8453
8454
/** Opcode 0x5e - 'pop rSI' (or r14 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eSI)
{
    IEMOP_MNEMONIC("pop rSI");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xSI);
}
8461
8462
/** Opcode 0x5f - 'pop rDI' (or r15 with REX.B). */
FNIEMOP_DEF(iemOp_pop_eDI)
{
    IEMOP_MNEMONIC("pop rDI");
    return FNIEMOP_CALL_1(iemOpCommonPopGReg, X86_GREG_xDI);
}
8469
8470
/** Opcode 0x60 - PUSHA/PUSHAD; 186+, invalid in 64-bit mode.
 * Defers to a C implementation keyed on the effective operand size. */
FNIEMOP_DEF(iemOp_pusha)
{
    IEMOP_MNEMONIC("pusha");
    IEMOP_HLP_MIN_186();
    IEMOP_HLP_NO_64BIT(); /* 0x60 is reused by other encodings in 64-bit mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_16);
    Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_pusha_32);
}
8482
8483
/** Opcode 0x61 - POPA/POPAD; 186+, invalid in 64-bit mode.
 * Defers to a C implementation keyed on the effective operand size. */
FNIEMOP_DEF(iemOp_popa)
{
    IEMOP_MNEMONIC("popa");
    IEMOP_HLP_MIN_186();
    IEMOP_HLP_NO_64BIT(); /* 0x61 is reused by other encodings in 64-bit mode. */
    if (pIemCpu->enmEffOpSize == IEMMODE_16BIT)
        return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_16);
    Assert(pIemCpu->enmEffOpSize == IEMMODE_32BIT);
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_popa_32);
}
8495
8496
/** Opcode 0x62 - BOUND Gv,Ma (186+); the name suggests it also doubles as the
 *  EVEX prefix escape on newer CPUs.  Not implemented yet, hence the stub. */
FNIEMOP_STUB(iemOp_bound_Gv_Ma_evex);
// IEMOP_HLP_MIN_186(); /* to be applied when the stub is implemented */
8500
8501
/** Opcode 0x63 - non-64-bit modes: ARPL Ew,Gw (286+, protected mode only).
 * Adjusts the RPL of the destination selector up to that of the source and
 * sets ZF accordingly (done by iemAImpl_arpl). */
FNIEMOP_DEF(iemOp_arpl_Ew_Gw)
{
    IEMOP_MNEMONIC("arpl Ew,Gw");
    IEMOP_HLP_MIN_286();
    IEMOP_HLP_NO_REAL_OR_V86_MODE(); /* #UD outside protected mode. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register */
        IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint16_t *,      pu16Dst, 0);
        IEM_MC_ARG(uint16_t,        u16Src,  1);
        IEM_MC_ARG(uint32_t *,      pEFlags, 2);

        IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
        IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK));
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* Memory - read-modify-write on the destination word. */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint16_t *, pu16Dst,          0);
        IEM_MC_ARG(uint16_t,   u16Src,           1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
        IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;

}
8551
8552
/** Opcode 0x63.
 * @note This is a weird one. It works like a regular move instruction if
 *       REX.W isn't set, at least according to AMD docs (rev 3.15, 2009-11).
 * @todo This definitely needs a testcase to verify the odd cases.
 * @remarks Sign-extends a 32-bit source into a 64-bit destination register;
 *          the caller only dispatches here with a 64-bit effective operand
 *          size (see the Assert). */
FNIEMOP_DEF(iemOp_movsxd_Gv_Ev)
{
    Assert(pIemCpu->enmEffOpSize == IEMMODE_64BIT); /* Caller branched already . */

    IEMOP_MNEMONIC("movsxd Gv,Ev");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * Register to register.
         */
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint64_t, u64Value);
        IEM_MC_FETCH_GREG_U32_SX_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint64_t, u64Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
        IEM_MC_FETCH_MEM_U32_SX_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
8594
8595
/** Opcode 0x64 - FS segment override prefix (386+). */
FNIEMOP_DEF(iemOp_seg_FS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg fs");
    IEMOP_HLP_MIN_386();

    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_FS;
    pIemCpu->iEffSeg    = X86_SREG_FS; /* Memory operands now default to FS. */

    /* Fetch the opcode the prefix applies to and continue decoding. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8608
8609
/** Opcode 0x65 - GS segment override prefix (386+). */
FNIEMOP_DEF(iemOp_seg_GS)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("seg gs");
    IEMOP_HLP_MIN_386();

    pIemCpu->fPrefixes |= IEM_OP_PRF_SEG_GS;
    pIemCpu->iEffSeg    = X86_SREG_GS; /* Memory operands now default to GS. */

    /* Fetch the opcode the prefix applies to and continue decoding. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8622
8623
/** Opcode 0x66 - operand-size override prefix (386+). */
FNIEMOP_DEF(iemOp_op_size)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("op size");
    IEMOP_HLP_MIN_386();

    pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_OP;
    iemRecalEffOpSize(pIemCpu); /* Recompute effective op size from default + prefixes. */

    /* Fetch the opcode the prefix applies to and continue decoding. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8636
8637
/** Opcode 0x67 - address-size override prefix (386+).
 * Toggles between the default and alternate address size: 16 <-> 32 bit in
 * legacy modes, 64 -> 32 bit in long mode. */
FNIEMOP_DEF(iemOp_addr_size)
{
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("addr size");
    IEMOP_HLP_MIN_386();

    pIemCpu->fPrefixes |= IEM_OP_PRF_SIZE_ADDR;
    switch (pIemCpu->enmDefAddrMode)
    {
        case IEMMODE_16BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
        case IEMMODE_32BIT: pIemCpu->enmEffAddrMode = IEMMODE_16BIT; break;
        case IEMMODE_64BIT: pIemCpu->enmEffAddrMode = IEMMODE_32BIT; break;
        default: AssertFailed();
    }

    /* Fetch the opcode the prefix applies to and continue decoding. */
    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
8656
8657
/** Opcode 0x68 - PUSH Iz (186+).
 * Pushes an immediate of the effective operand size; in 64-bit mode the
 * immediate is a sign-extended 32-bit value. */
FNIEMOP_DEF(iemOp_push_Iz)
{
    IEMOP_MNEMONIC("push Iz");
    IEMOP_HLP_MIN_186();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Stack ops default to 64-bit operand size in long mode. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U16(u16Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U32(u32Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        case IEMMODE_64BIT:
        {
            /* Note: imm32 sign-extended to 64 bits, as per the SDM. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            IEMOP_HLP_NO_LOCK_PREFIX();
            IEM_MC_BEGIN(0,0);
            IEM_MC_PUSH_U64(u64Imm);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
8702
8703
8704/** Opcode 0x69. */
8705FNIEMOP_DEF(iemOp_imul_Gv_Ev_Iz)
8706{
8707 IEMOP_MNEMONIC("imul Gv,Ev,Iz"); /* Gv = Ev * Iz; */
8708 IEMOP_HLP_MIN_186();
8709 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
8710 IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
8711
8712 switch (pIemCpu->enmEffOpSize)
8713 {
8714 case IEMMODE_16BIT:
8715 {
8716 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
8717 {
8718 /* register operand */
8719 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
8720 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
8721
8722 IEM_MC_BEGIN(3, 1);
8723 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
8724 IEM_MC_ARG_CONST(uint16_t, u16Src,/*=*/ u16Imm,1);
8725 IEM_MC_ARG(uint32_t *, pEFlags, 2);
8726 IEM_MC_LOCAL(uint16_t, u16Tmp);
8727
8728 IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
8729 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
8730 IEM_MC_REF_EFLAGS(pEFlags);
8731 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
8732 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
8733
8734 IEM_MC_ADVANCE_RIP();
8735 IEM_MC_END();
8736 }
8737 else
8738 {
8739 /* memory operand */
8740 IEM_MC_BEGIN(3, 2);
8741 IEM_MC_ARG(uint16_t *, pu16Dst, 0);
8742 IEM_MC_ARG(uint16_t, u16Src, 1);
8743 IEM_MC_ARG(uint32_t *, pEFlags, 2);
8744 IEM_MC_LOCAL(uint16_t, u16Tmp);
8745 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
8746
8747 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
8748 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
8749 IEM_MC_ASSIGN(u16Src, u16Imm);
8750 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
8751 IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
8752 IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
8753 IEM_MC_REF_EFLAGS(pEFlags);
8754 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
8755 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);
8756
8757 IEM_MC_ADVANCE_RIP();
8758 IEM_MC_END();
8759 }
8760 return VINF_SUCCESS;
8761 }
8762
8763 case IEMMODE_32BIT:
8764 {
8765 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
8766 {
8767 /* register operand */
8768 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
8769 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
8770
8771 IEM_MC_BEGIN(3, 1);
8772 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
8773 IEM_MC_ARG_CONST(uint32_t, u32Src,/*=*/ u32Imm,1);
8774 IEM_MC_ARG(uint32_t *, pEFlags, 2);
8775 IEM_MC_LOCAL(uint32_t, u32Tmp);
8776
8777 IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
8778 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
8779 IEM_MC_REF_EFLAGS(pEFlags);
8780 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
8781 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
8782
8783 IEM_MC_ADVANCE_RIP();
8784 IEM_MC_END();
8785 }
8786 else
8787 {
8788 /* memory operand */
8789 IEM_MC_BEGIN(3, 2);
8790 IEM_MC_ARG(uint32_t *, pu32Dst, 0);
8791 IEM_MC_ARG(uint32_t, u32Src, 1);
8792 IEM_MC_ARG(uint32_t *, pEFlags, 2);
8793 IEM_MC_LOCAL(uint32_t, u32Tmp);
8794 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
8795
8796 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
8797 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
8798 IEM_MC_ASSIGN(u32Src, u32Imm);
8799 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
8800 IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
8801 IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
8802 IEM_MC_REF_EFLAGS(pEFlags);
8803 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
8804 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);
8805
8806 IEM_MC_ADVANCE_RIP();
8807 IEM_MC_END();
8808 }
8809 return VINF_SUCCESS;
8810 }
8811
8812 case IEMMODE_64BIT:
8813 {
8814 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
8815 {
8816 /* register operand */
8817 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
8818 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
8819
8820 IEM_MC_BEGIN(3, 1);
8821 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
8822 IEM_MC_ARG_CONST(uint64_t, u64Src,/*=*/ u64Imm,1);
8823 IEM_MC_ARG(uint32_t *, pEFlags, 2);
8824 IEM_MC_LOCAL(uint64_t, u64Tmp);
8825
8826 IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
8827 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
8828 IEM_MC_REF_EFLAGS(pEFlags);
8829 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
8830 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
8831
8832 IEM_MC_ADVANCE_RIP();
8833 IEM_MC_END();
8834 }
8835 else
8836 {
8837 /* memory operand */
8838 IEM_MC_BEGIN(3, 2);
8839 IEM_MC_ARG(uint64_t *, pu64Dst, 0);
8840 IEM_MC_ARG(uint64_t, u64Src, 1);
8841 IEM_MC_ARG(uint32_t *, pEFlags, 2);
8842 IEM_MC_LOCAL(uint64_t, u64Tmp);
8843 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
8844
8845 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
8846 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
8847 IEM_MC_ASSIGN(u64Src, u64Imm);
8848 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
8849 IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
8850 IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
8851 IEM_MC_REF_EFLAGS(pEFlags);
8852 IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
8853 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);
8854
8855 IEM_MC_ADVANCE_RIP();
8856 IEM_MC_END();
8857 }
8858 return VINF_SUCCESS;
8859 }
8860 }
8861 AssertFailedReturn(VERR_IEM_IPE_9);
8862}
8863
8864
/** Opcode 0x6a - PUSH Ib (186+).
 * Pushes a sign-extended 8-bit immediate at the effective operand size. */
FNIEMOP_DEF(iemOp_push_Ib)
{
    IEMOP_MNEMONIC("push Ib");
    IEMOP_HLP_MIN_186();
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Stack ops default to 64-bit operand size in long mode. */

    IEM_MC_BEGIN(0,0);
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT: /* i8Imm sign-extends implicitly to the push width. */
            IEM_MC_PUSH_U16(i8Imm);
            break;
        case IEMMODE_32BIT:
            IEM_MC_PUSH_U32(i8Imm);
            break;
        case IEMMODE_64BIT:
            IEM_MC_PUSH_U64(i8Imm);
            break;
    }
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
8891
8892
/** Opcode 0x6b - IMUL Gv,Ev,Ib (186+): three-operand signed multiply with a
 * sign-extended 8-bit immediate.  The result goes to the reg operand;
 * SF/ZF/AF/PF are undefined afterwards (only CF/OF are defined). */
FNIEMOP_DEF(iemOp_imul_Gv_Ev_Ib)
{
    IEMOP_MNEMONIC("imul Gv,Ev,Ib"); /* Gv = Ev * Iz; */
    IEMOP_HLP_MIN_186();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand; imm8 sign-extended to 16 bits. */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                    0);
                IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);
                IEM_MC_LOCAL(uint16_t,      u16Tmp);

                /* Multiply in a local temp, then store it into the reg operand. */
                IEM_MC_FETCH_GREG_U16(u16Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                    0);
                IEM_MC_ARG(uint16_t,        u16Src,                     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);
                IEM_MC_LOCAL(uint16_t,      u16Tmp);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                /* 1 = bytes of immediate following the ModR/M bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu16Dst, u16Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand; imm8 sign-extended to 32 bits. */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                    0);
                IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);
                IEM_MC_LOCAL(uint32_t,      u32Tmp);

                IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                    0);
                IEM_MC_ARG(uint32_t,        u32Src,                     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);
                IEM_MC_LOCAL(uint32_t,      u32Tmp);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                /* 1 = bytes of immediate following the ModR/M bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_S8_SX_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu32Dst, u32Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u32, pu32Dst, u32Src, pEFlags);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register operand; imm8 sign-extended to 64 bits. */
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

                IEM_MC_BEGIN(3, 1);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                    0);
                IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/ (int8_t)u8Imm, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);
                IEM_MC_LOCAL(uint64_t,      u64Tmp);

                IEM_MC_FETCH_GREG_U64(u64Tmp, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory operand */
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                    0);
                IEM_MC_ARG(uint64_t,        u64Src,                     1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);
                IEM_MC_LOCAL(uint64_t,      u64Tmp);
                IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);

                /* 1 = bytes of immediate following the ModR/M bytes. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
                IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_LOCAL(pu64Dst, u64Tmp);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_imul_two_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Tmp);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            return VINF_SUCCESS;
    }
    AssertFailedReturn(VERR_IEM_IPE_8);
}
9046
9047
/**
 * Opcode 0x6c - INS Yb,DX (byte input from port DX to ES:[r/e]DI).
 *
 * Defers all work to C implementations; a REPNZ or REPZ prefix selects the
 * repeating worker for the current effective address size.
 */
FNIEMOP_DEF(iemOp_insb_Yb_DX)
{
    IEMOP_HLP_MIN_186();        /* INS first appeared on the 80186. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            /* 'false' is the same trailing bool passed to every ins worker here;
               presumably "I/O permission already checked" — confirm against iemCImpl_rep_ins_op8_addr16. */
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr16, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr32, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op8_addr64, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yb,DX");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr16, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr32, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op8_addr64, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
9076
9077
/**
 * Opcode 0x6d - INS Yv,DX (word/dword input from port DX to ES:[r/e]DI).
 *
 * Dispatches on effective operand size and address size to the matching C
 * worker.  Note that IEMMODE_64BIT operand size falls into the 32-bit case:
 * both use the op32 workers (there is no 64-bit port-width variant here).
 * All reachable paths return from the inner switches; the trailing break
 * statements are never executed.
 */
FNIEMOP_DEF(iemOp_inswd_Yv_DX)
{
    IEMOP_HLP_MIN_186();        /* INS first appeared on the 80186. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op16_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - all cases above return */
            case IEMMODE_64BIT: /* 64-bit operand size shares the op32 workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_ins_op32_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("ins Yv,DX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op16_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached */
            case IEMMODE_64BIT: /* 64-bit operand size shares the op32 workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr16, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr32, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_ins_op32_addr64, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
9138
9139
/**
 * Opcode 0x6e - OUTS DX,Yb (byte output to port DX from [seg]:[r/e]SI).
 *
 * Unlike INS, the source segment is overridable, so the current effective
 * segment (pIemCpu->iEffSeg) is passed to the C worker.
 */
FNIEMOP_DEF(iemOp_outsb_Yb_DX)
{
    IEMOP_HLP_MIN_186();        /* OUTS first appeared on the 80186. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep outs DX,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            /* Trailing 'false' mirrors the INS workers; presumably an
               "I/O permission already checked" flag — confirm in iemCImpl_rep_outs_op8_addr16. */
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr16, pIemCpu->iEffSeg, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr32, pIemCpu->iEffSeg, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op8_addr64, pIemCpu->iEffSeg, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("outs DX,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr16, pIemCpu->iEffSeg, false);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr32, pIemCpu->iEffSeg, false);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op8_addr64, pIemCpu->iEffSeg, false);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
9168
9169
/**
 * Opcode 0x6f - OUTS DX,Yv (word/dword output to port DX from [seg]:[r/e]SI).
 *
 * Dispatches on effective operand size and address size.  IEMMODE_64BIT
 * operand size falls into the 32-bit case (both use the op32 workers; there
 * is no 64-bit port width).  The source segment is overridable, so
 * pIemCpu->iEffSeg is passed down.  All reachable paths return from the
 * inner switches; the trailing break statements are never executed.
 */
FNIEMOP_DEF(iemOp_outswd_Yv_DX)
{
    IEMOP_HLP_MIN_186();        /* OUTS first appeared on the 80186. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ))
    {
        IEMOP_MNEMONIC("rep outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op16_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached - all cases above return */
            case IEMMODE_64BIT: /* 64-bit operand size shares the op32 workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_rep_outs_op32_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        IEMOP_MNEMONIC("outs DX,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op16_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached */
            case IEMMODE_64BIT: /* 64-bit operand size shares the op32 workers. */
            case IEMMODE_32BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr16, pIemCpu->iEffSeg, false);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr32, pIemCpu->iEffSeg, false);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_outs_op32_addr64, pIemCpu->iEffSeg, false);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break; /* not reached */
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
9230
9231
/**
 * Opcode 0x70 - JO Jb: jump short if the overflow flag (OF) is set.
 */
FNIEMOP_DEF(iemOp_jo_Jb)
{
    IEMOP_MNEMONIC("jo Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);   /* OF=1: taken */
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();       /* OF=0: fall through */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9249
9250
/**
 * Opcode 0x71 - JNO Jb: jump short if the overflow flag (OF) is clear.
 * Implemented as the inverted form of JO (branch arms swapped).
 */
FNIEMOP_DEF(iemOp_jno_Jb)
{
    IEMOP_MNEMONIC("jno Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();       /* OF=1: fall through */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* OF=0: taken */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9268
/**
 * Opcode 0x72 - JC/JB/JNAE Jb: jump short if the carry flag (CF) is set.
 */
FNIEMOP_DEF(iemOp_jc_Jb)
{
    IEMOP_MNEMONIC("jc/jnae Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_REL_JMP_S8(i8Imm);   /* CF=1: taken */
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();       /* CF=0: fall through */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9286
9287
/**
 * Opcode 0x73 - JNC/JNB/JAE Jb: jump short if the carry flag (CF) is clear.
 */
FNIEMOP_DEF(iemOp_jnc_Jb)
{
    IEMOP_MNEMONIC("jnc/jnb Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
        IEM_MC_ADVANCE_RIP();       /* CF=1: fall through */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* CF=0: taken */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9305
9306
/**
 * Opcode 0x74 - JE/JZ Jb: jump short if the zero flag (ZF) is set.
 */
FNIEMOP_DEF(iemOp_je_Jb)
{
    IEMOP_MNEMONIC("je/jz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);   /* ZF=1: taken */
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();       /* ZF=0: fall through */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9324
9325
/**
 * Opcode 0x75 - JNE/JNZ Jb: jump short if the zero flag (ZF) is clear.
 */
FNIEMOP_DEF(iemOp_jne_Jb)
{
    IEMOP_MNEMONIC("jne/jnz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();       /* ZF=1: fall through */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* ZF=0: taken */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9343
9344
/**
 * Opcode 0x76 - JBE/JNA Jb: jump short if CF or ZF is set (unsigned <=).
 */
FNIEMOP_DEF(iemOp_jbe_Jb)
{
    IEMOP_MNEMONIC("jbe/jna Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_REL_JMP_S8(i8Imm);   /* CF=1 or ZF=1: taken */
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();       /* both clear: fall through */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9362
9363
/**
 * Opcode 0x77 - JNBE/JA Jb: jump short if both CF and ZF are clear (unsigned >).
 */
FNIEMOP_DEF(iemOp_jnbe_Jb)
{
    IEMOP_MNEMONIC("jnbe/ja Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
        IEM_MC_ADVANCE_RIP();       /* CF=1 or ZF=1: fall through */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* both clear: taken */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9381
9382
/**
 * Opcode 0x78 - JS Jb: jump short if the sign flag (SF) is set.
 */
FNIEMOP_DEF(iemOp_js_Jb)
{
    IEMOP_MNEMONIC("js Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_REL_JMP_S8(i8Imm);   /* SF=1: taken */
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();       /* SF=0: fall through */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9400
9401
/**
 * Opcode 0x79 - JNS Jb: jump short if the sign flag (SF) is clear.
 */
FNIEMOP_DEF(iemOp_jns_Jb)
{
    IEMOP_MNEMONIC("jns Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_SF) {
        IEM_MC_ADVANCE_RIP();       /* SF=1: fall through */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* SF=0: taken */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9419
9420
/**
 * Opcode 0x7a - JP/JPE Jb: jump short if the parity flag (PF) is set.
 */
FNIEMOP_DEF(iemOp_jp_Jb)
{
    IEMOP_MNEMONIC("jp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_REL_JMP_S8(i8Imm);   /* PF=1: taken */
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();       /* PF=0: fall through */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9438
9439
/**
 * Opcode 0x7b - JNP/JPO Jb: jump short if the parity flag (PF) is clear.
 */
FNIEMOP_DEF(iemOp_jnp_Jb)
{
    IEMOP_MNEMONIC("jnp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF) {
        IEM_MC_ADVANCE_RIP();       /* PF=1: fall through */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* PF=0: taken */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9457
9458
/**
 * Opcode 0x7c - JL/JNGE Jb: jump short if SF != OF (signed <).
 */
FNIEMOP_DEF(iemOp_jl_Jb)
{
    IEMOP_MNEMONIC("jl/jnge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);   /* SF != OF: taken */
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();       /* SF == OF: fall through */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9476
9477
/**
 * Opcode 0x7d - JNL/JGE Jb: jump short if SF == OF (signed >=).
 */
FNIEMOP_DEF(iemOp_jnl_Jb)
{
    IEMOP_MNEMONIC("jnl/jge Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BITS_NE(X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();       /* SF != OF: fall through */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* SF == OF: taken */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9495
9496
/**
 * Opcode 0x7e - JLE/JNG Jb: jump short if ZF is set or SF != OF (signed <=).
 */
FNIEMOP_DEF(iemOp_jle_Jb)
{
    IEMOP_MNEMONIC("jle/jng Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_REL_JMP_S8(i8Imm);   /* ZF=1 or SF != OF: taken */
    } IEM_MC_ELSE() {
        IEM_MC_ADVANCE_RIP();       /* ZF=0 and SF == OF: fall through */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9514
9515
/**
 * Opcode 0x7f - JNLE/JG Jb: jump short if ZF is clear and SF == OF (signed >).
 */
FNIEMOP_DEF(iemOp_jnle_Jb)
{
    IEMOP_MNEMONIC("jnle/jg Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);   /* signed 8-bit displacement */
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(X86_EFL_ZF, X86_EFL_SF, X86_EFL_OF) {
        IEM_MC_ADVANCE_RIP();       /* ZF=1 or SF != OF: fall through */
    } IEM_MC_ELSE() {
        IEM_MC_REL_JMP_S8(i8Imm);   /* ZF=0 and SF == OF: taken */
    } IEM_MC_ENDIF();
    IEM_MC_END();
    return VINF_SUCCESS;
}
9533
9534
/**
 * Opcode 0x80 - group 1 Eb,Ib (ADD/OR/ADC/SBB/AND/SUB/XOR/CMP with imm8).
 *
 * The ModR/M reg field (0..7) selects the operation.  The mnemonic string is
 * a packed table of 4-byte slots ("add\0", "or\0\0", ...) indexed by reg*4.
 * CMP has no locked worker (pfnLockedU8 is NULL), so for it a LOCK prefix is
 * rejected and the memory operand is mapped read-only.
 */
FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_80)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Eb,Ib");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register target */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();     /* LOCK is only valid with a memory destination. */
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,       pu8Dst,                 0);
        IEM_MC_ARG_CONST(uint8_t,   u8Src, /*=*/ u8Imm,     1);
        IEM_MC_ARG(uint32_t *,      pEFlags,                2);

        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory target */
        uint32_t fAccess;
        if (pImpl->pfnLockedU8)
            fAccess = IEM_ACCESS_DATA_RW;
        else
        {   /* CMP: read-only destination, LOCK invalid. */
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        /* The '1' tells the effective address calc one immediate byte follows
           the ModR/M bytes (matters for RIP-relative addressing). */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_ARG_CONST(uint8_t,   u8Src, /*=*/ u8Imm,     1);

        IEM_MC_MEM_MAP(pu8Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, u8Src, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, fAccess);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
9593
9594
/**
 * Opcode 0x81 - group 1 Ev,Iz (ADD/OR/ADC/SBB/AND/SUB/XOR/CMP with imm16/32).
 *
 * The ModR/M reg field selects the operation via g_apIemImplGrp1; the packed
 * mnemonic string is indexed by reg*4.  In 64-bit operand size the imm32 is
 * sign-extended to 64 bits.  CMP has no locked worker, so a LOCK prefix is
 * rejected for it and its memory operand is mapped read-only.
 */
FNIEMOP_DEF(iemOp_Grp1_Ev_Iz)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Iz");
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register target */
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEMOP_HLP_NO_LOCK_PREFIX();     /* LOCK requires a memory destination. */
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
                IEM_MC_ARG_CONST(uint16_t,  u16Src, /*=*/ u16Imm,   1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory target */
                uint32_t fAccess;
                if (pImpl->pfnLockedU16)
                    fAccess = IEM_ACCESS_DATA_RW;
                else
                {   /* CMP, TEST: read-only destination, LOCK invalid. */
                    IEMOP_HLP_NO_LOCK_PREFIX();
                    fAccess = IEM_ACCESS_DATA_R;
                }
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,            0);
                IEM_MC_ARG(uint16_t,        u16Src,             1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 2 = number of immediate bytes following (RIP-relative adjust). */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            break;
        }

        case IEMMODE_32BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register target */
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
                IEM_MC_ARG_CONST(uint32_t,  u32Src, /*=*/ u32Imm,   1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                /* 32-bit register writes zero the upper half of the 64-bit register. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory target */
                uint32_t fAccess;
                if (pImpl->pfnLockedU32)
                    fAccess = IEM_ACCESS_DATA_RW;
                else
                {   /* CMP, TEST: read-only destination, LOCK invalid. */
                    IEMOP_HLP_NO_LOCK_PREFIX();
                    fAccess = IEM_ACCESS_DATA_R;
                }
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,            0);
                IEM_MC_ARG(uint32_t,        u32Src,             1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 4 = number of immediate bytes following (RIP-relative adjust). */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            break;
        }

        case IEMMODE_64BIT:
        {
            if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
            {
                /* register target */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm); /* imm32 sign-extended to 64 bits */
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
                IEM_MC_ARG_CONST(uint64_t,  u64Src, /*=*/ u64Imm,   1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            else
            {
                /* memory target */
                uint32_t fAccess;
                if (pImpl->pfnLockedU64)
                    fAccess = IEM_ACCESS_DATA_RW;
                else
                {   /* CMP: read-only destination, LOCK invalid. */
                    IEMOP_HLP_NO_LOCK_PREFIX();
                    fAccess = IEM_ACCESS_DATA_R;
                }
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,            0);
                IEM_MC_ARG(uint64_t,        u64Src,             1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* Still 4 immediate bytes in 64-bit mode (imm32, sign-extended below). */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
            }
            break;
        }
    }
    return VINF_SUCCESS;
}
9769
9770
/**
 * Opcode 0x82 - alias of opcode 0x80 (group 1 Eb,Ib), invalid in 64-bit mode.
 *
 * Rejects 64-bit mode up front and forwards the rest of the decoding to the
 * 0x80 handler.
 */
FNIEMOP_DEF(iemOp_Grp1_Eb_Ib_82)
{
    IEMOP_HLP_NO_64BIT(); /** @todo do we need to decode the whole instruction or is this ok? */
    return FNIEMOP_CALL(iemOp_Grp1_Eb_Ib_80);
}
9777
9778
/**
 * Opcode 0x83 - group 1 Ev,Ib (ADD/OR/ADC/SBB/AND/SUB/XOR/CMP with a
 * sign-extended imm8).
 *
 * The imm8 is sign-extended to the effective operand size before being fed
 * to the 16/32/64-bit worker.  CMP has no locked worker, so a LOCK prefix is
 * rejected for it and its memory operand is mapped read-only.
 */
FNIEMOP_DEF(iemOp_Grp1_Ev_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC2("add\0or\0\0adc\0sbb\0and\0sub\0xor\0cmp" + ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)*4, "Ev,Ib");
    /* Note! Seems the OR, AND, and XOR instructions are present on CPUs prior
       to the 386 even if absent in the intel reference manuals and some
       3rd party opcode listings. */
    PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /*
         * Register target
         */
        IEMOP_HLP_NO_LOCK_PREFIX();     /* LOCK requires a memory destination. */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                    0);
                IEM_MC_ARG_CONST(uint16_t,  u16Src, /*=*/ (int8_t)u8Imm,1); /* sign-extend imm8 */
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);

                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                    0);
                IEM_MC_ARG_CONST(uint32_t,  u32Src, /*=*/ (int8_t)u8Imm,1); /* sign-extend imm8 */
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);

                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                /* 32-bit register writes zero the upper half of the 64-bit register. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                    0);
                IEM_MC_ARG_CONST(uint64_t,  u64Src, /*=*/ (int8_t)u8Imm,1); /* sign-extend imm8 */
                IEM_MC_ARG(uint32_t *,      pEFlags,                    2);

                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }
        }
    }
    else
    {
        /*
         * Memory target.
         */
        uint32_t fAccess;
        if (pImpl->pfnLockedU16)    /* Locked workers exist for all sizes or none. */
            fAccess = IEM_ACCESS_DATA_RW;
        else
        {   /* CMP: read-only destination, LOCK invalid. */
            IEMOP_HLP_NO_LOCK_PREFIX();
            fAccess = IEM_ACCESS_DATA_R;
        }

        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,            0);
                IEM_MC_ARG(uint16_t,        u16Src,             1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* 1 = number of immediate bytes following (RIP-relative adjust). */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEM_MC_ASSIGN(u16Src, (int8_t)u8Imm);   /* sign-extend imm8 */
                IEM_MC_MEM_MAP(pu16Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, u16Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,            0);
                IEM_MC_ARG(uint32_t,        u32Src,             1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEM_MC_ASSIGN(u32Src, (int8_t)u8Imm);   /* sign-extend imm8 */
                IEM_MC_MEM_MAP(pu32Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, u32Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,            0);
                IEM_MC_ARG(uint64_t,        u64Src,             1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
                IEM_MC_ASSIGN(u64Src, (int8_t)u8Imm);   /* sign-extend imm8 */
                IEM_MC_MEM_MAP(pu64Dst, fAccess, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, u64Src, pEFlags);
                else
                    IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnLockedU64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, fAccess);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
            }
        }
    }
    return VINF_SUCCESS;
}
9942
9943
/**
 * Opcode 0x84 - TEST Eb,Gb.
 *
 * Reuses the generic byte r/m,reg binary-operator decoder with the TEST
 * implementation table.  AF is architecturally undefined after TEST.
 */
FNIEMOP_DEF(iemOp_test_Eb_Gb)
{
    IEMOP_MNEMONIC("test Eb,Gb");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_r8, &g_iemAImpl_test);
}
9952
9953
/**
 * Opcode 0x85 - TEST Ev,Gv.
 *
 * Reuses the generic word/dword/qword r/m,reg binary-operator decoder with
 * the TEST implementation table.  AF is architecturally undefined after TEST.
 */
FNIEMOP_DEF(iemOp_test_Ev_Gv)
{
    IEMOP_MNEMONIC("test Ev,Gv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo do we have to decode the whole instruction first? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rm_rv, &g_iemAImpl_test);
}
9962
9963
/**
 * Opcode 0x86 - XCHG Eb,Gb.
 *
 * Register form: fetch both byte registers and store them crosswise.
 * Memory form: map the memory byte read-write and call the xchg assembly
 * worker with the mapped byte and a direct reference to the register.
 */
FNIEMOP_DEF(iemOp_xchg_Eb_Gb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_MNEMONIC("xchg Eb,Gb");

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, uTmp1);
        IEM_MC_LOCAL(uint8_t, uTmp2);

        /* Plain cross-store: reg <- r/m, r/m <- reg. */
        IEM_MC_FETCH_GREG_U8(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_FETCH_GREG_U8(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're accessing memory.
         */
/** @todo the register must be committed separately! */
        IEM_MC_BEGIN(2, 2);
        IEM_MC_ARG(uint8_t *,  pu8Mem, 0);
        IEM_MC_ARG(uint8_t *,  pu8Reg, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_REF_GREG_U8(pu8Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u8, pu8Mem, pu8Reg);
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Mem, IEM_ACCESS_DATA_RW);

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10011
10012
10013/** Opcode 0x87. */
10014FNIEMOP_DEF(iemOp_xchg_Ev_Gv)
10015{
10016 IEMOP_MNEMONIC("xchg Ev,Gv");
10017 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10018
10019 /*
10020 * If rm is denoting a register, no more instruction bytes.
10021 */
10022 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
10023 {
10024 IEMOP_HLP_NO_LOCK_PREFIX();
10025
10026 switch (pIemCpu->enmEffOpSize)
10027 {
10028 case IEMMODE_16BIT:
10029 IEM_MC_BEGIN(0, 2);
10030 IEM_MC_LOCAL(uint16_t, uTmp1);
10031 IEM_MC_LOCAL(uint16_t, uTmp2);
10032
10033 IEM_MC_FETCH_GREG_U16(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
10034 IEM_MC_FETCH_GREG_U16(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10035 IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
10036 IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
10037
10038 IEM_MC_ADVANCE_RIP();
10039 IEM_MC_END();
10040 return VINF_SUCCESS;
10041
10042 case IEMMODE_32BIT:
10043 IEM_MC_BEGIN(0, 2);
10044 IEM_MC_LOCAL(uint32_t, uTmp1);
10045 IEM_MC_LOCAL(uint32_t, uTmp2);
10046
10047 IEM_MC_FETCH_GREG_U32(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
10048 IEM_MC_FETCH_GREG_U32(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10049 IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
10050 IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
10051
10052 IEM_MC_ADVANCE_RIP();
10053 IEM_MC_END();
10054 return VINF_SUCCESS;
10055
10056 case IEMMODE_64BIT:
10057 IEM_MC_BEGIN(0, 2);
10058 IEM_MC_LOCAL(uint64_t, uTmp1);
10059 IEM_MC_LOCAL(uint64_t, uTmp2);
10060
10061 IEM_MC_FETCH_GREG_U64(uTmp1, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
10062 IEM_MC_FETCH_GREG_U64(uTmp2, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
10063 IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, uTmp1);
10064 IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, uTmp2);
10065
10066 IEM_MC_ADVANCE_RIP();
10067 IEM_MC_END();
10068 return VINF_SUCCESS;
10069
10070 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10071 }
10072 }
10073 else
10074 {
10075 /*
10076 * We're accessing memory.
10077 */
10078 switch (pIemCpu->enmEffOpSize)
10079 {
10080/** @todo the register must be committed separately! */
10081 case IEMMODE_16BIT:
10082 IEM_MC_BEGIN(2, 2);
10083 IEM_MC_ARG(uint16_t *, pu16Mem, 0);
10084 IEM_MC_ARG(uint16_t *, pu16Reg, 1);
10085 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10086
10087 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
10088 IEM_MC_MEM_MAP(pu16Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10089 IEM_MC_REF_GREG_U16(pu16Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
10090 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u16, pu16Mem, pu16Reg);
10091 IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Mem, IEM_ACCESS_DATA_RW);
10092
10093 IEM_MC_ADVANCE_RIP();
10094 IEM_MC_END();
10095 return VINF_SUCCESS;
10096
10097 case IEMMODE_32BIT:
10098 IEM_MC_BEGIN(2, 2);
10099 IEM_MC_ARG(uint32_t *, pu32Mem, 0);
10100 IEM_MC_ARG(uint32_t *, pu32Reg, 1);
10101 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10102
10103 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
10104 IEM_MC_MEM_MAP(pu32Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10105 IEM_MC_REF_GREG_U32(pu32Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
10106 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u32, pu32Mem, pu32Reg);
10107 IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Mem, IEM_ACCESS_DATA_RW);
10108
10109 IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Reg);
10110 IEM_MC_ADVANCE_RIP();
10111 IEM_MC_END();
10112 return VINF_SUCCESS;
10113
10114 case IEMMODE_64BIT:
10115 IEM_MC_BEGIN(2, 2);
10116 IEM_MC_ARG(uint64_t *, pu64Mem, 0);
10117 IEM_MC_ARG(uint64_t *, pu64Reg, 1);
10118 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
10119
10120 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
10121 IEM_MC_MEM_MAP(pu64Mem, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
10122 IEM_MC_REF_GREG_U64(pu64Reg, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
10123 IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_xchg_u64, pu64Mem, pu64Reg);
10124 IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Mem, IEM_ACCESS_DATA_RW);
10125
10126 IEM_MC_ADVANCE_RIP();
10127 IEM_MC_END();
10128 return VINF_SUCCESS;
10129
10130 IEM_NOT_REACHED_DEFAULT_CASE_RET();
10131 }
10132 }
10133}
10134
10135
/** Opcode 0x88 - MOV Eb,Gb (store byte register into r/m byte). */
FNIEMOP_DEF(iemOp_mov_Eb_Gb)
{
    IEMOP_MNEMONIC("mov Eb,Gb");

    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: copy the reg field register (Gb, REX.R extended)
           into the r/m field register (Eb, REX.B extended). */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_GREG_U8(u8Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
        IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;

}
10174
10175
/** Opcode 0x89 - MOV Ev,Gv (store word/dword/qword register into r/m). */
FNIEMOP_DEF(iemOp_mov_Ev_Gv)
{
    IEMOP_MNEMONIC("mov Ev,Gv");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: Gv (reg field, REX.R) -> Ev (r/m field, REX.B),
           sized by the current effective operand size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're writing a register to memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U16(u16Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U32(u32Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U64(u64Value, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
10262
10263
/** Opcode 0x8a - MOV Gb,Eb (load byte register from r/m byte). */
FNIEMOP_DEF(iemOp_mov_Gb_Eb)
{
    IEMOP_MNEMONIC("mov Gb,Eb");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: Eb (r/m field, REX.B) -> Gb (reg field, REX.R). */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint8_t, u8Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_STORE_GREG_U8(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u8Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10300
10301
/** Opcode 0x8b - MOV Gv,Ev (load word/dword/qword register from r/m). */
FNIEMOP_DEF(iemOp_mov_Gv_Ev)
{
    IEMOP_MNEMONIC("mov Gv,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register form: Ev (r/m field, REX.B) -> Gv (reg field, REX.R),
           sized by the current effective operand size. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're loading a register from memory.
         */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    return VINF_SUCCESS;
}
10388
10389
10390/** Opcode 0x63. */
10391FNIEMOP_DEF(iemOp_arpl_Ew_Gw_movsx_Gv_Ev)
10392{
10393 if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
10394 return FNIEMOP_CALL(iemOp_arpl_Ew_Gw);
10395 if (pIemCpu->enmEffOpSize != IEMMODE_64BIT)
10396 return FNIEMOP_CALL(iemOp_mov_Gv_Ev);
10397 return FNIEMOP_CALL(iemOp_movsxd_Gv_Ev);
10398}
10399
10400
/** Opcode 0x8c - MOV Ev,Sw (store segment register). */
FNIEMOP_DEF(iemOp_mov_Ev_Sw)
{
    IEMOP_MNEMONIC("mov Ev,Sw");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * Check that the destination register exists. The REX.R prefix is ignored.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     * In that case, the operand size is respected and the upper bits are
     * cleared (starting with some pentium).
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Value);
                IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_32BIT:
                /* Zero-extend the 16-bit selector into the 32-bit destination. */
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Value);
                IEM_MC_FETCH_SREG_ZX_U32(u32Value, iSegReg);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;

            case IEMMODE_64BIT:
                /* Zero-extend the 16-bit selector into the 64-bit destination. */
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Value);
                IEM_MC_FETCH_SREG_ZX_U64(u64Value, iSegReg);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Value);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                break;
        }
    }
    else
    {
        /*
         * We're saving the register to memory.  The access is word sized
         * regardless of operand size prefixes.
         */
#if 0 /* not necessary */
        pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint16_t,  u16Value);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_SREG_U16(u16Value, iSegReg);
        IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Value);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10473
10474
10475
10476
/** Opcode 0x8d - LEA Gv,M (store effective address, no memory access). */
FNIEMOP_DEF(iemOp_lea_Gv_M)
{
    IEMOP_MNEMONIC("lea Gv,M");
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return IEMOP_RAISE_INVALID_OPCODE(); /* no register form */

    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            /* Truncate the effective address to 16 bits before storing. */
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_LOCAL(uint16_t, u16Cast);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_ASSIGN_TO_SMALLER(u16Cast, GCPtrEffSrc);
            IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u16Cast);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            /* Truncate the effective address to 32 bits before storing. */
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_LOCAL(uint32_t, u32Cast);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_ASSIGN_TO_SMALLER(u32Cast, GCPtrEffSrc);
            IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, u32Cast);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            /* Full 64-bit effective address stored as-is. */
            IEM_MC_BEGIN(0, 1);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_STORE_GREG_U64(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg, GCPtrEffSrc);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;
    }
    AssertFailedReturn(VERR_IEM_IPE_7);
}
10521
10522
/** Opcode 0x8e - MOV Sw,Ev (load segment register). */
FNIEMOP_DEF(iemOp_mov_Sw_Ev)
{
    IEMOP_MNEMONIC("mov Sw,Ev");

    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * The practical operand size is 16-bit.
     */
#if 0 /* not necessary */
    pIemCpu->enmEffOpSize = pIemCpu->enmDefOpSize = IEMMODE_16BIT;
#endif

    /*
     * Check that the destination register exists and can be used with this
     * instruction.  The REX.R prefix is ignored.  CS cannot be loaded this
     * way, hence the extra check.
     */
    uint8_t const iSegReg = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
    if (   iSegReg == X86_SREG_CS
        || iSegReg > X86_SREG_GS)
        return IEMOP_RAISE_INVALID_OPCODE(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /*
     * If rm is denoting a register, no more instruction bytes.
     */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Segment loads have side effects (descriptor checks etc.), so this
           defers to the C implementation iemCImpl_load_SReg. */
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t, u16Value, 1);
        IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    else
    {
        /*
         * We're loading the register from memory.  The access is word sized
         * regardless of operand size prefixes.
         */
        IEM_MC_BEGIN(2, 1);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSegReg, 0);
        IEM_MC_ARG(uint16_t, u16Value, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_CALL_CIMPL_2(iemCImpl_load_SReg, iSRegArg, u16Value);
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
10576
10577
/** Opcode 0x8f /0 - POP Ev. */
FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm)
{
    /* This bugger is rather annoying as it requires rSP to be updated before
       doing the effective address calculations.  Will eventually require a
       split between the R/M+SIB decoding and the effective address
       calculation - which is something that is required for any attempt at
       reusing this code for a recompiler.  It may also be good to have if we
       need to delay #UD exception caused by invalid lock prefixes.

       For now, we'll do a mostly safe interpreter-only implementation here. */
    /** @todo What's the deal with the 'reg' field and pop Ev?  Ignoring it for
     *        now until tests show it's checked.. */
    IEMOP_MNEMONIC("pop Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    /* Register access is relatively easy and can share code. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPopGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /*
     * Memory target.
     *
     * Intel says that RSP is incremented before it's used in any effective
     * address calculations.  This means some serious extra annoyance here since
     * we decode and calculate the effective address in one step and like to
     * delay committing registers till everything is done.
     *
     * So, we'll decode and calculate the effective address twice.  This will
     * require some recoding if turned into a recompiler.
     */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* The common code does this differently. */

#ifndef TST_IEM_CHECK_MC
    /* Calc effective address with modified ESP. */
    uint8_t const offOpcodeSaved = pIemCpu->offOpcode;
    RTGCPTR GCPtrEff;
    VBOXSTRICTRC rcStrict;
    rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    /* Rewind the decoder so the R/M byte can be consumed a second time below. */
    pIemCpu->offOpcode = offOpcodeSaved;

    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
    uint64_t const RspSaved = pCtx->rsp;
    /* Temporarily advance RSP by the operand size, redo the effective address
       calculation with the adjusted value, then restore RSP so the real pop
       below starts from the original stack pointer. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT: iemRegAddToRsp(pIemCpu, pCtx, 2); break;
        case IEMMODE_32BIT: iemRegAddToRsp(pIemCpu, pCtx, 4); break;
        case IEMMODE_64BIT: iemRegAddToRsp(pIemCpu, pCtx, 8); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    rcStrict = iemOpHlpCalcRmEffAddr(pIemCpu, bRm, 0, &GCPtrEff);
    Assert(rcStrict == VINF_SUCCESS); /* first pass succeeded, so this must too */
    pCtx->rsp = RspSaved;

    /* Perform the operation - this should be CImpl. */
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Value;
            rcStrict = iemMemStackPopU16Ex(pIemCpu, &u16Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU16(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u16Value);
            break;
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Value;
            rcStrict = iemMemStackPopU32Ex(pIemCpu, &u32Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU32(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u32Value);
            break;
        }

        case IEMMODE_64BIT:
        {
            uint64_t u64Value;
            rcStrict = iemMemStackPopU64Ex(pIemCpu, &u64Value, &TmpRsp);
            if (rcStrict == VINF_SUCCESS)
                rcStrict = iemMemStoreDataU64(pIemCpu, pIemCpu->iEffSeg, GCPtrEff, u64Value);
            break;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    /* Only commit the new RSP and advance RIP if both pop and store worked. */
    if (rcStrict == VINF_SUCCESS)
    {
        pCtx->rsp = TmpRsp.u;
        iemRegUpdateRipAndClearRF(pIemCpu);
    }
    return rcStrict;

#else
    return VERR_IEM_IPE_2;
#endif
}
10679
10680
10681/** Opcode 0x8f. */
10682FNIEMOP_DEF(iemOp_Grp1A)
10683{
10684 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
10685 if ((bRm & X86_MODRM_REG_MASK) == (0 << X86_MODRM_REG_SHIFT)) /* /0 */
10686 return FNIEMOP_CALL_1(iemOp_pop_Ev, bRm);
10687
10688 /* AMD has defined /1 thru /7 as XOP prefix (similar to three byte VEX). */
10689 /** @todo XOP decoding. */
10690 IEMOP_MNEMONIC("3-byte-xop");
10691 return IEMOP_RAISE_INVALID_OPCODE();
10692}
10693
10694
/**
 * Common 'xchg reg,rAX' helper.
 *
 * Exchanges rAX with the general register selected by @a iReg (extended by
 * REX.B), at the current effective operand size.  No flags are modified and
 * no IEM_MC memory/locking statements are involved since both operands are
 * registers.
 *
 * @param   iReg    The low three bits of the register to exchange with rAX.
 */
FNIEMOP_DEF_1(iemOpCommonXchgGRegRax, uint8_t, iReg)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    iReg |= pIemCpu->uRexB;
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint16_t, u16Tmp1);
            IEM_MC_LOCAL(uint16_t, u16Tmp2);
            IEM_MC_FETCH_GREG_U16(u16Tmp1, iReg);
            IEM_MC_FETCH_GREG_U16(u16Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp1);
            IEM_MC_STORE_GREG_U16(iReg, u16Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Tmp1);
            IEM_MC_LOCAL(uint32_t, u32Tmp2);
            IEM_MC_FETCH_GREG_U32(u32Tmp1, iReg);
            IEM_MC_FETCH_GREG_U32(u32Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp1);
            IEM_MC_STORE_GREG_U32(iReg, u32Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Tmp1);
            IEM_MC_LOCAL(uint64_t, u64Tmp2);
            IEM_MC_FETCH_GREG_U64(u64Tmp1, iReg);
            IEM_MC_FETCH_GREG_U64(u64Tmp2, X86_GREG_xAX);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp1);
            IEM_MC_STORE_GREG_U64(iReg, u64Tmp2);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10744
10745
10746/** Opcode 0x90. */
10747FNIEMOP_DEF(iemOp_nop)
10748{
10749 /* R8/R8D and RAX/EAX can be exchanged. */
10750 if (pIemCpu->fPrefixes & IEM_OP_PRF_REX_B)
10751 {
10752 IEMOP_MNEMONIC("xchg r8,rAX");
10753 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xAX);
10754 }
10755
10756 if (pIemCpu->fPrefixes & IEM_OP_PRF_LOCK)
10757 IEMOP_MNEMONIC("pause");
10758 else
10759 IEMOP_MNEMONIC("nop");
10760 IEM_MC_BEGIN(0, 0);
10761 IEM_MC_ADVANCE_RIP();
10762 IEM_MC_END();
10763 return VINF_SUCCESS;
10764}
10765
10766
/** Opcode 0x91 - XCHG rCX,rAX (operand size handled by the common worker). */
FNIEMOP_DEF(iemOp_xchg_eCX_eAX)
{
    IEMOP_MNEMONIC("xchg rCX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xCX);
}
10773
10774
/** Opcode 0x92 - XCHG rDX,rAX (operand size handled by the common worker). */
FNIEMOP_DEF(iemOp_xchg_eDX_eAX)
{
    IEMOP_MNEMONIC("xchg rDX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDX);
}
10781
10782
/** Opcode 0x93 - XCHG rBX,rAX (operand size handled by the common worker). */
FNIEMOP_DEF(iemOp_xchg_eBX_eAX)
{
    IEMOP_MNEMONIC("xchg rBX,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBX);
}
10789
10790
10791/** Opcode 0x94. */
10792FNIEMOP_DEF(iemOp_xchg_eSP_eAX)
10793{
10794 IEMOP_MNEMONIC("xchg rSX,rAX");
10795 return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSP);
10796}
10797
10798
/** Opcode 0x95 - XCHG rBP,rAX (operand size handled by the common worker). */
FNIEMOP_DEF(iemOp_xchg_eBP_eAX)
{
    IEMOP_MNEMONIC("xchg rBP,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xBP);
}
10805
10806
/** Opcode 0x96 - XCHG rSI,rAX (operand size handled by the common worker). */
FNIEMOP_DEF(iemOp_xchg_eSI_eAX)
{
    IEMOP_MNEMONIC("xchg rSI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xSI);
}
10813
10814
/** Opcode 0x97 - XCHG rDI,rAX (operand size handled by the common worker). */
FNIEMOP_DEF(iemOp_xchg_eDI_eAX)
{
    IEMOP_MNEMONIC("xchg rDI,rAX");
    return FNIEMOP_CALL_1(iemOpCommonXchgGRegRax, X86_GREG_xDI);
}
10821
10822
/** Opcode 0x98 - CBW/CWDE/CDQE: sign-extend the low half of rAX in place. */
FNIEMOP_DEF(iemOp_cbw)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            /* CBW: replicate AL bit 7 into AH. */
            IEMOP_MNEMONIC("cbw");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 7) {
                IEM_MC_OR_GREG_U16(X86_GREG_xAX, UINT16_C(0xff00));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U16(X86_GREG_xAX, UINT16_C(0x00ff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            /* CWDE: replicate AX bit 15 into the high word of EAX. */
            IEMOP_MNEMONIC("cwde");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
                IEM_MC_OR_GREG_U32(X86_GREG_xAX, UINT32_C(0xffff0000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U32(X86_GREG_xAX, UINT32_C(0x0000ffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            /* CDQE: replicate EAX bit 31 into the high dword of RAX. */
            IEMOP_MNEMONIC("cdqe");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
                IEM_MC_OR_GREG_U64(X86_GREG_xAX, UINT64_C(0xffffffff00000000));
            } IEM_MC_ELSE() {
                IEM_MC_AND_GREG_U64(X86_GREG_xAX, UINT64_C(0x00000000ffffffff));
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10868
10869
/** Opcode 0x99 - CWD/CDQ/CQO: fill rDX with the sign of rAX. */
FNIEMOP_DEF(iemOp_cwd)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            /* CWD: DX = sign of AX (bit 15). */
            IEMOP_MNEMONIC("cwd");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 15) {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, UINT16_C(0xffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            /* CDQ: EDX = sign of EAX (bit 31). */
            IEMOP_MNEMONIC("cdq");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 31) {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, UINT32_C(0xffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            /* CQO: RDX = sign of RAX (bit 63). */
            IEMOP_MNEMONIC("cqo");
            IEM_MC_BEGIN(0, 1);
            IEM_MC_IF_GREG_BIT_SET(X86_GREG_xAX, 63) {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, UINT64_C(0xffffffffffffffff));
            } IEM_MC_ELSE() {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xDX, 0);
            } IEM_MC_ENDIF();
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
10915
10916
/** Opcode 0x9a - CALLF Ap (far call with immediate selector:offset;
 *  invalid in 64-bit mode, hence IEMOP_HLP_NO_64BIT). */
FNIEMOP_DEF(iemOp_call_Ap)
{
    IEMOP_MNEMONIC("call Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far call C implementation. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(&offSeg);
    else
        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_callf, uSel, offSeg, pIemCpu->enmEffOpSize);
}
10933
10934
/** Opcode 0x9b (aka fwait) - checks for pending FPU exceptions / device
 *  not available, otherwise a no-op. */
FNIEMOP_DEF(iemOp_wait)
{
    IEMOP_MNEMONIC("wait");
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10948
10949
/** Opcode 0x9c - PUSHF/PUSHFD/PUSHFQ; deferred to the C implementation. */
FNIEMOP_DEF(iemOp_pushf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* stack ops default to 64-bit operand size in long mode */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_pushf, pIemCpu->enmEffOpSize);
}
10957
10958
/** Opcode 0x9d - POPF/POPFD/POPFQ; deferred to the C implementation. */
FNIEMOP_DEF(iemOp_popf_Fv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* stack ops default to 64-bit operand size in long mode */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_popf, pIemCpu->enmEffOpSize);
}
10966
10967
/** Opcode 0x9e - SAHF: load SF, ZF, AF, PF and CF from AH.
 *  In 64-bit mode this requires the CPUID LAHF/SAHF feature bit. */
FNIEMOP_DEF(iemOp_sahf)
{
    IEMOP_MNEMONIC("sahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fLahfSahf)
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint32_t, u32Flags);
    IEM_MC_LOCAL(uint32_t, EFlags);
    IEM_MC_FETCH_EFLAGS(EFlags);
    /* Without REX, register 4 as an 8-bit operand is AH, hence xSP here. */
    IEM_MC_FETCH_GREG_U8_ZX_U32(u32Flags, X86_GREG_xSP/*=AH*/);
    /* Keep only the status flags SAHF is allowed to set... */
    IEM_MC_AND_LOCAL_U32(u32Flags, X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_CF);
    /* ...and merge them into the low byte of EFLAGS, forcing reserved bit 1. */
    IEM_MC_AND_LOCAL_U32(EFlags, UINT32_C(0xffffff00));
    IEM_MC_OR_LOCAL_U32(u32Flags, X86_EFL_1);
    IEM_MC_OR_2LOCS_U32(EFlags, u32Flags);
    IEM_MC_COMMIT_EFLAGS(EFlags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
10990
10991
/** Opcode 0x9f - LAHF: load AH from the low byte of EFLAGS.
 *  In 64-bit mode this requires the CPUID LAHF/SAHF feature bit. */
FNIEMOP_DEF(iemOp_lahf)
{
    IEMOP_MNEMONIC("lahf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        && !IEM_GET_GUEST_CPU_FEATURES(pIemCpu)->fLahfSahf)
        return IEMOP_RAISE_INVALID_OPCODE();
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint8_t, u8Flags);
    IEM_MC_FETCH_EFLAGS_U8(u8Flags);
    /* Without REX, register 4 as an 8-bit operand is AH, hence xSP here. */
    IEM_MC_STORE_GREG_U8(X86_GREG_xSP/*=AH*/, u8Flags);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
11008
11009
/**
 * Macro used by iemOp_mov_Al_Ob, iemOp_mov_rAX_Ov, iemOp_mov_Ob_AL and
 * iemOp_mov_Ov_rAX to fetch the moffsXX bit of the opcode and fend of lock
 * prefixes.  Will return on failures.
 * @param a_GCPtrMemOff The variable to store the offset in.
 */
#define IEMOP_FETCH_MOFFS_XX(a_GCPtrMemOff) \
    do \
    { \
        /* The moffs width follows the effective ADDRESS size, not the operand size. */ \
        switch (pIemCpu->enmEffAddrMode) \
        { \
            case IEMMODE_16BIT: \
                IEM_OPCODE_GET_NEXT_U16_ZX_U64(&(a_GCPtrMemOff)); \
                break; \
            case IEMMODE_32BIT: \
                IEM_OPCODE_GET_NEXT_U32_ZX_U64(&(a_GCPtrMemOff)); \
                break; \
            case IEMMODE_64BIT: \
                IEM_OPCODE_GET_NEXT_U64(&(a_GCPtrMemOff)); \
                break; \
            IEM_NOT_REACHED_DEFAULT_CASE_RET(); \
        } \
        IEMOP_HLP_NO_LOCK_PREFIX(); \
    } while (0)
11034
11035/** Opcode 0xa0. */
11036FNIEMOP_DEF(iemOp_mov_Al_Ob)
11037{
11038 /*
11039 * Get the offset and fend of lock prefixes.
11040 */
11041 RTGCPTR GCPtrMemOff;
11042 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
11043
11044 /*
11045 * Fetch AL.
11046 */
11047 IEM_MC_BEGIN(0,1);
11048 IEM_MC_LOCAL(uint8_t, u8Tmp);
11049 IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
11050 IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
11051 IEM_MC_ADVANCE_RIP();
11052 IEM_MC_END();
11053 return VINF_SUCCESS;
11054}
11055
11056
/** Opcode 0xa1 - MOV rAX,Ov (load rAX from a moffs address). */
FNIEMOP_DEF(iemOp_mov_rAX_Ov)
{
    /*
     * Get the offset and fend of lock prefixes.
     */
    IEMOP_MNEMONIC("mov rAX,Ov");
    RTGCPTR GCPtrMemOff;
    IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);

    /*
     * Fetch rAX, sized by the effective operand size.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint16_t, u16Tmp);
            IEM_MC_FETCH_MEM_U16(u16Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint32_t, u32Tmp);
            IEM_MC_FETCH_MEM_U32(u32Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U32(X86_GREG_xAX, u32Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,1);
            IEM_MC_LOCAL(uint64_t, u64Tmp);
            IEM_MC_FETCH_MEM_U64(u64Tmp, pIemCpu->iEffSeg, GCPtrMemOff);
            IEM_MC_STORE_GREG_U64(X86_GREG_xAX, u64Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
11102
11103
11104/** Opcode 0xa2. */
11105FNIEMOP_DEF(iemOp_mov_Ob_AL)
11106{
11107 /*
11108 * Get the offset and fend of lock prefixes.
11109 */
11110 RTGCPTR GCPtrMemOff;
11111 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
11112
11113 /*
11114 * Store AL.
11115 */
11116 IEM_MC_BEGIN(0,1);
11117 IEM_MC_LOCAL(uint8_t, u8Tmp);
11118 IEM_MC_FETCH_GREG_U8(u8Tmp, X86_GREG_xAX);
11119 IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrMemOff, u8Tmp);
11120 IEM_MC_ADVANCE_RIP();
11121 IEM_MC_END();
11122 return VINF_SUCCESS;
11123}
11124
11125
11126/** Opcode 0xa3. */
11127FNIEMOP_DEF(iemOp_mov_Ov_rAX)
11128{
11129 /*
11130 * Get the offset and fend of lock prefixes.
11131 */
11132 RTGCPTR GCPtrMemOff;
11133 IEMOP_FETCH_MOFFS_XX(GCPtrMemOff);
11134
11135 /*
11136 * Store rAX.
11137 */
11138 switch (pIemCpu->enmEffOpSize)
11139 {
11140 case IEMMODE_16BIT:
11141 IEM_MC_BEGIN(0,1);
11142 IEM_MC_LOCAL(uint16_t, u16Tmp);
11143 IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
11144 IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrMemOff, u16Tmp);
11145 IEM_MC_ADVANCE_RIP();
11146 IEM_MC_END();
11147 return VINF_SUCCESS;
11148
11149 case IEMMODE_32BIT:
11150 IEM_MC_BEGIN(0,1);
11151 IEM_MC_LOCAL(uint32_t, u32Tmp);
11152 IEM_MC_FETCH_GREG_U32(u32Tmp, X86_GREG_xAX);
11153 IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrMemOff, u32Tmp);
11154 IEM_MC_ADVANCE_RIP();
11155 IEM_MC_END();
11156 return VINF_SUCCESS;
11157
11158 case IEMMODE_64BIT:
11159 IEM_MC_BEGIN(0,1);
11160 IEM_MC_LOCAL(uint64_t, u64Tmp);
11161 IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
11162 IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrMemOff, u64Tmp);
11163 IEM_MC_ADVANCE_RIP();
11164 IEM_MC_END();
11165 return VINF_SUCCESS;
11166
11167 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11168 }
11169}
11170
/**
 * Macro used by iemOp_movsb_Xb_Yb and iemOp_movswd_Xv_Yv.
 *
 * Emits one non-repeated MOVS step for the given element width (ValBits)
 * and address size (AddrBits): load from effective-seg:[xSI], store to
 * ES:[xDI], then advance or retreat both index registers by the element
 * size depending on EFLAGS.DF.
 */
#define IEM_MOVS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(0, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
    IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END();
11189
/**
 * Opcode 0xa4 - movsb Xb,Yb.
 *
 * Byte string move; REP/REPNE forms are deferred to the C implementation,
 * the plain form shares IEM_MOVS_CASE with movsw/movsd/movsq.
 */
FNIEMOP_DEF(iemOp_movsb_Xb_Yb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    /* Both REPZ and REPNZ take this path; the same rep_movs worker is used
       for either prefix bit. */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movsb Xb,Yb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op8_addr64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movsb Xb,Yb");

    /*
     * Sharing case implementation with movs[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_MOVS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_MOVS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_MOVS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11223
11224
/**
 * Opcode 0xa5 - movsw/movsd/movsq Xv,Yv.
 *
 * Word/dword/qword string move; REP/REPNE forms are deferred to the C
 * implementation, the plain forms share IEM_MOVS_CASE with movsb.
 */
FNIEMOP_DEF(iemOp_movswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep movs Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* No break after this inner switch: every case (incl. the
                   default) returns, so falling into IEMMODE_64BIT is
                   unreachable. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    /* 16-bit addressing with a 64-bit operand cannot be encoded. */
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_6);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_rep_movs_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("movs Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with movsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_MOVS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_1); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_MOVS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_MOVS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11307
11308#undef IEM_MOVS_CASE
11309
/**
 * Macro used by iemOp_cmpsb_Xb_Yb and iemOp_cmpswd_Xv_Yv.
 *
 * Emits one non-repeated CMPS step: fetch operand 1 from
 * effective-seg:[xSI] and operand 2 from ES:[xDI], run the cmp assembly
 * worker to set EFLAGS (the destination value is only read, cmp does not
 * write it back), then advance or retreat both index registers per
 * EFLAGS.DF.
 */
#define IEM_CMPS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(3, 3); \
    IEM_MC_ARG(uint##ValBits##_t *, puValue1, 0); \
    IEM_MC_ARG(uint##ValBits##_t, uValue2, 1); \
    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue1); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue1, pIemCpu->iEffSeg, uAddr); \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue2, X86_SREG_ES, uAddr); \
    IEM_MC_REF_LOCAL(puValue1, uValue1); \
    IEM_MC_REF_EFLAGS(pEFlags); \
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puValue1, uValue2, pEFlags); \
    \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END(); \

11337/** Opcode 0xa6. */
11338FNIEMOP_DEF(iemOp_cmpsb_Xb_Yb)
11339{
11340 IEMOP_HLP_NO_LOCK_PREFIX();
11341
11342 /*
11343 * Use the C implementation if a repeat prefix is encountered.
11344 */
11345 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
11346 {
11347 IEMOP_MNEMONIC("repe cmps Xb,Yb");
11348 switch (pIemCpu->enmEffAddrMode)
11349 {
11350 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr16, pIemCpu->iEffSeg);
11351 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr32, pIemCpu->iEffSeg);
11352 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op8_addr64, pIemCpu->iEffSeg);
11353 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11354 }
11355 }
11356 if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
11357 {
11358 IEMOP_MNEMONIC("repe cmps Xb,Yb");
11359 switch (pIemCpu->enmEffAddrMode)
11360 {
11361 case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr16, pIemCpu->iEffSeg);
11362 case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr32, pIemCpu->iEffSeg);
11363 case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op8_addr64, pIemCpu->iEffSeg);
11364 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11365 }
11366 }
11367 IEMOP_MNEMONIC("cmps Xb,Yb");
11368
11369 /*
11370 * Sharing case implementation with cmps[wdq] below.
11371 */
11372 switch (pIemCpu->enmEffAddrMode)
11373 {
11374 case IEMMODE_16BIT: IEM_CMPS_CASE(8, 16); break;
11375 case IEMMODE_32BIT: IEM_CMPS_CASE(8, 32); break;
11376 case IEMMODE_64BIT: IEM_CMPS_CASE(8, 64); break;
11377 IEM_NOT_REACHED_DEFAULT_CASE_RET();
11378 }
11379 return VINF_SUCCESS;
11380
11381}
11382
11383
/**
 * Opcode 0xa7 - cmpsw/cmpsd/cmpsq Xv,Yv.
 *
 * Word/dword/qword string compare; REPE and REPNE forms defer to their
 * respective C implementations, the plain forms share IEM_CMPS_CASE with
 * cmpsb.
 */
FNIEMOP_DEF(iemOp_cmpswd_Xv_Yv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* No break needed: all cases of the inner switch return. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    /* 16-bit addressing with a 64-bit operand cannot be encoded. */
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_4);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repe_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne cmps Xv,Yv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op16_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* No break needed: all cases of the inner switch return. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op32_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_2);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_repne_cmps_op64_addr64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }

    IEMOP_MNEMONIC("cmps Xv,Yv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with cmpsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_CMPS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_1); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_CMPS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_CMPS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;

}
11502
11503#undef IEM_CMPS_CASE
11504
/**
 * Opcode 0xa8 - test AL,Ib.
 *
 * Dispatches to the common AL,Ib binary-operator helper with the test
 * worker table; AF is left undefined for verification purposes.
 */
FNIEMOP_DEF(iemOp_test_AL_Ib)
{
    IEMOP_MNEMONIC("test al,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_AL_Ib, &g_iemAImpl_test);
}
11512
11513
/**
 * Opcode 0xa9 - test rAX,Iz.
 *
 * Dispatches to the common rAX,Iz binary-operator helper with the test
 * worker table; AF is left undefined for verification purposes.
 */
FNIEMOP_DEF(iemOp_test_eAX_Iz)
{
    IEMOP_MNEMONIC("test rAX,Iz");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);
    return FNIEMOP_CALL_1(iemOpHlpBinaryOperator_rAX_Iz, &g_iemAImpl_test);
}
11521
11522
/**
 * Macro used by iemOp_stosb_Yb_AL and iemOp_stoswd_Yv_eAX.
 *
 * Emits one non-repeated STOS step: store AL/AX/EAX/RAX to ES:[xDI] and
 * advance or retreat xDI by the element size depending on EFLAGS.DF.
 * STOS only writes through xDI; xSI is untouched.
 */
#define IEM_STOS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(0, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    IEM_MC_FETCH_GREG_U##ValBits(uValue, X86_GREG_xAX); \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
    IEM_MC_STORE_MEM_U##ValBits(X86_SREG_ES, uAddr, uValue); \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END(); \

/**
 * Opcode 0xaa - stosb Yb,AL.
 *
 * Byte string store; REP/REPNE forms are deferred to the C implementation,
 * the plain form shares IEM_STOS_CASE with stosw/stosd/stosq.
 */
FNIEMOP_DEF(iemOp_stosb_Yb_AL)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    /* Both REPZ and REPNZ take this path; the same stos worker is used. */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep stos Yb,al");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("stos Yb,al");

    /*
     * Sharing case implementation with stos[wdq] below.
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_STOS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_STOS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_STOS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11572
11573
/**
 * Opcode 0xab - stosw/stosd/stosq Yv,rAX.
 *
 * Word/dword/qword string store; REP/REPNE forms are deferred to the C
 * implementation, the plain forms share IEM_STOS_CASE with stosb.
 */
FNIEMOP_DEF(iemOp_stoswd_Yv_eAX)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep stos Yv,rAX");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_ax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* No break needed: all cases of the inner switch return. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_eax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    /* 16-bit addressing with a 64-bit operand cannot be encoded. */
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_9);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_stos_rax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("stos Yv,rAX");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with stosb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_STOS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_STOS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_STOS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_STOS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_1); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_STOS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_STOS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11656
11657#undef IEM_STOS_CASE
11658
/**
 * Macro used by iemOp_lodsb_AL_Xb and iemOp_lodswd_eAX_Xv.
 *
 * Emits one non-repeated LODS step: load from effective-seg:[xSI] into
 * AL/AX/EAX/RAX and advance or retreat xSI by the element size depending
 * on EFLAGS.DF.  LODS only reads through xSI; xDI is untouched.
 */
#define IEM_LODS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(0, 2); \
    IEM_MC_LOCAL(uint##ValBits##_t, uValue); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xSI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue, pIemCpu->iEffSeg, uAddr); \
    IEM_MC_STORE_GREG_U##ValBits(X86_GREG_xAX, uValue); \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xSI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END();
11674
/**
 * Opcode 0xac - lodsb AL,Xb.
 *
 * Byte string load; REP/REPNE forms are deferred to the C implementation,
 * the plain form shares IEM_LODS_CASE with lodsw/lodsd/lodsq.
 */
FNIEMOP_DEF(iemOp_lodsb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    /* Both REPZ and REPNZ take this path; the same lods worker is used. */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lodsb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m16, pIemCpu->iEffSeg);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m32, pIemCpu->iEffSeg);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_al_m64, pIemCpu->iEffSeg);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lodsb al,Xb");

    /*
     * Sharing case implementation with lods[wdq] below.  (The old comment
     * said "stos[wdq]" -- copy-paste slip.)
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_LODS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_LODS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_LODS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11708
11709
/**
 * Opcode 0xad - lodsw/lodsd/lodsq rAX,Xv.
 *
 * Word/dword/qword string load; REP/REPNE forms are deferred to the C
 * implementation, the plain forms share IEM_LODS_CASE with lodsb.
 */
FNIEMOP_DEF(iemOp_lodswd_eAX_Xv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ))
    {
        IEMOP_MNEMONIC("rep lods rAX,Xv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_ax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* No break needed: all cases of the inner switch return. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m16, pIemCpu->iEffSeg);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_eax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    /* 16-bit addressing with a 64-bit operand cannot be encoded. */
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_7);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m32, pIemCpu->iEffSeg);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_lods_rax_m64, pIemCpu->iEffSeg);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("lods rAX,Xv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with lodsb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_LODS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_LODS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_LODS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_LODS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_1); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_LODS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_LODS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11792
11793#undef IEM_LODS_CASE
11794
/**
 * Macro used by iemOp_scasb_AL_Xb and iemOp_scaswd_eAX_Xv.
 *
 * Emits one non-repeated SCAS step: compare AL/AX/EAX/RAX against
 * ES:[xDI] via the cmp assembly worker (flags only; rAX is passed by
 * reference but cmp does not write it back), then advance or retreat xDI
 * by the element size depending on EFLAGS.DF.
 */
#define IEM_SCAS_CASE(ValBits, AddrBits) \
    IEM_MC_BEGIN(3, 2); \
    IEM_MC_ARG(uint##ValBits##_t *, puRax, 0); \
    IEM_MC_ARG(uint##ValBits##_t, uValue, 1); \
    IEM_MC_ARG(uint32_t *, pEFlags, 2); \
    IEM_MC_LOCAL(RTGCPTR, uAddr); \
    \
    IEM_MC_FETCH_GREG_U##AddrBits##_ZX_U64(uAddr, X86_GREG_xDI); \
    IEM_MC_FETCH_MEM_U##ValBits(uValue, X86_SREG_ES, uAddr); \
    IEM_MC_REF_GREG_U##ValBits(puRax, X86_GREG_xAX); \
    IEM_MC_REF_EFLAGS(pEFlags); \
    IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_cmp_u##ValBits, puRax, uValue, pEFlags); \
    \
    IEM_MC_IF_EFL_BIT_SET(X86_EFL_DF) { \
        IEM_MC_SUB_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ELSE() { \
        IEM_MC_ADD_GREG_U##AddrBits(X86_GREG_xDI, ValBits / 8); \
    } IEM_MC_ENDIF(); \
    IEM_MC_ADVANCE_RIP(); \
    IEM_MC_END();
11816
/**
 * Opcode 0xae - scasb AL,Xb.
 *
 * Byte string scan; REPE and REPNE forms defer to their respective C
 * implementations, the plain form shares IEM_SCAS_CASE with
 * scasw/scasd/scasq.
 */
FNIEMOP_DEF(iemOp_scasb_AL_Xb)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne scasb al,Xb");
        switch (pIemCpu->enmEffAddrMode)
        {
            case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m16);
            case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m32);
            case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_al_m64);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("scasb al,Xb");

    /*
     * Sharing case implementation with scas[wdq] below.  (The old comment
     * said "stos[wdq]" -- copy-paste slip.)
     */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT: IEM_SCAS_CASE(8, 16); break;
        case IEMMODE_32BIT: IEM_SCAS_CASE(8, 32); break;
        case IEMMODE_64BIT: IEM_SCAS_CASE(8, 64); break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;
}
11861
11862
/**
 * Opcode 0xaf - scasw/scasd/scasq rAX,Xv.
 *
 * Word/dword/qword string scan; REPE and REPNE forms defer to their
 * respective C implementations, the plain forms share IEM_SCAS_CASE with
 * scasb.
 */
FNIEMOP_DEF(iemOp_scaswd_eAX_Xv)
{
    IEMOP_HLP_NO_LOCK_PREFIX();

    /*
     * Use the C implementation if a repeat prefix is encountered.
     */
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPZ)
    {
        IEMOP_MNEMONIC("repe scas rAX,Xv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_ax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* No break needed: all cases of the inner switch return. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_eax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_6); /** @todo Is this wrong?  We can do 16-bit addressing in 64-bit mode, but not 32-bit, right? */
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repe_scas_rax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    if (pIemCpu->fPrefixes & IEM_OP_PRF_REPNZ)
    {
        IEMOP_MNEMONIC("repne scas rAX,Xv");
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_ax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case IEMMODE_32BIT:
                /* No break needed: all cases of the inner switch return. */
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m16);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_eax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            case IEMMODE_64BIT:
                switch (pIemCpu->enmEffAddrMode)
                {
                    case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_5);
                    case IEMMODE_32BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m32);
                    case IEMMODE_64BIT: return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_repne_scas_rax_m64);
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    IEMOP_MNEMONIC("scas rAX,Xv");

    /*
     * Annoying double switch here.
     * Using ugly macro for implementing the cases, sharing it with scasb.
     */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_SCAS_CASE(16, 16); break;
                case IEMMODE_32BIT: IEM_SCAS_CASE(16, 32); break;
                case IEMMODE_64BIT: IEM_SCAS_CASE(16, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_32BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: IEM_SCAS_CASE(32, 16); break;
                case IEMMODE_32BIT: IEM_SCAS_CASE(32, 32); break;
                case IEMMODE_64BIT: IEM_SCAS_CASE(32, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;

        case IEMMODE_64BIT:
            switch (pIemCpu->enmEffAddrMode)
            {
                case IEMMODE_16BIT: AssertFailedReturn(VERR_IEM_IPE_1); /* cannot be encoded */ break;
                case IEMMODE_32BIT: IEM_SCAS_CASE(64, 32); break;
                case IEMMODE_64BIT: IEM_SCAS_CASE(64, 64); break;
                IEM_NOT_REACHED_DEFAULT_CASE_RET();
            }
            break;
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
    return VINF_SUCCESS;

}
11978
11979#undef IEM_SCAS_CASE
11980
/**
 * Common 'mov r8, imm8' helper.
 *
 * Decodes the imm8 byte, rejects LOCK, and stores the immediate into the
 * 8-bit register identified by iReg (caller has already folded in any
 * REX.B extension).
 *
 * @param iReg The destination register index (X86_GREG_xXX | uRexB).
 */
FNIEMOP_DEF_1(iemOpCommonMov_r8_Ib, uint8_t, iReg)
{
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL_CONST(uint8_t, u8Value,/*=*/ u8Imm);
    IEM_MC_STORE_GREG_U8(iReg, u8Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();

    return VINF_SUCCESS;
}
11997
11998
/** Opcode 0xb0 - mov AL,Ib (reg index 0; R8L with REX.B). */
FNIEMOP_DEF(iemOp_mov_AL_Ib)
{
    IEMOP_MNEMONIC("mov AL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xAX | pIemCpu->uRexB);
}
12005
12006
/** Opcode 0xb1 - mov CL,Ib (reg index 1; R9L with REX.B). */
FNIEMOP_DEF(iemOp_CL_Ib)
{
    IEMOP_MNEMONIC("mov CL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xCX | pIemCpu->uRexB);
}
12013
12014
/** Opcode 0xb2 - mov DL,Ib (reg index 2; R10L with REX.B). */
FNIEMOP_DEF(iemOp_DL_Ib)
{
    IEMOP_MNEMONIC("mov DL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDX | pIemCpu->uRexB);
}
12021
12022
/** Opcode 0xb3 - mov BL,Ib (reg index 3; R11L with REX.B). */
FNIEMOP_DEF(iemOp_BL_Ib)
{
    IEMOP_MNEMONIC("mov BL,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBX | pIemCpu->uRexB);
}
12029
12030
/** Opcode 0xb4 - mov AH,Ib.  Register index 4: AH without a REX prefix,
 *  SPL (or R12L with REX.B) when a REX prefix is present. */
FNIEMOP_DEF(iemOp_mov_AH_Ib)
{
    IEMOP_MNEMONIC("mov AH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSP | pIemCpu->uRexB);
}
12037
12038
/** Opcode 0xb5 - mov CH,Ib.  Register index 5: CH without a REX prefix,
 *  BPL (or R13L with REX.B) when a REX prefix is present. */
FNIEMOP_DEF(iemOp_CH_Ib)
{
    IEMOP_MNEMONIC("mov CH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xBP | pIemCpu->uRexB);
}
12045
12046
/** Opcode 0xb6 - mov DH,Ib.  Register index 6: DH without a REX prefix,
 *  SIL (or R14L with REX.B) when a REX prefix is present. */
FNIEMOP_DEF(iemOp_DH_Ib)
{
    IEMOP_MNEMONIC("mov DH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xSI | pIemCpu->uRexB);
}
12053
12054
/** Opcode 0xb7 - mov BH,Ib.  Register index 7: BH without a REX prefix,
 *  DIL (or R15L with REX.B) when a REX prefix is present. */
FNIEMOP_DEF(iemOp_BH_Ib)
{
    IEMOP_MNEMONIC("mov BH,Ib");
    return FNIEMOP_CALL_1(iemOpCommonMov_r8_Ib, X86_GREG_xDI | pIemCpu->uRexB);
}
12061
12062
12063/**
12064 * Common 'mov regX,immX' helper.
12065 */
12066FNIEMOP_DEF_1(iemOpCommonMov_Rv_Iv, uint8_t, iReg)
12067{
12068 switch (pIemCpu->enmEffOpSize)
12069 {
12070 case IEMMODE_16BIT:
12071 {
12072 uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
12073 IEMOP_HLP_NO_LOCK_PREFIX();
12074
12075 IEM_MC_BEGIN(0, 1);
12076 IEM_MC_LOCAL_CONST(uint16_t, u16Value,/*=*/ u16Imm);
12077 IEM_MC_STORE_GREG_U16(iReg, u16Value);
12078 IEM_MC_ADVANCE_RIP();
12079 IEM_MC_END();
12080 break;
12081 }
12082
12083 case IEMMODE_32BIT:
12084 {
12085 uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
12086 IEMOP_HLP_NO_LOCK_PREFIX();
12087
12088 IEM_MC_BEGIN(0, 1);
12089 IEM_MC_LOCAL_CONST(uint32_t, u32Value,/*=*/ u32Imm);
12090 IEM_MC_STORE_GREG_U32(iReg, u32Value);
12091 IEM_MC_ADVANCE_RIP();
12092 IEM_MC_END();
12093 break;
12094 }
12095 case IEMMODE_64BIT:
12096 {
12097 uint64_t u64Imm; IEM_OPCODE_GET_NEXT_U64(&u64Imm); /* 64-bit immediate! */
12098 IEMOP_HLP_NO_LOCK_PREFIX();
12099
12100 IEM_MC_BEGIN(0, 1);
12101 IEM_MC_LOCAL_CONST(uint64_t, u64Value,/*=*/ u64Imm);
12102 IEM_MC_STORE_GREG_U64(iReg, u64Value);
12103 IEM_MC_ADVANCE_RIP();
12104 IEM_MC_END();
12105 break;
12106 }
12107 }
12108
12109 return VINF_SUCCESS;
12110}
12111
12112
/** Opcode 0xb8 - mov rAX,Iv (R8 with REX.B). */
FNIEMOP_DEF(iemOp_eAX_Iv)
{
    IEMOP_MNEMONIC("mov rAX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xAX | pIemCpu->uRexB);
}
12119
12120
/** Opcode 0xb9 - mov rCX,Iv (R9 with REX.B). */
FNIEMOP_DEF(iemOp_eCX_Iv)
{
    IEMOP_MNEMONIC("mov rCX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xCX | pIemCpu->uRexB);
}
12127
12128
/** Opcode 0xba - mov rDX,Iv (R10 with REX.B). */
FNIEMOP_DEF(iemOp_eDX_Iv)
{
    IEMOP_MNEMONIC("mov rDX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDX | pIemCpu->uRexB);
}
12135
12136
/** Opcode 0xbb - mov rBX,Iv (R11 with REX.B). */
FNIEMOP_DEF(iemOp_eBX_Iv)
{
    IEMOP_MNEMONIC("mov rBX,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBX | pIemCpu->uRexB);
}
12143
12144
/** Opcode 0xbc - mov rSP,Iv (R12 with REX.B). */
FNIEMOP_DEF(iemOp_eSP_Iv)
{
    IEMOP_MNEMONIC("mov rSP,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSP | pIemCpu->uRexB);
}
12151
12152
/** Opcode 0xbd - mov rBP,Iv (R13 with REX.B). */
FNIEMOP_DEF(iemOp_eBP_Iv)
{
    IEMOP_MNEMONIC("mov rBP,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xBP | pIemCpu->uRexB);
}
12159
12160
/** Opcode 0xbe - mov rSI,Iv (R14 with REX.B). */
FNIEMOP_DEF(iemOp_eSI_Iv)
{
    IEMOP_MNEMONIC("mov rSI,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xSI | pIemCpu->uRexB);
}
12167
12168
/** Opcode 0xbf - mov rDI,Iv (R15 with REX.B). */
FNIEMOP_DEF(iemOp_eDI_Iv)
{
    IEMOP_MNEMONIC("mov rDI,IV");
    return FNIEMOP_CALL_1(iemOpCommonMov_Rv_Iv, X86_GREG_xDI | pIemCpu->uRexB);
}
12175
12176
/** Opcode 0xc0 - Group 2 byte shifts/rotates with imm8 count (186+).
 *  The ModR/M reg field selects rol/ror/rcl/rcr/shl/shr/sar; /6 is invalid. */
FNIEMOP_DEF(iemOp_Grp2_Eb_Ib)
{
    IEMOP_HLP_MIN_186();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF and AF are architecturally undefined after these operations. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
        IEM_MC_ARG(uint32_t *, pEFlags, 2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *, pu8Dst, 0);
        IEM_MC_ARG(uint8_t, cShiftArg, 1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        /* Note: imm8 follows the ModR/M+displacement bytes, hence the cbImm=1 hint. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEM_MC_ASSIGN(cShiftArg, cShift);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12236
12237
/** Opcode 0xc1 - Group 2 word/dword/qword shifts/rotates with imm8 count
 *  (186+).  The ModR/M reg field selects the operation; /6 is invalid. */
FNIEMOP_DEF(iemOp_Grp2_Ev_Ib)
{
    IEMOP_HLP_MIN_186();
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,Ib"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,Ib"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,Ib"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,Ib"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,Ib"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,Ib"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,Ib"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF and AF are architecturally undefined after these operations. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                /* 32-bit register writes clear the high dword of the 64-bit GPR. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG_CONST(uint8_t, cShiftArg, cShift, 1);
                IEM_MC_ARG(uint32_t *, pEFlags, 2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* imm8 follows the ModR/M+displacement bytes, hence the cbImm=1 hint. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *, pu32Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
                IEM_MC_ARG(uint8_t, cShiftArg, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
                uint8_t cShift; IEM_OPCODE_GET_NEXT_U8(&cShift);
                IEM_MC_ASSIGN(cShiftArg, cShift);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12375
12376
/** Opcode 0xc2 - retn Iw: near return, popping Iw extra bytes off the stack. */
FNIEMOP_DEF(iemOp_retn_Iw)
{
    IEMOP_MNEMONIC("retn Iw");
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Near return defaults to 64-bit operand size in long mode. */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, u16Imm);
}
12386
12387
/** Opcode 0xc3 - retn: near return (no extra stack bytes popped). */
FNIEMOP_DEF(iemOp_retn)
{
    IEMOP_MNEMONIC("retn");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE(); /* Near return defaults to 64-bit operand size in long mode. */
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retn, pIemCpu->enmEffOpSize, 0);
}
12396
12397
/** Opcode 0xc4 - LES Gv,Mp, doubling as the 2-byte VEX prefix escape. */
FNIEMOP_DEF(iemOp_les_Gv_Mp_vex2)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
        || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        IEMOP_MNEMONIC("2-byte-vex");
        /* The LES instruction is invalid in 64-bit mode.  In legacy and
           compatibility mode it is invalid with MOD=3.
           The use as a VEX prefix is made possible by assigning the inverted
           REX.R to the top MOD bit, and the top bit in the inverted register
           specifier to the bottom MOD bit, thereby effectively limiting 32-bit
           to accessing registers 0..7 in this VEX form. */
        /** @todo VEX: Just use new tables for it. */
        return IEMOP_RAISE_INVALID_OPCODE(); /* VEX decoding not implemented yet. */
    }
    IEMOP_MNEMONIC("les Gv,Mp");
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_ES, bRm);
}
12418
12419
/** Opcode 0xc5 - LDS Gv,Mp, doubling as the 3-byte VEX prefix escape. */
FNIEMOP_DEF(iemOp_lds_Gv_Mp_vex3)
{
    /* The LDS instruction is invalid in 64-bit mode.  In legacy and
       compatibility mode it is invalid with MOD=3.
       The use as a VEX prefix is made possible by assigning the inverted
       REX.R and REX.X to the two MOD bits, since the REX bits are ignored
       outside of 64-bit mode.  VEX is not available in real or v86 mode. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if (pIemCpu->enmCpuMode != IEMMODE_64BIT)
    {
        if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        {
            IEMOP_MNEMONIC("lds Gv,Mp");
            return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_DS, bRm);
        }
        IEMOP_HLP_NO_REAL_OR_V86_MODE(); /* MOD=3 => VEX, which raises #UD in real/v86 mode. */
    }

    IEMOP_MNEMONIC("3-byte-vex");
    /** @todo Test when exactly the VEX conformance checks kick in during
     *        instruction decoding and fetching (using \#PF). */
    /* Consume the two VEX payload bytes and the actual opcode byte; decoding
       of them is not implemented yet (see todo below). */
    uint8_t bVex1;   IEM_OPCODE_GET_NEXT_U8(&bVex1);
    uint8_t bVex2;   IEM_OPCODE_GET_NEXT_U8(&bVex2);
    uint8_t bOpcode; IEM_OPCODE_GET_NEXT_U8(&bOpcode);
#if 0 /* will make sense of this next week... */
    if (   !(pIemCpu->fPrefixes & (IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX))
        &&
        )
    {

    }
#endif

    /** @todo VEX: Just use new tables for it. */
    return IEMOP_RAISE_INVALID_OPCODE();
}
12457
12458
/** Opcode 0xc6 - Group 11: mov Eb,Ib is the only valid encoding (/0). */
FNIEMOP_DEF(iemOp_Grp11_Eb_Ib)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Eb,Ib in this group. */
        return IEMOP_RAISE_INVALID_OPCODE();
    IEMOP_MNEMONIC("mov Eb,Ib");

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_BEGIN(0, 0);
        IEM_MC_STORE_GREG_U8((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u8Imm);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        /* imm8 follows the ModR/M+displacement bytes, hence the cbImm=1 hint. */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_STORE_MEM_U8(pIemCpu->iEffSeg, GCPtrEffDst, u8Imm);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12490
12491
/** Opcode 0xc7 - Group 11: mov Ev,Iz is the only valid encoding (/0).
 *  In 64-bit operand size the imm32 is sign-extended to 64 bits. */
FNIEMOP_DEF(iemOp_Grp11_Ev_Iz)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    if ((bRm & X86_MODRM_REG_MASK) != (0 << X86_MODRM_REG_SHIFT)) /* only mov Ev,Iz in this group. */
        return IEMOP_RAISE_INVALID_OPCODE();
    IEMOP_MNEMONIC("mov Ev,Iz");

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 0);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_STORE_GREG_U16((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u16Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 0);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_STORE_GREG_U32((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u32Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 0);
                /* imm32 sign-extended to 64 bits - no 64-bit immediate form. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_STORE_GREG_U64((bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB, u64Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                /* The immediate follows the ModR/M+displacement bytes; the
                   cbImm hint tells the effective address calculation how many
                   immediate bytes are still outstanding. */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_STORE_MEM_U32(pIemCpu->iEffSeg, GCPtrEffDst, u32Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4); /* imm32, not imm64. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_STORE_MEM_U64(pIemCpu->iEffSeg, GCPtrEffDst, u64Imm);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12572
12573
12574
12575
/** Opcode 0xc8 - enter Iw,Ib: create a stack frame of Iw bytes with Ib
 *  nesting levels (186+). */
FNIEMOP_DEF(iemOp_enter_Iw_Ib)
{
    IEMOP_MNEMONIC("enter Iw,Ib");
    IEMOP_HLP_MIN_186();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    IEMOP_HLP_NO_LOCK_PREFIX();
    uint16_t cbFrame;        IEM_OPCODE_GET_NEXT_U16(&cbFrame);
    uint8_t  u8NestingLevel; IEM_OPCODE_GET_NEXT_U8(&u8NestingLevel);
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_enter, pIemCpu->enmEffOpSize, cbFrame, u8NestingLevel);
}
12587
12588
12589/** Opcode 0xc9. */
12590FNIEMOP_DEF(iemOp_leave)
12591{
12592 IEMOP_MNEMONIC("retn");
12593 IEMOP_HLP_MIN_186();
12594 IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
12595 IEMOP_HLP_NO_LOCK_PREFIX();
12596 return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_leave, pIemCpu->enmEffOpSize);
12597}
12598
12599
/** Opcode 0xca - retf Iw: far return, popping Iw extra bytes off the stack. */
FNIEMOP_DEF(iemOp_retf_Iw)
{
    IEMOP_MNEMONIC("retf Iw");
    uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, u16Imm);
}
12609
12610
/** Opcode 0xcb - retf: far return (no extra stack bytes popped). */
FNIEMOP_DEF(iemOp_retf)
{
    IEMOP_MNEMONIC("retf");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_retf, pIemCpu->enmEffOpSize, 0);
}
12619
12620
/** Opcode 0xcc - int3: single-byte breakpoint, raises \#BP with the
 *  is-breakpoint-instruction indicator set. */
FNIEMOP_DEF(iemOp_int_3)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_BP, true /*fIsBpInstr*/);
}
12627
12628
/** Opcode 0xcd - int Ib: software interrupt with the given vector number. */
FNIEMOP_DEF(iemOp_int_Ib)
{
    uint8_t u8Int; IEM_OPCODE_GET_NEXT_U8(&u8Int);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, u8Int, false /*fIsBpInstr*/);
}
12636
12637
/** Opcode 0xce - into: raise \#OF if the overflow flag is set; invalid in
 *  64-bit mode (the CIMPL worker checks the flag). */
FNIEMOP_DEF(iemOp_into)
{
    IEMOP_MNEMONIC("into");
    IEMOP_HLP_NO_64BIT();

    IEM_MC_BEGIN(2, 0);
    IEM_MC_ARG_CONST(uint8_t,   u8Int,      /*=*/ X86_XCPT_OF, 0);
    IEM_MC_ARG_CONST(bool,      fIsBpInstr, /*=*/ false, 1);
    IEM_MC_CALL_CIMPL_2(iemCImpl_int, u8Int, fIsBpInstr);
    IEM_MC_END();
    return VINF_SUCCESS;
}
12651
12652
/** Opcode 0xcf - iret: interrupt return for the current effective operand
 *  size. */
FNIEMOP_DEF(iemOp_iret)
{
    IEMOP_MNEMONIC("iret");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_iret, pIemCpu->enmEffOpSize);
}
12660
12661
/** Opcode 0xd0 - Group 2 byte shifts/rotates with an implicit count of 1.
 *  The ModR/M reg field selects rol/ror/rcl/rcr/shl/shr/sar; /6 is invalid. */
FNIEMOP_DEF(iemOp_Grp2_Eb_1)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,1"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,1"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,1"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,1"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,1"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,1"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,1"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
    }
    /* OF and AF are architecturally undefined after these operations. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
        IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/1,   1);
        IEM_MC_ARG(uint32_t *,      pEFlags,            2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
        IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=*/1,   1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,        2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12717
12718
12719
/** Opcode 0xd1 - Group 2 word/dword/qword shifts/rotates with an implicit
 *  count of 1.  The ModR/M reg field selects the operation; /6 is invalid. */
FNIEMOP_DEF(iemOp_Grp2_Ev_1)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,1"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,1"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,1"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,1"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,1"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,1"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,1"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe, well... */
    }
    /* OF and AF are architecturally undefined after these operations. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                /* 32-bit register writes clear the high dword of the 64-bit GPR. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG(uint32_t *,      pEFlags,           2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,       2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,       2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,           0);
                IEM_MC_ARG_CONST(uint8_t,   cShiftArg,/*=1*/1, 1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags,       2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
12849
12850
/** Opcode 0xd2 - Group 2 byte shifts/rotates with the count taken from CL.
 *  The ModR/M reg field selects the operation; /6 is invalid. */
FNIEMOP_DEF(iemOp_Grp2_Eb_CL)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Eb,CL"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Eb,CL"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Eb,CL"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Eb,CL"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Eb,CL"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Eb,CL"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Eb,CL"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc, grr. */
    }
    /* OF and AF are architecturally undefined after these operations. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,   pu8Dst,     0);
        IEM_MC_ARG(uint8_t,     cShiftArg,  1);
        IEM_MC_ARG(uint32_t *,  pEFlags,    2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count = CL */
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,   pu8Dst,          0);
        IEM_MC_ARG(uint8_t,     cShiftArg,       1);
        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX); /* shift count = CL */
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU8, pu8Dst, cShiftArg, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
12908
12909
/**
 * Opcode 0xd3 - Group 2: rotate/shift Ev by CL.
 *
 * Dispatches on ModRM.reg to ROL/ROR/RCL/RCR/SHL/SHR/SAR, with a 16/32/64-bit
 * register or memory destination and the shift count taken from CL.
 * ModRM.reg == 6 is an unused encoding and raises \#UD.
 */
FNIEMOP_DEF(iemOp_Grp2_Ev_CL)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    PCIEMOPSHIFTSIZES pImpl;
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0: pImpl = &g_iemAImpl_rol; IEMOP_MNEMONIC("rol Ev,CL"); break;
        case 1: pImpl = &g_iemAImpl_ror; IEMOP_MNEMONIC("ror Ev,CL"); break;
        case 2: pImpl = &g_iemAImpl_rcl; IEMOP_MNEMONIC("rcl Ev,CL"); break;
        case 3: pImpl = &g_iemAImpl_rcr; IEMOP_MNEMONIC("rcr Ev,CL"); break;
        case 4: pImpl = &g_iemAImpl_shl; IEMOP_MNEMONIC("shl Ev,CL"); break;
        case 5: pImpl = &g_iemAImpl_shr; IEMOP_MNEMONIC("shr Ev,CL"); break;
        case 7: pImpl = &g_iemAImpl_sar; IEMOP_MNEMONIC("sar Ev,CL"); break;
        case 6: return IEMOP_RAISE_INVALID_OPCODE();
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* gcc maybe stupid */
    }
    /* OF and AF are left undefined by these shift/rotate operations. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_OF | X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register destination: reference the GPR directly and call the worker. */
        IEMOP_HLP_NO_LOCK_PREFIX();
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,    0);
                IEM_MC_ARG(uint8_t,         cShiftArg,  1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);  /* shift count = CL */
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,    0);
                IEM_MC_ARG(uint8_t,         cShiftArg,  1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);
                /* 32-bit GPR writes zero the upper half of the 64-bit register. */
                IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,    0);
                IEM_MC_ARG(uint8_t,         cShiftArg,  1);
                IEM_MC_ARG(uint32_t *,      pEFlags,    2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory destination: map it read-write, operate, then commit. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,  pu16Dst,    0);
                IEM_MC_ARG(uint8_t,     cShiftArg,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU16, pu16Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,  pu32Dst,    0);
                IEM_MC_ARG(uint8_t,     cShiftArg,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU32, pu32Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,  pu64Dst,    0);
                IEM_MC_ARG(uint8_t,     cShiftArg,  1);
                IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_GREG_U8(cShiftArg, X86_GREG_xCX);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(pImpl->pfnNormalU64, pu64Dst, cShiftArg, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
13045
/** Opcode 0xd4.
 * AAM Ib - the immediate is the divisor (0x0a in the assembler's classic
 * no-operand form).  An immediate of zero raises \#DE, like a divide. */
FNIEMOP_DEF(iemOp_aam_Ib)
{
    IEMOP_MNEMONIC("aam Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();   /* not encodable in 64-bit mode */
    if (!bImm)
        return IEMOP_RAISE_DIVIDE_ERROR();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aam, bImm);
}
13057
13058
/** Opcode 0xd5.
 * AAD Ib - the immediate is the multiplier (0x0a in the classic form).
 * Unlike AAM, a zero immediate is fine here (multiplication, not division). */
FNIEMOP_DEF(iemOp_aad_Ib)
{
    IEMOP_MNEMONIC("aad Ib");
    uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_NO_64BIT();   /* not encodable in 64-bit mode */
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_aad, bImm);
}
13068
13069
13070/** Opcode 0xd6. */
13071FNIEMOP_DEF(iemOp_salc)
13072{
13073 IEMOP_MNEMONIC("salc");
13074 IEMOP_HLP_MIN_286(); /* (undocument at the time) */
13075 uint8_t bImm; IEM_OPCODE_GET_NEXT_U8(&bImm);
13076 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13077 IEMOP_HLP_NO_64BIT();
13078
13079 IEM_MC_BEGIN(0, 0);
13080 IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF) {
13081 IEM_MC_STORE_GREG_U8_CONST(X86_GREG_xAX, 0xff);
13082 } IEM_MC_ELSE() {
13083 IEM_MC_STORE_GREG_U8_CONST(X86_GREG_xAX, 0x00);
13084 } IEM_MC_ENDIF();
13085 IEM_MC_ADVANCE_RIP();
13086 IEM_MC_END();
13087 return VINF_SUCCESS;
13088}
13089
13090
/** Opcode 0xd7.
 * XLAT - AL = [DS:(r/e)BX + AL], switching on the effective address size for
 * the base/offset width.  The segment is overridable via iEffSeg.
 * NOTE(review): IEM_MC_BEGIN(2, 0) vs. the two IEM_MC_LOCALs and zero args
 * looks swapped relative to the sibling opcode handlers - confirm whether the
 * counts are purely informational here. */
FNIEMOP_DEF(iemOp_xlat)
{
    IEMOP_MNEMONIC("xlat");
    IEMOP_HLP_NO_LOCK_PREFIX();
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint16_t, u16Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U16(u16Addr, X86_GREG_xAX);  /* zero-extend AL */
            IEM_MC_ADD_GREG_U16_TO_LOCAL(u16Addr, X86_GREG_xBX); /* + BX */
            IEM_MC_FETCH_MEM16_U8(u8Tmp, pIemCpu->iEffSeg, u16Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint32_t, u32Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U32(u32Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U32_TO_LOCAL(u32Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM32_U8(u8Tmp, pIemCpu->iEffSeg, u32Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 0);
            IEM_MC_LOCAL(uint8_t,  u8Tmp);
            IEM_MC_LOCAL(uint64_t, u64Addr);
            IEM_MC_FETCH_GREG_U8_ZX_U64(u64Addr, X86_GREG_xAX);
            IEM_MC_ADD_GREG_U64_TO_LOCAL(u64Addr, X86_GREG_xBX);
            IEM_MC_FETCH_MEM_U8(u8Tmp, pIemCpu->iEffSeg, u64Addr);
            IEM_MC_STORE_GREG_U8(X86_GREG_xAX, u8Tmp);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
13137
13138
13139/**
13140 * Common worker for FPU instructions working on ST0 and STn, and storing the
13141 * result in ST0.
13142 *
13143 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13144 */
13145FNIEMOP_DEF_2(iemOpHlpFpu_st0_stN, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
13146{
13147 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13148
13149 IEM_MC_BEGIN(3, 1);
13150 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13151 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13152 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
13153 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
13154
13155 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13156 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13157 IEM_MC_PREPARE_FPU_USAGE();
13158 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
13159 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
13160 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
13161 IEM_MC_ELSE()
13162 IEM_MC_FPU_STACK_UNDERFLOW(0);
13163 IEM_MC_ENDIF();
13164 IEM_MC_ADVANCE_RIP();
13165
13166 IEM_MC_END();
13167 return VINF_SUCCESS;
13168}
13169
13170
13171/**
13172 * Common worker for FPU instructions working on ST0 and STn, and only affecting
13173 * flags.
13174 *
13175 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13176 */
13177FNIEMOP_DEF_2(iemOpHlpFpuNoStore_st0_stN, uint8_t, bRm, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
13178{
13179 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13180
13181 IEM_MC_BEGIN(3, 1);
13182 IEM_MC_LOCAL(uint16_t, u16Fsw);
13183 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
13184 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
13185 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
13186
13187 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13188 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13189 IEM_MC_PREPARE_FPU_USAGE();
13190 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
13191 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
13192 IEM_MC_UPDATE_FSW(u16Fsw);
13193 IEM_MC_ELSE()
13194 IEM_MC_FPU_STACK_UNDERFLOW(UINT8_MAX);
13195 IEM_MC_ENDIF();
13196 IEM_MC_ADVANCE_RIP();
13197
13198 IEM_MC_END();
13199 return VINF_SUCCESS;
13200}
13201
13202
13203/**
13204 * Common worker for FPU instructions working on ST0 and STn, only affecting
13205 * flags, and popping when done.
13206 *
13207 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13208 */
13209FNIEMOP_DEF_2(iemOpHlpFpuNoStore_st0_stN_pop, uint8_t, bRm, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
13210{
13211 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13212
13213 IEM_MC_BEGIN(3, 1);
13214 IEM_MC_LOCAL(uint16_t, u16Fsw);
13215 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
13216 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
13217 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
13218
13219 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13220 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13221 IEM_MC_PREPARE_FPU_USAGE();
13222 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
13223 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
13224 IEM_MC_UPDATE_FSW_THEN_POP(u16Fsw);
13225 IEM_MC_ELSE()
13226 IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(UINT8_MAX);
13227 IEM_MC_ENDIF();
13228 IEM_MC_ADVANCE_RIP();
13229
13230 IEM_MC_END();
13231 return VINF_SUCCESS;
13232}
13233
13234
/** Opcode 0xd8 11/0.  FADD ST0,STn - dispatch to the store-to-ST0 worker. */
FNIEMOP_DEF_1(iemOp_fadd_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fadd_r80_by_r80);
}
13241
13242
/** Opcode 0xd8 11/1.  FMUL ST0,STn - dispatch to the store-to-ST0 worker. */
FNIEMOP_DEF_1(iemOp_fmul_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fmul_r80_by_r80);
}
13249
13250
/** Opcode 0xd8 11/2.  FCOM ST0,STn - flags only, no store. */
FNIEMOP_DEF_1(iemOp_fcom_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcom st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN, bRm, iemAImpl_fcom_r80_by_r80);
}
13257
13258
/** Opcode 0xd8 11/3.  FCOMP ST0,STn - same compare worker as FCOM, then pop. */
FNIEMOP_DEF_1(iemOp_fcomp_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomp st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN_pop, bRm, iemAImpl_fcom_r80_by_r80);
}
13265
13266
/** Opcode 0xd8 11/4.  FSUB ST0,STn - dispatch to the store-to-ST0 worker. */
FNIEMOP_DEF_1(iemOp_fsub_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fsub_r80_by_r80);
}
13273
13274
/** Opcode 0xd8 11/5.  FSUBR ST0,STn - reversed-operand subtract. */
FNIEMOP_DEF_1(iemOp_fsubr_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fsubr_r80_by_r80);
}
13281
13282
/** Opcode 0xd8 11/6.  FDIV ST0,STn - dispatch to the store-to-ST0 worker. */
FNIEMOP_DEF_1(iemOp_fdiv_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fdiv_r80_by_r80);
}
13289
13290
/** Opcode 0xd8 11/7.  FDIVR ST0,STn - reversed-operand divide. */
FNIEMOP_DEF_1(iemOp_fdivr_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr st0,stN");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, bRm, iemAImpl_fdivr_r80_by_r80);
}
13297
13298
13299/**
13300 * Common worker for FPU instructions working on ST0 and an m32r, and storing
13301 * the result in ST0.
13302 *
13303 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13304 */
13305FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32r, uint8_t, bRm, PFNIEMAIMPLFPUR32, pfnAImpl)
13306{
13307 IEM_MC_BEGIN(3, 3);
13308 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
13309 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13310 IEM_MC_LOCAL(RTFLOAT32U, r32Val2);
13311 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13312 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
13313 IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U, pr32Val2, r32Val2, 2);
13314
13315 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
13316 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13317
13318 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13319 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13320 IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
13321
13322 IEM_MC_PREPARE_FPU_USAGE();
13323 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
13324 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr32Val2);
13325 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
13326 IEM_MC_ELSE()
13327 IEM_MC_FPU_STACK_UNDERFLOW(0);
13328 IEM_MC_ENDIF();
13329 IEM_MC_ADVANCE_RIP();
13330
13331 IEM_MC_END();
13332 return VINF_SUCCESS;
13333}
13334
13335
/** Opcode 0xd8 !11/0.  FADD ST0,m32real. */
FNIEMOP_DEF_1(iemOp_fadd_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fadd_r80_by_r32);
}
13342
13343
/** Opcode 0xd8 !11/1.  FMUL ST0,m32real. */
FNIEMOP_DEF_1(iemOp_fmul_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fmul_r80_by_r32);
}
13350
13351
/** Opcode 0xd8 !11/2.
 * FCOM ST0,m32real - open-coded (rather than using a common worker) because
 * the FSW update records the memory operand's seg:offset. */
FNIEMOP_DEF_1(iemOp_fcom_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcom st0,m32r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_LOCAL(RTFLOAT32U,            r32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw,     0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,             1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val2,   r32Val2,    2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r32, pu16Fsw, pr80Value1, pr32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13384
13385
/** Opcode 0xd8 !11/3.
 * FCOMP ST0,m32real - like iemOp_fcom_m32r but pops the stack afterwards
 * (_THEN_POP variants of the FSW update / underflow macros). */
FNIEMOP_DEF_1(iemOp_fcomp_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomp st0,m32r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_LOCAL(RTFLOAT32U,            r32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw,     0);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,             1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val2,   r32Val2,    2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r32, pu16Fsw, pr80Value1, pr32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13418
13419
/** Opcode 0xd8 !11/4.  FSUB ST0,m32real. */
FNIEMOP_DEF_1(iemOp_fsub_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fsub_r80_by_r32);
}
13426
13427
/** Opcode 0xd8 !11/5.  FSUBR ST0,m32real - reversed-operand subtract. */
FNIEMOP_DEF_1(iemOp_fsubr_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fsubr_r80_by_r32);
}
13434
13435
/** Opcode 0xd8 !11/6.  FDIV ST0,m32real. */
FNIEMOP_DEF_1(iemOp_fdiv_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fdiv_r80_by_r32);
}
13442
13443
/** Opcode 0xd8 !11/7.  FDIVR ST0,m32real - reversed-operand divide. */
FNIEMOP_DEF_1(iemOp_fdivr_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr st0,m32r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32r, bRm, iemAImpl_fdivr_r80_by_r32);
}
13450
13451
/** Opcode 0xd8 - FPU escape 0.
 * Records the FPU opcode offset, then dispatches on ModRM.reg: the mod==3
 * forms operate on ST0/STn, the memory forms on ST0 and an m32real. */
FNIEMOP_DEF(iemOp_EscF0)
{
    /* Remember where this escape opcode starts so FOP can be updated later. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register forms: ST0 op STn */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_stN,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_stN,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_stN,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fsub_stN,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubr_stN, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdiv_stN,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivr_stN, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory forms: ST0 op m32real */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_m32r,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_m32r,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_m32r,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_m32r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fsub_m32r,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubr_m32r, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdiv_m32r,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivr_m32r, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
13489
13490
/** Opcode 0xd9 /0 mem32real
 * FLD m32real - converts the 32-bit real to 80-bit and pushes it; pushing
 * requires the register that will become the new top (ST7 here) to be empty,
 * otherwise the push-overflow path is taken.
 * @sa iemOp_fld_m64r */
FNIEMOP_DEF_1(iemOp_fld_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld m32r");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_LOCAL(RTFLOAT32U,            r32Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes,    FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT32U,  pr32Val,    r32Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R32(r32Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r32_to_r80, pFpuRes, pr32Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13523
13524
/** Opcode 0xd9 !11/2 mem32real
 * FST m32real - stores ST0 as a 32-bit real.  If ST0 is empty and the
 * invalid-operation exception is masked (FCW.IM), a negative QNaN is written
 * instead before raising the underflow bookkeeping. */
FNIEMOP_DEF_1(iemOp_fst_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst m32r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT32U,             pr32Dst,            1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,          2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13559
13560
/** Opcode 0xd9 !11/3
 * FSTP m32real - like iemOp_fst_m32r but pops the stack afterwards
 * (_THEN_POP variants of the FSW update / underflow macros). */
FNIEMOP_DEF_1(iemOp_fstp_m32r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp m32r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *,    pu16Fsw,    u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT32U,             pr32Dst,            1);
    IEM_MC_ARG(PCRTFLOAT80U,            pr80Value,          2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r32, pu16Fsw, pr32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(pr32Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
13595
13596
/** Opcode 0xd9 !11/4
 * FLDENV m14/28byte - loads the FPU environment; deferred to a C
 * implementation, with the operand size selecting the 14- vs 28-byte image. */
FNIEMOP_DEF_1(iemOp_fldenv, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fldenv m14/28byte");
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG_CONST(IEMMODE,           enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize,  0);
    IEM_MC_ARG(uint8_t,                 iEffSeg,                                    1);
    IEM_MC_ARG(RTGCPTR,                 GCPtrEffSrc,                                2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
    IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
    IEM_MC_CALL_CIMPL_3(iemCImpl_fldenv, enmEffOpSize, iEffSeg, GCPtrEffSrc);
    IEM_MC_END();
    return VINF_SUCCESS;
}
13614
13615
13616/** Opcode 0xd9 !11/5 */
13617FNIEMOP_DEF_1(iemOp_fldcw, uint8_t, bRm)
13618{
13619 IEMOP_MNEMONIC("fldcw m2byte");
13620 IEM_MC_BEGIN(1, 1);
13621 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
13622 IEM_MC_ARG(uint16_t, u16Fsw, 0);
13623 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
13624 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13625 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13626 IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
13627 IEM_MC_FETCH_MEM_U16(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
13628 IEM_MC_CALL_CIMPL_1(iemCImpl_fldcw, u16Fsw);
13629 IEM_MC_END();
13630 return VINF_SUCCESS;
13631}
13632
13633
/** Opcode 0xd9 !11/6
 * FNSTENV m14/m28byte - stores the FPU environment; deferred to a C
 * implementation, with the operand size selecting the 14- vs 28-byte image. */
FNIEMOP_DEF_1(iemOp_fnstenv, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstenv m14/m28byte");
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG_CONST(IEMMODE,           enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize,  0);
    IEM_MC_ARG(uint8_t,                 iEffSeg,                                    1);
    IEM_MC_ARG(RTGCPTR,                 GCPtrEffDst,                                2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
    IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
    IEM_MC_CALL_CIMPL_3(iemCImpl_fnstenv, enmEffOpSize, iEffSeg, GCPtrEffDst);
    IEM_MC_END();
    return VINF_SUCCESS;
}
13651
13652
/** Opcode 0xd9 !11/7
 * FNSTCW m2byte - stores the FPU control word to memory. */
FNIEMOP_DEF_1(iemOp_fnstcw, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fnstcw m2byte");
    IEM_MC_BEGIN(2, 0);
    IEM_MC_LOCAL(RTGCPTR,               GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t,              u16Fcw);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
    IEM_MC_FETCH_FCW(u16Fcw);
    IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Fcw);
    IEM_MC_ADVANCE_RIP(); /* C0-C3 are documented as undefined, we leave them unmodified. */
    IEM_MC_END();
    return VINF_SUCCESS;
}
13670
13671
/** Opcode 0xd9 0xd0, 0xd9 0xd8-0xdf, ++?.
 * FNOP - does nothing except the usual FPU exception checks and updating the
 * FPU opcode/instruction pointer. */
FNIEMOP_DEF(iemOp_fnop)
{
    IEMOP_MNEMONIC("fnop");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
    /** @todo Testcase: looks like FNOP leaves FOP alone but updates FPUIP. Could be
     *        intel optimizations. Investigate. */
    IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ADVANCE_RIP(); /* C0-C3 are documented as undefined, we leave them unmodified. */
    IEM_MC_END();
    return VINF_SUCCESS;
}
13689
13690
/** Opcode 0xd9 11/0 stN
 * FLD STn - pushes a copy of STn onto the register stack.  STn is read
 * relative to the current top *before* the push takes effect. */
FNIEMOP_DEF_1(iemOp_fld_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /** @todo Testcase: Check if this raises \#MF?  Intel mentioned it not.  AMD
     *        indicates that it does. */
    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(PCRTFLOAT80U,          pr80Value);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, bRm & X86_MODRM_RM_MASK)
        IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
        IEM_MC_PUSH_FPU_RESULT(FpuRes);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_UNDERFLOW();      /* source register empty */
    IEM_MC_ENDIF();

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();

    return VINF_SUCCESS;
}
13718
13719
/** Opcode 0xd9 11/3 stN
 * FXCH STn - exchanges ST0 and STn.  The underflow case (either register
 * empty) is handled by a C implementation. */
FNIEMOP_DEF_1(iemOp_fxch_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fxch stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /** @todo Testcase: Check if this raises \#MF?  Intel mentioned it not.  AMD
     *        indicates that it does. */
    IEM_MC_BEGIN(1, 3);
    IEM_MC_LOCAL(PCRTFLOAT80U,          pr80Value1);
    IEM_MC_LOCAL(PCRTFLOAT80U,          pr80Value2);
    IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
    IEM_MC_ARG_CONST(uint8_t,           iStReg, /*=*/ bRm & X86_MODRM_RM_MASK, 0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
        /* Both live: copy STn's value into ST0 (with C1 set) and ST0's into STn. */
        IEM_MC_SET_FPU_RESULT(FpuRes, X86_FSW_C1, pr80Value2);
        IEM_MC_STORE_FPUREG_R80_SRC_REF(bRm & X86_MODRM_RM_MASK, pr80Value1);
        IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
    IEM_MC_ELSE()
        IEM_MC_CALL_CIMPL_1(iemCImpl_fxch_underflow, iStReg);
    IEM_MC_ENDIF();

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();

    return VINF_SUCCESS;
}
13750
13751
/** Opcode 0xd9 11/4, 0xdd 11/2.
 * FSTP STn - stores ST0 into STn and pops.  The STn == ST0 case is
 * special-cased: it amounts to a pop with no copy. */
FNIEMOP_DEF_1(iemOp_fstp_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    /* fstp st0, st0 is frequently used as an official 'ffreep st0' sequence. */
    uint8_t const iDstReg = bRm & X86_MODRM_RM_MASK;
    if (!iDstReg)
    {
        IEM_MC_BEGIN(0, 1);
        IEM_MC_LOCAL_CONST(uint16_t,        u16Fsw, /*=*/ 0);
        IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
        IEM_MC_MAYBE_RAISE_FPU_XCPT();

        IEM_MC_PREPARE_FPU_USAGE();
        IEM_MC_IF_FPUREG_NOT_EMPTY(0)
            IEM_MC_UPDATE_FSW_THEN_POP(u16Fsw);     /* just pop, no store needed */
        IEM_MC_ELSE()
            IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(0);
        IEM_MC_ENDIF();

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(PCRTFLOAT80U,          pr80Value);
        IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
        IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
        IEM_MC_MAYBE_RAISE_FPU_XCPT();

        IEM_MC_PREPARE_FPU_USAGE();
        IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
            IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
            IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, iDstReg);
        IEM_MC_ELSE()
            IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(iDstReg);
        IEM_MC_ENDIF();

        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
13798
13799
13800/**
13801 * Common worker for FPU instructions working on ST0 and replaces it with the
13802 * result, i.e. unary operators.
13803 *
13804 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13805 */
13806FNIEMOP_DEF_1(iemOpHlpFpu_st0, PFNIEMAIMPLFPUR80UNARY, pfnAImpl)
13807{
13808 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13809
13810 IEM_MC_BEGIN(2, 1);
13811 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13812 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13813 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);
13814
13815 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13816 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13817 IEM_MC_PREPARE_FPU_USAGE();
13818 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
13819 IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pFpuRes, pr80Value);
13820 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
13821 IEM_MC_ELSE()
13822 IEM_MC_FPU_STACK_UNDERFLOW(0);
13823 IEM_MC_ENDIF();
13824 IEM_MC_ADVANCE_RIP();
13825
13826 IEM_MC_END();
13827 return VINF_SUCCESS;
13828}
13829
13830
/** Opcode 0xd9 0xe0.  FCHS - change the sign of ST0. */
FNIEMOP_DEF(iemOp_fchs)
{
    IEMOP_MNEMONIC("fchs st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fchs_r80);
}
13837
13838
/** Opcode 0xd9 0xe1.  FABS - absolute value of ST0. */
FNIEMOP_DEF(iemOp_fabs)
{
    IEMOP_MNEMONIC("fabs st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fabs_r80);
}
13845
13846
13847/**
13848 * Common worker for FPU instructions working on ST0 and only returns FSW.
13849 *
13850 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13851 */
13852FNIEMOP_DEF_1(iemOpHlpFpuNoStore_st0, PFNIEMAIMPLFPUR80UNARYFSW, pfnAImpl)
13853{
13854 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13855
13856 IEM_MC_BEGIN(2, 1);
13857 IEM_MC_LOCAL(uint16_t, u16Fsw);
13858 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
13859 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);
13860
13861 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13862 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13863 IEM_MC_PREPARE_FPU_USAGE();
13864 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
13865 IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pu16Fsw, pr80Value);
13866 IEM_MC_UPDATE_FSW(u16Fsw);
13867 IEM_MC_ELSE()
13868 IEM_MC_FPU_STACK_UNDERFLOW(UINT8_MAX);
13869 IEM_MC_ENDIF();
13870 IEM_MC_ADVANCE_RIP();
13871
13872 IEM_MC_END();
13873 return VINF_SUCCESS;
13874}
13875
13876
/** Opcode 0xd9 0xe4. */
FNIEMOP_DEF(iemOp_ftst)
{
    /* FTST - compare ST0 with 0.0, setting only FSW condition codes. */
    IEMOP_MNEMONIC("ftst st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0, iemAImpl_ftst_r80);
}
13883
13884
/** Opcode 0xd9 0xe5. */
FNIEMOP_DEF(iemOp_fxam)
{
    /* FXAM - classify ST0 into FSW condition codes (flags only). */
    IEMOP_MNEMONIC("fxam st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0, iemAImpl_fxam_r80);
}
13891
13892
13893/**
13894 * Common worker for FPU instructions pushing a constant onto the FPU stack.
13895 *
13896 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13897 */
13898FNIEMOP_DEF_1(iemOpHlpFpuPushConstant, PFNIEMAIMPLFPUR80LDCONST, pfnAImpl)
13899{
13900 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
13901
13902 IEM_MC_BEGIN(1, 1);
13903 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
13904 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
13905
13906 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
13907 IEM_MC_MAYBE_RAISE_FPU_XCPT();
13908 IEM_MC_PREPARE_FPU_USAGE();
13909 IEM_MC_IF_FPUREG_IS_EMPTY(7)
13910 IEM_MC_CALL_FPU_AIMPL_1(pfnAImpl, pFpuRes);
13911 IEM_MC_PUSH_FPU_RESULT(FpuRes);
13912 IEM_MC_ELSE()
13913 IEM_MC_FPU_STACK_PUSH_OVERFLOW();
13914 IEM_MC_ENDIF();
13915 IEM_MC_ADVANCE_RIP();
13916
13917 IEM_MC_END();
13918 return VINF_SUCCESS;
13919}
13920
13921
/** Opcode 0xd9 0xe8. */
FNIEMOP_DEF(iemOp_fld1)
{
    /* FLD1 - push +1.0 onto the FPU stack. */
    IEMOP_MNEMONIC("fld1");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fld1);
}
13928
13929
/** Opcode 0xd9 0xe9. */
FNIEMOP_DEF(iemOp_fldl2t)
{
    /* FLDL2T - push log2(10) onto the FPU stack. */
    IEMOP_MNEMONIC("fldl2t");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldl2t);
}
13936
13937
/** Opcode 0xd9 0xea. */
FNIEMOP_DEF(iemOp_fldl2e)
{
    /* FLDL2E - push log2(e) onto the FPU stack. */
    IEMOP_MNEMONIC("fldl2e");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldl2e);
}
13944
/** Opcode 0xd9 0xeb. */
FNIEMOP_DEF(iemOp_fldpi)
{
    /* FLDPI - push pi onto the FPU stack. */
    IEMOP_MNEMONIC("fldpi");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldpi);
}
13951
13952
/** Opcode 0xd9 0xec. */
FNIEMOP_DEF(iemOp_fldlg2)
{
    /* FLDLG2 - push log10(2) onto the FPU stack. */
    IEMOP_MNEMONIC("fldlg2");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldlg2);
}
13959
/** Opcode 0xd9 0xed. */
FNIEMOP_DEF(iemOp_fldln2)
{
    /* FLDLN2 - push ln(2) onto the FPU stack. */
    IEMOP_MNEMONIC("fldln2");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldln2);
}
13966
13967
/** Opcode 0xd9 0xee. */
FNIEMOP_DEF(iemOp_fldz)
{
    /* FLDZ - push +0.0 onto the FPU stack. */
    IEMOP_MNEMONIC("fldz");
    return FNIEMOP_CALL_1(iemOpHlpFpuPushConstant, iemAImpl_fldz);
}
13974
13975
/** Opcode 0xd9 0xf0. */
FNIEMOP_DEF(iemOp_f2xm1)
{
    /* F2XM1 - replace ST0 with 2^ST0 - 1. */
    IEMOP_MNEMONIC("f2xm1 st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_f2xm1_r80);
}
13982
13983
13984/** Opcode 0xd9 0xf1. */
13985FNIEMOP_DEF(iemOp_fylx2)
13986{
13987 IEMOP_MNEMONIC("fylx2 st0");
13988 return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fyl2x_r80);
13989}
13990
13991
13992/**
13993 * Common worker for FPU instructions working on ST0 and having two outputs, one
13994 * replacing ST0 and one pushed onto the stack.
13995 *
13996 * @param pfnAImpl Pointer to the instruction implementation (assembly).
13997 */
13998FNIEMOP_DEF_1(iemOpHlpFpuReplace_st0_push, PFNIEMAIMPLFPUR80UNARYTWO, pfnAImpl)
13999{
14000 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14001
14002 IEM_MC_BEGIN(2, 1);
14003 IEM_MC_LOCAL(IEMFPURESULTTWO, FpuResTwo);
14004 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULTTWO, pFpuResTwo, FpuResTwo, 0);
14005 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 1);
14006
14007 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14008 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14009 IEM_MC_PREPARE_FPU_USAGE();
14010 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
14011 IEM_MC_CALL_FPU_AIMPL_2(pfnAImpl, pFpuResTwo, pr80Value);
14012 IEM_MC_PUSH_FPU_RESULT_TWO(FpuResTwo);
14013 IEM_MC_ELSE()
14014 IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO();
14015 IEM_MC_ENDIF();
14016 IEM_MC_ADVANCE_RIP();
14017
14018 IEM_MC_END();
14019 return VINF_SUCCESS;
14020}
14021
14022
/** Opcode 0xd9 0xf2. */
FNIEMOP_DEF(iemOp_fptan)
{
    /* FPTAN - replace ST0 with tan(ST0) and push 1.0. */
    IEMOP_MNEMONIC("fptan st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fptan_r80_r80);
}
14029
14030
14031/**
14032 * Common worker for FPU instructions working on STn and ST0, storing the result
14033 * in STn, and popping the stack unless IE, DE or ZE was raised.
14034 *
14035 * @param pfnAImpl Pointer to the instruction implementation (assembly).
14036 */
14037FNIEMOP_DEF_2(iemOpHlpFpu_stN_st0_pop, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
14038{
14039 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14040
14041 IEM_MC_BEGIN(3, 1);
14042 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
14043 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
14044 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
14045 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
14046
14047 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14048 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14049
14050 IEM_MC_PREPARE_FPU_USAGE();
14051 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, bRm & X86_MODRM_RM_MASK, pr80Value2, 0)
14052 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
14053 IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, bRm & X86_MODRM_RM_MASK);
14054 IEM_MC_ELSE()
14055 IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(bRm & X86_MODRM_RM_MASK);
14056 IEM_MC_ENDIF();
14057 IEM_MC_ADVANCE_RIP();
14058
14059 IEM_MC_END();
14060 return VINF_SUCCESS;
14061}
14062
14063
/** Opcode 0xd9 0xf3. */
FNIEMOP_DEF(iemOp_fpatan)
{
    /* FPATAN - ST1 = arctan(ST1/ST0), then pop; routed via the STn/ST0-pop worker with STn=1. */
    IEMOP_MNEMONIC("fpatan st1,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, 1, iemAImpl_fpatan_r80_by_r80);
}
14070
14071
/** Opcode 0xd9 0xf4. */
FNIEMOP_DEF(iemOp_fxtract)
{
    /* FXTRACT - split ST0 into exponent (replaces ST0) and pushed significand. */
    IEMOP_MNEMONIC("fxtract st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fxtract_r80_r80);
}
14078
14079
/** Opcode 0xd9 0xf5. */
FNIEMOP_DEF(iemOp_fprem1)
{
    /* FPREM1 - IEEE partial remainder of ST0/ST1, result into ST0. */
    IEMOP_MNEMONIC("fprem1 st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fprem1_r80_by_r80);
}
14086
14087
/** Opcode 0xd9 0xf6. */
FNIEMOP_DEF(iemOp_fdecstp)
{
    /* FDECSTP - decrement the FPU stack top pointer; no register content changes. */
    IEMOP_MNEMONIC("fdecstp");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C2 and C3 are documented as undefined, we clear them. */
    /** @todo Testcase: Check whether FOP, FPUIP and FPUCS are affected by
     *        FINCSTP and FDECSTP. */

    IEM_MC_BEGIN(0,0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
    IEM_MC_FPU_STACK_DEC_TOP();
    IEM_MC_UPDATE_FSW_CONST(0); /* clears C0/C2/C3 (undefined per docs) */

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
14110
14111
/** Opcode 0xd9 0xf7. */
FNIEMOP_DEF(iemOp_fincstp)
{
    /* FINCSTP - increment the FPU stack top pointer; no register content changes. */
    IEMOP_MNEMONIC("fincstp");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C2 and C3 are documented as undefined, we clear them. */
    /** @todo Testcase: Check whether FOP, FPUIP and FPUCS are affected by
     *        FINCSTP and FDECSTP. */

    IEM_MC_BEGIN(0,0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
    IEM_MC_FPU_STACK_INC_TOP();
    IEM_MC_UPDATE_FSW_CONST(0); /* clears C0/C2/C3 (undefined per docs) */

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
14134
14135
/** Opcode 0xd9 0xf8. */
FNIEMOP_DEF(iemOp_fprem)
{
    /* FPREM - truncating partial remainder of ST0/ST1, result into ST0. */
    IEMOP_MNEMONIC("fprem st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fprem_r80_by_r80);
}
14142
14143
/** Opcode 0xd9 0xf9. */
FNIEMOP_DEF(iemOp_fyl2xp1)
{
    /* FYL2XP1 - ST1 = ST1 * log2(ST0 + 1), then pop; uses the STn/ST0-pop worker with STn=1. */
    IEMOP_MNEMONIC("fyl2xp1 st1,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, 1, iemAImpl_fyl2xp1_r80_by_r80);
}
14150
14151
/** Opcode 0xd9 0xfa. */
FNIEMOP_DEF(iemOp_fsqrt)
{
    /* FSQRT - replace ST0 with its square root. */
    IEMOP_MNEMONIC("fsqrt st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fsqrt_r80);
}
14158
14159
/** Opcode 0xd9 0xfb. */
FNIEMOP_DEF(iemOp_fsincos)
{
    /* FSINCOS - replace ST0 with sin(ST0) and push cos(ST0). */
    IEMOP_MNEMONIC("fsincos st0");
    return FNIEMOP_CALL_1(iemOpHlpFpuReplace_st0_push, iemAImpl_fsincos_r80_r80);
}
14166
14167
/** Opcode 0xd9 0xfc. */
FNIEMOP_DEF(iemOp_frndint)
{
    /* FRNDINT - round ST0 to an integer per the current rounding mode. */
    IEMOP_MNEMONIC("frndint st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_frndint_r80);
}
14174
14175
/** Opcode 0xd9 0xfd. */
FNIEMOP_DEF(iemOp_fscale)
{
    /* FSCALE - scale ST0 by 2^trunc(ST1), result into ST0. */
    IEMOP_MNEMONIC("fscale st0, st1");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_stN, 1, iemAImpl_fscale_r80_by_r80);
}
14182
14183
/** Opcode 0xd9 0xfe. */
FNIEMOP_DEF(iemOp_fsin)
{
    /* FSIN - replace ST0 with sin(ST0). */
    IEMOP_MNEMONIC("fsin st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fsin_r80);
}
14190
14191
/** Opcode 0xd9 0xff. */
FNIEMOP_DEF(iemOp_fcos)
{
    /* FCOS - replace ST0 with cos(ST0). */
    IEMOP_MNEMONIC("fcos st0");
    return FNIEMOP_CALL_1(iemOpHlpFpu_st0, iemAImpl_fcos_r80);
}
14198
14199
/** Used by iemOp_EscF1.
 * Dispatch table for 0xd9 register-form opcodes 0xe0 thru 0xff (ModR/M
 * mod=3, reg=4..7); indexed by (opcode byte - 0xe0). */
static const PFNIEMOP g_apfnEscF1_E0toFF[32] =
{
    /* 0xe0 */ iemOp_fchs,
    /* 0xe1 */ iemOp_fabs,
    /* 0xe2 */ iemOp_Invalid,
    /* 0xe3 */ iemOp_Invalid,
    /* 0xe4 */ iemOp_ftst,
    /* 0xe5 */ iemOp_fxam,
    /* 0xe6 */ iemOp_Invalid,
    /* 0xe7 */ iemOp_Invalid,
    /* 0xe8 */ iemOp_fld1,
    /* 0xe9 */ iemOp_fldl2t,
    /* 0xea */ iemOp_fldl2e,
    /* 0xeb */ iemOp_fldpi,
    /* 0xec */ iemOp_fldlg2,
    /* 0xed */ iemOp_fldln2,
    /* 0xee */ iemOp_fldz,
    /* 0xef */ iemOp_Invalid,
    /* 0xf0 */ iemOp_f2xm1,
    /* 0xf1 */ iemOp_fylx2, /* FYL2X; handler name carries a historic typo */
    /* 0xf2 */ iemOp_fptan,
    /* 0xf3 */ iemOp_fpatan,
    /* 0xf4 */ iemOp_fxtract,
    /* 0xf5 */ iemOp_fprem1,
    /* 0xf6 */ iemOp_fdecstp,
    /* 0xf7 */ iemOp_fincstp,
    /* 0xf8 */ iemOp_fprem,
    /* 0xf9 */ iemOp_fyl2xp1,
    /* 0xfa */ iemOp_fsqrt,
    /* 0xfb */ iemOp_fsincos,
    /* 0xfc */ iemOp_frndint,
    /* 0xfd */ iemOp_fscale,
    /* 0xfe */ iemOp_fsin,
    /* 0xff */ iemOp_fcos
};
14236
14237
/** Opcode 0xd9.
 * Escape opcode dispatcher: register forms (mod=3) switch on the reg field,
 * with reg=4..7 going through g_apfnEscF1_E0toFF; memory forms decode the
 * m32r / environment / control-word instructions. */
FNIEMOP_DEF(iemOp_EscF1)
{
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1; /* record FOP-relevant opcode offset */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fld_stN, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm);
            case 2:
                if (bRm == 0xd0)
                    return FNIEMOP_CALL(iemOp_fnop);
                return IEMOP_RAISE_INVALID_OPCODE();
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved. Intel behavior seems to be FSTP ST(i) though. */
            case 4:
            case 5:
            case 6:
            case 7:
                Assert((unsigned)bRm - 0xe0U < RT_ELEMENTS(g_apfnEscF1_E0toFF));
                return FNIEMOP_CALL(g_apfnEscF1_E0toFF[bRm - 0xe0]);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fld_m32r,  bRm);
            case 1: return IEMOP_RAISE_INVALID_OPCODE();
            case 2: return FNIEMOP_CALL_1(iemOp_fst_m32r,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_m32r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fldenv,    bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fldcw,     bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fnstenv,   bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fnstcw,    bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
14279
14280
/** Opcode 0xda 11/0.
 * FCMOVB - copy ST(i) into ST0 when CF is set; FPU opcode/IP are updated
 * either way, and an empty source/destination takes the underflow path. */
FNIEMOP_DEF_1(iemOp_fcmovb_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovb st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_CF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14307
14308
/** Opcode 0xda 11/1.
 * FCMOVE - copy ST(i) into ST0 when ZF is set. */
FNIEMOP_DEF_1(iemOp_fcmove_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmove st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14335
14336
/** Opcode 0xda 11/2.
 * FCMOVBE - copy ST(i) into ST0 when CF or ZF is set. */
FNIEMOP_DEF_1(iemOp_fcmovbe_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovbe st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14363
14364
/** Opcode 0xda 11/3.
 * FCMOVU - copy ST(i) into ST0 when PF is set (unordered). */
FNIEMOP_DEF_1(iemOp_fcmovu_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovu st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_PF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14391
14392
14393/**
14394 * Common worker for FPU instructions working on ST0 and STn, only affecting
14395 * flags, and popping twice when done.
14396 *
14397 * @param pfnAImpl Pointer to the instruction implementation (assembly).
14398 */
14399FNIEMOP_DEF_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, PFNIEMAIMPLFPUR80FSW, pfnAImpl)
14400{
14401 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14402
14403 IEM_MC_BEGIN(3, 1);
14404 IEM_MC_LOCAL(uint16_t, u16Fsw);
14405 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
14406 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
14407 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
14408
14409 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14410 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14411
14412 IEM_MC_PREPARE_FPU_USAGE();
14413 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
14414 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pu16Fsw, pr80Value1, pr80Value2);
14415 IEM_MC_UPDATE_FSW_THEN_POP_POP(u16Fsw);
14416 IEM_MC_ELSE()
14417 IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP();
14418 IEM_MC_ENDIF();
14419 IEM_MC_ADVANCE_RIP();
14420
14421 IEM_MC_END();
14422 return VINF_SUCCESS;
14423}
14424
14425
/** Opcode 0xda 0xe9. */
FNIEMOP_DEF(iemOp_fucompp)
{
    /* FUCOMPP - unordered compare ST0 with ST1, then pop twice (flags only). */
    IEMOP_MNEMONIC("fucompp st0,stN");
    return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, iemAImpl_fucom_r80_by_r80);
}
14432
14433
14434/**
14435 * Common worker for FPU instructions working on ST0 and an m32i, and storing
14436 * the result in ST0.
14437 *
14438 * @param pfnAImpl Pointer to the instruction implementation (assembly).
14439 */
14440FNIEMOP_DEF_2(iemOpHlpFpu_st0_m32i, uint8_t, bRm, PFNIEMAIMPLFPUI32, pfnAImpl)
14441{
14442 IEM_MC_BEGIN(3, 3);
14443 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
14444 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
14445 IEM_MC_LOCAL(int32_t, i32Val2);
14446 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
14447 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
14448 IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);
14449
14450 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
14451 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14452
14453 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14454 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14455 IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
14456
14457 IEM_MC_PREPARE_FPU_USAGE();
14458 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
14459 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pi32Val2);
14460 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
14461 IEM_MC_ELSE()
14462 IEM_MC_FPU_STACK_UNDERFLOW(0);
14463 IEM_MC_ENDIF();
14464 IEM_MC_ADVANCE_RIP();
14465
14466 IEM_MC_END();
14467 return VINF_SUCCESS;
14468}
14469
14470
/** Opcode 0xda !11/0. */
FNIEMOP_DEF_1(iemOp_fiadd_m32i, uint8_t, bRm)
{
    /* FIADD m32i - ST0 += (int32 from memory). */
    IEMOP_MNEMONIC("fiadd m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fiadd_r80_by_i32);
}
14477
14478
/** Opcode 0xda !11/1. */
FNIEMOP_DEF_1(iemOp_fimul_m32i, uint8_t, bRm)
{
    /* FIMUL m32i - ST0 *= (int32 from memory). */
    IEMOP_MNEMONIC("fimul m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fimul_r80_by_i32);
}
14485
14486
/** Opcode 0xda !11/2.
 * FICOM m32i - compare ST0 against an int32 from memory; flags only, no pop. */
FNIEMOP_DEF_1(iemOp_ficom_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficom st0,m32i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(int32_t, i32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i32, pu16Fsw, pr80Value1, pi32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14519
14520
/** Opcode 0xda !11/3.
 * FICOMP m32i - like FICOM m32i but pops the stack afterwards (shares the
 * same assembly compare worker). */
FNIEMOP_DEF_1(iemOp_ficomp_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficomp st0,m32i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(int32_t, i32Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val2, i32Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I32(i32Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i32, pu16Fsw, pr80Value1, pi32Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14553
14554
/** Opcode 0xda !11/4. */
FNIEMOP_DEF_1(iemOp_fisub_m32i, uint8_t, bRm)
{
    /* FISUB m32i - ST0 -= (int32 from memory). */
    IEMOP_MNEMONIC("fisub m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fisub_r80_by_i32);
}
14561
14562
/** Opcode 0xda !11/5. */
FNIEMOP_DEF_1(iemOp_fisubr_m32i, uint8_t, bRm)
{
    /* FISUBR m32i - ST0 = (int32 from memory) - ST0 (reversed subtract). */
    IEMOP_MNEMONIC("fisubr m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fisubr_r80_by_i32);
}
14569
14570
/** Opcode 0xda !11/6. */
FNIEMOP_DEF_1(iemOp_fidiv_m32i, uint8_t, bRm)
{
    /* FIDIV m32i - ST0 /= (int32 from memory). */
    IEMOP_MNEMONIC("fidiv m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fidiv_r80_by_i32);
}
14577
14578
/** Opcode 0xda !11/7. */
FNIEMOP_DEF_1(iemOp_fidivr_m32i, uint8_t, bRm)
{
    /* FIDIVR m32i - ST0 = (int32 from memory) / ST0 (reversed divide). */
    IEMOP_MNEMONIC("fidivr m32i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m32i, bRm, iemAImpl_fidivr_r80_by_i32);
}
14585
14586
/** Opcode 0xda.
 * Escape opcode dispatcher: register forms (mod=3) are the FCMOVcc family plus
 * FUCOMPP at 0xe9; memory forms are the m32i integer arithmetic/compare group. */
FNIEMOP_DEF(iemOp_EscF2)
{
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1; /* record FOP-relevant opcode offset */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fcmovb_stN,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fcmove_stN,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcmovbe_stN, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcmovu_stN,  bRm);
            case 4: return IEMOP_RAISE_INVALID_OPCODE();
            case 5:
                if (bRm == 0xe9)
                    return FNIEMOP_CALL(iemOp_fucompp);
                return IEMOP_RAISE_INVALID_OPCODE();
            case 6: return IEMOP_RAISE_INVALID_OPCODE();
            case 7: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fiadd_m32i,  bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fimul_m32i,  bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_ficom_m32i,  bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_ficomp_m32i, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fisub_m32i,  bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fisubr_m32i, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fidiv_m32i,  bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fidivr_m32i, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
14626
14627
/** Opcode 0xdb !11/0.
 * FILD m32i - convert an int32 from memory to R80 and push it; a full stack
 * (target slot occupied) takes the push-overflow path. */
FNIEMOP_DEF_1(iemOp_fild_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fild m32i");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(int32_t, i32Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(int32_t const *, pi32Val, i32Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I32(i32Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fild_i32_to_r80, pFpuRes, pi32Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14659
14660
/** Opcode 0xdb !11/1.
 * FISTTP m32i - store ST0 to memory as int32 with truncation, then pop.
 * On stack underflow with IM masked, the integer-indefinite value is stored. */
FNIEMOP_DEF_1(iemOp_fisttp_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisttp m32i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int32_t *, pi32Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i32, pu16Fsw, pi32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14695
14696
/** Opcode 0xdb !11/2.
 * FIST m32i - store ST0 to memory as int32 using the current rounding mode;
 * stack is not popped.  Integer-indefinite is stored on masked underflow. */
FNIEMOP_DEF_1(iemOp_fist_m32i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fist m32i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int32_t *, pi32Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i32, pu16Fsw, pi32Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14731
14732
14733/** Opcode 0xdb !11/3. */
14734FNIEMOP_DEF_1(iemOp_fistp_m32i, uint8_t, bRm)
14735{
14736 IEMOP_MNEMONIC("fisttp m32i");
14737 IEM_MC_BEGIN(3, 2);
14738 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
14739 IEM_MC_LOCAL(uint16_t, u16Fsw);
14740 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
14741 IEM_MC_ARG(int32_t *, pi32Dst, 1);
14742 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
14743
14744 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
14745 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
14746 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
14747 IEM_MC_MAYBE_RAISE_FPU_XCPT();
14748
14749 IEM_MC_MEM_MAP(pi32Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
14750 IEM_MC_PREPARE_FPU_USAGE();
14751 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
14752 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i32, pu16Fsw, pi32Dst, pr80Value);
14753 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi32Dst, IEM_ACCESS_DATA_W, u16Fsw);
14754 IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
14755 IEM_MC_ELSE()
14756 IEM_MC_IF_FCW_IM()
14757 IEM_MC_STORE_MEM_I32_CONST_BY_REF(pi32Dst, INT32_MIN /* (integer indefinite) */);
14758 IEM_MC_MEM_COMMIT_AND_UNMAP(pi32Dst, IEM_ACCESS_DATA_W);
14759 IEM_MC_ENDIF();
14760 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
14761 IEM_MC_ENDIF();
14762 IEM_MC_ADVANCE_RIP();
14763
14764 IEM_MC_END();
14765 return VINF_SUCCESS;
14766}
14767
14768
/** Opcode 0xdb !11/5.
 * FLD m80r - load an 80-bit real from memory and push it; a full stack
 * (target slot occupied) takes the push-overflow path. */
FNIEMOP_DEF_1(iemOp_fld_m80r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld m80r");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(RTFLOAT80U, r80Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT80U, pr80Val, r80Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R80(r80Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r80_from_r80, pFpuRes, pr80Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14800
14801
/** Opcode 0xdb !11/7.
 * FSTP m80r - store ST0 to memory as an 80-bit real, then pop.  On stack
 * underflow with IM masked a negative QNaN is stored instead. */
FNIEMOP_DEF_1(iemOp_fstp_m80r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp m80r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT80U, pr80Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr80Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r80, pu16Fsw, pr80Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr80Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(pr80Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr80Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14836
14837
/** Opcode 0xdb 11/0.
 * FCMOVNB - copy ST(i) into ST0 when CF is clear. */
FNIEMOP_DEF_1(iemOp_fcmovnb_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovnb st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_CF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14864
14865
/** Opcode 0xdb 11/1.
 * FCMOVNE - copy ST(i) into ST0 when ZF is clear. */
FNIEMOP_DEF_1(iemOp_fcmovne_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovne st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14892
14893
/** Opcode 0xdb 11/2.
 * FCMOVNBE: copy ST(i) to ST(0) when both EFLAGS.CF and EFLAGS.ZF are clear
 * (not below or equal). */
FNIEMOP_DEF_1(iemOp_fcmovnbe_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovnbe st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_PREPARE_FPU_USAGE();
    /* Both ST(0) and ST(i) must be valid; otherwise signal stack underflow on ST(0). */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_NO_BITS_SET(X86_EFL_CF | X86_EFL_ZF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14920
14921
/** Opcode 0xdb 11/3.
 * FCMOVNU: copy ST(i) to ST(0) when EFLAGS.PF is clear (not unordered). */
FNIEMOP_DEF_1(iemOp_fcmovnnu_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcmovnnu st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80ValueN);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_PREPARE_FPU_USAGE();
    /* Both ST(0) and ST(i) must be valid; otherwise signal stack underflow on ST(0). */
    IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(pr80ValueN, bRm & X86_MODRM_RM_MASK, 0)
        IEM_MC_IF_EFL_BIT_NOT_SET(X86_EFL_PF)
            IEM_MC_STORE_FPUREG_R80_SRC_REF(0, pr80ValueN);
        IEM_MC_ENDIF();
        IEM_MC_UPDATE_FPU_OPCODE_IP();
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(0);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
14948
14949
/** Opcode 0xdb 0xe0.
 * FNENI: 8087 interrupt enable; implemented as a no-op here (only the
 * device-not-available check is performed before advancing RIP). */
FNIEMOP_DEF(iemOp_fneni)
{
    IEMOP_MNEMONIC("fneni (8087/ign)");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
14961
14962
/** Opcode 0xdb 0xe1.
 * FNDISI: 8087 interrupt disable; implemented as a no-op here (only the
 * device-not-available check is performed before advancing RIP). */
FNIEMOP_DEF(iemOp_fndisi)
{
    IEMOP_MNEMONIC("fndisi (8087/ign)");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
14974
14975
/** Opcode 0xdb 0xe2.
 * FNCLEX: clear the FPU exception bits in FSW without checking for pending
 * exceptions first (no wait prefix semantics). */
FNIEMOP_DEF(iemOp_fnclex)
{
    IEMOP_MNEMONIC("fnclex");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
    IEM_MC_CLEAR_FSW_EX();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
14990
14991
/** Opcode 0xdb 0xe3.
 * FNINIT: re-initialize the FPU; deferred to the C implementation with
 * fCheckXcpts=false (no-wait form, pending exceptions are not checked). */
FNIEMOP_DEF(iemOp_fninit)
{
    IEMOP_MNEMONIC("fninit");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_finit, false /*fCheckXcpts*/);
}
14999
15000
/** Opcode 0xdb 0xe4.
 * FNSETPM: 80287 "set protected mode"; implemented as a no-op here. */
FNIEMOP_DEF(iemOp_fnsetpm)
{
    IEMOP_MNEMONIC("fnsetpm (80287/ign)"); /* set protected mode on fpu. */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15012
15013
/** Opcode 0xdb 0xe5.
 * FRSTPM: 80287XL "reset protected mode"; raises \#UD here since that is
 * what newer CPUs do (the no-op variant is compiled out). */
FNIEMOP_DEF(iemOp_frstpm)
{
    IEMOP_MNEMONIC("frstpm (80287XL/ign)"); /* reset pm, back to real mode. */
#if 0 /* #UDs on newer CPUs */
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0,0);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
#else
    return IEMOP_RAISE_INVALID_OPCODE();
#endif
}
15029
15030
/** Opcode 0xdb 11/5.
 * FUCOMI: unordered compare ST(0) with ST(i), setting EFLAGS; no pop. */
FNIEMOP_DEF_1(iemOp_fucomi_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fucomi st0,stN");
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fucomi_r80_by_r80, false /*fPop*/);
}
15037
15038
/** Opcode 0xdb 11/6.
 * FCOMI: ordered compare ST(0) with ST(i), setting EFLAGS; no pop. */
FNIEMOP_DEF_1(iemOp_fcomi_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomi st0,stN");
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, false /*fPop*/);
}
15045
15046
/** Opcode 0xdb.
 * Escape opcode 0xdb dispatcher: register forms (mod=3) are decoded via the
 * reg field (and for reg=4 via the full ModRM byte), memory forms dispatch
 * on the reg field to the m32i/m80r handlers. */
FNIEMOP_DEF(iemOp_EscF3)
{
    /* Record the FPU opcode byte offset (FOP bookkeeping) before fetching ModRM. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fcmovnb_stN, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fcmovne_stN, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcmovnbe_stN, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcmovnnu_stN, bRm);
            case 4:
                /* reg=4 encodes the no-operand control instructions 0xe0..0xe7. */
                switch (bRm)
                {
                    case 0xe0: return FNIEMOP_CALL(iemOp_fneni);
                    case 0xe1: return FNIEMOP_CALL(iemOp_fndisi);
                    case 0xe2: return FNIEMOP_CALL(iemOp_fnclex);
                    case 0xe3: return FNIEMOP_CALL(iemOp_fninit);
                    case 0xe4: return FNIEMOP_CALL(iemOp_fnsetpm);
                    case 0xe5: return FNIEMOP_CALL(iemOp_frstpm);
                    case 0xe6: return IEMOP_RAISE_INVALID_OPCODE();
                    case 0xe7: return IEMOP_RAISE_INVALID_OPCODE();
                    IEM_NOT_REACHED_DEFAULT_CASE_RET();
                }
                break;
            case 5: return FNIEMOP_CALL_1(iemOp_fucomi_stN, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fcomi_stN, bRm);
            case 7: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fild_m32i, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m32i,bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fist_m32i, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fistp_m32i, bRm);
            case 4: return IEMOP_RAISE_INVALID_OPCODE();
            case 5: return FNIEMOP_CALL_1(iemOp_fld_m80r, bRm);
            case 6: return IEMOP_RAISE_INVALID_OPCODE();
            case 7: return FNIEMOP_CALL_1(iemOp_fstp_m80r, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
15096
15097
15098/**
15099 * Common worker for FPU instructions working on STn and ST0, and storing the
15100 * result in STn unless IE, DE or ZE was raised.
15101 *
15102 * @param pfnAImpl Pointer to the instruction implementation (assembly).
15103 */
15104FNIEMOP_DEF_2(iemOpHlpFpu_stN_st0, uint8_t, bRm, PFNIEMAIMPLFPUR80, pfnAImpl)
15105{
15106 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
15107
15108 IEM_MC_BEGIN(3, 1);
15109 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
15110 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
15111 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
15112 IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
15113
15114 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
15115 IEM_MC_MAYBE_RAISE_FPU_XCPT();
15116
15117 IEM_MC_PREPARE_FPU_USAGE();
15118 IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, bRm & X86_MODRM_RM_MASK, pr80Value2, 0)
15119 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
15120 IEM_MC_STORE_FPU_RESULT(FpuRes, bRm & X86_MODRM_RM_MASK);
15121 IEM_MC_ELSE()
15122 IEM_MC_FPU_STACK_UNDERFLOW(bRm & X86_MODRM_RM_MASK);
15123 IEM_MC_ENDIF();
15124 IEM_MC_ADVANCE_RIP();
15125
15126 IEM_MC_END();
15127 return VINF_SUCCESS;
15128}
15129
15130
/** Opcode 0xdc 11/0. FADD ST(i),ST(0). */
FNIEMOP_DEF_1(iemOp_fadd_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fadd_r80_by_r80);
}
15137
15138
/** Opcode 0xdc 11/1. FMUL ST(i),ST(0). */
FNIEMOP_DEF_1(iemOp_fmul_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fmul_r80_by_r80);
}
15145
15146
/** Opcode 0xdc 11/4. FSUBR ST(i),ST(0). */
FNIEMOP_DEF_1(iemOp_fsubr_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fsubr_r80_by_r80);
}
15153
15154
/** Opcode 0xdc 11/5. FSUB ST(i),ST(0). */
FNIEMOP_DEF_1(iemOp_fsub_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fsub_r80_by_r80);
}
15161
15162
/** Opcode 0xdc 11/6. FDIVR ST(i),ST(0). */
FNIEMOP_DEF_1(iemOp_fdivr_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fdivr_r80_by_r80);
}
15169
15170
/** Opcode 0xdc 11/7. FDIV ST(i),ST(0). */
FNIEMOP_DEF_1(iemOp_fdiv_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0, bRm, iemAImpl_fdiv_r80_by_r80);
}
15177
15178
15179/**
15180 * Common worker for FPU instructions working on ST0 and a 64-bit floating point
15181 * memory operand, and storing the result in ST0.
15182 *
15183 * @param pfnAImpl Pointer to the instruction implementation (assembly).
15184 */
15185FNIEMOP_DEF_2(iemOpHlpFpu_ST0_m64r, uint8_t, bRm, PFNIEMAIMPLFPUR64, pfnImpl)
15186{
15187 IEM_MC_BEGIN(3, 3);
15188 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
15189 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
15190 IEM_MC_LOCAL(RTFLOAT64U, r64Factor2);
15191 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
15192 IEM_MC_ARG(PCRTFLOAT80U, pr80Factor1, 1);
15193 IEM_MC_ARG_LOCAL_REF(PRTFLOAT64U, pr64Factor2, r64Factor2, 2);
15194
15195 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
15196 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
15197 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
15198 IEM_MC_MAYBE_RAISE_FPU_XCPT();
15199
15200 IEM_MC_FETCH_MEM_R64(r64Factor2, pIemCpu->iEffSeg, GCPtrEffSrc);
15201 IEM_MC_PREPARE_FPU_USAGE();
15202 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Factor1, 0)
15203 IEM_MC_CALL_FPU_AIMPL_3(pfnImpl, pFpuRes, pr80Factor1, pr64Factor2);
15204 IEM_MC_STORE_FPU_RESULT_MEM_OP(FpuRes, 0, pIemCpu->iEffSeg, GCPtrEffSrc);
15205 IEM_MC_ELSE()
15206 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(0, pIemCpu->iEffSeg, GCPtrEffSrc);
15207 IEM_MC_ENDIF();
15208 IEM_MC_ADVANCE_RIP();
15209
15210 IEM_MC_END();
15211 return VINF_SUCCESS;
15212}
15213
15214
/** Opcode 0xdc !11/0. FADD ST(0),m64real. */
FNIEMOP_DEF_1(iemOp_fadd_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fadd m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fadd_r80_by_r64);
}
15221
15222
/** Opcode 0xdc !11/1. FMUL ST(0),m64real. */
FNIEMOP_DEF_1(iemOp_fmul_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmul m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fmul_r80_by_r64);
}
15229
15230
/** Opcode 0xdc !11/2.
 * FCOM ST(0),m64real: compare, only updating FSW (no result stored, no pop). */
FNIEMOP_DEF_1(iemOp_fcom_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcom st0,m64r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(RTFLOAT64U, r64Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val2, r64Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R64(r64Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r64, pu16Fsw, pr80Value1, pr64Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15263
15264
/** Opcode 0xdc !11/3.
 * FCOMP ST(0),m64real: same as FCOM but pops ST(0) afterwards. */
FNIEMOP_DEF_1(iemOp_fcomp_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomp st0,m64r");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(RTFLOAT64U, r64Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val2, r64Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_R64(r64Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fcom_r80_by_r64, pu16Fsw, pr80Value1, pr64Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15297
15298
/** Opcode 0xdc !11/4. FSUB ST(0),m64real. */
FNIEMOP_DEF_1(iemOp_fsub_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsub m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fsub_r80_by_r64);
}
15305
15306
/** Opcode 0xdc !11/5. FSUBR ST(0),m64real. */
FNIEMOP_DEF_1(iemOp_fsubr_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubr m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fsubr_r80_by_r64);
}
15313
15314
/** Opcode 0xdc !11/6. FDIV ST(0),m64real. */
FNIEMOP_DEF_1(iemOp_fdiv_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdiv m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fdiv_r80_by_r64);
}
15321
15322
/** Opcode 0xdc !11/7. FDIVR ST(0),m64real. */
FNIEMOP_DEF_1(iemOp_fdivr_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivr m64r");
    return FNIEMOP_CALL_2(iemOpHlpFpu_ST0_m64r, bRm, iemAImpl_fdivr_r80_by_r64);
}
15329
15330
/** Opcode 0xdc.
 * Escape opcode 0xdc dispatcher: register forms operate on ST(i),ST(0);
 * memory forms take a 64-bit real operand with ST(0). */
FNIEMOP_DEF(iemOp_EscF4)
{
    /* Record the FPU opcode byte offset (FOP bookkeeping) before fetching ModRM. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_stN_st0, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_stN_st0, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_stN, bRm); /* Marked reserved, intel behavior is that of FCOM ST(i). */
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm); /* Marked reserved, intel behavior is that of FCOMP ST(i). */
            case 4: return FNIEMOP_CALL_1(iemOp_fsubr_stN_st0, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsub_stN_st0, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdivr_stN_st0, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdiv_stN_st0, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fadd_m64r, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmul_m64r, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcom_m64r, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fcomp_m64r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fsub_m64r, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubr_m64r, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdiv_m64r, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivr_m64r, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
15367
15368
/** Opcode 0xdd !11/0.
 * FLD m64real: convert a 64-bit real from memory to R80 and push it.
 * @sa iemOp_fld_m32r */
FNIEMOP_DEF_1(iemOp_fld_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fld m64r");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(RTFLOAT64U, r64Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U, pr64Val, r64Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_FETCH_MEM_R64(r64Val, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_PREPARE_FPU_USAGE();
    /* ST(7) must be free for the push, otherwise it's a stack overflow. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fld_r64_to_r80, pFpuRes, pr64Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15400
15401
/** Opcode 0xdd !11/1 (see iemOp_EscF5; the old !11/0 note was a copy-paste).
 * FISTTP m64int: store ST(0) to memory as int64 with truncation, then pop. */
FNIEMOP_DEF_1(iemOp_fisttp_m64i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisttp m64i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int64_t *, pi64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i64, pu16Fsw, pi64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Empty ST(0): if the invalid-op exception is masked, store the
           integer indefinite value; then raise stack underflow and pop. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I64_CONST_BY_REF(pi64Dst, INT64_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15436
15437
/** Opcode 0xdd !11/2 (see iemOp_EscF5; the old !11/0 note was a copy-paste).
 * FST m64real: store ST(0) to memory as a 64-bit real; no pop. */
FNIEMOP_DEF_1(iemOp_fst_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst m64r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT64U, pr64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r64, pu16Fsw, pr64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Empty ST(0): if invalid-op is masked, store negative QNaN (real
           indefinite); then raise stack underflow (no pop). */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(pr64Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15472
15473
15474
15475
/** Opcode 0xdd !11/3 (see iemOp_EscF5; the old !11/0 note was a copy-paste).
 * FSTP m64real: store ST(0) to memory as a 64-bit real and pop. */
FNIEMOP_DEF_1(iemOp_fstp_m64r, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fstp m64r");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PRTFLOAT64U, pr64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_MEM_MAP(pr64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fst_r80_to_r64, pu16Fsw, pr64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pr64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Empty ST(0): if invalid-op is masked, store negative QNaN (real
           indefinite); then raise stack underflow and pop. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(pr64Dst);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pr64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15510
15511
/** Opcode 0xdd !11/4 (see iemOp_EscF5; the old !11/0 note was a copy-paste).
 * FRSTOR: restore the full FPU state from a 94/108-byte memory image;
 * deferred to the C implementation. */
FNIEMOP_DEF_1(iemOp_frstor, uint8_t, bRm)
{
    IEMOP_MNEMONIC("frstor m94/108byte");
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
    IEM_MC_ARG(uint8_t, iEffSeg, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
    IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
    IEM_MC_CALL_CIMPL_3(iemCImpl_frstor, enmEffOpSize, iEffSeg, GCPtrEffSrc);
    IEM_MC_END();
    return VINF_SUCCESS;
}
15529
15530
/** Opcode 0xdd !11/6 (see iemOp_EscF5; the old !11/0 note was a copy-paste).
 * FNSAVE: save the full FPU state to a 94/108-byte memory image;
 * deferred to the C implementation. */
FNIEMOP_DEF_1(iemOp_fnsave, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fnsave m94/108byte");
    IEM_MC_BEGIN(3, 0);
    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, /*=*/ pIemCpu->enmEffOpSize, 0);
    IEM_MC_ARG(uint8_t, iEffSeg, 1);
    IEM_MC_ARG(RTGCPTR, GCPtrEffDst, 2);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
    IEM_MC_ASSIGN(iEffSeg, pIemCpu->iEffSeg);
    IEM_MC_CALL_CIMPL_3(iemCImpl_fnsave, enmEffOpSize, iEffSeg, GCPtrEffDst);
    IEM_MC_END();
    return VINF_SUCCESS;

}
15549
/** Opcode 0xdd !11/7 (see iemOp_EscF5; the old !11/0 note was a copy-paste).
 * FNSTSW m16: store the FPU status word to memory. */
FNIEMOP_DEF_1(iemOp_fnstsw, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fnstsw m16");

    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(uint16_t, u16Tmp);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();

    IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
    IEM_MC_FETCH_FSW(u16Tmp);
    IEM_MC_STORE_MEM_U16(pIemCpu->iEffSeg, GCPtrEffDst, u16Tmp);
    IEM_MC_ADVANCE_RIP();

/** @todo Debug / drop a hint to the verifier that things may differ
 * from REM. Seen 0x4020 (iem) vs 0x4000 (rem) at 0008:801c6b88 booting
 * NT4SP1. (X86_FSW_PE) */
    IEM_MC_END();
    return VINF_SUCCESS;
}
15574
15575
/** Opcode 0xdd 11/0.
 * FFREE ST(i): mark the register as empty in the tag word. */
FNIEMOP_DEF_1(iemOp_ffree_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ffree stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    /* Note! C0, C1, C2 and C3 are documented as undefined, we leave them
       unmodified. */

    IEM_MC_BEGIN(0, 0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
    IEM_MC_FPU_STACK_FREE(bRm & X86_MODRM_RM_MASK);
    IEM_MC_UPDATE_FPU_OPCODE_IP();

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15597
15598
/** Opcode 0xdd 11/2 (see iemOp_EscF5; the old 11/1 note was off by one).
 * FST ST(i): copy ST(0) to ST(i); no pop. */
FNIEMOP_DEF_1(iemOp_fst_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fst st0,stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 2);
    IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        /* Wrap the ST(0) value in a result with a zero FSW so the normal
           result-store path copies it into ST(i). */
        IEM_MC_SET_FPU_RESULT(FpuRes, 0 /*FSW*/, pr80Value);
        IEM_MC_STORE_FPU_RESULT(FpuRes, bRm & X86_MODRM_RM_MASK);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW(bRm & X86_MODRM_RM_MASK);
    IEM_MC_ENDIF();

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15623
15624
15625/** Opcode 0xdd 11/3. */
15626FNIEMOP_DEF_1(iemOp_fucom_stN_st0, uint8_t, bRm)
15627{
15628 IEMOP_MNEMONIC("fcom st0,stN");
15629 return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN, bRm, iemAImpl_fucom_r80_by_r80);
15630}
15631
15632
15633/** Opcode 0xdd 11/4. */
15634FNIEMOP_DEF_1(iemOp_fucomp_stN, uint8_t, bRm)
15635{
15636 IEMOP_MNEMONIC("fcomp st0,stN");
15637 return FNIEMOP_CALL_2(iemOpHlpFpuNoStore_st0_stN_pop, bRm, iemAImpl_fucom_r80_by_r80);
15638}
15639
15640
/** Opcode 0xdd.
 * Escape opcode 0xdd dispatcher: register forms (FFREE/FST/FSTP/FUCOM etc.)
 * vs. memory forms (m64 real/int loads and stores, FRSTOR/FNSAVE/FNSTSW). */
FNIEMOP_DEF(iemOp_EscF5)
{
    /* Record the FPU opcode byte offset (FOP bookkeeping) before fetching ModRM. */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_ffree_stN, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm); /* Reserved, intel behavior is that of XCHG ST(i). */
            case 2: return FNIEMOP_CALL_1(iemOp_fst_stN, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fucom_stN_st0,bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fucomp_stN, bRm);
            case 6: return IEMOP_RAISE_INVALID_OPCODE();
            case 7: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fld_m64r, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m64i, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fst_m64r, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_m64r, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_frstor, bRm);
            case 5: return IEMOP_RAISE_INVALID_OPCODE();
            case 6: return FNIEMOP_CALL_1(iemOp_fnsave, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fnstsw, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
15677
15678
/** Opcode 0xde 11/0. FADDP ST(i),ST(0): add then pop. */
FNIEMOP_DEF_1(iemOp_faddp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("faddp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fadd_r80_by_r80);
}
15685
15686
/** Opcode 0xde 11/1 (the old 11/0 note was a copy-paste; the /1 slot is the
 * multiply form throughout this file). FMULP ST(i),ST(0): multiply then pop. */
FNIEMOP_DEF_1(iemOp_fmulp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fmulp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fmul_r80_by_r80);
}
15693
15694
15695/** Opcode 0xde 0xd9. */
15696FNIEMOP_DEF(iemOp_fcompp)
15697{
15698 IEMOP_MNEMONIC("fucompp st0,stN");
15699 return FNIEMOP_CALL_1(iemOpHlpFpuNoStore_st0_stN_pop_pop, iemAImpl_fcom_r80_by_r80);
15700}
15701
15702
/** Opcode 0xde 11/4. FSUBRP ST(i),ST(0): reverse subtract then pop. */
FNIEMOP_DEF_1(iemOp_fsubrp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubrp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fsubr_r80_by_r80);
}
15709
15710
/** Opcode 0xde 11/5. FSUBP ST(i),ST(0): subtract then pop. */
FNIEMOP_DEF_1(iemOp_fsubp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fsubp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fsub_r80_by_r80);
}
15717
15718
/** Opcode 0xde 11/6. FDIVRP ST(i),ST(0): reverse divide then pop. */
FNIEMOP_DEF_1(iemOp_fdivrp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivrp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fdivr_r80_by_r80);
}
15725
15726
/** Opcode 0xde 11/7. FDIVP ST(i),ST(0): divide then pop. */
FNIEMOP_DEF_1(iemOp_fdivp_stN_st0, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fdivp stN,st0");
    return FNIEMOP_CALL_2(iemOpHlpFpu_stN_st0_pop, bRm, iemAImpl_fdiv_r80_by_r80);
}
15733
15734
15735/**
15736 * Common worker for FPU instructions working on ST0 and an m16i, and storing
15737 * the result in ST0.
15738 *
15739 * @param pfnAImpl Pointer to the instruction implementation (assembly).
15740 */
15741FNIEMOP_DEF_2(iemOpHlpFpu_st0_m16i, uint8_t, bRm, PFNIEMAIMPLFPUI16, pfnAImpl)
15742{
15743 IEM_MC_BEGIN(3, 3);
15744 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
15745 IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
15746 IEM_MC_LOCAL(int16_t, i16Val2);
15747 IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
15748 IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
15749 IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);
15750
15751 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
15752 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
15753
15754 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
15755 IEM_MC_MAYBE_RAISE_FPU_XCPT();
15756 IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);
15757
15758 IEM_MC_PREPARE_FPU_USAGE();
15759 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
15760 IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pi16Val2);
15761 IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
15762 IEM_MC_ELSE()
15763 IEM_MC_FPU_STACK_UNDERFLOW(0);
15764 IEM_MC_ENDIF();
15765 IEM_MC_ADVANCE_RIP();
15766
15767 IEM_MC_END();
15768 return VINF_SUCCESS;
15769}
15770
15771
/** Opcode 0xde !11/0. FIADD ST(0),m16int. */
FNIEMOP_DEF_1(iemOp_fiadd_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fiadd m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fiadd_r80_by_i16);
}
15778
15779
/** Opcode 0xde !11/1. FIMUL ST(0),m16int. */
FNIEMOP_DEF_1(iemOp_fimul_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fimul m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fimul_r80_by_i16);
}
15786
15787
/** Opcode 0xde !11/2.
 * FICOM ST(0),m16int: compare, only updating FSW (no result stored, no pop). */
FNIEMOP_DEF_1(iemOp_ficom_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficom st0,m16i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(int16_t, i16Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i16, pu16Fsw, pr80Value1, pi16Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15820
15821
/** Opcode 0xde !11/3.
 * FICOMP ST(0),m16int: same as FICOM but pops ST(0) afterwards. */
FNIEMOP_DEF_1(iemOp_ficomp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ficomp st0,m16i");

    IEM_MC_BEGIN(3, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_LOCAL(int16_t, i16Val2);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
    IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val2, i16Val2, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I16(i16Val2, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value1, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_ficom_r80_by_i16, pu16Fsw, pr80Value1, pi16Val2);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
15854
15855
/** Opcode 0xde !11/4.
 * FISUB m16i - subtract a signed 16-bit integer memory operand from ST(0). */
FNIEMOP_DEF_1(iemOp_fisub_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisub m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fisub_r80_by_i16);
}
15862
15863
/** Opcode 0xde !11/5.
 * FISUBR m16i - reverse subtract: ST(0) = m16i - ST(0). */
FNIEMOP_DEF_1(iemOp_fisubr_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisubr m16i");
    return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fisubr_r80_by_i16);
}
15870
15871
15872/** Opcode 0xde !11/6. */
15873FNIEMOP_DEF_1(iemOp_fidiv_m16i, uint8_t, bRm)
15874{
15875 IEMOP_MNEMONIC("fiadd m16i");
15876 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fidiv_r80_by_i16);
15877}
15878
15879
15880/** Opcode 0xde !11/7. */
15881FNIEMOP_DEF_1(iemOp_fidivr_m16i, uint8_t, bRm)
15882{
15883 IEMOP_MNEMONIC("fiadd m16i");
15884 return FNIEMOP_CALL_2(iemOpHlpFpu_st0_m16i, bRm, iemAImpl_fidivr_r80_by_i16);
15885}
15886
15887
/** Opcode 0xde.
 * x87 escape group F6: register forms (mod==3) are the *P arithmetic
 * instructions operating on ST(i),ST(0) with a pop; memory forms are the
 * 16-bit-integer arithmetic instructions (FIADD..FIDIVR m16i). */
FNIEMOP_DEF(iemOp_EscF6)
{
    /* Remember the FPU opcode byte offset for FOP reporting (offOpcode is
       already past the 0xde byte here, hence the -1). */
    pIemCpu->offFpuOpcode = pIemCpu->offOpcode - 1;
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* Register forms, dispatched on the reg field of ModR/M. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_faddp_stN_st0, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fmulp_stN_st0, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fcomp_stN, bRm);
            case 3: if (bRm == 0xd9)
                        /* Only DE D9 is valid in this slot: FCOMPP. */
                        return FNIEMOP_CALL(iemOp_fcompp);
                    return IEMOP_RAISE_INVALID_OPCODE();
            case 4: return FNIEMOP_CALL_1(iemOp_fsubrp_stN_st0, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fsubp_stN_st0, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fdivrp_stN_st0, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fdivp_stN_st0, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory forms: integer (m16i) arithmetic. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fiadd_m16i, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fimul_m16i, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_ficom_m16i, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_ficomp_m16i, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fisub_m16i, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fisubr_m16i, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fidiv_m16i, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fidivr_m16i, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
15926
15927
/** Opcode 0xdf 11/0.
 * FFREEP ST(i) - undocumented instruction, emulated as FFREE ST(i) followed
 * by FINCSTP (frees the register, then pops by incrementing TOP). */
FNIEMOP_DEF_1(iemOp_ffreep_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("ffreep stN");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 0);

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE();
    /* Mark the selected register's tag as empty, then bump TOP (the pop). */
    IEM_MC_FPU_STACK_FREE(bRm & X86_MODRM_RM_MASK);
    IEM_MC_FPU_STACK_INC_TOP();
    IEM_MC_UPDATE_FPU_OPCODE_IP();

    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15949
15950
/** Opcode 0xdf 0xe0.
 * FNSTSW AX - store the FPU status word in AX without checking for pending
 * FPU exceptions (the no-wait form; note no IEM_MC_MAYBE_RAISE_FPU_XCPT). */
FNIEMOP_DEF(iemOp_fnstsw_ax)
{
    IEMOP_MNEMONIC("fnstsw ax");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Tmp);
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
    IEM_MC_FETCH_FSW(u16Tmp);
    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Tmp);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
15967
15968
15969/** Opcode 0xdf 11/5. */
15970FNIEMOP_DEF_1(iemOp_fucomip_st0_stN, uint8_t, bRm)
15971{
15972 IEMOP_MNEMONIC("fcomip st0,stN");
15973 return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, true /*fPop*/);
15974}
15975
15976
/** Opcode 0xdf 11/6.
 * FCOMIP ST0,ST(i) - ordered compare setting ZF/PF/CF in EFLAGS, then pop.
 * Deferred to the shared fcomi/fucomi C implementation with fPop=true. */
FNIEMOP_DEF_1(iemOp_fcomip_st0_stN, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fcomip st0,stN");
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_fcomi_fucomi, bRm & X86_MODRM_RM_MASK, iemAImpl_fcomi_r80_by_r80, true /*fPop*/);
}
15983
15984
/** Opcode 0xdf !11/0.
 * FILD m16i - convert a signed 16-bit integer memory operand to R80 and push
 * it onto the FPU stack. */
FNIEMOP_DEF_1(iemOp_fild_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fild m16i");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(int16_t, i16Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(int16_t const *, pi16Val, i16Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I16(i16Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    /* A push goes into relative register 7 (ST(-1)); it must be empty or we
       have a stack overflow. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fild_i16_to_r80, pFpuRes, pi16Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
16016
16017
/** Opcode 0xdf !11/1.
 * FISTTP m16i (SSE3) - store ST(0) to memory as a signed 16-bit integer using
 * truncation (chop) regardless of RC, then pop the FPU stack. */
FNIEMOP_DEF_1(iemOp_fisttp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fisttp m16i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int16_t *, pi16Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination for writing up front so memory faults precede any
       FPU state changes. */
    IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fistt_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
        /* Commit only if FSW allows (masked/no unhandled exception). */
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: with IM masked, store the integer indefinite. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
16052
16053
16054/** Opcode 0xdf !11/2. */
16055FNIEMOP_DEF_1(iemOp_fist_m16i, uint8_t, bRm)
16056{
16057 IEMOP_MNEMONIC("fistp m16i");
16058 IEM_MC_BEGIN(3, 2);
16059 IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
16060 IEM_MC_LOCAL(uint16_t, u16Fsw);
16061 IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
16062 IEM_MC_ARG(int16_t *, pi16Dst, 1);
16063 IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);
16064
16065 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
16066 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
16067 IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
16068 IEM_MC_MAYBE_RAISE_FPU_XCPT();
16069
16070 IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
16071 IEM_MC_PREPARE_FPU_USAGE();
16072 IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
16073 IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
16074 IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
16075 IEM_MC_UPDATE_FSW_WITH_MEM_OP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
16076 IEM_MC_ELSE()
16077 IEM_MC_IF_FCW_IM()
16078 IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
16079 IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
16080 IEM_MC_ENDIF();
16081 IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
16082 IEM_MC_ENDIF();
16083 IEM_MC_ADVANCE_RIP();
16084
16085 IEM_MC_END();
16086 return VINF_SUCCESS;
16087}
16088
16089
/** Opcode 0xdf !11/3.
 * FISTP m16i - store ST(0) to memory as a signed 16-bit integer (rounding per
 * FCW.RC) and pop the FPU stack. */
FNIEMOP_DEF_1(iemOp_fistp_m16i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fistp m16i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int16_t *, pi16Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination first so memory faults precede FPU state changes. */
    IEM_MC_MEM_MAP(pi16Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i16, pu16Fsw, pi16Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi16Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: with IM masked, store the integer indefinite. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I16_CONST_BY_REF(pi16Dst, INT16_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi16Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
16124
16125
/** Opcode 0xdf !11/4.
 * FBLD m80bcd - load packed BCD; not implemented yet (stub). */
FNIEMOP_STUB_1(iemOp_fbld_m80d, uint8_t, bRm);
16128
16129
/** Opcode 0xdf !11/5.
 * FILD m64i - convert a signed 64-bit integer memory operand to R80 and push
 * it onto the FPU stack. */
FNIEMOP_DEF_1(iemOp_fild_m64i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fild m64i");

    IEM_MC_BEGIN(2, 3);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
    IEM_MC_LOCAL(int64_t, i64Val);
    IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
    IEM_MC_ARG_LOCAL_REF(int64_t const *, pi64Val, i64Val, 1);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();

    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();
    IEM_MC_FETCH_MEM_I64(i64Val, pIemCpu->iEffSeg, GCPtrEffSrc);

    IEM_MC_PREPARE_FPU_USAGE();
    /* Relative register 7 must be empty for the push, else stack overflow. */
    IEM_MC_IF_FPUREG_IS_EMPTY(7)
        IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_fild_i64_to_r80, pFpuRes, pi64Val);
        IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ELSE()
        IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pIemCpu->iEffSeg, GCPtrEffSrc);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
16161
16162
/** Opcode 0xdf !11/6.
 * FBSTP m80bcd - store as packed BCD and pop; not implemented yet (stub). */
FNIEMOP_STUB_1(iemOp_fbstp_m80d, uint8_t, bRm);
16165
16166
/** Opcode 0xdf !11/7.
 * FISTP m64i - store ST(0) to memory as a signed 64-bit integer (rounding per
 * FCW.RC) and pop the FPU stack. */
FNIEMOP_DEF_1(iemOp_fistp_m64i, uint8_t, bRm)
{
    IEMOP_MNEMONIC("fistp m64i");
    IEM_MC_BEGIN(3, 2);
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
    IEM_MC_LOCAL(uint16_t, u16Fsw);
    IEM_MC_ARG_LOCAL_REF(uint16_t *, pu16Fsw, u16Fsw, 0);
    IEM_MC_ARG(int64_t *, pi64Dst, 1);
    IEM_MC_ARG(PCRTFLOAT80U, pr80Value, 2);

    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
    IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
    IEM_MC_MAYBE_RAISE_FPU_XCPT();

    /* Map the destination first so memory faults precede FPU state changes. */
    IEM_MC_MEM_MAP(pi64Dst, IEM_ACCESS_DATA_W, pIemCpu->iEffSeg, GCPtrEffDst, 1 /*arg*/);
    IEM_MC_PREPARE_FPU_USAGE();
    IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(pr80Value, 0)
        IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fist_r80_to_i64, pu16Fsw, pi64Dst, pr80Value);
        IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(pi64Dst, IEM_ACCESS_DATA_W, u16Fsw);
        IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(u16Fsw, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ELSE()
        /* Stack underflow: with IM masked, store the integer indefinite. */
        IEM_MC_IF_FCW_IM()
            IEM_MC_STORE_MEM_I64_CONST_BY_REF(pi64Dst, INT64_MIN /* (integer indefinite) */);
            IEM_MC_MEM_COMMIT_AND_UNMAP(pi64Dst, IEM_ACCESS_DATA_W);
        IEM_MC_ENDIF();
        IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(UINT8_MAX, pIemCpu->iEffSeg, GCPtrEffDst);
    IEM_MC_ENDIF();
    IEM_MC_ADVANCE_RIP();

    IEM_MC_END();
    return VINF_SUCCESS;
}
16201
16202
/** Opcode 0xdf.
 * x87 escape group F7: register forms include FFREEP, FNSTSW AX and the
 * FUCOMIP/FCOMIP compares; memory forms are the 16/64-bit integer load/store
 * instructions and the BCD load/store. */
FNIEMOP_DEF(iemOp_EscF7)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_ffreep_stN, bRm); /* ffree + pop afterwards, since forever according to AMD. */
            case 1: return FNIEMOP_CALL_1(iemOp_fxch_stN, bRm); /* Reserved, behaves like FXCH ST(i) on intel. */
            case 2: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved, behaves like FSTP ST(i) on intel. */
            case 3: return FNIEMOP_CALL_1(iemOp_fstp_stN, bRm); /* Reserved, behaves like FSTP ST(i) on intel. */
            case 4: if (bRm == 0xe0)
                        /* Only DF E0 is valid in this slot: FNSTSW AX. */
                        return FNIEMOP_CALL(iemOp_fnstsw_ax);
                    return IEMOP_RAISE_INVALID_OPCODE();
            case 5: return FNIEMOP_CALL_1(iemOp_fucomip_st0_stN, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fcomip_st0_stN, bRm);
            case 7: return IEMOP_RAISE_INVALID_OPCODE();
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* Memory forms. */
        switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
        {
            case 0: return FNIEMOP_CALL_1(iemOp_fild_m16i, bRm);
            case 1: return FNIEMOP_CALL_1(iemOp_fisttp_m16i, bRm);
            case 2: return FNIEMOP_CALL_1(iemOp_fist_m16i, bRm);
            case 3: return FNIEMOP_CALL_1(iemOp_fistp_m16i, bRm);
            case 4: return FNIEMOP_CALL_1(iemOp_fbld_m80d, bRm);
            case 5: return FNIEMOP_CALL_1(iemOp_fild_m64i, bRm);
            case 6: return FNIEMOP_CALL_1(iemOp_fbstp_m80d, bRm);
            case 7: return FNIEMOP_CALL_1(iemOp_fistp_m64i, bRm);
            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
16240
16241
/** Opcode 0xe0.
 * LOOPNE/LOOPNZ Jb - decrement the counter register and jump if it is
 * non-zero AND ZF is clear. The counter width (CX/ECX/RCX) follows the
 * effective *address* size, not the operand size. */
FNIEMOP_DEF(iemOp_loopne_Jb)
{
    IEMOP_MNEMONIC("loopne Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16288
16289
/** Opcode 0xe1.
 * LOOPE/LOOPZ Jb - decrement the counter register and jump if it is non-zero
 * AND ZF is set. Counter width follows the effective address size. */
FNIEMOP_DEF(iemOp_loope_Jb)
{
    IEMOP_MNEMONIC("loope Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
            IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
            IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
            IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(X86_EFL_ZF) {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ELSE() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16336
16337
/** Opcode 0xe2.
 * LOOP Jb - decrement the counter register and jump if it is non-zero.
 * Counter width follows the effective address size. Includes a fast path for
 * the degenerate self-branch ("loop $"): when the displacement equals minus
 * the instruction length, the loop would just spin the counter down, so the
 * counter is zeroed directly instead of iterating. */
FNIEMOP_DEF(iemOp_loop_Jb)
{
    IEMOP_MNEMONIC("loop Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    /** @todo Check out the #GP case if EIP < CS.Base or EIP > CS.Limit when
     * using the 32-bit operand size override.  How can that be restarted?  See
     * weird pseudo code in intel manual. */
    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            /* offOpcode is the total instruction length at this point, so
               -offOpcode == i8Imm detects a jump to the instruction itself. */
            if (-(int8_t)pIemCpu->offOpcode != i8Imm)
            {
                IEM_MC_SUB_GREG_U16(X86_GREG_xCX, 1);
                IEM_MC_IF_CX_IS_NZ() {
                    IEM_MC_REL_JMP_S8(i8Imm);
                } IEM_MC_ELSE() {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ENDIF();
            }
            else
            {
                IEM_MC_STORE_GREG_U16_CONST(X86_GREG_xCX, 0);
                IEM_MC_ADVANCE_RIP();
            }
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            if (-(int8_t)pIemCpu->offOpcode != i8Imm)
            {
                IEM_MC_SUB_GREG_U32(X86_GREG_xCX, 1);
                IEM_MC_IF_ECX_IS_NZ() {
                    IEM_MC_REL_JMP_S8(i8Imm);
                } IEM_MC_ELSE() {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ENDIF();
            }
            else
            {
                IEM_MC_STORE_GREG_U32_CONST(X86_GREG_xCX, 0);
                IEM_MC_ADVANCE_RIP();
            }
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            if (-(int8_t)pIemCpu->offOpcode != i8Imm)
            {
                IEM_MC_SUB_GREG_U64(X86_GREG_xCX, 1);
                IEM_MC_IF_RCX_IS_NZ() {
                    IEM_MC_REL_JMP_S8(i8Imm);
                } IEM_MC_ELSE() {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ENDIF();
            }
            else
            {
                IEM_MC_STORE_GREG_U64_CONST(X86_GREG_xCX, 0);
                IEM_MC_ADVANCE_RIP();
            }
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16411
16412
/** Opcode 0xe3.
 * JCXZ/JECXZ/JRCXZ Jb - jump if the counter register is zero; the register
 * tested (CX/ECX/RCX) is selected by the effective address size.  The counter
 * is only tested, never modified. */
FNIEMOP_DEF(iemOp_jecxz_Jb)
{
    IEMOP_MNEMONIC("jecxz Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    switch (pIemCpu->enmEffAddrMode)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0,0);
            /* Note the inverted branches: fall through when non-zero. */
            IEM_MC_IF_CX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_ECX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0,0);
            IEM_MC_IF_RCX_IS_NZ() {
                IEM_MC_ADVANCE_RIP();
            } IEM_MC_ELSE() {
                IEM_MC_REL_JMP_S8(i8Imm);
            } IEM_MC_ENDIF();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16456
16457
16458/** Opcode 0xe4 */
16459FNIEMOP_DEF(iemOp_in_AL_Ib)
16460{
16461 IEMOP_MNEMONIC("in eAX,Ib");
16462 uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
16463 IEMOP_HLP_NO_LOCK_PREFIX();
16464 return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, 1);
16465}
16466
16467
/** Opcode 0xe5.
 * IN eAX,Ib - read a word/dword from the immediate 8-bit I/O port into
 * AX/EAX, the access size being picked by the effective operand size. */
FNIEMOP_DEF(iemOp_in_eAX_Ib)
{
    IEMOP_MNEMONIC("in eAX,Ib");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_in, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
16476
16477
/** Opcode 0xe6.
 * OUT Ib,AL - write AL to the immediate 8-bit I/O port (access size 1). */
FNIEMOP_DEF(iemOp_out_Ib_AL)
{
    IEMOP_MNEMONIC("out Ib,AL");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, 1);
}
16486
16487
/** Opcode 0xe7.
 * OUT Ib,eAX - write AX/EAX to the immediate 8-bit I/O port, the access size
 * being picked by the effective operand size. */
FNIEMOP_DEF(iemOp_out_Ib_eAX)
{
    IEMOP_MNEMONIC("out Ib,eAX");
    uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_out, u8Imm, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
16496
16497
/** Opcode 0xe8.
 * CALL Jv - near relative call.  The displacement is fetched at the effective
 * operand size (16/32 bits; in 64-bit mode a sign-extended 32-bit value) and
 * handed to the size-specific C implementation. */
FNIEMOP_DEF(iemOp_call_Jv)
{
    IEMOP_MNEMONIC("call Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
            /* Reinterpret as signed: relative displacement. */
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_16, (int16_t)u16Imm);
        }

        case IEMMODE_32BIT:
        {
            uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_32, (int32_t)u32Imm);
        }

        case IEMMODE_64BIT:
        {
            /* 64-bit mode uses a 32-bit immediate sign-extended to 64 bits. */
            uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
            return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_call_rel_64, u64Imm);
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16526
16527
/** Opcode 0xe9.
 * JMP Jv - near relative jump with a 16- or 32-bit displacement; in 64-bit
 * mode the 32-bit form is used (sign-extended by IEM_MC_REL_JMP_S32). */
FNIEMOP_DEF(iemOp_jmp_Jv)
{
    IEMOP_MNEMONIC("jmp Jv");
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            int16_t i16Imm; IEM_OPCODE_GET_NEXT_S16(&i16Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S16(i16Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        /* 64-bit mode shares the 32-bit displacement encoding. */
        case IEMMODE_64BIT:
        case IEMMODE_32BIT:
        {
            int32_t i32Imm; IEM_OPCODE_GET_NEXT_S32(&i32Imm);
            IEM_MC_BEGIN(0, 0);
            IEM_MC_REL_JMP_S32(i32Imm);
            IEM_MC_END();
            return VINF_SUCCESS;
        }

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16557
16558
/** Opcode 0xea.
 * JMP Ap - direct far jump (ptr16:16 / ptr16:32 immediate).  Invalid in
 * 64-bit mode. */
FNIEMOP_DEF(iemOp_jmp_Ap)
{
    IEMOP_MNEMONIC("jmp Ap");
    IEMOP_HLP_NO_64BIT();

    /* Decode the far pointer address and pass it on to the far call C implementation. */
    uint32_t offSeg;
    if (pIemCpu->enmEffOpSize != IEMMODE_16BIT)
        IEM_OPCODE_GET_NEXT_U32(&offSeg);
    else
        IEM_OPCODE_GET_NEXT_U16_ZX_U32(&offSeg);
    /* The selector follows the offset in the instruction stream. */
    uint16_t uSel;  IEM_OPCODE_GET_NEXT_U16(&uSel);
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_3(iemCImpl_FarJmp, uSel, offSeg, pIemCpu->enmEffOpSize);
}
16575
16576
/** Opcode 0xeb.
 * JMP Jb - short relative jump with a sign-extended 8-bit displacement. */
FNIEMOP_DEF(iemOp_jmp_Jb)
{
    IEMOP_MNEMONIC("jmp Jb");
    int8_t i8Imm; IEM_OPCODE_GET_NEXT_S8(&i8Imm);
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    IEM_MC_BEGIN(0, 0);
    IEM_MC_REL_JMP_S8(i8Imm);
    IEM_MC_END();
    return VINF_SUCCESS;
}
16590
16591
/** Opcode 0xec.
 * IN AL,DX - read one byte from the I/O port in DX into AL. */
FNIEMOP_DEF(iemOp_in_AL_DX)
{
    IEMOP_MNEMONIC("in AL,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, 1);
}
16599
16600
/** Opcode 0xed.
 * IN eAX,DX - read a word/dword from the I/O port in DX into AX/EAX.
 * NOTE(review): the function name looks like it should be iemOp_in_eAX_DX,
 * but it is referenced from the opcode table elsewhere, so renaming would be
 * a wider change. */
FNIEMOP_DEF(iemOp_eAX_DX)
{
    IEMOP_MNEMONIC("in eAX,DX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_in_eAX_DX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
16608
16609
/** Opcode 0xee.
 * OUT DX,AL - write AL to the I/O port in DX. */
FNIEMOP_DEF(iemOp_out_DX_AL)
{
    IEMOP_MNEMONIC("out DX,AL");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, 1);
}
16617
16618
/** Opcode 0xef.
 * OUT DX,eAX - write AX/EAX to the I/O port in DX, the access size being
 * picked by the effective operand size. */
FNIEMOP_DEF(iemOp_out_DX_eAX)
{
    IEMOP_MNEMONIC("out DX,eAX");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_1(iemCImpl_out_DX_eAX, pIemCpu->enmEffOpSize == IEMMODE_16BIT ? 2 : 4);
}
16626
16627
/** Opcode 0xf0.
 * LOCK prefix - records the prefix and restarts decoding with the next
 * opcode byte via the one-byte dispatch table. */
FNIEMOP_DEF(iemOp_lock)
{
    /* A REX prefix must come last, so any earlier one is voided here. */
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("lock");
    pIemCpu->fPrefixes |= IEM_OP_PRF_LOCK;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
16637
16638
/** Opcode 0xf1.
 * INT1/ICEBP - raises a \#DB; fIsBpInstr=false so it is not treated like the
 * INT3 breakpoint instruction. */
FNIEMOP_DEF(iemOp_int_1)
{
    IEMOP_MNEMONIC("int1"); /* icebp */
    IEMOP_HLP_MIN_386(); /** @todo does not generate #UD on 286, or so they say... */
    /** @todo testcase! */
    return IEM_MC_DEFER_TO_CIMPL_2(iemCImpl_int, X86_XCPT_DB, false /*fIsBpInstr*/);
}
16647
16648
/** Opcode 0xf2.
 * REPNE/REPNZ prefix - records the prefix (clearing a prior REPE) and
 * restarts decoding with the next opcode byte. */
FNIEMOP_DEF(iemOp_repne)
{
    /* This overrides any previous REPE prefix. */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPZ;
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repne");
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPNZ;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
16660
16661
/** Opcode 0xf3.
 * REP/REPE/REPZ prefix - records the prefix (clearing a prior REPNE) and
 * restarts decoding with the next opcode byte. */
FNIEMOP_DEF(iemOp_repe)
{
    /* This overrides any previous REPNE prefix. */
    pIemCpu->fPrefixes &= ~IEM_OP_PRF_REPNZ;
    IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE("repe");
    pIemCpu->fPrefixes |= IEM_OP_PRF_REPZ;

    uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
    return FNIEMOP_CALL(g_apfnOneByteMap[b]);
}
16673
16674
/** Opcode 0xf4.
 * HLT - halt the CPU; privilege checking is done in the C implementation. */
FNIEMOP_DEF(iemOp_hlt)
{
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
}
16681
16682
/** Opcode 0xf5.
 * CMC - complement (toggle) the carry flag; no other flags are touched. */
FNIEMOP_DEF(iemOp_cmc)
{
    IEMOP_MNEMONIC("cmc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_FLIP_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
16694
16695
/**
 * Common implementation of 'inc/dec/not/neg Eb'.
 *
 * Handles both the register (mod==3) and memory forms; for memory the locked
 * worker is used when a LOCK prefix is present.
 *
 * @param   bRm     The RM byte.
 * @param   pImpl   The instruction implementation (normal + locked workers).
 */
FNIEMOP_DEF_2(iemOpCommonUnaryEb, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
{
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG(uint8_t *,   pu8Dst, 0);
        IEM_MC_ARG(uint32_t *,  pEFlags, 1);
        /* uRexB folds in the REX.B extension of the r/m register number. */
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEM_MC_BEGIN(2, 2);
        IEM_MC_ARG(uint8_t *,       pu8Dst,          0);
        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 1);
        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        /* Read-write mapping: the unary ops update the operand in place. */
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU8, pu8Dst, pEFlags);
        else
            IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU8, pu8Dst, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
16739
16740
/**
 * Common implementation of 'inc/dec/not/neg Ev'.
 *
 * Register forms are forwarded to the common general-register unary worker;
 * the memory forms are expanded here per effective operand size, honouring
 * the LOCK prefix via the locked workers.
 *
 * @param   bRm     The RM byte.
 * @param   pImpl   The instruction implementation (normal + locked workers).
 */
FNIEMOP_DEF_2(iemOpCommonUnaryEv, uint8_t, bRm, PCIEMOPUNARYSIZES, pImpl)
{
    /* Registers are handled by a common worker. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_2(iemOpCommonUnaryGReg, pImpl, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /* Memory we do here. */
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(2, 2);
            IEM_MC_ARG(uint16_t *,      pu16Dst,         0);
            IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 1);
            IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            /* Read-write mapping: operand is updated in place. */
            IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
            IEM_MC_FETCH_EFLAGS(EFlags);
            if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU16, pu16Dst, pEFlags);
            else
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU16, pu16Dst, pEFlags);

            IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
            IEM_MC_COMMIT_EFLAGS(EFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(2, 2);
            IEM_MC_ARG(uint32_t *,      pu32Dst,         0);
            IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 1);
            IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
            IEM_MC_FETCH_EFLAGS(EFlags);
            if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU32, pu32Dst, pEFlags);
            else
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU32, pu32Dst, pEFlags);

            IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
            IEM_MC_COMMIT_EFLAGS(EFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(2, 2);
            IEM_MC_ARG(uint64_t *,      pu64Dst,         0);
            IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 1);
            IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
            IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
            IEM_MC_FETCH_EFLAGS(EFlags);
            if (!(pIemCpu->fPrefixes & IEM_OP_PRF_LOCK))
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnNormalU64, pu64Dst, pEFlags);
            else
                IEM_MC_CALL_VOID_AIMPL_2(pImpl->pfnLockedU64, pu64Dst, pEFlags);

            IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_RW);
            IEM_MC_COMMIT_EFLAGS(EFlags);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
16819
16820
/** Opcode 0xf6 /0.
 * TEST Eb,Ib - AND the byte operand with an immediate, updating flags only
 * (the destination is never written, hence the read-only memory mapping). */
FNIEMOP_DEF_1(iemOp_grp3_test_Eb, uint8_t, bRm)
{
    IEMOP_MNEMONIC("test Eb,Ib");
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEMOP_HLP_NO_LOCK_PREFIX();

        IEM_MC_BEGIN(3, 0);
        IEM_MC_ARG(uint8_t *,       pu8Dst,             0);
        IEM_MC_ARG_CONST(uint8_t,   u8Src,/*=*/u8Imm,   1);
        IEM_MC_ARG(uint32_t *,      pEFlags,            2);
        IEM_MC_REF_GREG_U8(pu8Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint8_t *,       pu8Dst,          0);
        IEM_MC_ARG(uint8_t,         u8Src,           1);
        IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);

        /* The immediate byte follows the ModR/M operand (cbImm=1 here). */
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 1);
        uint8_t u8Imm; IEM_OPCODE_GET_NEXT_U8(&u8Imm);
        IEM_MC_ASSIGN(u8Src, u8Imm);
        /* TEST only reads the destination, so a read-only mapping suffices. */
        IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_FETCH_EFLAGS(EFlags);
        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u8, pu8Dst, u8Src, pEFlags);

        IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_R);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
16868
16869
/** Opcode 0xf7 /0 - test Ev,Iz (word/dword/qword register or memory tested
 *  against an immediate; in 64-bit mode the immediate is a sign-extended
 *  dword.  Flags only, destination is not written). */
FNIEMOP_DEF_1(iemOp_grp3_test_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("test Ev,Iv");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    /* AF is left undefined by TEST; let the verifier ignore it. */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_AF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint16_t *,      pu16Dst,                0);
                IEM_MC_ARG_CONST(uint16_t,  u16Src,/*=*/u16Imm,    1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint32_t *,      pu32Dst,                0);
                IEM_MC_ARG_CONST(uint32_t,  u32Src,/*=*/u32Imm,    1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U32(pu32Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);
                /* No clearing the high dword here - test doesn't write back the result. */
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                /* The qword form takes a sign-extended 32-bit immediate. */
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_BEGIN(3, 0);
                IEM_MC_ARG(uint64_t *,      pu64Dst,                0);
                IEM_MC_ARG_CONST(uint64_t,  u64Src,/*=*/u64Imm,    1);
                IEM_MC_ARG(uint32_t *,      pEFlags,                2);
                IEM_MC_REF_GREG_U64(pu64Dst, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint16_t *,      pu16Dst,            0);
                IEM_MC_ARG(uint16_t,        u16Src,             1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* A 2 byte immediate follows the ModR/M bytes (cbImm=2 hint). */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 2);
                uint16_t u16Imm; IEM_OPCODE_GET_NEXT_U16(&u16Imm);
                IEM_MC_ASSIGN(u16Src, u16Imm);
                /* Read-only mapping: TEST does not write the destination. */
                IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u16, pu16Dst, u16Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint32_t *,      pu32Dst,            0);
                IEM_MC_ARG(uint32_t,        u32Src,             1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* A 4 byte immediate follows the ModR/M bytes (cbImm=4 hint). */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint32_t u32Imm; IEM_OPCODE_GET_NEXT_U32(&u32Imm);
                IEM_MC_ASSIGN(u32Src, u32Imm);
                IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u32, pu32Dst, u32Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEM_MC_BEGIN(3, 2);
                IEM_MC_ARG(uint64_t *,      pu64Dst,            0);
                IEM_MC_ARG(uint64_t,        u64Src,             1);
                IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags,    2);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);

                /* A 4 byte (sign-extended) immediate follows (cbImm=4 hint). */
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 4);
                uint64_t u64Imm; IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
                IEM_MC_ASSIGN(u64Src, u64Imm);
                IEM_MC_MEM_MAP(pu64Dst, IEM_ACCESS_DATA_R, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
                IEM_MC_FETCH_EFLAGS(EFlags);
                IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_test_u64, pu64Dst, u64Src, pEFlags);

                IEM_MC_MEM_COMMIT_AND_UNMAP(pu64Dst, IEM_ACCESS_DATA_R);
                IEM_MC_COMMIT_EFLAGS(EFlags);
                IEM_MC_ADVANCE_RIP();
                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
17006
17007
/** Opcode 0xf6 /4, /5, /6 and /7.
 *
 * Common worker for the byte-sized mul/imul/div/idiv forms.  The assembly
 * worker (pfnU8) operates on AX and the byte operand; a non-zero status
 * return from it raises \#DE (divide error).
 *
 * @param   bRm     The ModR/M byte.
 * @param   pfnU8   The byte multiply/divide assembly implementation.
 */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEb, uint8_t, bRm, PFNIEMAIMPLMULDIVU8, pfnU8)
{
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        IEMOP_HLP_NO_LOCK_PREFIX();
        IEM_MC_BEGIN(3, 1);
        IEM_MC_ARG(uint16_t *,      pu16AX,     0);
        IEM_MC_ARG(uint8_t,         u8Value,    1);
        IEM_MC_ARG(uint32_t *,      pEFlags,    2);
        IEM_MC_LOCAL(int32_t,       rc);

        IEM_MC_FETCH_GREG_U8(u8Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags);
        /* rc == 0 means success; anything else is a divide error. */
        IEM_MC_IF_LOCAL_IS_Z(rc) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_RAISE_DIVIDE_ERROR();
        } IEM_MC_ENDIF();

        IEM_MC_END();
    }
    else
    {
        /* memory access. */
        IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */

        IEM_MC_BEGIN(3, 2);
        IEM_MC_ARG(uint16_t *,      pu16AX,     0);
        IEM_MC_ARG(uint8_t,         u8Value,    1);
        IEM_MC_ARG(uint32_t *,      pEFlags,    2);
        IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
        IEM_MC_LOCAL(int32_t,       rc);

        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        IEM_MC_FETCH_MEM_U8(u8Value, pIemCpu->iEffSeg, GCPtrEffDst);
        IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
        IEM_MC_REF_EFLAGS(pEFlags);
        IEM_MC_CALL_AIMPL_3(rc, pfnU8, pu16AX, u8Value, pEFlags);
        /* rc == 0 means success; anything else is a divide error. */
        IEM_MC_IF_LOCAL_IS_Z(rc) {
            IEM_MC_ADVANCE_RIP();
        } IEM_MC_ELSE() {
            IEM_MC_RAISE_DIVIDE_ERROR();
        } IEM_MC_ENDIF();

        IEM_MC_END();
    }
    return VINF_SUCCESS;
}
17062
17063
/** Opcode 0xf7 /4, /5, /6 and /7.
 *
 * Common worker for the word/dword/qword mul/imul/div/idiv forms.  The
 * assembly workers in pImpl operate on the xAX/xDX register pair and the
 * given operand; a non-zero status return raises \#DE (divide error).
 *
 * @param   bRm     The ModR/M byte.
 * @param   pImpl   Table with the U16/U32/U64 assembly implementations.
 */
FNIEMOP_DEF_2(iemOpCommonGrp3MulDivEv, uint8_t, bRm, PCIEMOPMULDIVSIZES, pImpl)
{
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo should probably not be raised until we've fetched all the opcode bytes? */
    IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* register access */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint16_t *,      pu16AX,     0);
                IEM_MC_ARG(uint16_t *,      pu16DX,     1);
                IEM_MC_ARG(uint16_t,        u16Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
                /* rc == 0 means success; anything else is a divide error. */
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint32_t *,      pu32AX,     0);
                IEM_MC_ARG(uint32_t *,      pu32DX,     1);
                IEM_MC_ARG(uint32_t,        u32Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U32(u32Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    /* 32-bit GPR writes zero the upper halves of RAX/RDX;
                       the worker wrote via 32-bit refs, so do it here. */
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX);
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX);
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 1);
                IEM_MC_ARG(uint64_t *,      pu64AX,     0);
                IEM_MC_ARG(uint64_t *,      pu64DX,     1);
                IEM_MC_ARG(uint64_t,        u64Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_FETCH_GREG_U64(u64Value, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* memory access. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint16_t *,      pu16AX,     0);
                IEM_MC_ARG(uint16_t *,      pu16DX,     1);
                IEM_MC_ARG(uint16_t,        u16Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U16(pu16AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U16(pu16DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU16, pu16AX, pu16DX, u16Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_32BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint32_t *,      pu32AX,     0);
                IEM_MC_ARG(uint32_t *,      pu32DX,     1);
                IEM_MC_ARG(uint32_t,        u32Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U32(pu32AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U32(pu32DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU32, pu32AX, pu32DX, u32Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    /* See the register variant: zero RAX/RDX upper halves. */
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32AX);
                    IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32DX);
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            case IEMMODE_64BIT:
            {
                IEMOP_HLP_NO_LOCK_PREFIX();
                IEM_MC_BEGIN(4, 2);
                IEM_MC_ARG(uint64_t *,      pu64AX,     0);
                IEM_MC_ARG(uint64_t *,      pu64DX,     1);
                IEM_MC_ARG(uint64_t,        u64Value,   2);
                IEM_MC_ARG(uint32_t *,      pEFlags,    3);
                IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
                IEM_MC_LOCAL(int32_t,       rc);

                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Value, pIemCpu->iEffSeg, GCPtrEffDst);
                IEM_MC_REF_GREG_U64(pu64AX, X86_GREG_xAX);
                IEM_MC_REF_GREG_U64(pu64DX, X86_GREG_xDX);
                IEM_MC_REF_EFLAGS(pEFlags);
                IEM_MC_CALL_AIMPL_4(rc, pImpl->pfnU64, pu64AX, pu64DX, u64Value, pEFlags);
                IEM_MC_IF_LOCAL_IS_Z(rc) {
                    IEM_MC_ADVANCE_RIP();
                } IEM_MC_ELSE() {
                    IEM_MC_RAISE_DIVIDE_ERROR();
                } IEM_MC_ENDIF();

                IEM_MC_END();
                return VINF_SUCCESS;
            }

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
17247
/** Opcode 0xf6 - Group 3 byte forms, dispatched on ModR/M.reg. */
FNIEMOP_DEF(iemOp_Grp3_Eb)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Eb, bRm);
        case 1:
/** @todo testcase: Present on <=386, most 486 (not early), Pentiums, and current CPUs too. CPUUNDOC.EXE */
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2:
            IEMOP_MNEMONIC("not Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Eb");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_mul_u8);
        case 5:
            IEMOP_MNEMONIC("imul Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_imul_u8);
        case 6:
            IEMOP_MNEMONIC("div Eb");
            /* All arithmetic flags are undefined after div/idiv. */
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_div_u8);
        case 7:
            IEMOP_MNEMONIC("idiv Eb");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEb, bRm, iemAImpl_idiv_u8);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
17284
17285
/** Opcode 0xf7 - Group 3 word/dword/qword forms, dispatched on ModR/M.reg. */
FNIEMOP_DEF(iemOp_Grp3_Ev)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            return FNIEMOP_CALL_1(iemOp_grp3_test_Ev, bRm);
        case 1:
/** @todo testcase: Present on <=386, most 486 (not early), Pentiums, and current CPUs too. CPUUNDOC.EXE */
            return IEMOP_RAISE_INVALID_OPCODE();
        case 2:
            IEMOP_MNEMONIC("not Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_not);
        case 3:
            IEMOP_MNEMONIC("neg Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_neg);
        case 4:
            IEMOP_MNEMONIC("mul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_mul);
        case 5:
            IEMOP_MNEMONIC("imul Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_imul);
        case 6:
            IEMOP_MNEMONIC("div Ev");
            /* All arithmetic flags are undefined after div/idiv. */
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_div);
        case 7:
            IEMOP_MNEMONIC("idiv Ev");
            IEMOP_VERIFICATION_UNDEFINED_EFLAGS(X86_EFL_SF | X86_EFL_ZF | X86_EFL_AF | X86_EFL_PF | X86_EFL_OF | X86_EFL_CF);
            return FNIEMOP_CALL_2(iemOpCommonGrp3MulDivEv, bRm, &g_iemAImpl_idiv);
        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
17322
17323
/** Opcode 0xf8 - clc (clear the carry flag). */
FNIEMOP_DEF(iemOp_clc)
{
    IEMOP_MNEMONIC("clc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
17335
17336
/** Opcode 0xf9 - stc (set the carry flag). */
FNIEMOP_DEF(iemOp_stc)
{
    IEMOP_MNEMONIC("stc");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_CF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
17348
17349
/** Opcode 0xfa - cli.  Privilege checking etc. is handled by the C
 *  implementation, so defer to it. */
FNIEMOP_DEF(iemOp_cli)
{
    IEMOP_MNEMONIC("cli");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
}
17357
17358
/** Opcode 0xfb - sti.  Privilege checking and the interrupt shadow are
 *  handled by the C implementation, so defer to it. */
FNIEMOP_DEF(iemOp_sti)
{
    IEMOP_MNEMONIC("sti");
    IEMOP_HLP_NO_LOCK_PREFIX();
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_sti);
}
17365
17366
/** Opcode 0xfc - cld (clear the direction flag). */
FNIEMOP_DEF(iemOp_cld)
{
    IEMOP_MNEMONIC("cld");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_CLEAR_EFL_BIT(X86_EFL_DF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
17378
17379
/** Opcode 0xfd - std (set the direction flag). */
FNIEMOP_DEF(iemOp_std)
{
    IEMOP_MNEMONIC("std");
    IEMOP_HLP_NO_LOCK_PREFIX();
    IEM_MC_BEGIN(0, 0);
    IEM_MC_SET_EFL_BIT(X86_EFL_DF);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
}
17391
17392
17393/** Opcode 0xfe. */
17394FNIEMOP_DEF(iemOp_Grp4)
17395{
17396 uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
17397 switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
17398 {
17399 case 0:
17400 IEMOP_MNEMONIC("inc Ev");
17401 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_inc);
17402 case 1:
17403 IEMOP_MNEMONIC("dec Ev");
17404 return FNIEMOP_CALL_2(iemOpCommonUnaryEb, bRm, &g_iemAImpl_dec);
17405 default:
17406 IEMOP_MNEMONIC("grp4-ud");
17407 return IEMOP_RAISE_INVALID_OPCODE();
17408 }
17409}
17410
17411
/**
 * Opcode 0xff /2 - calln Ev (near indirect call via register or memory).
 *
 * Defaults to 64-bit operand size in long mode; the actual call (stack push
 * of the return address and RIP update) is done by iemCImpl_call_16/32/64.
 *
 * @param bRm The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_calln_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("calln Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The new RIP is taken from a register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 0);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* The new RIP is taken from a memory location. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint16_t, u16Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_16, u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint32_t, u32Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_32, u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(1, 1);
                IEM_MC_ARG(uint64_t, u64Target, 0);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_CALL_CIMPL_1(iemCImpl_call_64, u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
17493
17494typedef IEM_CIMPL_DECL_TYPE_3(FNIEMCIMPLFARBRANCH, uint16_t, uSel, uint64_t, offSeg, IEMMODE, enmOpSize);
17495
17496FNIEMOP_DEF_2(iemOpHlp_Grp5_far_Ep, uint8_t, bRm, FNIEMCIMPLFARBRANCH *, pfnCImpl)
17497{
17498 /* Registers? How?? */
17499 if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
17500 return IEMOP_RAISE_INVALID_OPCODE(); /* callf eax is not legal */
17501
17502 /* Far pointer loaded from memory. */
17503 switch (pIemCpu->enmEffOpSize)
17504 {
17505 case IEMMODE_16BIT:
17506 IEM_MC_BEGIN(3, 1);
17507 IEM_MC_ARG(uint16_t, u16Sel, 0);
17508 IEM_MC_ARG(uint16_t, offSeg, 1);
17509 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
17510 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
17511 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
17512 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
17513 IEM_MC_FETCH_MEM_U16(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
17514 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 2);
17515 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
17516 IEM_MC_END();
17517 return VINF_SUCCESS;
17518
17519 case IEMMODE_64BIT:
17520 /** @todo testcase: AMD does not seem to believe in the case (see bs-cpu-xcpt-1)
17521 * and will apparently ignore REX.W, at least for the jmp far qword [rsp]
17522 * and call far qword [rsp] encodings. */
17523 if (!IEM_IS_GUEST_CPU_AMD(pIemCpu))
17524 {
17525 IEM_MC_BEGIN(3, 1);
17526 IEM_MC_ARG(uint16_t, u16Sel, 0);
17527 IEM_MC_ARG(uint64_t, offSeg, 1);
17528 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_16BIT, 2);
17529 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
17530 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
17531 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
17532 IEM_MC_FETCH_MEM_U64(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
17533 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 8);
17534 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
17535 IEM_MC_END();
17536 return VINF_SUCCESS;
17537 }
17538 /* AMD falls thru. */
17539
17540 case IEMMODE_32BIT:
17541 IEM_MC_BEGIN(3, 1);
17542 IEM_MC_ARG(uint16_t, u16Sel, 0);
17543 IEM_MC_ARG(uint32_t, offSeg, 1);
17544 IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize, IEMMODE_32BIT, 2);
17545 IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
17546 IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
17547 IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
17548 IEM_MC_FETCH_MEM_U32(offSeg, pIemCpu->iEffSeg, GCPtrEffSrc);
17549 IEM_MC_FETCH_MEM_U16_DISP(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc, 4);
17550 IEM_MC_CALL_CIMPL_3(pfnCImpl, u16Sel, offSeg, enmEffOpSize);
17551 IEM_MC_END();
17552 return VINF_SUCCESS;
17553
17554 IEM_NOT_REACHED_DEFAULT_CASE_RET();
17555 }
17556}
17557
17558
/**
 * Opcode 0xff /3 - callf Ep (far indirect call through a memory far pointer).
 *
 * Delegates decoding to the shared far-branch worker with iemCImpl_callf.
 *
 * @param bRm The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_callf_Ep, uint8_t, bRm)
{
    IEMOP_MNEMONIC("callf Ep");
    return FNIEMOP_CALL_2(iemOpHlp_Grp5_far_Ep, bRm, iemCImpl_callf);
}
17568
17569
/**
 * Opcode 0xff /4 - jmpn Ev (near indirect jump via register or memory).
 *
 * Defaults to 64-bit operand size in long mode; unlike the call form no
 * C implementation is needed, RIP is simply set to the fetched target.
 *
 * @param bRm The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_jmpn_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("jmpn Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();

    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
    {
        /* The new RIP is taken from a register. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint16_t, u16Target);
                IEM_MC_FETCH_GREG_U16(u16Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U16(u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint32_t, u32Target);
                IEM_MC_FETCH_GREG_U32(u32Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U32(u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 1);
                IEM_MC_LOCAL(uint64_t, u64Target);
                IEM_MC_FETCH_GREG_U64(u64Target, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
                IEM_MC_SET_RIP_U64(u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
    else
    {
        /* The new RIP is taken from a memory location. */
        switch (pIemCpu->enmEffOpSize)
        {
            case IEMMODE_16BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint16_t, u16Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U16(u16Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U16(u16Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_32BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint32_t, u32Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U32(u32Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U32(u32Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            case IEMMODE_64BIT:
                IEM_MC_BEGIN(0, 2);
                IEM_MC_LOCAL(uint64_t, u64Target);
                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
                IEM_MC_FETCH_MEM_U64(u64Target, pIemCpu->iEffSeg, GCPtrEffSrc);
                IEM_MC_SET_RIP_U64(u64Target);
                IEM_MC_END()
                return VINF_SUCCESS;

            IEM_NOT_REACHED_DEFAULT_CASE_RET();
        }
    }
}
17651
17652
/**
 * Opcode 0xff /5 - jmpf Ep (far indirect jump through a memory far pointer).
 *
 * Delegates decoding to the shared far-branch worker with iemCImpl_FarJmp.
 *
 * @param bRm The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_jmpf_Ep, uint8_t, bRm)
{
    IEMOP_MNEMONIC("jmpf Ep");
    return FNIEMOP_CALL_2(iemOpHlp_Grp5_far_Ep, bRm, iemCImpl_FarJmp);
}
17662
17663
/**
 * Opcode 0xff /6 - push Ev.
 *
 * Register operands share iemOpCommonPushGReg with the 0x50..0x57 pushes;
 * memory operands are fetched and pushed inline here.  Defaults to 64-bit
 * operand size in long mode.
 *
 * @param bRm The RM byte.
 */
FNIEMOP_DEF_1(iemOp_Grp5_push_Ev, uint8_t, bRm)
{
    IEMOP_MNEMONIC("push Ev");
    IEMOP_HLP_NO_LOCK_PREFIX(); /** @todo Too early? */

    /* Registers are handled by a common worker. */
    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
        return FNIEMOP_CALL_1(iemOpCommonPushGReg, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);

    /* Memory we do here. */
    IEMOP_HLP_DEFAULT_64BIT_OP_SIZE();
    switch (pIemCpu->enmEffOpSize)
    {
        case IEMMODE_16BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint16_t, u16Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U16(u16Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U16(u16Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_32BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint32_t, u32Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U32(u32Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U32(u32Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        case IEMMODE_64BIT:
            IEM_MC_BEGIN(0, 2);
            IEM_MC_LOCAL(uint64_t, u64Src);
            IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
            IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
            IEM_MC_FETCH_MEM_U64(u64Src, pIemCpu->iEffSeg, GCPtrEffSrc);
            IEM_MC_PUSH_U64(u64Src);
            IEM_MC_ADVANCE_RIP();
            IEM_MC_END();
            return VINF_SUCCESS;

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }
}
17717
17718
/** Opcode 0xff - Group 5, dispatched on ModR/M.reg (inc/dec/call/callf/
 *  jmp/jmpf/push; /7 is \#UD). */
FNIEMOP_DEF(iemOp_Grp5)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    switch ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK)
    {
        case 0:
            IEMOP_MNEMONIC("inc Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_inc);
        case 1:
            IEMOP_MNEMONIC("dec Ev");
            return FNIEMOP_CALL_2(iemOpCommonUnaryEv, bRm, &g_iemAImpl_dec);
        case 2:
            return FNIEMOP_CALL_1(iemOp_Grp5_calln_Ev, bRm);
        case 3:
            return FNIEMOP_CALL_1(iemOp_Grp5_callf_Ep, bRm);
        case 4:
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpn_Ev, bRm);
        case 5:
            return FNIEMOP_CALL_1(iemOp_Grp5_jmpf_Ep, bRm);
        case 6:
            return FNIEMOP_CALL_1(iemOp_Grp5_push_Ev, bRm);
        case 7:
            IEMOP_MNEMONIC("grp5-ud");
            return IEMOP_RAISE_INVALID_OPCODE();
    }
    AssertFailedReturn(VERR_IEM_IPE_3);
}
17747
17748
17749
/**
 * One-byte opcode dispatch table.
 *
 * Indexed directly by the first opcode byte (0x00..0xff); each entry is the
 * FNIEMOP decoder/emulator worker for that opcode.  The table is
 * position-dependent: an entry's meaning is entirely determined by its index,
 * so the /\* 0xNN *\/ column markers below must stay in sync with the layout.
 *
 * Notable entries (grounded in the worker names themselves):
 *  - 0x0f (iemOp_2byteEscape) escapes to the two-byte opcode map.
 *  - 0x26/0x2e/0x36/0x3e/0x64/0x65 are segment-override prefixes, and
 *    0x66/0x67/0xf0/0xf2/0xf3 are operand-size/address-size/lock/rep prefixes,
 *    handled as "instructions" that re-dispatch.
 *  - 0x62/0xc4/0xc5 carry _evex/_vex3/_vex2 suffixes, indicating the workers
 *    also deal with the AVX prefix encodings of those bytes (presumably
 *    mode-dependent -- confirm against the worker implementations).
 *  - 0xd8..0xdf (iemOp_EscF0..F7) are the x87 FPU escape bytes.
 *
 * NOTE(review): naming is slightly inconsistent -- 0xb1..0xbf drop the mov_
 * prefix used by 0xb0/0xb4 (e.g. iemOp_CL_Ib vs iemOp_mov_AL_Ib), and 0xed
 * (iemOp_eAX_DX) drops the in_ prefix used by 0xec; harmless, but worth
 * knowing when searching for the workers.
 */
const PFNIEMOP g_apfnOneByteMap[256] =
{
    /* 0x00 */  iemOp_add_Eb_Gb,        iemOp_add_Ev_Gv,        iemOp_add_Gb_Eb,        iemOp_add_Gv_Ev,
    /* 0x04 */  iemOp_add_Al_Ib,        iemOp_add_eAX_Iz,       iemOp_push_ES,          iemOp_pop_ES,
    /* 0x08 */  iemOp_or_Eb_Gb,         iemOp_or_Ev_Gv,         iemOp_or_Gb_Eb,         iemOp_or_Gv_Ev,
    /* 0x0c */  iemOp_or_Al_Ib,         iemOp_or_eAX_Iz,        iemOp_push_CS,          iemOp_2byteEscape,
    /* 0x10 */  iemOp_adc_Eb_Gb,        iemOp_adc_Ev_Gv,        iemOp_adc_Gb_Eb,        iemOp_adc_Gv_Ev,
    /* 0x14 */  iemOp_adc_Al_Ib,        iemOp_adc_eAX_Iz,       iemOp_push_SS,          iemOp_pop_SS,
    /* 0x18 */  iemOp_sbb_Eb_Gb,        iemOp_sbb_Ev_Gv,        iemOp_sbb_Gb_Eb,        iemOp_sbb_Gv_Ev,
    /* 0x1c */  iemOp_sbb_Al_Ib,        iemOp_sbb_eAX_Iz,       iemOp_push_DS,          iemOp_pop_DS,
    /* 0x20 */  iemOp_and_Eb_Gb,        iemOp_and_Ev_Gv,        iemOp_and_Gb_Eb,        iemOp_and_Gv_Ev,
    /* 0x24 */  iemOp_and_Al_Ib,        iemOp_and_eAX_Iz,       iemOp_seg_ES,           iemOp_daa,
    /* 0x28 */  iemOp_sub_Eb_Gb,        iemOp_sub_Ev_Gv,        iemOp_sub_Gb_Eb,        iemOp_sub_Gv_Ev,
    /* 0x2c */  iemOp_sub_Al_Ib,        iemOp_sub_eAX_Iz,       iemOp_seg_CS,           iemOp_das,
    /* 0x30 */  iemOp_xor_Eb_Gb,        iemOp_xor_Ev_Gv,        iemOp_xor_Gb_Eb,        iemOp_xor_Gv_Ev,
    /* 0x34 */  iemOp_xor_Al_Ib,        iemOp_xor_eAX_Iz,       iemOp_seg_SS,           iemOp_aaa,
    /* 0x38 */  iemOp_cmp_Eb_Gb,        iemOp_cmp_Ev_Gv,        iemOp_cmp_Gb_Eb,        iemOp_cmp_Gv_Ev,
    /* 0x3c */  iemOp_cmp_Al_Ib,        iemOp_cmp_eAX_Iz,       iemOp_seg_DS,           iemOp_aas,
    /* 0x40 */  iemOp_inc_eAX,          iemOp_inc_eCX,          iemOp_inc_eDX,          iemOp_inc_eBX,
    /* 0x44 */  iemOp_inc_eSP,          iemOp_inc_eBP,          iemOp_inc_eSI,          iemOp_inc_eDI,
    /* 0x48 */  iemOp_dec_eAX,          iemOp_dec_eCX,          iemOp_dec_eDX,          iemOp_dec_eBX,
    /* 0x4c */  iemOp_dec_eSP,          iemOp_dec_eBP,          iemOp_dec_eSI,          iemOp_dec_eDI,
    /* 0x50 */  iemOp_push_eAX,         iemOp_push_eCX,         iemOp_push_eDX,         iemOp_push_eBX,
    /* 0x54 */  iemOp_push_eSP,         iemOp_push_eBP,         iemOp_push_eSI,         iemOp_push_eDI,
    /* 0x58 */  iemOp_pop_eAX,          iemOp_pop_eCX,          iemOp_pop_eDX,          iemOp_pop_eBX,
    /* 0x5c */  iemOp_pop_eSP,          iemOp_pop_eBP,          iemOp_pop_eSI,          iemOp_pop_eDI,
    /* 0x60 */  iemOp_pusha,            iemOp_popa,             iemOp_bound_Gv_Ma_evex, iemOp_arpl_Ew_Gw_movsx_Gv_Ev,
    /* 0x64 */  iemOp_seg_FS,           iemOp_seg_GS,           iemOp_op_size,          iemOp_addr_size,
    /* 0x68 */  iemOp_push_Iz,          iemOp_imul_Gv_Ev_Iz,    iemOp_push_Ib,          iemOp_imul_Gv_Ev_Ib,
    /* 0x6c */  iemOp_insb_Yb_DX,       iemOp_inswd_Yv_DX,      iemOp_outsb_Yb_DX,      iemOp_outswd_Yv_DX,
    /* 0x70 */  iemOp_jo_Jb,            iemOp_jno_Jb,           iemOp_jc_Jb,            iemOp_jnc_Jb,
    /* 0x74 */  iemOp_je_Jb,            iemOp_jne_Jb,           iemOp_jbe_Jb,           iemOp_jnbe_Jb,
    /* 0x78 */  iemOp_js_Jb,            iemOp_jns_Jb,           iemOp_jp_Jb,            iemOp_jnp_Jb,
    /* 0x7c */  iemOp_jl_Jb,            iemOp_jnl_Jb,           iemOp_jle_Jb,           iemOp_jnle_Jb,
    /* 0x80 */  iemOp_Grp1_Eb_Ib_80,    iemOp_Grp1_Ev_Iz,       iemOp_Grp1_Eb_Ib_82,    iemOp_Grp1_Ev_Ib,
    /* 0x84 */  iemOp_test_Eb_Gb,       iemOp_test_Ev_Gv,       iemOp_xchg_Eb_Gb,       iemOp_xchg_Ev_Gv,
    /* 0x88 */  iemOp_mov_Eb_Gb,        iemOp_mov_Ev_Gv,        iemOp_mov_Gb_Eb,        iemOp_mov_Gv_Ev,
    /* 0x8c */  iemOp_mov_Ev_Sw,        iemOp_lea_Gv_M,         iemOp_mov_Sw_Ev,        iemOp_Grp1A,
    /* 0x90 */  iemOp_nop,              iemOp_xchg_eCX_eAX,     iemOp_xchg_eDX_eAX,     iemOp_xchg_eBX_eAX,
    /* 0x94 */  iemOp_xchg_eSP_eAX,     iemOp_xchg_eBP_eAX,     iemOp_xchg_eSI_eAX,     iemOp_xchg_eDI_eAX,
    /* 0x98 */  iemOp_cbw,              iemOp_cwd,              iemOp_call_Ap,          iemOp_wait,
    /* 0x9c */  iemOp_pushf_Fv,         iemOp_popf_Fv,          iemOp_sahf,             iemOp_lahf,
    /* 0xa0 */  iemOp_mov_Al_Ob,        iemOp_mov_rAX_Ov,       iemOp_mov_Ob_AL,        iemOp_mov_Ov_rAX,
    /* 0xa4 */  iemOp_movsb_Xb_Yb,      iemOp_movswd_Xv_Yv,     iemOp_cmpsb_Xb_Yb,      iemOp_cmpswd_Xv_Yv,
    /* 0xa8 */  iemOp_test_AL_Ib,       iemOp_test_eAX_Iz,      iemOp_stosb_Yb_AL,      iemOp_stoswd_Yv_eAX,
    /* 0xac */  iemOp_lodsb_AL_Xb,      iemOp_lodswd_eAX_Xv,    iemOp_scasb_AL_Xb,      iemOp_scaswd_eAX_Xv,
    /* 0xb0 */  iemOp_mov_AL_Ib,        iemOp_CL_Ib,            iemOp_DL_Ib,            iemOp_BL_Ib,
    /* 0xb4 */  iemOp_mov_AH_Ib,        iemOp_CH_Ib,            iemOp_DH_Ib,            iemOp_BH_Ib,
    /* 0xb8 */  iemOp_eAX_Iv,           iemOp_eCX_Iv,           iemOp_eDX_Iv,           iemOp_eBX_Iv,
    /* 0xbc */  iemOp_eSP_Iv,           iemOp_eBP_Iv,           iemOp_eSI_Iv,           iemOp_eDI_Iv,
    /* 0xc0 */  iemOp_Grp2_Eb_Ib,       iemOp_Grp2_Ev_Ib,       iemOp_retn_Iw,          iemOp_retn,
    /* 0xc4 */  iemOp_les_Gv_Mp_vex2,   iemOp_lds_Gv_Mp_vex3,   iemOp_Grp11_Eb_Ib,      iemOp_Grp11_Ev_Iz,
    /* 0xc8 */  iemOp_enter_Iw_Ib,      iemOp_leave,            iemOp_retf_Iw,          iemOp_retf,
    /* 0xcc */  iemOp_int_3,            iemOp_int_Ib,           iemOp_into,             iemOp_iret,
    /* 0xd0 */  iemOp_Grp2_Eb_1,        iemOp_Grp2_Ev_1,        iemOp_Grp2_Eb_CL,       iemOp_Grp2_Ev_CL,
    /* 0xd4 */  iemOp_aam_Ib,           iemOp_aad_Ib,           iemOp_salc,             iemOp_xlat,
    /* 0xd8 */  iemOp_EscF0,            iemOp_EscF1,            iemOp_EscF2,            iemOp_EscF3,
    /* 0xdc */  iemOp_EscF4,            iemOp_EscF5,            iemOp_EscF6,            iemOp_EscF7,
    /* 0xe0 */  iemOp_loopne_Jb,        iemOp_loope_Jb,         iemOp_loop_Jb,          iemOp_jecxz_Jb,
    /* 0xe4 */  iemOp_in_AL_Ib,         iemOp_in_eAX_Ib,        iemOp_out_Ib_AL,        iemOp_out_Ib_eAX,
    /* 0xe8 */  iemOp_call_Jv,          iemOp_jmp_Jv,           iemOp_jmp_Ap,           iemOp_jmp_Jb,
    /* 0xec */  iemOp_in_AL_DX,         iemOp_eAX_DX,           iemOp_out_DX_AL,        iemOp_out_DX_eAX,
    /* 0xf0 */  iemOp_lock,             iemOp_int_1,            iemOp_repne,            iemOp_repe,
    /* 0xf4 */  iemOp_hlt,              iemOp_cmc,              iemOp_Grp3_Eb,          iemOp_Grp3_Ev,
    /* 0xf8 */  iemOp_clc,              iemOp_stc,              iemOp_cli,              iemOp_sti,
    /* 0xfc */  iemOp_cld,              iemOp_std,              iemOp_Grp4,             iemOp_Grp5,
};
17817
17818
17819/** @} */
17820
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette