source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMInlineDecode-x86.h@ 108436

Last change on this file since 108436 was 108278, checked in by vboxsync, 3 months ago

VMM/IEM: Removed the #ifndef IEM_WITH_SETJMP code. We've had IEM_WITH_SETJMP defined unconditionally since 7.0 and the code probably doesn't even compile w/o it, so best remove the unused code. jiraref:VBP-1531

/* $Id: IEMInlineDecode-x86.h 108278 2025-02-18 15:46:53Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Inlined Decoding related Functions, x86 target.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineDecode_x86_h
#define VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineDecode_x86_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include <VBox/err.h>


#ifndef IEM_WITH_OPAQUE_DECODER_STATE

/**
 * Fetches the first opcode byte, longjmp on error.
 *
 * @returns The opcode byte.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint8_t) iemOpcodeGetFirstU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
    /*
     * Check for hardware instruction breakpoints.
     * Note! Guest breakpoints are only checked after POP SS or MOV SS on AMD CPUs.
     */
    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_INSTR)))
    { /* likely */ }
    else
    {
        VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                       pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base,
                                                          !(pVCpu->cpum.GstCtx.rflags.uBoth & CPUMCTX_INHIBIT_SHADOW_SS)
                                                       || IEM_IS_GUEST_CPU_AMD(pVCpu));
        if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        { /* likely */ }
        else
        {
            if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
                rcStrict = iemRaiseDebugException(pVCpu);
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }

    /*
     * Fetch the first opcode byte.
     */
# ifdef IEM_WITH_CODE_TLB
    uint8_t         bRet;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf < pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
        bRet = pbBuf[offBuf];
    }
    else
        bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
#  ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    Assert(pVCpu->iem.s.offOpcode == 0);
    pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
#  endif
    return bRet;

# else /* !IEM_WITH_CODE_TLB */
    uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
        return pVCpu->iem.s.abOpcode[offOpcode];
    }
    return iemOpcodeGetNextU8SlowJmp(pVCpu);
# endif
}

/**
 * Fetches the first opcode byte, returns/throws automatically on failure.
 *
 * @param   a_pu8               Where to return the opcode byte.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_FIRST_U8(a_pu8) (*(a_pu8) = iemOpcodeGetFirstU8Jmp(pVCpu))

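/* Editor's illustrative sketch (not part of the original header): how the
   IEM_OPCODE_GET_FIRST_U8 macro is typically used at the top of a decoder
   routine that has pVCpu in scope.  On a fetch failure the underlying
   iemOpcodeGetFirstU8Jmp() longjmps out of the decoder instead of returning:

       uint8_t bOpcode;
       IEM_OPCODE_GET_FIRST_U8(&bOpcode);
       switch (bOpcode)
       {
           ...
       }
 */
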
109
110/**
111 * Fetches the next opcode byte, longjmp on error.
112 *
113 * @returns The opcode byte.
114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
115 */
116DECL_INLINE_THROW(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
117{
118# ifdef IEM_WITH_CODE_TLB
119 uint8_t bRet;
120 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
121 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
122 if (RT_LIKELY( pbBuf != NULL
123 && offBuf < pVCpu->iem.s.cbInstrBuf))
124 {
125 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
126 bRet = pbBuf[offBuf];
127 }
128 else
129 bRet = iemOpcodeGetNextU8SlowJmp(pVCpu);
130# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
131 Assert(pVCpu->iem.s.offOpcode < sizeof(pVCpu->iem.s.abOpcode));
132 pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++] = bRet;
133# endif
134 return bRet;
135
136# else /* !IEM_WITH_CODE_TLB */
137 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
138 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
139 {
140 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
141 return pVCpu->iem.s.abOpcode[offOpcode];
142 }
143 return iemOpcodeGetNextU8SlowJmp(pVCpu);
144# endif
145}
146
147/**
148 * Fetches the next opcode byte, returns automatically on failure.
149 *
150 * @param a_pu8 Where to return the opcode byte.
151 * @remark Implicitly references pVCpu.
152 */
153# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
154
/**
 * Fetches the next signed byte from the opcode stream, returning automatically
 * on failure.
 *
 * @param   a_pi8               Where to return the signed byte.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))

/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to a
 * word, returning automatically on failure.
 *
 * @param   a_pu16              Where to return the word.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (uint16_t)(int16_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))

/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to a
 * double word, returning automatically on failure.
 *
 * @param   a_pu32              Where to return the double word.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (uint32_t)(int32_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))

/**
 * Fetches the next signed byte from the opcode stream and sign-extends it to a
 * quad word, returning automatically on failure.
 *
 * @param   a_pu64              Where to return the quad word.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int8_t)iemOpcodeGetNextU8Jmp(pVCpu))

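/* Editor's illustrative sketch (not part of the original header): the sign
   extending byte fetchers above are what short relative branches (JMP rel8,
   Jcc rel8) use to widen the 8-bit displacement to the effective operand
   size, e.g. to 64 bits in long mode:

       uint64_t u64Disp;
       IEM_OPCODE_GET_NEXT_S8_SX_U64(&u64Disp);
 */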

/**
 * Fetches the next opcode word, longjmp on error.
 *
 * @returns The opcode word.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IEM_WITH_CODE_TLB
    uint16_t        u16Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u16Ret = *(uint16_t const *)&pbBuf[offBuf];
#  else
        u16Ret = RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
#  endif
    }
    else
        u16Ret = iemOpcodeGetNextU16SlowJmp(pVCpu);

#  ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 1 < sizeof(pVCpu->iem.s.abOpcode));
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint16_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u16Ret;
#   else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_LO_U8(u16Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_HI_U8(u16Ret);
#   endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)2;
#  endif

    return u16Ret;

# else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
#  endif
    }
    return iemOpcodeGetNextU16SlowJmp(pVCpu);
# endif /* !IEM_WITH_CODE_TLB */
}

/**
 * Fetches the next opcode word, returns automatically on failure.
 *
 * @param   a_pu16              Where to return the opcode word.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))

/**
 * Fetches the next opcode word and zero extends it to a double word, returns
 * automatically on failure.
 *
 * @param   a_pu32              Where to return the opcode double word.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))

/**
 * Fetches the next opcode word and zero extends it to a quad word, returns
 * automatically on failure.
 *
 * @param   a_pu64              Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))

/**
 * Fetches the next signed word from the opcode stream, returning automatically
 * on failure.
 *
 * @param   a_pi16              Where to return the signed word.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))

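/* Editor's illustrative sketch (not part of the original header): a 16-bit
   immediate operand, as used when the effective operand size is 16 bits, is
   pulled from the opcode stream like this:

       uint16_t u16Imm;
       IEM_OPCODE_GET_NEXT_U16(&u16Imm);
 */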

/**
 * Fetches the next opcode dword, longjmp on error.
 *
 * @returns The opcode dword.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IEM_WITH_CODE_TLB
    uint32_t        u32Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u32Ret = *(uint32_t const *)&pbBuf[offBuf];
#  else
        u32Ret = RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
                                     pbBuf[offBuf + 1],
                                     pbBuf[offBuf + 2],
                                     pbBuf[offBuf + 3]);
#  endif
    }
    else
        u32Ret = iemOpcodeGetNextU32SlowJmp(pVCpu);

#  ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 3 < sizeof(pVCpu->iem.s.abOpcode));
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint32_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u32Ret;
#   else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_BYTE1(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u32Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u32Ret);
#   endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)4;
#  endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */

    return u32Ret;

# else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3]);
#  endif
    }
    return iemOpcodeGetNextU32SlowJmp(pVCpu);
# endif
}

/**
 * Fetches the next opcode dword, returns automatically on failure.
 *
 * @param   a_pu32              Where to return the opcode dword.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))

/**
 * Fetches the next opcode dword and zero extends it to a quad word, returns
 * automatically on failure.
 *
 * @param   a_pu64              Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))

/**
 * Fetches the next signed double word from the opcode stream, returning
 * automatically on failure.
 *
 * @param   a_pi32              Where to return the signed double word.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))

/**
 * Fetches the next opcode double word and sign extends it to a quad word,
 * returns automatically on failure.
 *
 * @param   a_pu64              Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (uint64_t)(int64_t)(int32_t)iemOpcodeGetNextU32Jmp(pVCpu))

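/* Editor's illustrative sketch (not part of the original header): in 64-bit
   mode most immediates stay 32 bits wide and are sign-extended to 64 bits by
   the CPU, which decoders model with the macro above:

       uint64_t u64Imm;
       IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Imm);
 */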

/**
 * Fetches the next opcode qword, longjmp on error.
 *
 * @returns The opcode qword.
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 */
DECL_INLINE_THROW(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
{
# ifdef IEM_WITH_CODE_TLB
    uint64_t        u64Ret;
    uintptr_t       offBuf = pVCpu->iem.s.offInstrNextByte;
    uint8_t const  *pbBuf  = pVCpu->iem.s.pbInstrBuf;
    if (RT_LIKELY(   pbBuf != NULL
                  && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
    {
        pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        u64Ret = *(uint64_t const *)&pbBuf[offBuf];
#  else
        u64Ret = RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
                                     pbBuf[offBuf + 1],
                                     pbBuf[offBuf + 2],
                                     pbBuf[offBuf + 3],
                                     pbBuf[offBuf + 4],
                                     pbBuf[offBuf + 5],
                                     pbBuf[offBuf + 6],
                                     pbBuf[offBuf + 7]);
#  endif
    }
    else
        u64Ret = iemOpcodeGetNextU64SlowJmp(pVCpu);

#  ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    Assert(offOpcode + 7 < sizeof(pVCpu->iem.s.abOpcode));
#   ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    *(uint64_t *)&pVCpu->iem.s.abOpcode[offOpcode] = u64Ret;
#   else
    pVCpu->iem.s.abOpcode[offOpcode]     = RT_BYTE1(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 1] = RT_BYTE2(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 2] = RT_BYTE3(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 3] = RT_BYTE4(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 4] = RT_BYTE5(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 5] = RT_BYTE6(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 6] = RT_BYTE7(u64Ret);
    pVCpu->iem.s.abOpcode[offOpcode + 7] = RT_BYTE8(u64Ret);
#   endif
    pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + (uint8_t)8;
#  endif /* IEM_WITH_CODE_TLB_AND_OPCODE_BUF */

    return u64Ret;

# else /* !IEM_WITH_CODE_TLB */
    uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
    if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
    {
        pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
#  ifdef IEM_USE_UNALIGNED_DATA_ACCESS
        return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
#  else
        return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
                                   pVCpu->iem.s.abOpcode[offOpcode + 1],
                                   pVCpu->iem.s.abOpcode[offOpcode + 2],
                                   pVCpu->iem.s.abOpcode[offOpcode + 3],
                                   pVCpu->iem.s.abOpcode[offOpcode + 4],
                                   pVCpu->iem.s.abOpcode[offOpcode + 5],
                                   pVCpu->iem.s.abOpcode[offOpcode + 6],
                                   pVCpu->iem.s.abOpcode[offOpcode + 7]);
#  endif
    }
    return iemOpcodeGetNextU64SlowJmp(pVCpu);
# endif /* !IEM_WITH_CODE_TLB */
}

/**
 * Fetches the next opcode quad word, returns automatically on failure.
 *
 * @param   a_pu64              Where to return the opcode quad word.
 * @remark Implicitly references pVCpu.
 */
# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )

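/* Editor's illustrative sketch (not part of the original header): a full
   64-bit immediate only occurs for MOV r64, imm64 (REX.W + B8..BF), which is
   where the quad word fetcher above gets used:

       uint64_t u64Imm;
       IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 */
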
/**
 * For fetching the opcode bytes for a ModR/M effective address, throwing away
 * the result.
 *
 * This is used when decoding undefined opcodes and such where we want to avoid
 * unnecessary MC blocks.
 *
 * @note The recompiler code overrides this one so iemOpHlpCalcRmEffAddrJmpEx is
 *       used instead.  At least for now...
 */
# define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        (void)iemOpHlpCalcRmEffAddrJmp(pVCpu, a_bRm, 0); \
    } while (0)




/**
 * Recalculates the effective operand size.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    switch (IEM_GET_CPU_MODE(pVCpu))
    {
        case IEMMODE_16BIT:
            pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
            break;
        case IEMMODE_32BIT:
            pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
            break;
        case IEMMODE_64BIT:
            switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
            {
                case 0:
                    pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
                    break;
                case IEM_OP_PRF_SIZE_OP:
                    pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
                    break;
                case IEM_OP_PRF_SIZE_REX_W:
                case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
                    pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
                    break;
            }
            break;
        default:
            AssertFailed();
    }
}
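
/* Editor's note (not part of the original header): the switch above encodes
   the usual x86 operand size rules:
       16-bit mode:  default 16, 0x66 prefix selects 32.
       32-bit mode:  default 32, 0x66 prefix selects 16.
       64-bit mode:  the default size applies with no prefix, 0x66 selects 16,
                     and REX.W forces 64 and takes precedence over 0x66.
 */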


/**
 * Sets the default operand size to 64-bit and recalculates the effective
 * operand size.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize64Default(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
    if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
        pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
    else
        pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
}


/**
 * Sets the default operand size to 64-bit and recalculates the effective
 * operand size, with Intel ignoring any operand size prefix (AMD respects it).
 *
 * This is for the relative jumps.
 *
 * @param   pVCpu               The cross context virtual CPU structure of the calling thread.
 */
DECLINLINE(void) iemRecalEffOpSize64DefaultAndIntelIgnoresOpSizePrefix(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(IEM_IS_64BIT_CODE(pVCpu));
    pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
    if (   (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP
        || pVCpu->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL)
        pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
    else
        pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
}
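
/* Editor's note (not part of the original header): the vendor check above
   models a real behavioural difference for near relative branches in 64-bit
   mode with a 0x66 operand size prefix: Intel CPUs ignore the prefix and keep
   a 64-bit operand size, while AMD CPUs honour it and truncate the new RIP to
   16 bits, hence the CPUMCPUVENDOR_INTEL special case. */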

#endif /* !IEM_WITH_OPAQUE_DECODER_STATE */


#endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMInlineDecode_x86_h */