/* $Id: IEMAllThrdFuncsBltIn.cpp 102624 2023-12-16 03:15:54Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Emulation, Built-in Threaded Functions.
 *
 * This is separate from IEMThreadedFunctions.cpp because it doesn't work
 * with IEM_WITH_OPAQUE_DECODER_STATE defined.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.215389.xyz.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"


static VBOXSTRICTRC iemThreadeFuncWorkerObsoleteTb(PVMCPUCC pVCpu)
{
    /* We set fSafeToFree to false here, as we're being called in the context
       of a TB callback function, which for native TBs means we cannot release
       the executable memory until we've returned our way back to iemTbExec,
       as that return path goes via the native code generated for the TB. */
    iemThreadedTbObsolete(pVCpu, pVCpu->iem.s.pCurTbR3, false /*fSafeToFree*/);
    return VINF_IEM_REEXEC_BREAK;
}


/**
 * Built-in function that calls a C-implementation function taking zero arguments.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_DeferToCImpl0)
{
    PFNIEMCIMPL0 const pfnCImpl = (PFNIEMCIMPL0)(uintptr_t)uParam0;
    uint8_t const      cbInstr  = (uint8_t)uParam1;
    RT_NOREF(uParam2);
    return pfnCImpl(pVCpu, cbInstr);
}
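
/* Illustrative sketch, not part of this file: the threaded recompiler is
   expected to pack the operands for the function above into the generic
   64-bit parameters roughly like this (the record layout and names here are
   assumptions; see IEMInternal.h for the real call entry type):

       pCall->enmFunction = kIemThreadedFunc_BltIn_DeferToCImpl0;
       pCall->auParams[0] = (uintptr_t)pfnSomeCImpl;   // hypothetical C-impl
       pCall->auParams[1] = cbInstr;                   // instruction length
       pCall->auParams[2] = 0;                         // unused by this one
*/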


/**
 * Built-in function that checks for pending interrupts that can be delivered or
 * forced action flags.
 *
 * This triggers after the completion of an instruction, so EIP is already at
 * the next instruction. If an IRQ or important FF is pending, this will return
 * a non-zero status that stops TB execution.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckIrq)
{
    RT_NOREF(uParam0, uParam1, uParam2);

    /*
     * Check for IRQs and other FFs that need servicing.
     */
    uint64_t fCpu = pVCpu->fLocalForcedActions;
    fCpu &= VMCPU_FF_ALL_MASK & ~(  VMCPU_FF_PGM_SYNC_CR3
                                  | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
                                  | VMCPU_FF_TLB_FLUSH
                                  | VMCPU_FF_UNHALT );
    /** @todo this isn't even close to the NMI and interrupt conditions in EM! */
    if (RT_LIKELY(   (   !fCpu
                      || (   !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                          && (   !pVCpu->cpum.GstCtx.rflags.Bits.u1IF
                              || CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)) ) )
                  && !VM_FF_IS_ANY_SET(pVCpu->CTX_SUFF(pVM), VM_FF_ALL_MASK) ))
        return VINF_SUCCESS;

    Log(("%04x:%08RX32: Pending IRQ and/or FF: fCpu=%#RX64 fVm=%#RX32 IF=%d\n",
         pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, fCpu,
         pVCpu->CTX_SUFF(pVM)->fGlobalForcedActions & VM_FF_ALL_MASK, pVCpu->cpum.GstCtx.rflags.Bits.u1IF));
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckIrqBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
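
/* Reading the check above: execution continues (VINF_SUCCESS) when either
   nothing relevant is pending at all, or only APIC/PIC interrupts are pending
   while the guest cannot take them anyway (IF clear, or an interrupt shadow
   from e.g. STI or MOV SS is in effect) and no VM-wide force flags are set;
   everything else breaks out of the TB so the outer loop can service it. */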


/**
 * Built-in function that compares the fExec mask against uParam0.
 *
 * This is used both for IEM_CIMPL_F_MODE and IEM_CIMPL_F_VMEXIT after executing
 * an instruction.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckMode)
{
    uint32_t const fExpectedExec = (uint32_t)uParam0;
    if ((pVCpu->iem.s.fExec & IEMTB_F_KEY_MASK) == (fExpectedExec & IEMTB_F_KEY_MASK))
        return VINF_SUCCESS;
    LogFlow(("Mode changed at %04x:%08RX64: %#x -> %#x (xor: %#x, xor-key: %#x)\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fExpectedExec,
             pVCpu->iem.s.fExec, fExpectedExec ^ pVCpu->iem.s.fExec, (fExpectedExec ^ pVCpu->iem.s.fExec) & IEMTB_F_KEY_MASK));
    RT_NOREF(uParam1, uParam2);
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckModeBreaks);
    return VINF_IEM_REEXEC_BREAK;
}
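
/* Note on the masking above: only the bits covered by IEMTB_F_KEY_MASK take
   part in the comparison, i.e. the same bits that key the TB lookup. A
   mismatch in those bits means the TB was compiled for a different execution
   mode than the one the CPU is in now, so it must not be continued. */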


/**
 * Built-in function that checks for hardware instruction breakpoints.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckHwInstrBps)
{
    VBOXSTRICTRC rcStrict = DBGFBpCheckInstruction(pVCpu->CTX_SUFF(pVM), pVCpu,
                                                   pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base);
    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
        return VINF_SUCCESS;

    if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
    {
        LogFlow(("Guest HW bp at %04x:%08RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
        rcStrict = iemRaiseDebugException(pVCpu);
        Assert(rcStrict != VINF_SUCCESS);
    }
    else
        LogFlow(("VBoxDbg HW bp at %04x:%08RX64: %Rrc\n",
                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, VBOXSTRICTRC_VAL(rcStrict) ));
    RT_NOREF(uParam0, uParam1, uParam2);
    return rcStrict;
}
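
/* Note: DBGFBpCheckInstruction is handed the flat address (RIP + CS.BASE).
   Judging from the handling above, VINF_EM_RAW_GUEST_TRAP identifies a
   breakpoint armed by the guest itself, which is therefore reflected back as
   a #DB exception, while any other non-success status stems from a VBox
   debugger breakpoint and is passed up unchanged. */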


DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
{
    Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
    uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
    Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
    if (idxPage == 0)
        return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
    Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
    return pTb->aGCPhysPages[idxPage - 1];
}
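
/* Page-index convention used above, as implied by the asserts: index 0 is the
   page the TB starts on and is derived from GCPhysPc, while indexes 1..N map
   to aGCPhysPages[0..N-1], the page-aligned guest-physical addresses of any
   additional code pages the TB extends onto. */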


/**
 * Macro that implements the 16/32-bit CS.LIM check, as this is done by a
 * number of functions.
 */
/** @todo consider 32-bit EIP mid-instruction wrap-around... Difficult to
 *        test, since it would require replacing the default firmware. */
#define BODY_CHECK_CS_LIM(a_cbInstr) do { \
        if (RT_LIKELY((uint32_t)(pVCpu->cpum.GstCtx.eip + (a_cbInstr) - 1U) <= pVCpu->cpum.GstCtx.cs.u32Limit)) \
        { /* likely */ } \
        else \
        { \
            Log7(("EIP out of bounds at %04x:%08RX32 LB %u - CS.LIM=%#RX32\n", \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, (a_cbInstr), pVCpu->cpum.GstCtx.cs.u32Limit)); \
            return iemRaiseGeneralProtectionFault0(pVCpu); \
        } \
    } while(0)
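
/* Worked example with assumed values: with CS.LIM = 0xFFFF and EIP = 0xFFFE,
   a 4-byte instruction gives 0xFFFE + 4 - 1 = 0x10001, which exceeds the
   limit (the last instruction byte lies outside CS), so #GP(0) is raised; a
   2-byte instruction gives 0xFFFF, which is still within bounds. */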

/**
 * Macro that considers whether we need CS.LIM checking after a branch or
 * crossing over to a new page.
 */
#define BODY_CONSIDER_CS_LIM_CHECKING(a_pTb, a_cbInstr) do { \
        int64_t const offFromLim = (int64_t)pVCpu->cpum.GstCtx.cs.u32Limit - (int64_t)pVCpu->cpum.GstCtx.eip; \
        if (offFromLim >= GUEST_PAGE_SIZE + 16 - (int32_t)(pVCpu->cpum.GstCtx.cs.u64Base & GUEST_PAGE_OFFSET_MASK)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB need CS.LIM: %p at %04x:%08RX64 LB %u; #%u offFromLim=%#RX64 CS.LIM=%#RX32 CS.BASE=%#RX64\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), __LINE__, offFromLim, \
                  pVCpu->cpum.GstCtx.cs.u32Limit, pVCpu->cpum.GstCtx.cs.u64Base)); \
            RT_NOREF(a_pTb, a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckNeedCsLimChecking); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)
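
/* Reading the threshold above: per-instruction CS.LIM checking can be skipped
   as long as a whole guest page plus 16 bytes of slack (the longest x86
   instruction is 15 bytes) remains between EIP and the limit, adjusted for
   where CS.BASE sits within its page; anything tighter breaks out so a
   CS.LIM-checking TB variant can be used instead. */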

/**
 * Macro that implements opcode (re-)checking.
 */
#define BODY_CHECK_OPCODES(a_pTb, a_idxRange, a_offRange, a_cbInstr) do { \
        Assert((a_idxRange) < (a_pTb)->cRanges && (a_pTb)->cRanges <= RT_ELEMENTS((a_pTb)->aRanges)); \
        Assert((a_offRange) < (a_pTb)->aRanges[(a_idxRange)].cbOpcodes); \
        /* We can use pbInstrBuf here as it will be updated when branching (and prior to executing a TB). */ \
        if (RT_LIKELY(memcmp(&pVCpu->iem.s.pbInstrBuf[(a_pTb)->aRanges[(a_idxRange)].offPhysPage + (a_offRange)], \
                             &(a_pTb)->pabOpcodes[  (a_pTb)->aRanges[(a_idxRange)].offOpcodes + (a_offRange)], \
                             (a_pTb)->aRanges[(a_idxRange)].cbOpcodes - (a_offRange)) == 0)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; range %u, off %#x LB %#x + %#x; #%u\n", (a_pTb), \
                  pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_idxRange), \
                  (a_pTb)->aRanges[(a_idxRange)].offOpcodes, (a_pTb)->aRanges[(a_idxRange)].cbOpcodes, (a_offRange), __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
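
/* What the memcmp above compares: the opcode bytes currently mapped for the
   range (via pbInstrBuf at the range's physical page offset) against the copy
   recorded in the TB at compilation time (pabOpcodes). Any difference means
   the guest modified its code after the TB was built, so the TB is declared
   obsolete instead of being executed. */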

/**
 * Macro that implements TLB loading and pbInstrBuf updating for an
 * instruction crossing into a new page.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_FOR_NEW_PAGE(a_pTb, a_offInstr, a_idxRange, a_cbInstr) do { \
        pVCpu->iem.s.pbInstrBuf       = NULL; \
        pVCpu->iem.s.offCurInstrStart = GUEST_PAGE_SIZE - (a_offInstr); \
        pVCpu->iem.s.offInstrNextByte = GUEST_PAGE_SIZE; \
        iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
        \
        RTGCPHYS const GCPhysNewPage = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange); \
        if (RT_LIKELY(   pVCpu->iem.s.GCPhysInstrBuf == GCPhysNewPage \
                      && pVCpu->iem.s.pbInstrBuf)) \
        { /* likely */ } \
        else \
        { \
            Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; crossing at %#x; GCPhys=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), (a_offInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf, GCPhysNewPage, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
        } \
    } while(0)
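
/* Reading the setup above (an interpretation, not authoritative): a_offInstr
   is the number of instruction bytes on the old page, so relative to the old
   buffer the instruction starts at GUEST_PAGE_SIZE - a_offInstr and the next
   byte to fetch is the first one past the page boundary; clearing pbInstrBuf
   forces iemOpcodeFetchBytesJmp to do a fresh instruction-TLB lookup. */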

/**
 * Macro that implements TLB loading and pbInstrBuf updating when branching or
 * when crossing a page on an instruction boundary.
 *
 * This differs from BODY_LOAD_TLB_FOR_NEW_PAGE in that it will first check if
 * it is an inter-page branch and also check the page offset.
 *
 * This may long jump if we're raising a \#PF, \#GP or similar trouble.
 */
#define BODY_LOAD_TLB_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        if (off < pVCpu->iem.s.cbInstrBufTotal) \
        { \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
            Assert(pVCpu->iem.s.pbInstrBuf); \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            if (GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off) \
            { /* we're good */ } \
            else if ((a_pTb)->aRanges[(a_idxRange)].offPhysPage != off) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/1; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
        else \
        { \
            /* Must translate new RIP. */ \
            pVCpu->iem.s.pbInstrBuf       = NULL; \
            pVCpu->iem.s.offCurInstrStart = 0; \
            pVCpu->iem.s.offInstrNextByte = 0; \
            iemOpcodeFetchBytesJmp(pVCpu, 0, NULL); \
            Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK) || !pVCpu->iem.s.pbInstrBuf); \
            \
            RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                     | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
            uint64_t const offNew = uPc - pVCpu->iem.s.uInstrBufPc; \
            if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + offNew \
                && pVCpu->iem.s.pbInstrBuf) \
            { /* likely */ } \
            else if (   (a_pTb)->aRanges[(a_idxRange)].offPhysPage != offNew \
                     && pVCpu->iem.s.pbInstrBuf) \
            { \
                Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
                return VINF_IEM_REEXEC_BREAK; \
            } \
            else \
            { \
                Log7(("TB obsolete: %p at %04x:%08RX64 LB %u; branching/2; GCPhysWithOffset=%RGp expected %RGp, pbInstrBuf=%p - #%u\n", \
                      (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                      pVCpu->iem.s.GCPhysInstrBuf + offNew, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
                RT_NOREF(a_cbInstr); \
                return iemThreadeFuncWorkerObsoleteTb(pVCpu); \
            } \
        } \
    } while(0)
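
/* The macro above ends in one of three ways once the branch target is
   resolved: the guest-physical address matches what the TB range expects and
   execution continues; the page offset differs, i.e. the branch went
   somewhere this TB was not compiled for, so a branch miss is counted and we
   break out to look up a suitable TB; or the offset matches but the physical
   page does not, meaning the code mapping changed and the TB is obsolete. */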

/**
 * Macro that implements the PC check after a conditional branch.
 */
#define BODY_CHECK_PC_AFTER_BRANCH(a_pTb, a_idxRange, a_cbInstr) do { \
        /* Is RIP within the current code page? */ \
        Assert(pVCpu->cpum.GstCtx.cs.u64Base == 0 || !IEM_IS_64BIT_CODE(pVCpu)); \
        uint64_t const uPc = pVCpu->cpum.GstCtx.rip + pVCpu->cpum.GstCtx.cs.u64Base; \
        uint64_t const off = uPc - pVCpu->iem.s.uInstrBufPc; \
        Assert(!(pVCpu->iem.s.GCPhysInstrBuf & GUEST_PAGE_OFFSET_MASK)); \
        RTGCPHYS const GCPhysRangePageWithOffset = iemTbGetRangePhysPageAddr(a_pTb, a_idxRange) \
                                                 | (a_pTb)->aRanges[(a_idxRange)].offPhysPage; \
        if (   GCPhysRangePageWithOffset == pVCpu->iem.s.GCPhysInstrBuf + off \
            && off < pVCpu->iem.s.cbInstrBufTotal) \
        { /* we're good */ } \
        else \
        { \
            Log7(("TB jmp miss: %p at %04x:%08RX64 LB %u; GCPhysWithOffset=%RGp hoped for %RGp, pbInstrBuf=%p - #%u\n", \
                  (a_pTb), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, (a_cbInstr), \
                  pVCpu->iem.s.GCPhysInstrBuf + off, GCPhysRangePageWithOffset, pVCpu->iem.s.pbInstrBuf, __LINE__)); \
            RT_NOREF(a_cbInstr); \
            STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatCheckBranchMisses); \
            return VINF_IEM_REEXEC_BREAK; \
        } \
    } while(0)



/**
 * Built-in function that checks that EIP/IP + uParam0 is within CS.LIM,
 * raising a \#GP(0) if this isn't the case.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLim)
{
    uint32_t const cbInstr = (uint32_t)uParam0;
    RT_NOREF(uParam1, uParam2);
    BODY_CHECK_CS_LIM(cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and CS.LIM after an instruction
 * that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes after an instruction that may have
 * modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for re-checking opcodes and considering the need for
 * CS.LIM checking after an instruction that may have modified them.
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    return VINF_SUCCESS;
}
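
/* Naming pattern note: most checkers in this file come in three flavours - a
   "CheckCsLimAnd..." variant that always enforces CS.LIM, a plain variant
   that omits the check, and a "...ConsiderCsLim" variant that only breaks out
   when EIP gets close enough to the limit for checking to matter (see
   BODY_CONSIDER_CS_LIM_CHECKING). The threaded recompiler picks the variant
   matching the current CS configuration. */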


/*
 * Post-branching checkers.
 */

/**
 * Built-in function for checking CS.LIM, checking the PC and checking opcodes
 * after conditional branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes after conditional
 * branching within the same page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodes)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking the PC and checking opcodes and considering
 * the need for CS.LIM checking after conditional branching within the same
 * page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndPcAndOpcodes
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckPcAndOpcodesConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_PC_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when transitioning to
 * a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when transitioning to a different code page.
 *
 * The code page transition can either be natural over onto the next page (with
 * the instruction starting at page offset zero) or by means of branching.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    uint32_t const offRange = (uint32_t)uParam2;
    //LogFunc(("idxRange=%u @ %#x LB %#x: offPhysPage=%#x LB %#x\n", idxRange, offRange, cbInstr, pTb->aRanges[idxRange].offPhysPage, pTb->aRanges[idxRange].cbOpcodes));
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_AFTER_BRANCH(pTb, idxRange, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange, offRange, cbInstr);
    //LogFunc(("okay\n"));
    return VINF_SUCCESS;
}



/*
 * Natural page crossing checkers.
 */

/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes on
 * both pages when transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}
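
/* Parameter packing note for the across-page checkers: uParam0 carries two
   values - the instruction length in the low 32 bits and the number of
   instruction bytes on the first page (cbStartPage) in the high 32 bits -
   while uParam1 and uParam2 carry the first range index and offset. E.g. a
   5-byte instruction with 2 bytes on the old page would be encoded as
   uParam0 = ((uint64_t)2 << 32) | 5. */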


/**
 * Built-in function for loading TLB and checking opcodes on both pages when
 * transitioning to a different code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes on both pages and
 * considering the need for CS.LIM checking when transitioning to a different
 * code page.
 *
 * This is used when the previous instruction requires revalidation of opcode
 * bytes and the current instruction straddles a page boundary with opcode
 * bytes in both the old and new page.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange1, offRange1, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page.
 *
 * Only opcodes on the new page are checked.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb         = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr     = (uint32_t)uParam0;
    uint32_t const cbStartPage = (uint32_t)(uParam0 >> 32);
    uint32_t const idxRange1   = (uint32_t)uParam1;
    //uint32_t const offRange1   = (uint32_t)uParam2;
    uint32_t const idxRange2   = idxRange1 + 1;
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, cbStartPage, idxRange2, cbInstr);
    BODY_CHECK_OPCODES(pTb, idxRange2, 0, cbInstr);
    RT_NOREF(uParam2);
    return VINF_SUCCESS;
}
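
/* Distinction within the natural page-crossing family: the "AcrossPage"
   checkers further up verify opcode bytes in both the old and the new range,
   the "OnNextPage" checkers above only verify the new page (the straddling
   instruction still has cbStartPage bytes on the old one), and the
   "OnNewPage" checkers below handle the simplest case, where the next
   instruction starts at byte 0 of the new page. */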


/**
 * Built-in function for checking CS.LIM, loading TLB and checking opcodes when
 * advancing naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CHECK_CS_LIM(cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes when advancing
 * naturally to a different code page with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Built-in function for loading TLB and checking opcodes and considering the
 * need for CS.LIM checking when advancing naturally to a different code page
 * with first instr at byte 0.
 *
 * @see iemThreadedFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb
 */
IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim)
{
    PCIEMTB const  pTb      = pVCpu->iem.s.pCurTbR3;
    uint32_t const cbInstr  = (uint32_t)uParam0;
    uint32_t const idxRange = (uint32_t)uParam1;
    RT_NOREF(uParam2); //Assert(uParam2 == 0 /*offRange*/);
    BODY_CONSIDER_CS_LIM_CHECKING(pTb, cbInstr);
    BODY_LOAD_TLB_FOR_NEW_PAGE(pTb, 0, idxRange, cbInstr);
    Assert(pVCpu->iem.s.offCurInstrStart == 0);
    BODY_CHECK_OPCODES(pTb, idxRange, 0, cbInstr);
    return VINF_SUCCESS;
}