VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAll-x86.cpp@108246

Last change on this file since 108246 was 108246, checked in by vboxsync, 3 months ago

VMM/IEM: Splitting up IEMAll.cpp. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 9.9 KB
/* $Id: IEMAll-x86.cpp 108246 2025-02-17 00:18:01Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - x86 target, miscellaneous.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/dbgf.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"

/**
 * Calculates IEM_F_PENDING_BRK_XXX (IEM_F_PENDING_BRK_MASK) flags, slow code
 * path.
 *
 * This will also invalidate TLB entries for any pages with active data
 * breakpoints on them.
 *
 * @returns IEM_F_PENDING_BRK_XXX or zero.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 *
 * @note    Don't call directly, use iemCalcExecDbgFlags instead.
 */
uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu)
{
    uint32_t fExec = 0;

    /*
     * Helper for invalidating the data TLB for breakpoint addresses.
     *
     * This is to make sure any access to the page will always trigger a TLB
     * load for as long as the breakpoint is enabled.
     */
#ifdef IEM_WITH_DATA_TLB
# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { \
        RTGCPTR uTagNoRev = (a_uValue); \
        uTagNoRev = IEMTLB_CALC_TAG_NO_REV(uTagNoRev); \
        /** @todo do large page accounting */ \
        uintptr_t const idxEven = IEMTLB_TAG_TO_EVEN_INDEX(uTagNoRev); \
        if (pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevision)) \
            pVCpu->iem.s.DataTlb.aEntries[idxEven].uTag = 0; \
        if (pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag == (uTagNoRev | pVCpu->iem.s.DataTlb.uTlbRevisionGlobal)) \
            pVCpu->iem.s.DataTlb.aEntries[idxEven + 1].uTag = 0; \
    } while (0)
#else
# define INVALID_TLB_ENTRY_FOR_BP(a_uValue) do { } while (0)
#endif
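
    /* Layout inference (from the code above, not an original comment): data
     * TLB entries come in even/odd pairs, the even slot matching uTlbRevision
     * (non-global pages) and the odd slot matching uTlbRevisionGlobal (global
     * pages), so both possible homes of the breakpoint page get evicted. */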

    /*
     * Process guest breakpoints.
     */
#define PROCESS_ONE_BP(a_fDr7, a_iBp, a_uValue) do { \
        if (a_fDr7 & X86_DR7_L_G(a_iBp)) \
        { \
            switch (X86_DR7_GET_RW(a_fDr7, a_iBp)) \
            { \
                case X86_DR7_RW_EO: \
                    fExec |= IEM_F_PENDING_BRK_INSTR; \
                    break; \
                case X86_DR7_RW_WO: \
                case X86_DR7_RW_RW: \
                    fExec |= IEM_F_PENDING_BRK_DATA; \
                    INVALID_TLB_ENTRY_FOR_BP(a_uValue); \
                    break; \
                case X86_DR7_RW_IO: \
                    fExec |= IEM_F_PENDING_BRK_X86_IO; \
                    break; \
            } \
        } \
    } while (0)
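
    /* For reference (Intel SDM facts, not an original comment), the DR7 R/W
     * encodings dispatched on above are:
     *   X86_DR7_RW_EO - break on instruction execution only,
     *   X86_DR7_RW_WO - break on data writes only,
     *   X86_DR7_RW_IO - break on I/O accesses (requires CR4.DE),
     *   X86_DR7_RW_RW - break on data reads and writes. */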

    uint32_t const fGstDr7 = (uint32_t)pVCpu->cpum.GstCtx.dr[7];
    if (fGstDr7 & X86_DR7_ENABLED_MASK)
    {
/** @todo extract more details here to simplify matching later. */
#ifdef IEM_WITH_DATA_TLB
        IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_DR0_DR3);
#endif
        PROCESS_ONE_BP(fGstDr7, 0, pVCpu->cpum.GstCtx.dr[0]);
        PROCESS_ONE_BP(fGstDr7, 1, pVCpu->cpum.GstCtx.dr[1]);
        PROCESS_ONE_BP(fGstDr7, 2, pVCpu->cpum.GstCtx.dr[2]);
        PROCESS_ONE_BP(fGstDr7, 3, pVCpu->cpum.GstCtx.dr[3]);
    }
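
    /* Note (inferred from the code, not an original comment): the
     * CPUMCTX_EXTRN_DR0_DR3 import above is only needed in IEM_WITH_DATA_TLB
     * builds; otherwise INVALID_TLB_ENTRY_FOR_BP expands to nothing and
     * DR0..DR3 are never read here. */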

    /*
     * Process hypervisor breakpoints.
     */
    PVMCC const    pVM       = pVCpu->CTX_SUFF(pVM);
    uint32_t const fHyperDr7 = DBGFBpGetDR7(pVM);
    if (fHyperDr7 & X86_DR7_ENABLED_MASK)
    {
/** @todo extract more details here to simplify matching later. */
        PROCESS_ONE_BP(fHyperDr7, 0, DBGFBpGetDR0(pVM));
        PROCESS_ONE_BP(fHyperDr7, 1, DBGFBpGetDR1(pVM));
        PROCESS_ONE_BP(fHyperDr7, 2, DBGFBpGetDR2(pVM));
        PROCESS_ONE_BP(fHyperDr7, 3, DBGFBpGetDR3(pVM));
    }

    return fExec;
}
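
/* Call-site sketch (hypothetical, for illustration only): per the @note in
 * the function comment, callers go through the iemCalcExecDbgFlags wrapper,
 * which is assumed to test the DR7 enable masks cheaply and only take this
 * slow path when a breakpoint may actually be armed, roughly:
 *     pVCpu->iem.s.fExec = (pVCpu->iem.s.fExec & ~IEM_F_PENDING_BRK_MASK)
 *                        | iemCalcExecDbgFlags(pVCpu);
 */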


/** @name   Register Access.
 * @{
 */

/**
 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS8AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int8_t offNextInstr,
                                                        IEMMODE enmEffOpSize) RT_NOEXCEPT
{
    switch (enmEffOpSize)
    {
        case IEMMODE_16BIT:
        {
            uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + (int16_t)offNextInstr;
            if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
                          || IEM_IS_64BIT_CODE(pVCpu) /* no CS limit checks in 64-bit mode */))
                pVCpu->cpum.GstCtx.rip = uNewIp;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }
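
        /* Worked example for the 16-bit case above (illustrative values, not
           from the source): ip=0xfff0, cbInstr=2, offNextInstr=0x10 wraps in
           uint16_t arithmetic to uNewIp=0x0002, which is then checked against
           the CS limit as usual. */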

        case IEMMODE_32BIT:
        {
            Assert(!IEM_IS_64BIT_CODE(pVCpu));
            Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);

            uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + (int32_t)offNextInstr;
            if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
                pVCpu->cpum.GstCtx.rip = uNewEip;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }

        case IEMMODE_64BIT:
        {
            Assert(IEM_IS_64BIT_CODE(pVCpu));

            uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
            if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
                pVCpu->cpum.GstCtx.rip = uNewRip;
            else
                return iemRaiseGeneralProtectionFault0(pVCpu);
            break;
        }
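
        /* Canonical here means bits 63:47 are all equal, i.e. a sign-extended
           48-bit address (assuming 4-level paging): 0x00007fffffffffff passes,
           while 0x0000800000000000 takes the #GP(0) path above. */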

        IEM_NOT_REACHED_DEFAULT_CASE_RET();
    }

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    pVCpu->iem.s.cbOpcode = cbInstr;
#endif
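
    /* Presumably setting cbOpcode to the full instruction length marks the
     * buffered opcode bytes as consumed, forcing a fresh opcode fetch for the
     * next instruction; the same pattern recurs in the helpers below. */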

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}


/**
 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS16AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int16_t offNextInstr) RT_NOEXCEPT
{
    Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);

    uint16_t const uNewIp = pVCpu->cpum.GstCtx.ip + cbInstr + offNextInstr;
    if (RT_LIKELY(   uNewIp <= pVCpu->cpum.GstCtx.cs.u32Limit
                  || IEM_IS_64BIT_CODE(pVCpu) /* no limit checking in 64-bit mode */))
        pVCpu->cpum.GstCtx.rip = uNewIp;
    else
        return iemRaiseGeneralProtectionFault0(pVCpu);

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}


/**
 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
 *
 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
 * segment limit.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   cbInstr         Instruction size.
 * @param   offNextInstr    The offset of the next instruction.
 * @param   enmEffOpSize    Effective operand size.
 */
VBOXSTRICTRC iemRegRipRelativeJumpS32AndFinishClearingRF(PVMCPUCC pVCpu, uint8_t cbInstr, int32_t offNextInstr,
                                                         IEMMODE enmEffOpSize) RT_NOEXCEPT
{
    if (enmEffOpSize == IEMMODE_32BIT)
    {
        Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(!IEM_IS_64BIT_CODE(pVCpu));

        uint32_t const uNewEip = pVCpu->cpum.GstCtx.eip + cbInstr + offNextInstr;
        if (RT_LIKELY(uNewEip <= pVCpu->cpum.GstCtx.cs.u32Limit))
            pVCpu->cpum.GstCtx.rip = uNewEip;
        else
            return iemRaiseGeneralProtectionFault0(pVCpu);
    }
    else
    {
        Assert(enmEffOpSize == IEMMODE_64BIT);

        uint64_t const uNewRip = pVCpu->cpum.GstCtx.rip + cbInstr + (int64_t)offNextInstr;
        if (RT_LIKELY(IEM_IS_CANONICAL(uNewRip)))
            pVCpu->cpum.GstCtx.rip = uNewRip;
        else
            return iemRaiseGeneralProtectionFault0(pVCpu);
    }

#ifndef IEM_WITH_CODE_TLB
    /* Flush the prefetch buffer. */
    pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
#endif

    /*
     * Clear RF and finish the instruction (maybe raise #DB).
     */
    return iemRegFinishClearingRF(pVCpu, VINF_SUCCESS);
}
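
/* Usage sketch (hypothetical decoder fragment, not part of this file): a
 * relative JMP implementation would typically tail-call one of the helpers
 * above, e.g.
 *     return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, cbInstr, i8Imm,
 *                                                       pVCpu->iem.s.enmEffOpSize);
 * keeping the limit/canonical checks and the RF/#DB epilogue in one place. */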

/** @} */
