/* $Id: IEMAllMem.cpp 108331 2025-02-21 14:45:43Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Common Memory Routines.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IEM_MEM
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/dbgf.h>
#include "IEMInternal.h"
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <iprt/assert.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#include "IEMInline.h"
#ifdef VBOX_VMM_TARGET_X86
# include "target-x86/IEMInline-x86.h" /* not really required. sigh. */
# include "target-x86/IEMAllTlbInline-x86.h"
#endif


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/
#if defined(IEM_LOG_MEMORY_WRITES)
/** What IEM just wrote. */
uint8_t g_abIemWrote[256];
/** How much IEM just wrote. */
size_t g_cbIemWrote;
#endif


/** @name Memory access.
 *
 * @{
 */

/**
 * Commits a bounce buffer that needs writing back and unmaps it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu           The cross context virtual CPU structure of the calling thread.
 * @param   iMemMap         The index of the buffer to commit.
 * @param   fPostponeFail   Whether we can postpone writer failures to ring-3.
 *                          Always false in ring-3, obviously.
 */
static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
{
    Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
    Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
#ifdef IN_RING3
    Assert(!fPostponeFail);
    RT_NOREF_PV(fPostponeFail);
#endif

    /*
     * Do the writing.
     */
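    /* A bounce buffered write can cover up to two separate physical ranges
       (GCPhysFirst/cbFirst and GCPhysSecond/cbSecond) when the original guest
       access crossed a page boundary; both parts are written back here. */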
    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
    {
        uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
        uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
        uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
        if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
        {
            /*
             * Carefully and efficiently dealing with access handler return
             * codes makes this a little bloated.
             */
            VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
                                                 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                 pbBuf,
                                                 cbFirst,
                                                 PGMACCESSORIGIN_IEM);
            if (rcStrict == VINF_SUCCESS)
            {
                if (cbSecond)
                {
                    rcStrict = PGMPhysWrite(pVM,
                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                            pbBuf + cbFirst,
                                            cbSecond,
                                            PGMACCESSORIGIN_IEM);
                    if (rcStrict == VINF_SUCCESS)
                    { /* nothing */ }
                    else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#ifndef IN_RING3
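                    /* Outside ring-3 a failing handler write can be queued instead of failed:
                       the mapping is flagged as pending and VMCPU_FF_IEM is set so that
                       IEMR3ProcessForceFlag can retry the commit once back in ring-3. */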
                    else if (fPostponeFail)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
                        VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                        return iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#endif
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        return rcStrict;
                    }
                }
            }
            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            {
                if (!cbSecond)
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                }
                else
                {
                    VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
                                                          pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                          pbBuf + cbFirst,
                                                          cbSecond,
                                                          PGMACCESSORIGIN_IEM);
                    if (rcStrict2 == VINF_SUCCESS)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
                    else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
                        PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
                        rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#ifndef IN_RING3
                    else if (fPostponeFail)
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                        pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
                        VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                        return iemSetPassUpStatus(pVCpu, rcStrict);
                    }
#endif
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
                        return rcStrict2;
                    }
                }
            }
#ifndef IN_RING3
            else if (fPostponeFail)
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                if (!cbSecond)
                    pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
                else
                    pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
                VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
                return iemSetPassUpStatus(pVCpu, rcStrict);
            }
#endif
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                return rcStrict;
            }
        }
        else
        {
            /*
             * No access handlers, much simpler.
             */
            int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
            if (RT_SUCCESS(rc))
            {
                if (cbSecond)
                {
                    rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
                    if (RT_SUCCESS(rc))
                    { /* likely */ }
                    else
                    {
                        LogEx(LOG_GROUP_IEM,
                              ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                               pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
                        return rc;
                    }
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
                return rc;
            }
        }
    }

#if defined(IEM_LOG_MEMORY_WRITES)
    Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
          RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
    if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
        Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
              RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
              &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));

    size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
    g_cbIemWrote = cbWrote;
    memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
#endif

    /*
     * Free the mapping entry.
     */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}


/**
 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
 * @todo duplicated
 */
DECL_FORCE_INLINE(uint32_t)
iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
{
    bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
    if (fAccess & IEM_ACCESS_TYPE_WRITE)
        return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
    return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
}


/**
 * iemMemMap worker that deals with a request crossing pages.
 */
VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
                                            size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) RT_NOEXCEPT
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
    Assert(cbMem <= GUEST_PAGE_SIZE);

    /*
     * Do the address translations.
     */
    uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
    RTGCPHYS GCPhysFirst;
    VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));

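    /* The second translation targets the guest page holding the last byte of the
       access, i.e. the address of that byte rounded down to a page boundary. */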
    uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
    RTGCPHYS GCPhysSecond;
    rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
                                                 cbSecondPage, fAccess, &GCPhysSecond);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
    GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */

    PVMCC pVM = pVCpu->CTX_SUFF(pVM);

    /*
     * Check for data breakpoints.
     */
    if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
    { /* likely */ }
    else
    {
        uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
        fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
                                              cbSecondPage, fAccess);
        pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
        if (fDataBps > 1)
            LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
                                  fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
    }

    /*
     * Read in the current memory content if it's a read, execute or partial
     * write access.
     */
    uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

    if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    {
        if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
        {
            /*
             * Must carefully deal with access handler status codes here,
             * which makes the code a bit bloated.
             */
            rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
            if (rcStrict == VINF_SUCCESS)
            {
                rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
                if (rcStrict == VINF_SUCCESS)
                { /* likely */ }
                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
                                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict;
                }
            }
            else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
            {
                VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
                if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
                {
                    PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                }
                else
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
                           GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict2;
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
                                      GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                return rcStrict;
            }
        }
        else
        {
            /*
             * No informational status codes here, much more straightforward.
             */
            int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
            if (RT_SUCCESS(rc))
            {
                Assert(rc == VINF_SUCCESS);
                rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
                if (RT_SUCCESS(rc))
                    Assert(rc == VINF_SUCCESS);
                else
                {
                    LogEx(LOG_GROUP_IEM,
                          ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
                    return rc;
                }
            }
            else
            {
                LogEx(LOG_GROUP_IEM,
                      ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
                return rc;
            }
        }
    }
#ifdef VBOX_STRICT
    else
        memset(pbBuf, 0xcc, cbMem);
    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
#endif
    AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);

    /*
     * Commit the bounce buffer entry.
     */
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
    pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
    pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings++;

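    /* Hand the bounce buffer back to the caller together with the unmap info byte:
       bits 0-2 hold the mapping index, bit 3 is always set as a validity marker, and
       bits 4-7 carry the access type for the sanity check in iemMemCommitAndUnmap. */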
    *ppvMem = pbBuf;
    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    return VINF_SUCCESS;
}


/**
 * iemMemMap worker that deals with iemMemPageMap failures.
 */
VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
                                       RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) RT_NOEXCEPT
{
    STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);

    /*
     * Filter out conditions we can handle and the ones which shouldn't happen.
     */
    if (   rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
        && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
        && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
    {
        AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
        return rcMap;
    }
    pVCpu->iem.s.cPotentialExits++;

    /*
     * Read in the current memory content if it's a read, execute or partial
     * write access.
     */
    uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
    if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
    {
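        /* Unassigned physical memory reads as all-ones, so the buffer can simply be
           filled with 0xff instead of going through PGM. */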
        if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
            memset(pbBuf, 0xff, cbMem);
        else
        {
            int rc;
            if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
            {
                VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
                if (rcStrict == VINF_SUCCESS)
                { /* nothing */ }
                else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
                    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
                                          GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
                    return rcStrict;
                }
            }
            else
            {
                rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
                if (RT_SUCCESS(rc))
                { /* likely */ }
                else
                {
                    LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
                                          GCPhysFirst, rc));
                    return rc;
                }
            }
        }
    }
#ifdef VBOX_STRICT
    else
        memset(pbBuf, 0xcc, cbMem);
#endif
#ifdef VBOX_STRICT
    if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
        memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
#endif

    /*
     * Commit the bounce buffer entry.
     */
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
    pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
    pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
    pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
    pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
    pVCpu->iem.s.iNextMapping = iMemMap + 1;
    pVCpu->iem.s.cActiveMappings++;

    *ppvMem = pbBuf;
    *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
    return VINF_SUCCESS;
}



/**
 * Commits the guest memory if bounce buffered and unmaps it.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
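 *
 * A rough usage sketch (hypothetical caller; the exact iemMemMap parameters are
 * simplified here and may not match the real signature):
 * @code
 *      uint8_t      bUnmapInfo;
 *      uint32_t    *pu32Dst = NULL;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, &bUnmapInfo, sizeof(*pu32Dst),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, 0 /*uAlignCtl*/);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
 *      }
 * @endcode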
 */
VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturn(   (bUnmapInfo & 0x08)
                    && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                    && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
                    ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
                    VERR_NOT_FOUND);

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
            return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}


/**
 * Rolls back the guest memory (conceptually only) and unmaps it.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                        && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                           == ((unsigned)bUnmapInfo >> 4),
                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));

    /* Unlock it if necessary. */
    if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
}


/**
 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
 *
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturnVoid(   (bUnmapInfo & 0x08)
                        && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                        && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                           == ((unsigned)bUnmapInfo >> 4),
                        ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
        {
            VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
            if (rcStrict == VINF_SUCCESS)
                return;
            IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
        }
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
}


/** Fallback for iemMemCommitAndUnmapRwJmp. */
void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapAtJmp. */
void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapWoJmp. */
void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemCommitAndUnmapRoJmp. */
void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
}


/** Fallback for iemMemRollbackAndUnmapWo. */
void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
    iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
}


#ifndef IN_RING3
/**
 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 * buffer part shows trouble, the commit is postponed to ring-3 (sets FF and stuff).
 *
 * Allows the instruction to be completed and retired, while the IEM user will
 * return to ring-3 immediately afterwards and do the postponed writes there.
 *
 * @returns VBox status code (no strict statuses).  Caller must check
 *          VMCPU_FF_IEM before repeating string instructions and similar stuff.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   bUnmapInfo  Unmap info set by iemMemMap.
 */
VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
{
    uintptr_t const iMemMap = bUnmapInfo & 0x7;
    AssertMsgReturn(   (bUnmapInfo & 0x08)
                    && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
                    && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
                       == ((unsigned)bUnmapInfo >> 4),
                    ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
                    VERR_NOT_FOUND);

    /* If it's bounce buffered, we may need to write back the buffer. */
    if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
    {
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
            return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
    }
    /* Otherwise unlock it. */
    else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
        PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);

    /* Free the entry. */
    pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    Assert(pVCpu->iem.s.cActiveMappings != 0);
    pVCpu->iem.s.cActiveMappings--;
    return VINF_SUCCESS;
}
#endif


/**
 * Rolls back mappings, releasing page locks and such.
 *
 * The caller shall only call this after checking cActiveMappings.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 */
void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
{
    Assert(pVCpu->iem.s.cActiveMappings > 0);

    uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
    {
        uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
        if (fAccess != IEM_ACCESS_INVALID)
        {
            AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
            if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
                PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
            AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
                      ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
                       iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
                       pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
            pVCpu->iem.s.cActiveMappings--;
        }
    }
}

#undef LOG_GROUP
#define LOG_GROUP LOG_GROUP_IEM

/** @} */


#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPUCC pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
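    /* (When both are in the EM range the lower value wins, lower EM status codes
       being the higher priority scheduling requests.) */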
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
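    /*
     * Ring-3 side of the postponed-commit path: ring-0/raw-mode flagged one or more
     * bounce buffer mappings as IEM_ACCESS_PENDING_R3_WRITE_1ST/2ND and set
     * VMCPU_FF_IEM; the pending PGMPhysWrite calls are performed here and their
     * status merged into the status code returned by ring-0.
     */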
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}

#endif /* IN_RING3 */