VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMem.cpp @ 108278

Last change on this file: r108278, checked in by vboxsync, 3 months ago

VMM/IEM: Removed the #ifndef IEM_WITH_SETJMP code. We've had IEM_WITH_SETJMP defined unconditionally since 7.0 and the code probably doesn't even compile w/o it, so best remove the unused code. jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 39.7 KB
1/* $Id: IEMAllMem.cpp 108278 2025-02-18 15:46:53Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Common Memory Routines.
4 */
5
6/*
7 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.215389.xyz.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
29/*********************************************************************************************************************************
30* Header Files *
31*********************************************************************************************************************************/
32#define LOG_GROUP LOG_GROUP_IEM_MEM
33#define VMCPU_INCL_CPUM_GST_CTX
34#ifdef IN_RING0
35# define VBOX_VMM_TARGET_X86
36#endif
37#include <VBox/vmm/iem.h>
38#include <VBox/vmm/cpum.h>
39#include <VBox/vmm/pgm.h>
40#include <VBox/vmm/dbgf.h>
41#include "IEMInternal.h"
42#include <VBox/vmm/vmcc.h>
43#include <VBox/log.h>
44#include <VBox/err.h>
45#include <VBox/param.h>
46#include <iprt/assert.h>
47#include <iprt/string.h>
48#include <iprt/x86.h>
49
50#include "IEMInline.h"
51#ifdef VBOX_VMM_TARGET_X86
52# include "target-x86/IEMInline-x86.h" /* not really required. sigh. */
53# include "target-x86/IEMAllTlbInline-x86.h"
54#endif
55
56
57/*********************************************************************************************************************************
58* Global Variables *
59*********************************************************************************************************************************/
60#if defined(IEM_LOG_MEMORY_WRITES)
61/** What IEM just wrote. */
62uint8_t g_abIemWrote[256];
63/** How much IEM just wrote. */
64size_t g_cbIemWrote;
65#endif
66
67
68/** @name Memory access.
69 *
70 * @{
71 */
72
73/**
74 * Commits a bounce buffer that needs writing back and unmaps it.
75 *
76 * @returns Strict VBox status code.
77 * @param pVCpu The cross context virtual CPU structure of the calling thread.
78 * @param iMemMap The index of the buffer to commit.
79 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
80 * Always false in ring-3, obviously.
81 */
82static VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
83{
84 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
85 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
86#ifdef IN_RING3
87 Assert(!fPostponeFail);
88 RT_NOREF_PV(fPostponeFail);
89#endif
90
91 /*
92 * Do the writing.
93 */
94 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
95 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
96 {
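/* The mapped range may cross a guest page boundary, so the buffer is flushed in up to
 * two chunks: cbFirst bytes to GCPhysFirst and cbSecond bytes to GCPhysSecond. */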
97 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
98 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
99 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
100 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
101 {
102 /*
103 * Carefully and efficiently dealing with access handler return
104 * codes makes this a little bloated.
105 */
106 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
107 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
108 pbBuf,
109 cbFirst,
110 PGMACCESSORIGIN_IEM);
111 if (rcStrict == VINF_SUCCESS)
112 {
113 if (cbSecond)
114 {
115 rcStrict = PGMPhysWrite(pVM,
116 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
117 pbBuf + cbFirst,
118 cbSecond,
119 PGMACCESSORIGIN_IEM);
120 if (rcStrict == VINF_SUCCESS)
121 { /* nothing */ }
122 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
123 {
124 LogEx(LOG_GROUP_IEM,
125 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
126 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
127 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
128 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
129 }
130#ifndef IN_RING3
131 else if (fPostponeFail)
132 {
133 LogEx(LOG_GROUP_IEM,
134 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
135 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
136 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
137 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
138 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
139 return iemSetPassUpStatus(pVCpu, rcStrict);
140 }
141#endif
142 else
143 {
144 LogEx(LOG_GROUP_IEM,
145 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
146 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
147 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
148 return rcStrict;
149 }
150 }
151 }
152 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
153 {
154 if (!cbSecond)
155 {
156 LogEx(LOG_GROUP_IEM,
157 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
158 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
159 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
160 }
161 else
162 {
163 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
164 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
165 pbBuf + cbFirst,
166 cbSecond,
167 PGMACCESSORIGIN_IEM);
168 if (rcStrict2 == VINF_SUCCESS)
169 {
170 LogEx(LOG_GROUP_IEM,
171 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
172 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
173 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
174 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
175 }
176 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
177 {
178 LogEx(LOG_GROUP_IEM,
179 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
180 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
181 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
182 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
183 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
184 }
185#ifndef IN_RING3
186 else if (fPostponeFail)
187 {
188 LogEx(LOG_GROUP_IEM,
189 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
190 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
191 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
192 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
193 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
194 return iemSetPassUpStatus(pVCpu, rcStrict);
195 }
196#endif
197 else
198 {
199 LogEx(LOG_GROUP_IEM,
200 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
201 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
202 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
203 return rcStrict2;
204 }
205 }
206 }
207#ifndef IN_RING3
208 else if (fPostponeFail)
209 {
210 LogEx(LOG_GROUP_IEM,
211 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
212 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
213 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
214 if (!cbSecond)
215 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
216 else
217 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
218 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
219 return iemSetPassUpStatus(pVCpu, rcStrict);
220 }
221#endif
222 else
223 {
224 LogEx(LOG_GROUP_IEM,
225 ("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
226 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
227 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
228 return rcStrict;
229 }
230 }
231 else
232 {
233 /*
234 * No access handlers, much simpler.
235 */
236 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
237 if (RT_SUCCESS(rc))
238 {
239 if (cbSecond)
240 {
241 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
242 if (RT_SUCCESS(rc))
243 { /* likely */ }
244 else
245 {
246 LogEx(LOG_GROUP_IEM,
247 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
248 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
249 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
250 return rc;
251 }
252 }
253 }
254 else
255 {
256 LogEx(LOG_GROUP_IEM,
257 ("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
258 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
259 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
260 return rc;
261 }
262 }
263 }
264
265#if defined(IEM_LOG_MEMORY_WRITES)
266 Log5(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
267 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
268 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
269 Log5(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
270 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
271 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
272
273 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
274 g_cbIemWrote = cbWrote;
275 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
276#endif
277
278 /*
279 * Free the mapping entry.
280 */
281 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
282 Assert(pVCpu->iem.s.cActiveMappings != 0);
283 pVCpu->iem.s.cActiveMappings--;
284 return VINF_SUCCESS;
285}
286
287
288/**
289 * Helper for iemMemMap, iemMemMapJmp and iemMemBounceBufferMapCrossPage.
290 * @todo duplicated
291 */
292DECL_FORCE_INLINE(uint32_t)
293iemMemCheckDataBreakpoint(PVMCC pVM, PVMCPUCC pVCpu, RTGCPTR GCPtrMem, size_t cbMem, uint32_t fAccess)
294{
295 bool const fSysAccess = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_SYS;
296 if (fAccess & IEM_ACCESS_TYPE_WRITE)
297 return DBGFBpCheckDataWrite(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
298 return DBGFBpCheckDataRead(pVM, pVCpu, GCPtrMem, (uint32_t)cbMem, fSysAccess);
299}
300
301
302/**
303 * iemMemMap worker that deals with a request crossing pages.
304 */
305VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
306 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) RT_NOEXCEPT
307{
308 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferCrossPage);
309 Assert(cbMem <= GUEST_PAGE_SIZE);
310
311 /*
312 * Do the address translations.
313 */
314 uint32_t const cbFirstPage = GUEST_PAGE_SIZE - (uint32_t)(GCPtrFirst & GUEST_PAGE_OFFSET_MASK);
315 RTGCPHYS GCPhysFirst;
316 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, cbFirstPage, fAccess, &GCPhysFirst);
317 if (rcStrict != VINF_SUCCESS)
318 return rcStrict;
319 Assert((GCPhysFirst & GUEST_PAGE_OFFSET_MASK) == (GCPtrFirst & GUEST_PAGE_OFFSET_MASK));
320
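/* The second chunk starts at the first byte of the following guest page; it is
 * translated separately since the two pages may map to unrelated physical addresses. */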
321 uint32_t const cbSecondPage = (uint32_t)cbMem - cbFirstPage;
322 RTGCPHYS GCPhysSecond;
323 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
324 cbSecondPage, fAccess, &GCPhysSecond);
325 if (rcStrict != VINF_SUCCESS)
326 return rcStrict;
327 Assert((GCPhysSecond & GUEST_PAGE_OFFSET_MASK) == 0);
328 GCPhysSecond &= ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK; /** @todo why? */
329
330 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
331
332 /*
333 * Check for data breakpoints.
334 */
335 if (RT_LIKELY(!(pVCpu->iem.s.fExec & IEM_F_PENDING_BRK_DATA)))
336 { /* likely */ }
337 else
338 {
339 uint32_t fDataBps = iemMemCheckDataBreakpoint(pVM, pVCpu, GCPtrFirst, cbFirstPage, fAccess);
340 fDataBps |= iemMemCheckDataBreakpoint(pVM, pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)GUEST_PAGE_OFFSET_MASK,
341 cbSecondPage, fAccess);
342 pVCpu->cpum.GstCtx.eflags.uBoth |= fDataBps & (CPUMCTX_DBG_HIT_DRX_MASK | CPUMCTX_DBG_DBGF_MASK);
343 if (fDataBps > 1)
344 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: Data breakpoint: fDataBps=%#x for %RGv LB %zx; fAccess=%#x cs:rip=%04x:%08RX64\n",
345 fDataBps, GCPtrFirst, cbMem, fAccess, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
346 }
347
348 /*
349 * Read in the current memory content if it's a read, execute or partial
350 * write access.
351 */
352 uint8_t * const pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
353
354 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
355 {
356 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
357 {
358 /*
359 * Must carefully deal with access handler status codes here,
360 * which makes the code a bit bloated.
361 */
362 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
363 if (rcStrict == VINF_SUCCESS)
364 {
365 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
366 if (rcStrict == VINF_SUCCESS)
367 { /*likely */ }
368 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
369 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
370 else
371 {
372 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
373 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
374 return rcStrict;
375 }
376 }
377 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
378 {
379 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
380 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
381 {
382 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
383 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
384 }
385 else
386 {
387 LogEx(LOG_GROUP_IEM,
388 ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
389 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
390 return rcStrict2;
391 }
392 }
393 else
394 {
395 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
396 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
397 return rcStrict;
398 }
399 }
400 else
401 {
402 /*
403 * No informational status codes here, much more straightforward.
404 */
405 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
406 if (RT_SUCCESS(rc))
407 {
408 Assert(rc == VINF_SUCCESS);
409 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
410 if (RT_SUCCESS(rc))
411 Assert(rc == VINF_SUCCESS);
412 else
413 {
414 LogEx(LOG_GROUP_IEM,
415 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
416 return rc;
417 }
418 }
419 else
420 {
421 LogEx(LOG_GROUP_IEM,
422 ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
423 return rc;
424 }
425 }
426 }
427#ifdef VBOX_STRICT
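/* Strict builds poison the buffer: 0xcc when no pre-read was done and 0xaa for the
 * unused tail, so stale bounce buffer contents are easier to spot. */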
428 else
429 memset(pbBuf, 0xcc, cbMem);
430 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
431 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
432#endif
433 AssertCompileMemberAlignment(VMCPU, iem.s.aBounceBuffers, 64);
434
435 /*
436 * Commit the bounce buffer entry.
437 */
438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
439 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
440 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
441 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
442 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
443 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
444 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
445 pVCpu->iem.s.iNextMapping = iMemMap + 1;
446 pVCpu->iem.s.cActiveMappings++;
447
448 *ppvMem = pbBuf;
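/* Unmap info encoding: bits 0-2 = mapping index, bit 3 = marker that the info is valid,
 * bits 4-7 = the IEM_ACCESS_TYPE_* bits; all of this is re-checked when unmapping. */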
449 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
450 return VINF_SUCCESS;
451}
452
453
454/**
455 * iemMemMap worker that deals with iemMemPageMap failures.
456 */
457VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
458 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) RT_NOEXCEPT
459{
460 STAM_COUNTER_INC(&pVCpu->iem.s.StatMemBounceBufferMapPhys);
461
462 /*
463 * Filter out conditions we can handle and the ones which shouldn't happen.
464 */
465 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
466 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
467 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
468 {
469 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
470 return rcMap;
471 }
472 pVCpu->iem.s.cPotentialExits++;
473
474 /*
475 * Read in the current memory content if it's a read, execute or partial
476 * write access.
477 */
478 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
479 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
480 {
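/* Unassigned physical ranges have no backing to read; fill the buffer with 0xff instead. */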
481 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
482 memset(pbBuf, 0xff, cbMem);
483 else
484 {
485 int rc;
486 if (!(pVCpu->iem.s.fExec & IEM_F_BYPASS_HANDLERS))
487 {
488 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
489 if (rcStrict == VINF_SUCCESS)
490 { /* nothing */ }
491 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
492 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
493 else
494 {
495 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
496 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
497 return rcStrict;
498 }
499 }
500 else
501 {
502 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
503 if (RT_SUCCESS(rc))
504 { /* likely */ }
505 else
506 {
507 LogEx(LOG_GROUP_IEM, ("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
508 GCPhysFirst, rc));
509 return rc;
510 }
511 }
512 }
513 }
514#ifdef VBOX_STRICT
515 else
516 memset(pbBuf, 0xcc, cbMem);
517#endif
518#ifdef VBOX_STRICT
519 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
520 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
521#endif
522
523 /*
524 * Commit the bounce buffer entry.
525 */
526 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
527 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
528 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
529 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
530 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
531 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
532 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
533 pVCpu->iem.s.iNextMapping = iMemMap + 1;
534 pVCpu->iem.s.cActiveMappings++;
535
536 *ppvMem = pbBuf;
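/* Same unmap info encoding as in iemMemBounceBufferMapCrossPage: index, marker bit, access type. */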
537 *pbUnmapInfo = iMemMap | 0x08 | ((fAccess & IEM_ACCESS_TYPE_MASK) << 4);
538 return VINF_SUCCESS;
539}
540
541
542
543/**
544 * Commits the guest memory if bounce buffered and unmaps it.
545 *
546 * @returns Strict VBox status code.
547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
548 * @param bUnmapInfo Unmap info set by iemMemMap.
549 */
550VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
551{
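/* Decode bUnmapInfo: low three bits = mapping index, bit 3 = validity marker,
 * high nibble = the expected IEM_ACCESS_TYPE_* bits. */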
552 uintptr_t const iMemMap = bUnmapInfo & 0x7;
553 AssertMsgReturn( (bUnmapInfo & 0x08)
554 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
555 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf)) == ((unsigned)bUnmapInfo >> 4),
556 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
557 VERR_NOT_FOUND);
558
559 /* If it's bounce buffered, we may need to write back the buffer. */
560 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
561 {
562 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
563 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
564 }
565 /* Otherwise unlock it. */
566 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
567 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
568
569 /* Free the entry. */
570 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
571 Assert(pVCpu->iem.s.cActiveMappings != 0);
572 pVCpu->iem.s.cActiveMappings--;
573 return VINF_SUCCESS;
574}
575
576
577/**
578 * Rolls back the guest memory (conceptually only) and unmaps it.
579 *
580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
581 * @param bUnmapInfo Unmap info set by iemMemMap.
582 */
583void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
584{
585 uintptr_t const iMemMap = bUnmapInfo & 0x7;
586 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
587 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
588 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
589 == ((unsigned)bUnmapInfo >> 4),
590 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
591
592 /* Unlock it if necessary. */
593 if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
594 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
595
596 /* Free the entry. */
597 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
598 Assert(pVCpu->iem.s.cActiveMappings != 0);
599 pVCpu->iem.s.cActiveMappings--;
600}
601
602
603/**
604 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
605 *
606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
607 * @param pvMem The mapping.
608 * @param fAccess The kind of access.
609 */
610void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
611{
612 uintptr_t const iMemMap = bUnmapInfo & 0x7;
613 AssertMsgReturnVoid( (bUnmapInfo & 0x08)
614 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
615 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
616 == ((unsigned)bUnmapInfo >> 4),
617 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess));
618
619 /* If it's bounce buffered, we may need to write back the buffer. */
620 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
621 {
622 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
623 {
624 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
625 if (rcStrict == VINF_SUCCESS)
626 return;
627 IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
628 }
629 }
630 /* Otherwise unlock it. */
631 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
632 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
633
634 /* Free the entry. */
635 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
636 Assert(pVCpu->iem.s.cActiveMappings != 0);
637 pVCpu->iem.s.cActiveMappings--;
638}
639
640
641/** Fallback for iemMemCommitAndUnmapRwJmp. */
642void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
643{
644 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
645 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
646}
647
648
649/** Fallback for iemMemCommitAndUnmapAtJmp. */
650void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
651{
652 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE));
653 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
654}
655
656
657/** Fallback for iemMemCommitAndUnmapWoJmp. */
658void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
659{
660 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
661 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
662}
663
664
665/** Fallback for iemMemCommitAndUnmapRoJmp. */
666void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP
667{
668 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_READ);
669 iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
670}
671
672
673/** Fallback for iemMemRollbackAndUnmapWo. */
674void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
675{
676 Assert(((bUnmapInfo >> 4) & IEM_ACCESS_TYPE_MASK) == IEM_ACCESS_TYPE_WRITE);
677 iemMemRollbackAndUnmap(pVCpu, bUnmapInfo);
678}
679
680
681#ifndef IN_RING3
682/**
683 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
684 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
685 *
686 * Allows the instruction to be completed and retired, while the IEM user will
687 * return to ring-3 immediately afterwards and do the postponed writes there.
688 *
689 * @returns VBox status code (no strict statuses). Caller must check
690 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
692 * @param bUnmapInfo Unmap info set by iemMemMap.
694 */
695VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT
696{
697 uintptr_t const iMemMap = bUnmapInfo & 0x7;
698 AssertMsgReturn( (bUnmapInfo & 0x08)
699 && iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
700 && (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_TYPE_MASK | 0xf))
701 == ((unsigned)bUnmapInfo >> 4),
702 ("%#x fAccess=%#x\n", bUnmapInfo, pVCpu->iem.s.aMemMappings[iMemMap].fAccess),
703 VERR_NOT_FOUND);
704
705 /* If it's bounce buffered, we may need to write back the buffer. */
706 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
707 {
708 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
709 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
710 }
711 /* Otherwise unlock it. */
712 else if (!(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_NOT_LOCKED))
713 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
714
715 /* Free the entry. */
716 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
717 Assert(pVCpu->iem.s.cActiveMappings != 0);
718 pVCpu->iem.s.cActiveMappings--;
719 return VINF_SUCCESS;
720}
721#endif
722
723
724/**
725 * Rolls back mappings, releasing page locks and such.
726 *
727 * The caller shall only call this after checking cActiveMappings.
728 *
729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
730 */
731void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT
732{
733 Assert(pVCpu->iem.s.cActiveMappings > 0);
734
735 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
736 while (iMemMap-- > 0)
737 {
738 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
739 if (fAccess != IEM_ACCESS_INVALID)
740 {
741 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
742 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
743 if (!(fAccess & (IEM_ACCESS_BOUNCE_BUFFERED | IEM_ACCESS_NOT_LOCKED)))
744 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
745 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
746 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
747 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
748 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
749 pVCpu->iem.s.cActiveMappings--;
750 }
751 }
752}
753
754#undef LOG_GROUP
755#define LOG_GROUP LOG_GROUP_IEM
756
757/** @} */
758
759
760#ifdef IN_RING3
761
762/**
763 * Handles the unlikely and probably fatal merge cases.
764 *
765 * @returns Merged status code.
766 * @param rcStrict Current EM status code.
767 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
768 * with @a rcStrict.
769 * @param iMemMap The memory mapping index. For error reporting only.
770 * @param pVCpu The cross context virtual CPU structure of the calling
771 * thread, for error reporting only.
772 */
773DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
774 unsigned iMemMap, PVMCPUCC pVCpu)
775{
776 if (RT_FAILURE_NP(rcStrict))
777 return rcStrict;
778
779 if (RT_FAILURE_NP(rcStrictCommit))
780 return rcStrictCommit;
781
782 if (rcStrict == rcStrictCommit)
783 return rcStrictCommit;
784
785 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
786 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
787 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
788 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
789 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
790 return VERR_IOM_FF_STATUS_IPE;
791}
792
793
794/**
795 * Helper for IOMR3ProcessForceFlag.
796 *
797 * @returns Merged status code.
798 * @param rcStrict Current EM status code.
799 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
800 * with @a rcStrict.
801 * @param iMemMap The memory mapping index. For error reporting only.
802 * @param pVCpu The cross context virtual CPU structure of the calling
803 * thread, for error reporting only.
804 */
805DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
806{
807 /* Simple. */
808 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
809 return rcStrictCommit;
810
811 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
812 return rcStrict;
813
814 /* EM scheduling status codes. */
815 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
816 && rcStrict <= VINF_EM_LAST))
817 {
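/* If both are EM scheduling codes, the numerically lower (higher priority) one wins. */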
818 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
819 && rcStrictCommit <= VINF_EM_LAST))
820 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
821 }
822
823 /* Unlikely */
824 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
825}
826
827
828/**
829 * Called by force-flag handling code when VMCPU_FF_IEM is set.
830 *
831 * @returns Merge between @a rcStrict and what the commit operation returned.
832 * @param pVM The cross context VM structure.
833 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
834 * @param rcStrict The status code returned by ring-0 or raw-mode.
835 */
836VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
837{
838 /*
839 * Reset the pending commit.
840 */
841 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
842 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
843 ("%#x %#x %#x\n",
844 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
845 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
846
847 /*
848 * Commit the pending bounce buffers (usually just one).
849 */
850 unsigned cBufs = 0;
851 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
852 while (iMemMap-- > 0)
853 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
854 {
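/* IEM_ACCESS_PENDING_R3_WRITE_1ST/2ND mark which part(s) of the bounce buffer still
 * need to be written out to guest memory. */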
855 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
856 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
857 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
858
859 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
860 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
861 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
862
863 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
864 {
865 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
866 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
867 pbBuf,
868 cbFirst,
869 PGMACCESSORIGIN_IEM);
870 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
871 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
872 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
873 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
874 }
875
876 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
877 {
878 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
879 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
880 pbBuf + cbFirst,
881 cbSecond,
882 PGMACCESSORIGIN_IEM);
883 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
884 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
885 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
886 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
887 }
888 cBufs++;
889 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
890 }
891
892 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
893 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
894 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
895 pVCpu->iem.s.cActiveMappings = 0;
896 return rcStrict;
897}
898
899#endif /* IN_RING3 */
900