VirtualBox
source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp @ 60852

Last change on this file since 60852 was 60852, checked in by vboxsync, 9 years ago:
IOM: New way of deferring RC+R0 MMIO writes (only used for IEM accesses).
/* $Id: IOMAllMMIO.cpp 60852 2016-05-05 17:47:40Z vboxsync $ */
/** @file
 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_IOM
#include <VBox/vmm/iom.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include "IOMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/hm.h>
#include "IOMInline.h"

#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** @def IEM_USE_IEM_INSTEAD
 * Use IEM instead of IOM for interpreting MMIO accesses.
 * Because of PATM/CSAM issues in raw-mode, we've split this into the 2nd and
 * 3rd IEM deployment steps. */
#if   ((defined(IN_RING3) || defined(IN_RING0)) && defined(VBOX_WITH_2ND_IEM_STEP)) \
   || defined(VBOX_WITH_3RD_IEM_STEP) || defined(DOXYGEN_RUNNING)
# define IEM_USE_IEM_INSTEAD
#endif


/*********************************************************************************************************************************
*   Global Variables                                                                                                             *
*********************************************************************************************************************************/

/**
 * Array for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0U,   /* 0 - invalid */
    0,     /* *1 == 2^0 */
    1,     /* *2 == 2^1 */
    ~0U,   /* 3 - invalid */
    2,     /* *4 == 2^2 */
    ~0U,   /* 5 - invalid */
    ~0U,   /* 6 - invalid */
    ~0U,   /* 7 - invalid */
    3      /* *8 == 2^3 */
};

/**
 * Macro for fast conversion of the operand size (1/2/4/8 bytes) to a bit shift value.
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])

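/* A minimal standalone sketch (illustration only, not part of this file's
   build) of what the size-to-shift table buys: shifting by SIZE_2_SHIFT(cb)
   turns a transfer count into a byte count without a multiply, which is how
   the REP STOS/MOVS statistics below use it.  The EXAMPLE_* names are made up
   for this sketch. */
#if 0 /* example */
#include <stdio.h>

static const unsigned g_aExampleSize2Shift[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
#define EXAMPLE_SIZE_2_SHIFT(cb) (g_aExampleSize2Shift[cb])

int main(void)
{
    unsigned cb         = 4;  /* dword-sized accesses */
    unsigned cTransfers = 16; /* e.g. the count from a REP prefix */
    /* 16 << 2 == 64 bytes; equivalent to cTransfers * cb. */
    printf("%u bytes\n", cTransfers << EXAMPLE_SIZE_2_SHIFT(cb));
    return 0;
}
#endif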

/**
 * Returns the contents of register or immediate data of instruction's parameter.
 *
 * @returns true on success.
 *
 * @todo Get rid of this code. Use DISQueryParamVal instead
 *
 * @param   pCpu        Pointer to current disassembler context.
 * @param   pParam      Pointer to parameter of instruction to process.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest structure.
 * @param   pu64Data    Where to store retrieved data.
 * @param   pcbSize     Where to store the size of data (1, 2, 4, 8).
 */
bool iomGetRegImmData(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t *pu64Data, unsigned *pcbSize)
{
    NOREF(pCpu);
    if (pParam->fUse & (DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16 | DISUSE_DISPLACEMENT32))
    {
        *pcbSize  = 0;
        *pu64Data = 0;
        return false;
    }

    /* divide and conquer */
    if (pParam->fUse & (DISUSE_REG_GEN64 | DISUSE_REG_GEN32 | DISUSE_REG_GEN16 | DISUSE_REG_GEN8))
    {
        if (pParam->fUse & DISUSE_REG_GEN32)
        {
            *pcbSize = 4;
            DISFetchReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t *)pu64Data);
            return true;
        }

        if (pParam->fUse & DISUSE_REG_GEN16)
        {
            *pcbSize = 2;
            DISFetchReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t *)pu64Data);
            return true;
        }

        if (pParam->fUse & DISUSE_REG_GEN8)
        {
            *pcbSize = 1;
            DISFetchReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t *)pu64Data);
            return true;
        }

        Assert(pParam->fUse & DISUSE_REG_GEN64);
        *pcbSize = 8;
        DISFetchReg64(pRegFrame, pParam->Base.idxGenReg, pu64Data);
        return true;
    }
    else
    {
        if (pParam->fUse & (DISUSE_IMMEDIATE64 | DISUSE_IMMEDIATE64_SX8))
        {
            *pcbSize = 8;
            *pu64Data = pParam->uValue;
            return true;
        }

        if (pParam->fUse & (DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8))
        {
            *pcbSize = 4;
            *pu64Data = (uint32_t)pParam->uValue;
            return true;
        }

        if (pParam->fUse & (DISUSE_IMMEDIATE16 | DISUSE_IMMEDIATE16_SX8))
        {
            *pcbSize = 2;
            *pu64Data = (uint16_t)pParam->uValue;
            return true;
        }

        if (pParam->fUse & DISUSE_IMMEDIATE8)
        {
            *pcbSize = 1;
            *pu64Data = (uint8_t)pParam->uValue;
            return true;
        }

        if (pParam->fUse & DISUSE_REG_SEG)
        {
            *pcbSize = 2;
            DISFetchRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL *)pu64Data);
            return true;
        } /* Else - error. */

        AssertFailed();
        *pcbSize  = 0;
        *pu64Data = 0;
        return false;
    }
}


/**
 * Saves data to the 8/16/32/64-bit general purpose or segment register defined
 * by the instruction's parameter.
 *
 * @returns true on success.
 * @param   pCpu        Pointer to current disassembler context.
 * @param   pParam      Pointer to parameter of instruction to process.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest structure.
 * @param   u64Data     8/16/32/64 bit data to store.
 */
bool iomSaveDataToReg(PDISCPUSTATE pCpu, PCDISOPPARAM pParam, PCPUMCTXCORE pRegFrame, uint64_t u64Data)
{
    NOREF(pCpu);
    if (pParam->fUse & (  DISUSE_BASE | DISUSE_INDEX | DISUSE_SCALE | DISUSE_DISPLACEMENT8 | DISUSE_DISPLACEMENT16
                        | DISUSE_DISPLACEMENT32 | DISUSE_DISPLACEMENT64 | DISUSE_IMMEDIATE8 | DISUSE_IMMEDIATE16
                        | DISUSE_IMMEDIATE32 | DISUSE_IMMEDIATE32_SX8 | DISUSE_IMMEDIATE16_SX8))
    {
        return false;
    }

    if (pParam->fUse & DISUSE_REG_GEN32)
    {
        DISWriteReg32(pRegFrame, pParam->Base.idxGenReg, (uint32_t)u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_GEN64)
    {
        DISWriteReg64(pRegFrame, pParam->Base.idxGenReg, u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_GEN16)
    {
        DISWriteReg16(pRegFrame, pParam->Base.idxGenReg, (uint16_t)u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_GEN8)
    {
        DISWriteReg8(pRegFrame, pParam->Base.idxGenReg, (uint8_t)u64Data);
        return true;
    }

    if (pParam->fUse & DISUSE_REG_SEG)
    {
        DISWriteRegSeg(pRegFrame, (DISSELREG)pParam->Base.idxSegReg, (RTSEL)u64Data);
        return true;
    }

    /* Else - error. */
    return false;
}

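/* A hedged sketch of how the two helpers above pair up when an interpreter
   emulates a MOV that touches MMIO (compare iomInterpretMOVxXWrite and
   iomInterpretMOVxXRead further down).  The pDis/pRegFrame/doMmioWrite/
   doMmioRead names are placeholders for this illustration, not real API. */
#if 0 /* example */
    /* Write path: operand 2 is the source register or immediate. */
    unsigned cb      = 0;
    uint64_t u64Data = 0;
    if (iomGetRegImmData(pDis, &pDis->Param2, pRegFrame, &u64Data, &cb))
        doMmioWrite(GCPhysFault, &u64Data, cb);     /* cb is 1, 2, 4 or 8 */

    /* Read path: fetch from MMIO, then operand 1 names the destination register. */
    if (doMmioRead(GCPhysFault, &u64Data, cb) == VINF_SUCCESS)
        iomSaveDataToReg(pDis, &pDis->Param1, pRegFrame, u64Data);
#endif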

#ifndef IN_RING3
/**
 * Defers a pending MMIO write to ring-3.
 *
 * @returns VINF_IOM_R3_MMIO_COMMIT_WRITE
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   GCPhys      The write address.
 * @param   pvBuf       The bytes being written.
 * @param   cbBuf       How many bytes.
 * @param   pRange      The range, if resolved.
 */
static VBOXSTRICTRC iomMmioRing3WritePending(PVMCPU pVCpu, RTGCPHYS GCPhys, void const *pvBuf, size_t cbBuf, PIOMMMIORANGE pRange)
{
    Log3(("iomMmioRing3WritePending: %RGp LB %#x\n", GCPhys, cbBuf));
    AssertReturn(pVCpu->iom.s.PendingMmioWrite.cbValue == 0, VERR_IOM_MMIO_IPE_1);
    pVCpu->iom.s.PendingMmioWrite.GCPhys  = GCPhys;
    AssertReturn(cbBuf <= sizeof(pVCpu->iom.s.PendingMmioWrite.abValue), VERR_IOM_MMIO_IPE_2);
    pVCpu->iom.s.PendingMmioWrite.cbValue = (uint32_t)cbBuf;
    memcpy(pVCpu->iom.s.PendingMmioWrite.abValue, pvBuf, cbBuf);
    VMCPU_FF_SET(pVCpu, VMCPU_FF_IOM);
    return VINF_IOM_R3_MMIO_COMMIT_WRITE;
}
#endif

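/* A standalone model (illustration only) of the defer-and-commit pattern that
   iomMmioRing3WritePending implements: the restricted context records the
   bytes and raises a force flag, and ring-3 later replays the write against
   the real handler.  All names here are hypothetical; the actual commit side
   lives elsewhere in IOM. */
#if 0 /* example */
#include <stdint.h>
#include <string.h>

typedef struct EXAMPLEPENDINGWRITE
{
    uint64_t offPhys;       /* where the write goes */
    uint32_t cbValue;       /* 0 means "nothing pending" */
    uint8_t  abValue[16];   /* the bytes to write */
} EXAMPLEPENDINGWRITE;

/* Restricted context: cannot call the ring-3 handler, so park the data. */
static void exampleDeferWrite(EXAMPLEPENDINGWRITE *pPending, uint64_t offPhys, const void *pv, uint32_t cb)
{
    pPending->offPhys = offPhys;
    pPending->cbValue = cb;
    memcpy(pPending->abValue, pv, cb);
    /* ...then set a force flag so ring-3 knows to call exampleCommitWrite(). */
}

/* Ring-3: replay the parked write against the real handler and clear it. */
static void exampleCommitWrite(EXAMPLEPENDINGWRITE *pPending,
                               void (*pfnWrite)(uint64_t, const void *, uint32_t))
{
    if (pPending->cbValue)
    {
        pfnWrite(pPending->offPhys, pPending->abValue, pPending->cbValue);
        pPending->cbValue = 0;
    }
}
#endif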

/**
 * Deals with complicated MMIO writes.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_READ may be returned.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pRange      The range to write to.
 * @param   GCPhys      The physical address to start writing.
 * @param   pvValue     The value to write.
 * @param   cbValue     The size of the value to write.
 */
static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
                 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart  = GCPhys; NOREF(GCPhysStart);
    bool const     fReadMissing =    (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
                                  || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
    {
# ifdef IN_RING3
        LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
                R3STRING(pRange->pszDesc)));
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
#endif

    /*
     * Check if we should ignore the write.
     */
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
    {
        Assert(cbValue != 4 || (GCPhys & 3));
        return VINF_SUCCESS;
    }
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
    {
        Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
        return VINF_SUCCESS;
    }

    /*
     * Split and conquer.
     */
    for (;;)
    {
        unsigned const  offAccess  = GCPhys & 3;
        unsigned        cbThisPart = 4 - offAccess;
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        /*
         * Get the missing bits (if any).
         */
        uint32_t u32MissingValue = 0;
        if (fReadMissing && cbThisPart != 4)
        {
            int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                        GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
            switch (rc2)
            {
                case VINF_SUCCESS:
                    break;
                case VINF_IOM_MMIO_UNUSED_FF:
                    u32MissingValue = UINT32_C(0xffffffff);
                    break;
                case VINF_IOM_MMIO_UNUSED_00:
                    u32MissingValue = 0;
                    break;
                case VINF_IOM_R3_MMIO_READ:
                case VINF_IOM_R3_MMIO_READ_WRITE:
                case VINF_IOM_R3_MMIO_WRITE:
                    /** @todo What if we've split a transfer and already read
                     *        something?  Since writes generally have side effects we
                     *        could be kind of screwed here...
                     *
                     *  Fix: VINF_IOM_R3_IOPORT_COMMIT_WRITE (part 2) */
                    LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                default:
                    if (RT_FAILURE(rc2))
                    {
                        Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                        return rc2;
                    }
                    AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    break;
            }
        }

        /*
         * Merge missing and given bits.
         */
        uint32_t u32GivenMask;
        uint32_t u32GivenValue;
        switch (cbThisPart)
        {
            case 1:
                u32GivenValue = *(uint8_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x000000ff);
                break;
            case 2:
                u32GivenValue = *(uint16_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x0000ffff);
                break;
            case 3:
                u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
                                                    ((uint8_t const *)pvValue)[2], 0);
                u32GivenMask  = UINT32_C(0x00ffffff);
                break;
            case 4:
                u32GivenValue = *(uint32_t const *)pvValue;
                u32GivenMask  = UINT32_C(0xffffffff);
                break;
            default:
                AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
        }
        if (offAccess)
        {
            u32GivenValue <<= offAccess * 8;
            u32GivenMask  <<= offAccess * 8;
        }

        uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
                          | (u32GivenValue  &  u32GivenMask);

        /*
         * Do DWORD write to the device.
         */
        int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                     GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already written
                 *        something?  Since writes generally have side effects we
                 *        could be kind of screwed here...
                 *
                 *  Fix: VINF_IOM_R3_IOPORT_COMMIT_WRITE (part 2) */
                LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t const *)pvValue + cbThisPart;
    }

    return rc;
}

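/* A standalone sketch (illustration only) of the merge step the function
   above performs for one dword: the caller-supplied bytes are shifted into
   place and combined with the "missing" bytes read back from the device. */
#if 0 /* example */
#include <stdint.h>
#include <assert.h>

/* Merge cbThis given bytes at byte offset offAccess into an existing dword. */
static uint32_t exampleMergeDword(uint32_t u32Missing, const uint8_t *pbGiven,
                                  unsigned offAccess, unsigned cbThis)
{
    assert(offAccess + cbThis <= 4);
    uint32_t u32GivenValue = 0;
    uint32_t u32GivenMask  = cbThis == 4 ? UINT32_C(0xffffffff)
                           : (UINT32_C(1) << (cbThis * 8)) - 1;
    for (unsigned i = 0; i < cbThis; i++)               /* little endian */
        u32GivenValue |= (uint32_t)pbGiven[i] << (i * 8);
    u32GivenValue <<= offAccess * 8;
    u32GivenMask  <<= offAccess * 8;
    return (u32Missing & ~u32GivenMask) | (u32GivenValue & u32GivenMask);
}

/* E.g. a 2-byte write of 0xBEEF at offset 1 into 0x11223344 yields 0x11BEEF44. */
#endif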



/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
 */
static VBOXSTRICTRC iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault,
                                   const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
#else
    NOREF(pVCpu);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
    {
        if (   (cb == 4 && !(GCPhysFault & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
            rcStrict = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                          GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
        else
            rcStrict = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
    }
    else
        rcStrict = VINF_SUCCESS;

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}


/**
 * Deals with complicated MMIO reads.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_WRITE may be returned.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pRange      The range to read from.
 * @param   GCPhys      The physical address to start reading.
 * @param   pvValue     Where to store the value.
 * @param   cbValue     The size of the value to read.
 */
static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
                 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
#endif

    /*
     * Split and conquer.
     */
    for (;;)
    {
        /*
         * Do DWORD read from the device.
         */
        uint32_t u32Value;
        int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_MMIO_UNUSED_FF:
                u32Value = UINT32_C(0xffffffff);
                break;
            case VINF_IOM_MMIO_UNUSED_00:
                u32Value = 0;
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something?  Since reads can have side effects we could
                 *        be kind of screwed here... */
                LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }
        u32Value >>= (GCPhys & 3) * 8;

        /*
         * Write what we've read.
         */
        unsigned cbThisPart = 4 - (GCPhys & 3);
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        switch (cbThisPart)
        {
            case 1:
                *(uint8_t *)pvValue = (uint8_t)u32Value;
                break;
            case 2:
                *(uint16_t *)pvValue = (uint16_t)u32Value;
                break;
            case 3:
                ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
                ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
                ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
                break;
            case 4:
                *(uint32_t *)pvValue = u32Value;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t *)pvValue + cbThisPart;
    }

    return rc;
}

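/* The symmetric standalone sketch (illustration only) for the read side
   above: one aligned dword is read from the device and the requested bytes
   are shifted out of it, just like the u32Value >>= (GCPhys & 3) * 8 step. */
#if 0 /* example */
#include <stdint.h>
#include <string.h>

/* Extract cbThis bytes at byte offset offAccess from an aligned dword read. */
static void exampleExtractFromDword(uint32_t u32Value, unsigned offAccess,
                                    unsigned cbThis, uint8_t *pbDst)
{
    u32Value >>= offAccess * 8;         /* drop the bytes below the access */
    memcpy(pbDst, &u32Value, cbThis);   /* little-endian host assumed */
}
#endif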

/**
 * Implements VINF_IOM_MMIO_UNUSED_FF.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the 0xff bytes.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t  *)pvValue = UINT8_C(0xff); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0xff);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_00.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the zeros.
 * @param   cbValue     How many bytes to fill.
 */
static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t  *)pvValue = UINT8_C(0x00); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0x00);
            break;
        }
    }
    return VINF_SUCCESS;
}

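/* A hedged sketch of the device-side convention the two fillers above serve:
   a read callback may return VINF_IOM_MMIO_UNUSED_FF (or _00) instead of
   writing the buffer, and the iomMMIODoRead wrapper below then fills the
   buffer with 0xff (or 0x00) bytes.  The device and register layout here are
   made up for illustration. */
#if 0 /* example */
static DECLCALLBACK(int) exampleMmioRead(PPDMDEVINS pDevIns, void *pvUser,
                                         RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
{
    switch (GCPhysAddr & 0xff)
    {
        case 0x00:  /* implemented status register */
            *(uint32_t *)pv = 0x12345678;
            return VINF_SUCCESS;
        default:    /* unimplemented registers read as all-ones */
            return VINF_IOM_MMIO_UNUSED_FF;
    }
}
#endif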

/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 */
DECLINLINE(VBOXSTRICTRC) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys,
                                       void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#else
    NOREF(pVCpu);
#endif

    VBOXSTRICTRC rcStrict;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        if (   (   cbValue == 4
                && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (   cbValue == 8
                && !(GCPhys & 7)
                && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
            rcStrict = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
                                                         pvValue, cbValue);
        else
            rcStrict = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        rcStrict = VINF_IOM_MMIO_UNUSED_FF;
    if (rcStrict != VINF_SUCCESS)
    {
        switch (VBOXSTRICTRC_VAL(rcStrict))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rcStrict = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rcStrict = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return rcStrict;
}


/**
 * Internal - statistics only.
 */
DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    switch (cb)
    {
        case 1:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
            break;
        case 2:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
            break;
        case 4:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
            break;
        case 8:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
            break;
        default:
            /* No way. */
            AssertMsgFailed(("Invalid data length %d\n", cb));
            break;
    }
#else
    NOREF(pVM); NOREF(cb);
#endif
}


#ifndef IEM_USE_IEM_INSTEAD

/**
 * MOV      reg, mem         (read)
 * MOVZX    reg, mem         (read)
 * MOVSX    reg, mem         (read)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
                                 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the data size from parameter 2,
     * and call the handler function to get the data.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));

    uint64_t u64Data = 0;
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
    if (rc == VINF_SUCCESS)
    {
        /*
         * Do sign extension for MOVSX.
         */
        /** @todo check up on the MOVSX implementation! */
        if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
        {
            if (cb == 1)
            {
                /* DWORD <- BYTE */
                int64_t iData = (int8_t)u64Data;
                u64Data = (uint64_t)iData;
            }
            else
            {
                /* DWORD <- WORD */
                int64_t iData = (int16_t)u64Data;
                u64Data = (uint64_t)iData;
            }
        }

        /*
         * Store the result to register (parameter 1).
         */
        bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
        AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
    }

    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}

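/* A standalone sketch (illustration only) of the sign extension MOVSX needs.
   The interpreter above only distinguishes byte and word sources, which is
   what its @todo about checking the MOVSX implementation is hinting at; a
   dword source (MOVSXD) would need a third case, as written out here. */
#if 0 /* example */
#include <stdint.h>

static uint64_t exampleSignExtend(uint64_t u64Data, unsigned cbSrc)
{
    switch (cbSrc)
    {
        case 1:  return (uint64_t)(int64_t)(int8_t)u64Data;   /* 0x80   -> 0xffff...ff80 */
        case 2:  return (uint64_t)(int64_t)(int16_t)u64Data;  /* 0x8000 -> 0xffff...8000 */
        case 4:  return (uint64_t)(int64_t)(int32_t)u64Data;  /* the MOVSXD case */
        default: return u64Data;
    }
}
#endif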

/**
 * MOV      mem, reg|imm     (write)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
                                  PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);

    /*
     * Get data to write from second parameter,
     * and call the callback to write it.
     */
    unsigned cb      = 0;
    uint64_t u64Data = 0;
    bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
    AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);

    int rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb));
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/** Wrapper for reading virtual memory. */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
       isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    NOREF(pVCpu);
    int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
    /* Page may be protected and not directly accessible. */
    if (rc == VERR_ACCESS_DENIED)
        rc = VINF_IOM_R3_IOPORT_WRITE;
    return rc;
#else
    return VBOXSTRICTRC_VAL(PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb, PGMACCESSORIGIN_IOM));
#endif
}


/** Wrapper for writing virtual memory. */
DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
{
    /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
     *        raw mode code.  Some thought needs to be spent on theoretical concurrency issues
     *        as well, since we're not behind the pgm lock and the handler may change between calls.
     *
     *        PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
     *        the state of some shadowed structures. */
#if defined(IN_RING0) || defined(IN_RC)
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
#else
    NOREF(pCtxCore);
    return VBOXSTRICTRC_VAL(PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_IOM));
#endif
}



#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM          The cross context VM structure.
 * @param   fWriteAccess Whether this is a write (true) or read (false) access.
 * @param   pRegFrame    Trap register frame.
 * @param   GCPhysFault  The GC physical address corresponding to pvFault.
 * @param   pCpu         Disassembler CPU state.
 * @param   pRange       Pointer to the MMIO range.
 * @param   ppStat       Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
                            PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords/qword count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundaries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (fWriteAccess)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_R3_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {

            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->fPrefix & DISPREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_R3_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_R3_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
            {
                iomMmioReleaseRange(pVM, pRange);
                return VINF_IOM_R3_MMIO_READ_WRITE;
            }

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;
                rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;

                Phys           += offIncrement;
                PhysDst        += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
            iomMmioReleaseRange(pVM, pRange);
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb));
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
#endif /* IOM_WITH_MOVS_SUPPORT */


/**
 * Gets the address / opcode mask corresponding to the given CPU mode.
 *
 * @returns Mask.
 * @param   enmCpuMode  CPU mode.
 */
static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
{
    switch (enmCpuMode)
    {
        case DISCPUMODE_16BIT: return UINT16_MAX;
        case DISCPUMODE_32BIT: return UINT32_MAX;
        case DISCPUMODE_64BIT: return UINT64_MAX;
        default:
            AssertFailedReturn(UINT32_MAX);
    }
}

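/* A standalone sketch (illustration only) of what the masks returned above
   are for: string instructions must wrap their RSI/RDI/RCX updates at the
   width of the current addressing mode while preserving the untouched upper
   register bits, exactly like the fAddrMask arithmetic in the STOS and LODS
   interpreters below. */
#if 0 /* example */
#include <stdint.h>

static uint64_t exampleAdvanceIndexReg(uint64_t uReg, int offIncrement, uint64_t fAddrMask)
{
    /* 16-bit mode: DI wraps 0xffff -> 0x0000, the upper 48 bits stay put. */
    return ((uReg + (uint64_t)(int64_t)offIncrement) & fAddrMask) | (uReg & ~fAddrMask);
}

/* exampleAdvanceIndexReg(0x12345678FFFFull, 1, UINT16_MAX) == 0x123456780000ull */
#endif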

/**
 * [REP] STOSB
 * [REP] STOSW
 * [REP] STOSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault,
                            PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get bytes/words/dwords/qwords count to copy.
     */
    uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
    RTGCUINTREG cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(pVCpu)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->rcx & fAddrMask;
        if (!cTransfers)
            return VINF_SUCCESS;
    }

/** @todo r=bird: bounds checks! */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif


    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (   pRange->CTX_SUFF(pfnFillCallback)
        && cb <= 4 /* can only fill 32-bit values */)
    {
        /*
         * Use the fill callback.
         */
        /** @todo pfnFillCallback must return number of bytes successfully written!!! */
        if (offIncrement > 0)
        {
            /* addr++ variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
        else
        {
            /* addr-- variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                   Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
    }
    else
    {
        /*
         * Use the write callback.
         */
        Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
        uint64_t u64Data = pRegFrame->rax;

        /* fill loop. */
        do
        {
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb));
            if (rc != VINF_SUCCESS)
                break;

            Phys += offIncrement;
            pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
                           | (pRegFrame->rdi & ~fAddrMask);
            cTransfers--;
        } while (cTransfers);

        /* Update rcx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->rcx = (cTransfers & fAddrMask)
                           | (pRegFrame->rcx & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * [REP] LODSB
 * [REP] LODSW
 * [REP] LODSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * We do not support segment prefixes or REP*.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

    /*
     * Perform read.
     */
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb));
    if (rc == VINF_SUCCESS)
    {
        uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
        pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
                       | (pRegFrame->rsi & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * CMP [MMIO], reg|imm
 * CMP reg|imm, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                           PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the operands.
     */
    unsigned cb     = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
        /* cmp reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
        /* cmp [MMIO], reg|imm. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
    else
    {
        AssertMsgFailed(("Disassembler CMP problem...\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* Emulate CMP and update guest flags. */
        uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}

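/* A standalone sketch (illustration only) of the EFLAGS merge used by the
   CMP interpreter above and its siblings below: only the six arithmetic
   status flags produced by the emulation helper are taken over, everything
   else in the guest's EFLAGS is preserved.  The EXAMPLE_* mask value is just
   CF|PF|AF|ZF|SF|OF spelled out numerically. */
#if 0 /* example */
#include <stdint.h>

#define EXAMPLE_EFL_STATUS_MASK UINT32_C(0x08d5) /* CF | PF | AF | ZF | SF | OF */

static uint32_t exampleMergeStatusFlags(uint32_t fGuestEfl, uint32_t fEmulatedEfl)
{
    return (fGuestEfl & ~EXAMPLE_EFL_STATUS_MASK) | (fEmulatedEfl & EXAMPLE_EFL_STATUS_MASK);
}
#endif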

/**
 * AND [MMIO], reg|imm
 * AND reg, [MMIO]
 * OR [MMIO], reg|imm
 * OR reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   pfnEmulate  Instruction emulation function.
 */
static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                                PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
{
    unsigned cb     = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    bool fAndWrite;
    int rc;

#ifdef LOG_ENABLED
    const char *pszInstr;

    if (pCpu->pCurInstr->uOpcode == OP_XOR)
        pszInstr = "Xor";
    else if (pCpu->pCurInstr->uOpcode == OP_OR)
        pszInstr = "Or";
    else if (pCpu->pCurInstr->uOpcode == OP_AND)
        pszInstr = "And";
    else
        pszInstr = "OrXorAnd??";
#endif

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and/or/xor reg, [MMIO]. */
        Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
        fAndWrite = false;
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and/or/xor [MMIO], reg|imm. */
        fAndWrite = true;
        if (    (pRange->CTX_SUFF(pfnReadCallback)  || !pRange->pfnReadCallbackR3)
            &&  (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
            rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        AssertMsgFailed(("Disassembler AND/OR/XOR problem...\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
        /* Emulate the instruction and update guest flags. */
        uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);

        LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));

        if (fAndWrite)
            /* Store result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        else
        {
            /* Store result to register. */
            bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
            AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
        }
        if (rc == VINF_SUCCESS)
        {
            /* Update guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
            iomMMIOStatLength(pVM, cb);
        }
    }

    return rc;
}


/**
 * TEST [MMIO], reg|imm
 * TEST reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    unsigned cb     = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* test reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* test [MMIO], reg|imm. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
    }
    else
    {
        AssertMsgFailed(("Disassembler TEST problem...\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif

        /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
        uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}


/**
 * BT [MMIO], reg|imm
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                          PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    uint64_t uBit  = 0;
    uint64_t uData = 0;
    unsigned cbIgnored;

    if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
    {
        AssertMsgFailed(("Disassembler BT problem...\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    /* The size of the memory operand only matters here. */
    unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);

    /* bt [MMIO], reg|imm. */
    int rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData));
    if (rc == VINF_SUCCESS)
    {
        /* Find the bit inside the faulting address */
        pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
        iomMMIOStatLength(pVM, cbData);
    }

    return rc;
}

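/* A hedged sketch of the CF computation in the BT interpreter above.  The
   code relies on the 1-bit u1CF bitfield to truncate the shifted value;
   written out explicitly, and with the wrap of an immediate bit offset to the
   operand size that the architecture prescribes, it looks like this
   (illustration only, hypothetical helper name). */
#if 0 /* example */
#include <stdint.h>

static unsigned exampleBtCarry(uint64_t uData, uint64_t uBit, unsigned cbData)
{
    uBit &= cbData * 8 - 1;             /* e.g. BT r/m32, imm8 wraps at 32 */
    return (unsigned)((uData >> uBit) & 1);
}
#endif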
/**
 * XCHG [MMIO], reg
 * XCHG reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
    if (    (!pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3)
        ||  (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
        return VINF_IOM_R3_MMIO_READ_WRITE;

    int rc;
    unsigned cb     = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* xchg reg, [MMIO]. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));

            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
        }
        else
            Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* xchg [MMIO], reg. */
        rc = VBOXSTRICTRC_TODO(iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb));
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = VBOXSTRICTRC_TODO(iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb));
            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
        }
        else
            AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
    }
    else
    {
        AssertMsgFailed(("Disassembler XCHG problem...\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    return rc;
}

#endif /* !IEM_USE_IEM_INSTEAD */
1689
1690/**
1691 * Common worker for the \#PF handler and IOMMMIOPhysHandler (APIC+VT-x).
1692 *
1693 * @returns VBox status code (appropriate for GC return).
1694 * @param pVM The cross context VM structure.
1695 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1696 * @param uErrorCode CPU Error code. This is UINT32_MAX when we don't have
1697 * any error code (the EPT misconfig hack).
1698 * @param pCtxCore Trap register frame.
1699 * @param GCPhysFault The GC physical address corresponding to pvFault.
1700 * @param pvUser Pointer to the MMIO ring-3 range entry.
1701 */
1702static VBOXSTRICTRC iomMmioCommonPfHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore,
1703 RTGCPHYS GCPhysFault, void *pvUser)
1704{
1705 int rc = IOM_LOCK_SHARED(pVM);
1706#ifndef IN_RING3
1707 if (rc == VERR_SEM_BUSY)
1708 return VINF_IOM_R3_MMIO_READ_WRITE;
1709#endif
1710 AssertRC(rc);
1711
1712 STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
1713 Log(("iomMmioCommonPfHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));
1714
1715 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1716 Assert(pRange);
1717 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1718 iomMmioRetainRange(pRange);
1719#ifndef VBOX_WITH_STATISTICS
1720 IOM_UNLOCK_SHARED(pVM);
1721
1722#else
1723 /*
1724 * Locate the statistics.
1725 */
1726 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
1727 if (!pStats)
1728 {
1729 iomMmioReleaseRange(pVM, pRange);
1730# ifdef IN_RING3
1731 return VERR_NO_MEMORY;
1732# else
1733 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1734 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1735 return VINF_IOM_R3_MMIO_READ_WRITE;
1736# endif
1737 }
1738#endif
1739
1740#ifndef IN_RING3
1741 /*
1742 * Should we defer the request right away? This isn't usually the case, so
1743 * do the simple test first and the try deal with uErrorCode being N/A.
1744 */
1745 if (RT_UNLIKELY( ( !pRange->CTX_SUFF(pfnWriteCallback)
1746 || !pRange->CTX_SUFF(pfnReadCallback))
1747 && ( uErrorCode == UINT32_MAX
1748 ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
1749 : uErrorCode & X86_TRAP_PF_RW
1750 ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
1751 : !pRange->CTX_SUFF(pfnReadCallback) && pRange->pfnReadCallbackR3
1752 )
1753 )
1754 )
1755 {
1756 if (uErrorCode & X86_TRAP_PF_RW)
1757 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1758 else
1759 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1760
1761 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1762 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1763 iomMmioReleaseRange(pVM, pRange);
1764 return VINF_IOM_R3_MMIO_READ_WRITE;
1765 }
1766#endif /* !IN_RING3 */
1767
1768 /*
1769 * Retain the range and do locking.
1770 */
1771 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1772 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1773 if (rc != VINF_SUCCESS)
1774 {
1775 iomMmioReleaseRange(pVM, pRange);
1776 return rc;
1777 }
1778
1779#ifdef IEM_USE_IEM_INSTEAD
1780
1781 /*
1782 * Let IEM call us back via iomMmioHandler.
1783 */
1784 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
1785
1786 NOREF(pCtxCore); NOREF(GCPhysFault);
1787 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1788 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1789 iomMmioReleaseRange(pVM, pRange);
1790 if (RT_SUCCESS(rcStrict))
1791 return rcStrict;
1792 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1793 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1794 {
1795 Log(("IOM: Hit unsupported IEM feature!\n"));
1796 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
1797 }
1798 return rcStrict;
1799
1800#else
1801
1802 /*
1803 * Disassemble the instruction and interpret it.
1804 */
1805 PDISCPUSTATE pDis = &pVCpu->iom.s.DisState;
1806 unsigned cbOp;
1807 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
1808 if (RT_FAILURE(rc))
1809 {
1810 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1811 iomMmioReleaseRange(pVM, pRange);
1812 return rc;
1813 }
1814 switch (pDis->pCurInstr->uOpcode)
1815 {
1816 case OP_MOV:
1817 case OP_MOVZX:
1818 case OP_MOVSX:
1819 {
1820 STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
1821 AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
1822 if (uErrorCode != UINT32_MAX /* EPT+MMIO optimization */
1823 ? uErrorCode & X86_TRAP_PF_RW
1824 : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
1825 rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1826 else
1827 rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
1828 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
1829 break;
1830 }
1831
1832
1833# ifdef IOM_WITH_MOVS_SUPPORT
1834 case OP_MOVSB:
1835 case OP_MOVSWD:
1836 {
1837 if (uErrorCode == UINT32_MAX)
1838 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1839 else
1840 {
1841 STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
1842 PSTAMPROFILE pStat = NULL;
1843 rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
1844 STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
1845 }
1846 break;
1847 }
1848# endif
1849
1850 case OP_STOSB:
1851 case OP_STOSWD:
1852 Assert(uErrorCode & X86_TRAP_PF_RW);
1853 STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
1854 rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1855 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
1856 break;
1857
1858 case OP_LODSB:
1859 case OP_LODSWD:
1860 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1861 STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
1862 rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1863 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
1864 break;
1865
1866 case OP_CMP:
1867 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1868 STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
1869 rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1870 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
1871 break;
1872
1873 case OP_AND:
1874 STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
1875 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
1876 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
1877 break;
1878
1879 case OP_OR:
1880 STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
1881 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
1882 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
1883 break;
1884
1885 case OP_XOR:
1886 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
1887 rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
1888 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
1889 break;
1890
1891 case OP_TEST:
1892 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1893 STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
1894 rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1895 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
1896 break;
1897
1898 case OP_BT:
1899 Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
1900 STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
1901 rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1902 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
1903 break;
1904
1905 case OP_XCHG:
1906 STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
1907 rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
1908 STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
1909 break;
1910
1911
1912 /*
1913 * The instruction isn't supported. Hand it over to ring-3.
1914 */
1915 default:
1916 STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
1917 rc = VINF_IOM_R3_MMIO_READ_WRITE;
1918 break;
1919 }
1920
1921 /*
1922 * On success advance EIP.
1923 */
1924 if (rc == VINF_SUCCESS)
1925 pCtxCore->rip += cbOp;
1926 else
1927 {
1928 STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
1929# if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
1930 switch (rc)
1931 {
1932 case VINF_IOM_R3_MMIO_READ:
1933 case VINF_IOM_R3_MMIO_READ_WRITE:
1934 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1935 break;
1936 case VINF_IOM_R3_MMIO_WRITE:
1937 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
1938 break;
1939 }
1940# endif
1941 }
1942
1943 STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
1944 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1945 iomMmioReleaseRange(pVM, pRange);
1946 return rc;
1947#endif /* !IEM_USE_IEM_INSTEAD */
1948}
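

/*
 * Worked example for the non-IEM path above (illustrative only): a guest
 * "mov [mmio], eax" that faults sets X86_TRAP_PF_RW in uErrorCode, so the
 * OP_MOV case routes to iomInterpretMOVxXWrite; "mov eax, [mmio]" leaves the
 * bit clear and routes to iomInterpretMOVxXRead. With the EPT misconfig hack
 * (uErrorCode == UINT32_MAX) the direction is instead inferred from whether
 * the disassembled first operand is a memory reference
 * (DISUSE_IS_EFFECTIVE_ADDR on Param1.fUse).
 */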
1949
1950
1951/**
1952 * @callback_method_impl{FNPGMRZPHYSPFHANDLER,
1953 * \#PF access handler callback for MMIO pages.}
1954 *
1955 * @remarks The @a pvUser argument points to the IOMMMIORANGE.
1956 */
1957DECLEXPORT(VBOXSTRICTRC) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
1958 RTGCPHYS GCPhysFault, void *pvUser)
1959{
1960 LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
1961 GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip)); NOREF(pvFault);
1962 return iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
1963}
1964
1965
1966/**
1967 * Physical access handler for MMIO ranges.
1968 *
1969 * @returns VBox status code (appropriate for GC return).
1970 * @param pVM The cross context VM structure.
1971 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1972 * @param uErrorCode CPU Error code.
1973 * @param pCtxCore Trap register frame.
1974 * @param GCPhysFault The GC physical address.
1975 */
1976VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1977{
1978 /*
1979 * We don't have a range here, so look it up before calling the common function.
1980 */
1981 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
1982#ifndef IN_RING3
1983 if (rc2 == VERR_SEM_BUSY)
1984 return VINF_IOM_R3_MMIO_READ_WRITE;
1985#endif
1986 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
1987 if (RT_UNLIKELY(!pRange))
1988 {
1989 IOM_UNLOCK_SHARED(pVM);
1990 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1991 }
1992 iomMmioRetainRange(pRange);
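 /* Note: iomMmioCommonPfHandler retains and releases its own reference to
    the range, so this reference only has to cover the call below. */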
1993 IOM_UNLOCK_SHARED(pVM);
1994
1995 VBOXSTRICTRC rcStrict = iomMmioCommonPfHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
1996
1997 iomMmioReleaseRange(pVM, pRange);
1998 return VBOXSTRICTRC_VAL(rcStrict);
1999}
2000
2001
2002/**
2003 * @callback_method_impl{FNPGMPHYSHANDLER, MMIO page accesses}
2004 *
2005 * @remarks The @a pvUser argument points to the MMIO range entry.
2006 */
2007PGM_ALL_CB2_DECL(VBOXSTRICTRC) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf,
2008 size_t cbBuf, PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
2009{
2010 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
2011 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
2012
2013 AssertMsg(cbBuf >= 1 && cbBuf <= 16, ("%zu\n", cbBuf));
2014 AssertPtr(pRange);
2015 NOREF(pvPhys); NOREF(enmOrigin);
2016
2017 /*
2018 * Validate the range.
2019 */
2020 int rc = IOM_LOCK_SHARED(pVM);
2021#ifndef IN_RING3
2022 if (rc == VERR_SEM_BUSY)
2023 {
2024 if (enmAccessType == PGMACCESSTYPE_READ)
2025 return VINF_IOM_R3_MMIO_READ;
2026 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
2027 return iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, NULL /*pRange*/);
2028 }
2029#endif
2030 AssertRC(rc);
2031 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
2032
2033 /*
2034 * Perform locking.
2035 */
2036 iomMmioRetainRange(pRange);
2037 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2038 IOM_UNLOCK_SHARED(pVM);
2039 VBOXSTRICTRC rcStrict = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
2040 if (rcStrict == VINF_SUCCESS)
2041 {
2042 /*
2043 * Perform the access.
2044 */
2045 if (enmAccessType == PGMACCESSTYPE_READ)
2046 rcStrict = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2047 else
2048 {
2049 rcStrict = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
2050#ifndef IN_RING3
2051 if (rcStrict == VINF_IOM_R3_MMIO_WRITE)
2052 rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
2053#endif
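 /* Note: iomMmioRing3WritePending presumably stashes the data in
    pVCpu->iom.s.PendingMmioWrite (see the asserts in IOMMMIORead/Write
    below) and yields VINF_IOM_R3_MMIO_COMMIT_WRITE so ring-3 can replay
    just the write instead of restarting the whole instruction. */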
2054 }
2055
2056 /* Check the return code. */
2057#ifdef IN_RING3
2058 AssertMsg(rcStrict == VINF_SUCCESS, ("%Rrc - %RGp - %s\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pRange->pszDesc));
2059#else
2060 AssertMsg( rcStrict == VINF_SUCCESS
2061 || rcStrict == (enmAccessType == PGMACCESSTYPE_READ ? VINF_IOM_R3_MMIO_READ : VINF_IOM_R3_MMIO_WRITE)
2062 || (rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE && enmAccessType == PGMACCESSTYPE_WRITE)
2063 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
2064 || rcStrict == VINF_EM_DBG_STOP
2065 || rcStrict == VINF_EM_DBG_EVENT
2066 || rcStrict == VINF_EM_DBG_BREAKPOINT
2067 || rcStrict == VINF_EM_OFF
2068 || rcStrict == VINF_EM_SUSPEND
2069 || rcStrict == VINF_EM_RESET
2070 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
2071 //|| rcStrict == VINF_EM_HALT /* ?? */
2072 //|| rcStrict == VINF_EM_NO_MEMORY /* ?? */
2073 , ("%Rrc - %RGp - %p\n", VBOXSTRICTRC_VAL(rcStrict), GCPhysFault, pDevIns));
2074#endif
2075
2076 iomMmioReleaseRange(pVM, pRange);
2077 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2078 }
2079#ifdef IN_RING3
2080 else
2081 iomMmioReleaseRange(pVM, pRange);
2082#else
2083 else
2084 {
2085 if (rcStrict == VINF_IOM_R3_MMIO_READ_WRITE)
2086 {
2087 if (enmAccessType == PGMACCESSTYPE_READ)
2088 rcStrict = VINF_IOM_R3_MMIO_READ;
2089 else
2090 {
2091 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
2092 rcStrict = iomMmioRing3WritePending(pVCpu, GCPhysFault, pvBuf, cbBuf, pRange);
2093 }
2094 }
2095 iomMmioReleaseRange(pVM, pRange);
2096 }
2097#endif
2098 return rcStrict;
2099}
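

/*
 * For reference, a minimal sketch of the kind of device read callback the
 * handler above ends up invoking through pfnReadCallback. All names and the
 * register layout are invented for illustration; a real device registers its
 * callbacks via PDMDevHlpMMIORegister.
 */
#if 0 /* illustrative sketch, not part of the build */
static DECLCALLBACK(int) exampleMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
{
    NOREF(pDevIns); NOREF(pvUser); NOREF(GCPhysAddr);
    if (cb == 4)
    {
        /* A hypothetical 32-bit status register. */
        *(uint32_t *)pv = UINT32_C(0xdeadbeef);
        return VINF_SUCCESS;
    }
    /* Let IOM fill in 0xff for access sizes this device does not implement. */
    return VINF_IOM_MMIO_UNUSED_FF;
}
#endif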
2100
2101
2102#ifdef IN_RING3 /* Only used by REM. */
2103
2104/**
2105 * Reads an MMIO register.
2106 *
2107 * @returns VBox status code.
2108 *
2109 * @param pVM The cross context VM structure.
2110 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2111 * @param GCPhys The physical address to read.
2112 * @param pu32Value Where to store the value read.
2113 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
2114 */
2115VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
2116{
2117 Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
2118 /* Take the IOM lock before performing any MMIO. */
2119 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2120#ifndef IN_RING3
2121 if (rc == VERR_SEM_BUSY)
2122 return VINF_IOM_R3_MMIO_READ;
2123#endif
2124 AssertRC(VBOXSTRICTRC_VAL(rc));
2125#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2126 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
2127#endif
2128
2129 /*
2130 * Lookup the current context range node and statistics.
2131 */
2132 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2133 if (!pRange)
2134 {
2135 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2136 IOM_UNLOCK_SHARED(pVM);
2137 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2138 }
2139 iomMmioRetainRange(pRange);
2140#ifndef VBOX_WITH_STATISTICS
2141 IOM_UNLOCK_SHARED(pVM);
2142
2143#else /* VBOX_WITH_STATISTICS */
2144 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2145 if (!pStats)
2146 {
2147 iomMmioReleaseRange(pVM, pRange);
2148# ifdef IN_RING3
2149 return VERR_NO_MEMORY;
2150# else
2151 return VINF_IOM_R3_MMIO_READ;
2152# endif
2153 }
2154 STAM_COUNTER_INC(&pStats->Accesses);
2155#endif /* VBOX_WITH_STATISTICS */
2156
2157 if (pRange->CTX_SUFF(pfnReadCallback))
2158 {
2159 /*
2160 * Perform locking.
2161 */
2162 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2163 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
2164 if (rc != VINF_SUCCESS)
2165 {
2166 iomMmioReleaseRange(pVM, pRange);
2167 return rc;
2168 }
2169
2170 /*
2171 * Perform the read and deal with the result.
2172 */
2173 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
2174 if ( (cbValue == 4 && !(GCPhys & 3))
2175 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
2176 || (cbValue == 8 && !(GCPhys & 7)) )
2177 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
2178 pu32Value, (unsigned)cbValue);
2179 else
2180 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
2181 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2182 switch (VBOXSTRICTRC_VAL(rc))
2183 {
2184 case VINF_SUCCESS:
2185 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2186 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2187 iomMmioReleaseRange(pVM, pRange);
2188 return rc;
2189#ifndef IN_RING3
2190 case VINF_IOM_R3_MMIO_READ:
2191 case VINF_IOM_R3_MMIO_READ_WRITE:
2192 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2193#endif
2194 default:
2195 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2196 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2197 iomMmioReleaseRange(pVM, pRange);
2198 return rc;
2199
2200 case VINF_IOM_MMIO_UNUSED_00:
2201 iomMMIODoRead00s(pu32Value, cbValue);
2202 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2203 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2204 iomMmioReleaseRange(pVM, pRange);
2205 return VINF_SUCCESS;
2206
2207 case VINF_IOM_MMIO_UNUSED_FF:
2208 iomMMIODoReadFFs(pu32Value, cbValue);
2209 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2210 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2211 iomMmioReleaseRange(pVM, pRange);
2212 return VINF_SUCCESS;
2213 }
2214 /* not reached */
2215 }
2216#ifndef IN_RING3
2217 if (pRange->pfnReadCallbackR3)
2218 {
2219 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
2220 iomMmioReleaseRange(pVM, pRange);
2221 return VINF_IOM_R3_MMIO_READ;
2222 }
2223#endif
2224
2225 /*
2226 * Unassigned memory - this is actually not supposed to happen...
2227 */
2228 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
2229 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
2230 iomMMIODoReadFFs(pu32Value, cbValue);
2231 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
2232 iomMmioReleaseRange(pVM, pRange);
2233 return VINF_SUCCESS;
2234}
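

/*
 * Illustration of the read fast path above: a naturally aligned dword (say
 * GCPhys 0xf0000010, cbValue 4) or qword goes straight to the device
 * callback, while anything else - e.g. a single byte at 0xf0000011 - is
 * routed through iomMMIODoComplicatedRead, presumably where sub-dword and
 * unaligned accesses are split up and widened according to the
 * IOMMMIO_FLAGS_READ_* mode of the range.
 */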
2235
2236
2237/**
2238 * Writes to an MMIO register.
2239 *
2240 * @returns VBox status code.
2241 *
2242 * @param pVM The cross context VM structure.
2243 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2244 * @param GCPhys The physical address to write to.
2245 * @param u32Value The value to write.
2246 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
2247 */
2248VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
2249{
2250 Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
2251 /* Take the IOM lock before performing any MMIO. */
2252 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
2253#ifndef IN_RING3
2254 if (rc == VERR_SEM_BUSY)
2255 return VINF_IOM_R3_MMIO_WRITE;
2256#endif
2257 AssertRC(VBOXSTRICTRC_VAL(rc));
2258#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2259 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
2260#endif
2261
2262 /*
2263 * Lookup the current context range node.
2264 */
2265 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2266 if (!pRange)
2267 {
2268 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2269 IOM_UNLOCK_SHARED(pVM);
2270 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2271 }
2272 iomMmioRetainRange(pRange);
2273#ifndef VBOX_WITH_STATISTICS
2274 IOM_UNLOCK_SHARED(pVM);
2275
2276#else /* VBOX_WITH_STATISTICS */
2277 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2278 if (!pStats)
2279 {
2280 iomMmioReleaseRange(pVM, pRange);
2281# ifdef IN_RING3
2282 return VERR_NO_MEMORY;
2283# else
2284 return VINF_IOM_R3_MMIO_WRITE;
2285# endif
2286 }
2287 STAM_COUNTER_INC(&pStats->Accesses);
2288#endif /* VBOX_WITH_STATISTICS */
2289
2290 if (pRange->CTX_SUFF(pfnWriteCallback))
2291 {
2292 /*
2293 * Perform locking.
2294 */
2295 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2296 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
2297 if (rc != VINF_SUCCESS)
2298 {
2299 iomMmioReleaseRange(pVM, pRange);
2300 return rc;
2301 }
2302
2303 /*
2304 * Perform the write.
2305 */
2306 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2307 if ( (cbValue == 4 && !(GCPhys & 3))
2308 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
2309 || (cbValue == 8 && !(GCPhys & 7)) )
2310 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
2311 GCPhys, &u32Value, (unsigned)cbValue);
2312 else
2313 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2314 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2315#ifndef IN_RING3
2316 if ( rc == VINF_IOM_R3_MMIO_WRITE
2317 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2318 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2319#endif
2320 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2321 iomMmioReleaseRange(pVM, pRange);
2322 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2323 return rc;
2324 }
2325#ifndef IN_RING3
2326 if (pRange->pfnWriteCallbackR3)
2327 {
2328 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2329 iomMmioReleaseRange(pVM, pRange);
2330 return VINF_IOM_R3_MMIO_WRITE;
2331 }
2332#endif
2333
2334 /*
2335 * No write handler, nothing to do.
2336 */
2337 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2338 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2339 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2340 iomMmioReleaseRange(pVM, pRange);
2341 return VINF_SUCCESS;
2342}
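

/*
 * Minimal usage sketch for the two REM helpers above (illustrative only; the
 * physical address and the bit fiddling are made up):
 */
#if 0 /* illustrative sketch, not part of the build */
static void exampleRemMmioAccess(PVM pVM, PVMCPU pVCpu)
{
    uint32_t u32 = 0;
    VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, pVCpu, UINT64_C(0xf0000000), &u32, sizeof(u32));
    if (rcStrict == VINF_SUCCESS)
        rcStrict = IOMMMIOWrite(pVM, pVCpu, UINT64_C(0xf0000000), u32 | RT_BIT_32(0), sizeof(u32));
    AssertLogRelMsg(rcStrict == VINF_SUCCESS, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
}
#endif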
2343
2344#endif /* IN_RING3 - only used by REM. */
2345#ifndef IEM_USE_IEM_INSTEAD
2346
2347/**
2348 * [REP*] INSB/INSW/INSD
2349 * ES:EDI,DX[,ECX]
2350 *
2351 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2352 *
2353 * @returns Strict VBox status code. Informational status codes other than the one documented
2354 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2355 * @retval VINF_SUCCESS Success.
2356 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2357 * status code must be passed on to EM.
2358 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2359 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2360 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2361 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2362 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2363 *
2364 * @param pVM The cross context VM structure.
2365 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2366 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2367 * @param uPort IO Port
2368 * @param uPrefix IO instruction prefix
2369 * @param enmAddrMode The address mode.
2370 * @param cbTransfer Size of transfer unit
2371 */
2372VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2373 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2374{
2375 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2376 Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
2377
2378 /*
2379 * We do not support REPNE or a decrementing destination
2380 * pointer. Segment prefixes are deliberately ignored, as per the instruction specification.
2381 */
2382 if ( (uPrefix & DISPREFIX_REPNE)
2383 || pRegFrame->eflags.Bits.u1DF)
2384 return VINF_EM_RAW_EMULATE_INSTR;
2385
2386 /*
2387 * Get bytes/words/dwords count to transfer.
2388 */
2389 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2390 RTGCUINTREG cTransfers = 1;
2391 if (uPrefix & DISPREFIX_REP)
2392 {
2393#ifndef IN_RC
2394 if ( CPUMIsGuestIn64BitCode(pVCpu)
2395 && pRegFrame->rcx >= _4G)
2396 return VINF_EM_RAW_EMULATE_INSTR;
2397#endif
2398 cTransfers = pRegFrame->rcx & fAddrMask;
2399 if (!cTransfers)
2400 return VINF_SUCCESS;
2401 }
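
 /*
  * Note: fAddrMask is all-ones for the guest address width (e.g. 0xffff in
  * 16-bit address mode), so updates of the form
  *     rdi = ((rdi + cb) & fAddrMask) | (rdi & ~fAddrMask)
  * below wrap the low word while preserving the untouched upper bits; with
  * rdi=0x1234fffe, cb=4 and a 16-bit mask this yields 0x12340002.
  */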
2402
2403 /* Convert destination address es:edi. */
2404 RTGCPTR GCPtrDst;
2405 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2406 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2407 &GCPtrDst);
2408 if (RT_FAILURE(rc2))
2409 {
2410 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2411 return VINF_EM_RAW_EMULATE_INSTR;
2412 }
2413
2414 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2415 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2416 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2417 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2418 if (rc2 != VINF_SUCCESS)
2419 {
2420 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2421 return VINF_EM_RAW_EMULATE_INSTR;
2422 }
2423
2424 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2425 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2426 if (cTransfers > 1)
2427 {
2428 /*
2429 * Work the string page by page, letting the device handle as much
2430 * as it likes via the string I/O interface.
2431 */
2432 for (;;)
2433 {
2434 PGMPAGEMAPLOCK Lock;
2435 void *pvDst;
2436 rc2 = PGMPhysGCPtr2CCPtr(pVCpu, GCPtrDst, &pvDst, &Lock);
2437 if (RT_SUCCESS(rc2))
2438 {
2439 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrDst & PAGE_OFFSET_MASK)) / cbTransfer;
2440 if (cMaxThisTime > cTransfers)
2441 cMaxThisTime = cTransfers;
2442 if (!cMaxThisTime)
2443 break;
2444 uint32_t cThisTime = cMaxThisTime;
2445
2446 rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, pvDst, &cThisTime, cbTransfer);
2447 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2448 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2449
2450 uint32_t const cActual = cMaxThisTime - cThisTime;
2451 if (cActual)
2452 { /* Must dirty the page. */
2453 uint8_t b = *(uint8_t *)pvDst;
2454 iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &b, 1);
2455 }
2456
2457 PGMPhysReleasePageMappingLock(pVM, &Lock);
2458
2459 uint32_t const cbActual = cActual * cbTransfer;
2460 cTransfers -= cActual;
2461 pRegFrame->rdi = ((pRegFrame->rdi + cbActual) & fAddrMask)
2462 | (pRegFrame->rdi & ~fAddrMask);
2463 GCPtrDst += cbActual;
2464
2465 if ( cThisTime
2466 || !cTransfers
2467 || rcStrict != VINF_SUCCESS
2468 || (GCPtrDst & PAGE_OFFSET_MASK))
2469 break;
2470 }
2471 else
2472 {
2473 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtr %#RGv -> %Rrc\n", GCPtrDst, rc2));
2474 break;
2475 }
2476 }
2477 }
2478
2479 /*
2480 * Single transfer / unmapped memory fallback.
2481 */
2482#ifdef IN_RC
2483 MMGCRamRegisterTrapHandler(pVM);
2484#endif
2485 while (cTransfers && rcStrict == VINF_SUCCESS)
2486 {
2487 uint32_t u32Value;
2488 rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer);
2489 if (!IOM_SUCCESS(rcStrict))
2490 break;
2491 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2492 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2493 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2494 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2495 | (pRegFrame->rdi & ~fAddrMask);
2496 cTransfers--;
2497 }
2498#ifdef IN_RC
2499 MMGCRamDeregisterTrapHandler(pVM);
2500#endif
2501
2502 /* Update rcx on exit. */
2503 if (uPrefix & DISPREFIX_REP)
2504 pRegFrame->rcx = (cTransfers & fAddrMask)
2505 | (pRegFrame->rcx & ~fAddrMask);
2506
2507 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2508 return rcStrict;
2509}
2510
2511
2512/**
2513 * [REP*] OUTSB/OUTSW/OUTSD
2514 * DS:ESI,DX[,ECX]
2515 *
2516 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2517 *
2518 * @returns Strict VBox status code. Informational status codes other than the one documented
2519 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2520 * @retval VINF_SUCCESS Success.
2521 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2522 * status code must be passed on to EM.
2523 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2524 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2525 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2526 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2527 *
2528 * @param pVM The cross context VM structure.
2529 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
2530 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2531 * @param uPort IO Port
2532 * @param uPrefix IO instruction prefix
2533 * @param enmAddrMode The address mode.
2534 * @param cbTransfer Size of transfer unit
2535 *
2536 * @remarks This API will probably be replaced by IEM before long, so no use in
2537 * optimizing+fixing stuff too much here.
2538 */
2539VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2540 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2541{
2542 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2543 Assert(pVCpu->iom.s.PendingMmioWrite.cbValue == 0);
2544
2545 /*
2546 * We do not support segment prefixes, REPNE or
2547 * a decrementing source pointer.
2548 */
2549 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2550 || pRegFrame->eflags.Bits.u1DF)
2551 return VINF_EM_RAW_EMULATE_INSTR;
2552
2553 /*
2554 * Get bytes/words/dwords count to transfer.
2555 */
2556 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2557 RTGCUINTREG cTransfers = 1;
2558 if (uPrefix & DISPREFIX_REP)
2559 {
2560#ifndef IN_RC
2561 if ( CPUMIsGuestIn64BitCode(pVCpu)
2562 && pRegFrame->rcx >= _4G)
2563 return VINF_EM_RAW_EMULATE_INSTR;
2564#endif
2565 cTransfers = pRegFrame->rcx & fAddrMask;
2566 if (!cTransfers)
2567 return VINF_SUCCESS;
2568 }
2569
2570 /* Convert source address ds:esi. */
2571 RTGCPTR GCPtrSrc;
2572 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2573 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2574 &GCPtrSrc);
2575 if (RT_FAILURE(rc2))
2576 {
2577 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2578 return VINF_EM_RAW_EMULATE_INSTR;
2579 }
2580
2581 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2582 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2583 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2584 (cpl == 3) ? X86_PTE_US : 0);
2585 if (rc2 != VINF_SUCCESS)
2586 {
2587 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2588 return VINF_EM_RAW_EMULATE_INSTR;
2589 }
2590
2591 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2592 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2593 if (cTransfers > 1)
2594 {
2595 /*
2596 * Work the string page by page, letting the device handle as much
2597 * as it likes via the string I/O interface.
2598 */
2599 for (;;)
2600 {
2601 PGMPAGEMAPLOCK Lock;
2602 void const *pvSrc;
2603 rc2 = PGMPhysGCPtr2CCPtrReadOnly(pVCpu, GCPtrSrc, &pvSrc, &Lock);
2604 if (RT_SUCCESS(rc2))
2605 {
2606 uint32_t cMaxThisTime = (PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK)) / cbTransfer;
2607 if (cMaxThisTime > cTransfers)
2608 cMaxThisTime = cTransfers;
2609 if (!cMaxThisTime)
2610 break;
2611 uint32_t cThisTime = cMaxThisTime;
2612
2613 rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, pvSrc, &cThisTime, cbTransfer);
2614 AssertRC(VBOXSTRICTRC_VAL(rcStrict));
2615 Assert(cThisTime <= cMaxThisTime); /* cThisTime is now how many transfers we have left. */
2616
2617 PGMPhysReleasePageMappingLock(pVM, &Lock);
2618
2619 uint32_t const cActual = cMaxThisTime - cThisTime;
2620 uint32_t const cbActual = cActual * cbTransfer;
2621 cTransfers -= cActual;
2622 pRegFrame->rsi = ((pRegFrame->rsi + cbActual) & fAddrMask)
2623 | (pRegFrame->rsi & ~fAddrMask);
2624 GCPtrSrc += cbActual;
2625
2626 if ( cThisTime
2627 || !cTransfers
2628 || rcStrict != VINF_SUCCESS
2629 || (GCPtrSrc & PAGE_OFFSET_MASK))
2630 break;
2631 }
2632 else
2633 {
2634 Log(("IOMInterpretOUTSEx: PGMPhysGCPtr2CCPtrReadOnly %#RGv -> %Rrc\n", GCPtrSrc, rc2));
2635 break;
2636 }
2637 }
2638 }
2639
2640 /*
2641 * Single transfer / unmapped memory fallback.
2642 */
2643#ifdef IN_RC
2644 MMGCRamRegisterTrapHandler(pVM);
2645#endif
2646
2647 while (cTransfers && rcStrict == VINF_SUCCESS)
2648 {
2649 uint32_t u32Value = 0;
2650 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2651 if (rcStrict != VINF_SUCCESS)
2652 break;
2653 rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer);
2654 if (!IOM_SUCCESS(rcStrict))
2655 break;
2656 GCPtrSrc = (RTGCPTR)((RTUINTPTR)GCPtrSrc + cbTransfer);
2657 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2658 | (pRegFrame->rsi & ~fAddrMask);
2659 cTransfers--;
2660 }
2661
2662#ifdef IN_RC
2663 MMGCRamDeregisterTrapHandler(pVM);
2664#endif
2665
2666 /* Update rcx on exit. */
2667 if (uPrefix & DISPREFIX_REP)
2668 pRegFrame->rcx = (cTransfers & fAddrMask)
2669 | (pRegFrame->rcx & ~fAddrMask);
2670
2671 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2672 return rcStrict;
2673}
2674
2675#endif /* !IEM_USE_IEM_INSTEAD */
2676
2677
2678#ifndef IN_RC
2679
2680/**
2681 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2682 *
2683 * (This is a special optimization used by the VGA device.)
2684 *
2685 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2686 * remapping is made.
2687 *
2688 * @param pVM The cross context VM structure.
2689 * @param GCPhys The address of the MMIO page to be changed.
2690 * @param GCPhysRemapped The address of the MMIO2 page.
2691 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2692 * for the time being.
2693 */
2694VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2695{
2696# ifndef IEM_VERIFICATION_MODE_FULL
2697 /* Currently only called from the VGA device during MMIO. */
2698 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2699 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2700 PVMCPU pVCpu = VMMGetCpu(pVM);
2701
2702 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2703 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2704 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2705 && !HMIsNestedPagingActive(pVM)))
2706 return VINF_SUCCESS; /* ignore */
2707
2708 int rc = IOM_LOCK_SHARED(pVM);
2709 if (RT_FAILURE(rc))
2710 return VINF_SUCCESS; /* better luck the next time around */
2711
2712 /*
2713 * Lookup the context range node the page belongs to.
2714 */
2715 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2716 AssertMsgReturn(pRange,
2717 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2718
2719 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2720 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2721
2722 /*
2723 * Do the aliasing; page align the addresses since PGM is picky.
2724 */
2725 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2726 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2727
2728 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2729
2730 IOM_UNLOCK_SHARED(pVM);
2731 AssertRCReturn(rc, rc);
2732
2733 /*
2734 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2735 * can simply prefetch it.
2736 *
2737 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2738 */
2739# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2740# ifdef VBOX_STRICT
2741 uint64_t fFlags;
2742 RTHCPHYS HCPhys;
2743 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2744 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2745# endif
2746# endif
2747 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2748 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2749# endif /* !IEM_VERIFICATION_MODE_FULL */
2750 return VINF_SUCCESS;
2751}
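

/*
 * Rough usage sketch (illustrative only; GCPhysMmioPage and GCPhysMmio2Page
 * are hypothetical): the VGA device maps an MMIO2 framebuffer page over the
 * handled page on first access, and the mapping is undone again with
 * IOMMMIOResetRegion (below) when the region must be re-monitored, e.g. on a
 * mode switch:
 */
# if 0 /* illustrative sketch, not part of the build */
int rc = IOMMMIOMapMMIO2Page(pVM, GCPhysMmioPage, GCPhysMmio2Page, X86_PTE_RW | X86_PTE_P);
AssertRC(rc);
/* ... the guest now reads/writes the page without VM exits ... */
rc = IOMMMIOResetRegion(pVM, GCPhysMmioPage);
AssertRC(rc);
# endif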
2752
2753
2754# ifndef IEM_VERIFICATION_MODE_FULL
2755/**
2756 * Mapping a HC page in place of an MMIO page for direct access.
2757 *
2758 * (This is a special optimization used by the APIC in the VT-x case.)
2759 *
2760 * @returns VBox status code.
2761 *
2762 * @param pVM The cross context VM structure.
2763 * @param pVCpu The cross context virtual CPU structure.
2764 * @param GCPhys The address of the MMIO page to be changed.
2765 * @param HCPhys The address of the host physical page.
2766 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2767 * for the time being.
2768 */
2769VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2770{
2771 /* Currently only called from VT-x code during a page fault. */
2772 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2773
2774 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2775 Assert(HMIsEnabled(pVM));
2776
2777 /*
2778 * Lookup the context range node the page belongs to.
2779 */
2780# ifdef VBOX_STRICT
2781 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2782 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2783 AssertMsgReturn(pRange,
2784 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2785 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2786 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2787# endif
2788
2789 /*
2790 * Do the aliasing; page align the addresses since PGM is picky.
2791 */
2792 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2793 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2794
2795 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2796 AssertRCReturn(rc, rc);
2797
2798 /*
2799 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2800 * can simply prefetch it.
2801 *
2802 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2803 */
2804 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2805 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2806 return VINF_SUCCESS;
2807}
2808# endif /* !IEM_VERIFICATION_MODE_FULL */
2809
2810
2811/**
2812 * Reset a previously modified MMIO region; restore the access flags.
2813 *
2814 * @returns VBox status code.
2815 *
2816 * @param pVM The cross context VM structure.
2817 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2818 */
2819VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2820{
2821 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2822
2823 PVMCPU pVCpu = VMMGetCpu(pVM);
2824
2825 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2826 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2827 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2828 && !HMIsNestedPagingActive(pVM)))
2829 return VINF_SUCCESS; /* ignore */
2830
2831 /*
2832 * Lookup the context range node the page belongs to.
2833 */
2834# ifdef VBOX_STRICT
2835 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2836 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2837 AssertMsgReturn(pRange,
2838 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2839 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2840 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2841# endif
2842
2843 /*
2844 * Call PGM to do the work.
2845 *
2846 * After the call, all the pages should be non-present... unless there is
2847 * a page pool flush pending (unlikely).
2848 */
2849 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2850 AssertRC(rc);
2851
2852# ifdef VBOX_STRICT
2853 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2854 {
2855 uint32_t cb = pRange->cb;
2856 GCPhys = pRange->GCPhys;
2857 while (cb)
2858 {
2859 uint64_t fFlags;
2860 RTHCPHYS HCPhys;
2861 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2862 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2863 cb -= PAGE_SIZE;
2864 GCPhys += PAGE_SIZE;
2865 }
2866 }
2867# endif
2868 return rc;
2869}
2870
2871#endif /* !IN_RC */
2872