source: vbox/trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp@55909

Last change on this file: r55909, checked in by vboxsync on 2015-05-18:

PGM,++: Made the ring-3 physical access handler callbacks present in all contexts, where applicable. They are not yet registered or used. Taking things slowly.

/* $Id: IOMAllMMIO.cpp 55909 2015-05-18 13:09:16Z vboxsync $ */
/** @file
 * IOM - Input / Output Monitor - Any Context, MMIO & String I/O.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_IOM
#include <VBox/vmm/iom.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/trpm.h>
#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
# include <VBox/vmm/iem.h>
#endif
#include "IOMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/hm.h>
#include "IOMInline.h"

#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/param.h>
#include <VBox/err.h>
#include <iprt/assert.h>
#include <VBox/log.h>
#include <iprt/asm.h>
#include <iprt/string.h>


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/

/**
 * Array for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 */
static const unsigned g_aSize2Shift[] =
{
    ~0U,   /* 0 - invalid */
    0,     /*  *1 == 2^0 */
    1,     /*  *2 == 2^1 */
    ~0U,   /* 3 - invalid */
    2,     /*  *4 == 2^2 */
    ~0U,   /* 5 - invalid */
    ~0U,   /* 6 - invalid */
    ~0U,   /* 7 - invalid */
    3      /*  *8 == 2^3 */
};

/**
 * Macro for fast recode of the operand size (1/2/4/8 bytes) to bit shift value.
 */
#define SIZE_2_SHIFT(cb)    (g_aSize2Shift[cb])
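
/*
 * Usage sketch (illustrative only, not part of the original interface): the
 * shift form lets callers turn an element count into a byte count without a
 * multiply, which is how the REP string statistics further down use it.
 *
 * @code
 *      uint32_t cTransfers = 16;
 *      unsigned cb         = 4;                               // dword elements
 *      uint32_t cbTotal    = cTransfers << SIZE_2_SHIFT(cb);  // 16 * 4 = 64
 * @endcode
 */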


/**
 * Deals with complicated MMIO writes.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_WRITE, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_READ may be returned.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRange      The range to write to.
 * @param   GCPhys      The physical address to start writing.
 * @param   pvValue     The value to write.
 * @param   cbValue     The size of the value to write.
 */
static VBOXSTRICTRC iomMMIODoComplicatedWrite(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void const *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) != IOMMMIO_FLAGS_WRITE_PASSTHRU
                 && (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) <= IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart  = GCPhys; NOREF(GCPhysStart);
    bool const     fReadMissing = (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_READ_MISSING
                               || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_DWORD_QWORD_READ_MISSING;

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_WRITE)
    {
# ifdef IN_RING3
        LogRel(("IOM: Complicated write %#x byte at %RGp to %s, initiating debugger intervention\n", cbValue, GCPhys,
                R3STRING(pRange->pszDesc)));
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated write %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    }
#endif

    /*
     * Check if we should ignore the write.
     */
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD)
    {
        Assert(cbValue != 4 || (GCPhys & 3));
        return VINF_SUCCESS;
    }
    if ((pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_ONLY_DWORD_QWORD)
    {
        Assert((cbValue != 4 && cbValue != 8) || (GCPhys & (cbValue - 1)));
        return VINF_SUCCESS;
    }

    /*
     * Split and conquer.
     */
    for (;;)
    {
        unsigned const  offAccess  = GCPhys & 3;
        unsigned        cbThisPart = 4 - offAccess;
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        /*
         * Get the missing bits (if any).
         */
        uint32_t u32MissingValue = 0;
        if (fReadMissing && cbThisPart != 4)
        {
            int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                        GCPhys & ~(RTGCPHYS)3, &u32MissingValue, sizeof(u32MissingValue));
            switch (rc2)
            {
                case VINF_SUCCESS:
                    break;
                case VINF_IOM_MMIO_UNUSED_FF:
                    u32MissingValue = UINT32_C(0xffffffff);
                    break;
                case VINF_IOM_MMIO_UNUSED_00:
                    u32MissingValue = 0;
                    break;
                case VINF_IOM_R3_MMIO_READ:
                case VINF_IOM_R3_MMIO_READ_WRITE:
                case VINF_IOM_R3_MMIO_WRITE:
                    /** @todo What if we've split a transfer and already read
                     *        something?  Since writes generally have side effects, we
                     *        could be kind of screwed here...
                     *
                     *        Fix: Save the current state and resume it in ring-3. Requires EM to not go
                     *        to REM for MMIO accesses (as it currently may do). */

                    LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                default:
                    if (RT_FAILURE(rc2))
                    {
                        Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [read]\n", GCPhys, GCPhysStart, cbValue, rc2));
                        return rc2;
                    }
                    AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                    if (rc == VINF_SUCCESS || rc2 < rc)
                        rc = rc2;
                    break;
            }
        }

        /*
         * Merge missing and given bits.
         */
        uint32_t u32GivenMask;
        uint32_t u32GivenValue;
        switch (cbThisPart)
        {
            case 1:
                u32GivenValue = *(uint8_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x000000ff);
                break;
            case 2:
                u32GivenValue = *(uint16_t const *)pvValue;
                u32GivenMask  = UINT32_C(0x0000ffff);
                break;
            case 3:
                u32GivenValue = RT_MAKE_U32_FROM_U8(((uint8_t const *)pvValue)[0], ((uint8_t const *)pvValue)[1],
                                                    ((uint8_t const *)pvValue)[2], 0);
                u32GivenMask  = UINT32_C(0x00ffffff);
                break;
            case 4:
                u32GivenValue = *(uint32_t const *)pvValue;
                u32GivenMask  = UINT32_C(0xffffffff);
                break;
            default:
                AssertFailedReturn(VERR_IOM_MMIO_IPE_3);
        }
        if (offAccess)
        {
            u32GivenValue <<= offAccess * 8;
            u32GivenMask  <<= offAccess * 8;
        }

        uint32_t u32Value = (u32MissingValue & ~u32GivenMask)
                          | (u32GivenValue  &  u32GivenMask);

        /*
         * Do DWORD write to the device.
         */
        int rc2 = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                     GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something?  Since reads can have side effects, we could be
                 *        kind of screwed here...
                 *
                 *        Fix: Save the current state and resume it in ring-3. Requires EM to not go
                 *        to REM for MMIO accesses (as it currently may do). */
                LogFlow(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedWrite: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc [write]\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t const *)pvValue + cbThisPart;
    }

    return rc;
}
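
/*
 * Worked example of the splitting above (a sketch; the addresses are made up):
 * a 2-byte write of 0xBEEF to GCPhys 0x1007 in a READ_MISSING mode becomes two
 * dword transactions. Part one reads the dword at 0x1004 and merges byte 0xEF
 * (the low half of the value) into its top byte; part two reads the dword at
 * 0x1008 and merges 0xBE into its bottom byte.
 *
 * @code
 *      unsigned offAccess     = 0x1007 & 3;                        // 3
 *      uint32_t u32GivenValue = UINT32_C(0xef) << (offAccess * 8); // 0xef000000
 *      uint32_t u32GivenMask  = UINT32_C(0xff) << (offAccess * 8); // 0xff000000
 *      // u32Value = (u32MissingValue & ~u32GivenMask) | (u32GivenValue & u32GivenMask);
 * @endcode
 */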



/**
 * Wrapper which does the write and updates range statistics when such are enabled.
 * @warning RT_SUCCESS(rc=VINF_IOM_R3_MMIO_WRITE) is TRUE!
 */
static int iomMMIODoWrite(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault, const void *pvData, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_WRITE;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_WRITE;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
#endif

    VBOXSTRICTRC rc;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnWriteCallback)))
    {
        if (   (cb == 4 && !(GCPhysFault & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
            || (cb == 8 && !(GCPhysFault & 7) && IOMMMIO_DOES_WRITE_MODE_ALLOW_QWORD(pRange->fFlags)) )
            rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhysFault, (void *)pvData, cb); /** @todo fix const!! */
        else
            rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhysFault, pvData, cb);
    }
    else
        rc = VINF_SUCCESS;

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return VBOXSTRICTRC_TODO(rc);
}


/**
 * Deals with complicated MMIO reads.
 *
 * Complicated means unaligned or non-dword/qword sized accesses depending on
 * the MMIO region's access mode flags.
 *
 * @returns Strict VBox status code. Any EM scheduling status code,
 *          VINF_IOM_R3_MMIO_READ, VINF_IOM_R3_MMIO_READ_WRITE or
 *          VINF_IOM_R3_MMIO_WRITE may be returned.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pRange      The range to read from.
 * @param   GCPhys      The physical address to start reading.
 * @param   pvValue     Where to store the value.
 * @param   cbValue     The size of the value to read.
 */
static VBOXSTRICTRC iomMMIODoComplicatedRead(PVM pVM, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
    AssertReturn(   (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD
                 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD,
                 VERR_IOM_MMIO_IPE_1);
    AssertReturn(cbValue != 0 && cbValue <= 16, VERR_IOM_MMIO_IPE_2);
    RTGCPHYS const GCPhysStart = GCPhys; NOREF(GCPhysStart);

    /*
     * Do debug stop if requested.
     */
    int rc = VINF_SUCCESS; NOREF(pVM);
#ifdef VBOX_STRICT
    if (pRange->fFlags & IOMMMIO_FLAGS_DBGSTOP_ON_COMPLICATED_READ)
    {
# ifdef IN_RING3
        rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, RT_SRC_POS,
                            "Complicated read %#x byte at %RGp to %s\n", cbValue, GCPhys, R3STRING(pRange->pszDesc));
        if (rc == VERR_DBGF_NOT_ATTACHED)
            rc = VINF_SUCCESS;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    }
#endif

    /*
     * Split and conquer.
     */
    for (;;)
    {
        /*
         * Do DWORD read from the device.
         */
        uint32_t u32Value;
        int rc2 = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                    GCPhys & ~(RTGCPHYS)3, &u32Value, sizeof(u32Value));
        switch (rc2)
        {
            case VINF_SUCCESS:
                break;
            case VINF_IOM_MMIO_UNUSED_FF:
                u32Value = UINT32_C(0xffffffff);
                break;
            case VINF_IOM_MMIO_UNUSED_00:
                u32Value = 0;
                break;
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
            case VINF_IOM_R3_MMIO_WRITE:
                /** @todo What if we've split a transfer and already read
                 *        something?  Since reads can have side effects, we could be
                 *        kind of screwed here... */
                LogFlow(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                return rc2;
            default:
                if (RT_FAILURE(rc2))
                {
                    Log(("iomMMIODoComplicatedRead: GCPhys=%RGp GCPhysStart=%RGp cbValue=%u rc=%Rrc\n", GCPhys, GCPhysStart, cbValue, rc2));
                    return rc2;
                }
                AssertMsgReturn(rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST, ("%Rrc\n", rc2), VERR_IPE_UNEXPECTED_INFO_STATUS);
                if (rc == VINF_SUCCESS || rc2 < rc)
                    rc = rc2;
                break;
        }
        u32Value >>= (GCPhys & 3) * 8;

        /*
         * Write what we've read.
         */
        unsigned cbThisPart = 4 - (GCPhys & 3);
        if (cbThisPart > cbValue)
            cbThisPart = cbValue;

        switch (cbThisPart)
        {
            case 1:
                *(uint8_t *)pvValue = (uint8_t)u32Value;
                break;
            case 2:
                *(uint16_t *)pvValue = (uint16_t)u32Value;
                break;
            case 3:
                ((uint8_t *)pvValue)[0] = RT_BYTE1(u32Value);
                ((uint8_t *)pvValue)[1] = RT_BYTE2(u32Value);
                ((uint8_t *)pvValue)[2] = RT_BYTE3(u32Value);
                break;
            case 4:
                *(uint32_t *)pvValue = u32Value;
                break;
        }

        /*
         * Advance.
         */
        cbValue -= cbThisPart;
        if (!cbValue)
            break;
        GCPhys  += cbThisPart;
        pvValue  = (uint8_t *)pvValue + cbThisPart;
    }

    return rc;
}
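
/*
 * Worked example of the extraction above (a sketch with a made-up address): a
 * single byte read at GCPhys 0x1006 issues one dword read at 0x1004 and shifts
 * the wanted byte down to bit 0 before storing it.
 *
 * @code
 *      uint32_t u32Value = UINT32_C(0xddccbbaa);   // dword returned by the device
 *      u32Value >>= (0x1006 & 3) * 8;              // 0x0000ddcc
 *      uint8_t  bResult = (uint8_t)u32Value;       // 0xcc, i.e. byte 2 of the dword
 * @endcode
 */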


/**
 * Implements VINF_IOM_MMIO_UNUSED_FF.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the 0xff bytes.
 * @param   cbValue     How many bytes to read.
 */
static int iomMMIODoReadFFs(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t  *)pvValue = UINT8_C(0xff); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0xffff); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0xffffffff); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0xffffffffffffffff); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0xff);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Implements VINF_IOM_MMIO_UNUSED_00.
 *
 * @returns VINF_SUCCESS.
 * @param   pvValue     Where to store the zeros.
 * @param   cbValue     How many bytes to read.
 */
static int iomMMIODoRead00s(void *pvValue, size_t cbValue)
{
    switch (cbValue)
    {
        case 1: *(uint8_t  *)pvValue = UINT8_C(0x00); break;
        case 2: *(uint16_t *)pvValue = UINT16_C(0x0000); break;
        case 4: *(uint32_t *)pvValue = UINT32_C(0x00000000); break;
        case 8: *(uint64_t *)pvValue = UINT64_C(0x0000000000000000); break;
        default:
        {
            uint8_t *pb = (uint8_t *)pvValue;
            while (cbValue--)
                *pb++ = UINT8_C(0x00);
            break;
        }
    }
    return VINF_SUCCESS;
}


/**
 * Wrapper which does the read and updates range statistics when such are enabled.
 */
DECLINLINE(int) iomMMIODoRead(PVM pVM, PVMCPU pVCpu, PIOMMMIORANGE pRange, RTGCPHYS GCPhys, void *pvValue, unsigned cbValue)
{
#ifdef VBOX_WITH_STATISTICS
    int rcSem = IOM_LOCK_SHARED(pVM);
    if (rcSem == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ;
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
    if (!pStats)
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        return VINF_IOM_R3_MMIO_READ;
# endif
    STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
#endif

    VBOXSTRICTRC rc;
    if (RT_LIKELY(pRange->CTX_SUFF(pfnReadCallback)))
    {
        if (   (    cbValue == 4
                && !(GCPhys & 3))
            || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
            || (    cbValue == 8
                && !(GCPhys & 7)
                && (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_DWORD_QWORD ) )
            rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys, pvValue, cbValue);
        else
            rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pvValue, cbValue);
    }
    else
        rc = VINF_IOM_MMIO_UNUSED_FF;
    if (rc != VINF_SUCCESS)
    {
        switch (VBOXSTRICTRC_VAL(rc))
        {
            case VINF_IOM_MMIO_UNUSED_FF: rc = iomMMIODoReadFFs(pvValue, cbValue); break;
            case VINF_IOM_MMIO_UNUSED_00: rc = iomMMIODoRead00s(pvValue, cbValue); break;
        }
    }

    STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
    STAM_COUNTER_INC(&pStats->Accesses);
    return VBOXSTRICTRC_VAL(rc);
}
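
/*
 * Note on the UNUSED statuses above (a sketch): a range without a read
 * callback, or a device returning VINF_IOM_MMIO_UNUSED_FF, never touches the
 * caller's buffer itself; the wrapper materializes the pattern instead.
 *
 * @code
 *      uint32_t u32 = 0;
 *      iomMMIODoReadFFs(&u32, sizeof(u32));    // u32 == 0xffffffff
 * @endcode
 */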


/**
 * Internal - statistics only.
 */
DECLINLINE(void) iomMMIOStatLength(PVM pVM, unsigned cb)
{
#ifdef VBOX_WITH_STATISTICS
    switch (cb)
    {
        case 1:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO1Byte);
            break;
        case 2:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO2Bytes);
            break;
        case 4:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO4Bytes);
            break;
        case 8:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIO8Bytes);
            break;
        default:
            /* No way. */
            AssertMsgFailed(("Invalid data length %d\n", cb));
            break;
    }
#else
    NOREF(pVM); NOREF(cb);
#endif
}


/**
 * MOV      reg, mem         (read)
 * MOVZX    reg, mem         (read)
 * MOVSX    reg, mem         (read)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXRead(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
                                 PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the data size from parameter 2,
     * and call the handler function to get the data.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));

    uint64_t u64Data = 0;
    int rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb);
    if (rc == VINF_SUCCESS)
    {
        /*
         * Do sign extension for MOVSX.
         */
        /** @todo checkup MOVSX implementation! */
        if (pCpu->pCurInstr->uOpcode == OP_MOVSX)
        {
            if (cb == 1)
            {
                /* DWORD <- BYTE */
                int64_t iData = (int8_t)u64Data;
                u64Data = (uint64_t)iData;
            }
            else
            {
                /* DWORD <- WORD */
                int64_t iData = (int16_t)u64Data;
                u64Data = (uint64_t)iData;
            }
        }

        /*
         * Store the result to register (parameter 1).
         */
        bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, u64Data);
        AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
    }

    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}
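
/*
 * Sign extension sketch for the MOVSX path above: a byte 0x80 read from the
 * device must reach the destination register as a negative value.
 *
 * @code
 *      uint64_t u64Data = 0x80;
 *      int64_t  iData   = (int8_t)u64Data;     // -128
 *      u64Data = (uint64_t)iData;              // 0xffffffffffffff80
 * @endcode
 */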


/**
 * MOV      mem, reg|imm     (write)
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Pointer to CPUMCTXCORE guest registers structure.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 */
static int iomInterpretMOVxXWrite(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu,
                                  PIOMMMIORANGE pRange, RTGCPHYS GCPhysFault)
{
    Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);

    /*
     * Get data to write from second parameter,
     * and call the callback to write it.
     */
    unsigned cb = 0;
    uint64_t u64Data = 0;
    bool fRc = iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &u64Data, &cb);
    AssertMsg(fRc, ("Failed to get reg/imm data!\n")); NOREF(fRc);

    int rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &u64Data, cb);
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/** Wrapper for reading virtual memory. */
DECLINLINE(int) iomRamRead(PVMCPU pVCpu, void *pDest, RTGCPTR GCSrc, uint32_t cb)
{
    /* Note: This will fail in R0 or RC if it hits an access handler. That
             isn't a problem though since the operation can be restarted in REM. */
#ifdef IN_RC
    NOREF(pVCpu);
    int rc = MMGCRamReadNoTrapHandler(pDest, (void *)(uintptr_t)GCSrc, cb);
    /* Page may be protected and not directly accessible. */
    if (rc == VERR_ACCESS_DENIED)
        rc = VINF_IOM_R3_IOPORT_WRITE;
    return rc;
#else
    return PGMPhysReadGCPtr(pVCpu, pDest, GCSrc, cb, PGMACCESSORIGIN_IOM);
#endif
}


/** Wrapper for writing virtual memory. */
DECLINLINE(int) iomRamWrite(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, RTGCPTR GCPtrDst, void *pvSrc, uint32_t cb)
{
    /** @todo Need to update PGMVerifyAccess to take access handlers into account for Ring-0 and
     *        raw mode code.  Some thought needs to be spent on theoretical concurrency issues as
     *        well, since we're not behind the pgm lock and the handler may change between calls.
     *
     *        PGMPhysInterpretedWriteNoHandlers/PGMPhysWriteGCPtr may mess up
     *        the state of some shadowed structures. */
#if defined(IN_RING0) || defined(IN_RC)
    return PGMPhysInterpretedWriteNoHandlers(pVCpu, pCtxCore, GCPtrDst, pvSrc, cb, false /*fRaiseTrap*/);
#else
    NOREF(pCtxCore);
    return PGMPhysWriteGCPtr(pVCpu, GCPtrDst, pvSrc, cb, PGMACCESSORIGIN_IOM);
#endif
}


#if defined(IOM_WITH_MOVS_SUPPORT) && 0 /* locking prevents this from working. has buggy ecx handling. */
/**
 * [REP] MOVSB
 * [REP] MOVSW
 * [REP] MOVSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   fWriteAccess Whether the faulting access was a write.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   ppStat      Which sub-sample to attribute this call to.
 */
static int iomInterpretMOVS(PVM pVM, bool fWriteAccess, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu, PIOMMMIORANGE pRange,
                            PSTAMPROFILE *ppStat)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> interpret whatever. */

    PVMCPU pVCpu = VMMGetCpu(pVM);

    /*
     * Get bytes/words/dwords/qword count to copy.
     */
    uint32_t cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(pVCpu, pRegFrame)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->ecx;
        if (SELMGetCpuModeFromSelector(pVM, pRegFrame->eflags, pRegFrame->cs, &pRegFrame->csHid) == DISCPUMODE_16BIT)
            cTransfers &= 0xffff;

        if (!cTransfers)
            return VINF_SUCCESS;
    }

    /* Get the current privilege level. */
    uint32_t cpl = CPUMGetGuestCPL(pVCpu, pRegFrame);

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cMovsMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cMovsMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif

/** @todo re-evaluate on page boundaries. */

    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (fWriteAccess)
    {
        /*
         * Write operation: [Mem] -> [MMIO]
         * ds:esi (Virt Src) -> es:edi (Phys Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsToMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnWriteCallback))
            return VINF_IOM_R3_MMIO_WRITE;

        /* Convert source address ds:esi. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_DS, pRegFrame, (RTGCPTR)pRegFrame->rsi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (PRTGCPTR)&pu8Virt);
        if (RT_SUCCESS(rc))
        {

            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, (cpl == 3) ? X86_PTE_US : 0);
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data = 0;
                rc = iomRamRead(pVCpu, &u32Data, (RTGCPTR)pu8Virt, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
            /* Update ecx. */
            if (pCpu->fPrefix & DISPREFIX_REP)
                pRegFrame->ecx = cTransfers;
        }
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        /*
         * Read operation: [MMIO] -> [mem] or [MMIO] -> [MMIO]
         * ds:[eSI] (Phys Src) -> es:[eDI] (Virt Dst)
         */
        STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsFromMMIO; });

        /* Check callback. */
        if (!pRange->CTX_SUFF(pfnReadCallback))
            return VINF_IOM_R3_MMIO_READ;

        /* Convert destination address. */
        RTGCUINTPTR pu8Virt;
        rc = SELMToFlatEx(pVM, DISSELREG_ES, pRegFrame, (RTGCPTR)pRegFrame->rdi,
                          SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
                          (RTGCPTR *)&pu8Virt);
        if (RT_FAILURE(rc))
            return VINF_IOM_R3_MMIO_READ;

        /* Check if destination address is MMIO. */
        PIOMMMIORANGE pMMIODst;
        RTGCPHYS PhysDst;
        rc = PGMGstGetPage(pVCpu, (RTGCPTR)pu8Virt, NULL, &PhysDst);
        PhysDst |= (RTGCUINTPTR)pu8Virt & PAGE_OFFSET_MASK;
        if (    RT_SUCCESS(rc)
            &&  (pMMIODst = iomMmioGetRangeWithRef(pVM, PhysDst)))
        {
            /** @todo implement per-device locks for MMIO access. */
            Assert(!pMMIODst->CTX_SUFF(pDevIns)->CTX_SUFF(pCritSect));

            /*
             * Extra: [MMIO] -> [MMIO]
             */
            STAM_STATS({ *ppStat = &pVM->iom.s.StatRZInstMovsMMIO; });
            if (!pMMIODst->CTX_SUFF(pfnWriteCallback) && pMMIODst->pfnWriteCallbackR3)
            {
                iomMmioReleaseRange(pVM, pRange);
                return VINF_IOM_R3_MMIO_READ_WRITE;
            }

            /* copy loop. */
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomMMIODoWrite(pVM, pMMIODst, PhysDst, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;

                Phys           += offIncrement;
                PhysDst        += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
            iomMmioReleaseRange(pVM, pRange);
        }
        else
        {
            /*
             * Normal: [MMIO] -> [Mem]
             */
            /* Access verification first; we currently can't recover properly from traps inside this instruction */
            rc = PGMVerifyAccess(pVCpu, pu8Virt, cTransfers * cb, X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
            if (rc != VINF_SUCCESS)
            {
                Log(("MOVS will generate a trap -> recompiler, rc=%d\n", rc));
                return VINF_EM_RAW_EMULATE_INSTR;
            }

            /* copy loop. */
#ifdef IN_RC
            MMGCRamRegisterTrapHandler(pVM);
#endif
            while (cTransfers)
            {
                uint32_t u32Data;
                rc = iomMMIODoRead(pVM, pRange, Phys, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                    break;
                rc = iomRamWrite(pVCpu, pRegFrame, (RTGCPTR)pu8Virt, &u32Data, cb);
                if (rc != VINF_SUCCESS)
                {
                    Log(("iomRamWrite %08X size=%d failed with %d\n", pu8Virt, cb, rc));
                    break;
                }

                pu8Virt        += offIncrement;
                Phys           += offIncrement;
                pRegFrame->rsi += offIncrement;
                pRegFrame->rdi += offIncrement;
                cTransfers--;
            }
#ifdef IN_RC
            MMGCRamDeregisterTrapHandler(pVM);
#endif
        }

        /* Update ecx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->ecx = cTransfers;
    }

    /* work statistics. */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    NOREF(ppStat);
    return rc;
}
#endif /* IOM_WITH_MOVS_SUPPORT */


/**
 * Gets the address / opcode mask corresponding to the given CPU mode.
 *
 * @returns Mask.
 * @param   enmCpuMode  CPU mode.
 */
static uint64_t iomDisModeToMask(DISCPUMODE enmCpuMode)
{
    switch (enmCpuMode)
    {
        case DISCPUMODE_16BIT: return UINT16_MAX;
        case DISCPUMODE_32BIT: return UINT32_MAX;
        case DISCPUMODE_64BIT: return UINT64_MAX;
        default:
            AssertFailedReturn(UINT32_MAX);
    }
}
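
/*
 * Usage sketch: the mask keeps string-instruction register updates within the
 * guest's current address size, so a 16-bit REP STOSB only advances DI while
 * the upper RDI bits survive untouched.
 *
 * @code
 *      uint64_t const fAddrMask = iomDisModeToMask(DISCPUMODE_16BIT); // 0xffff
 *      pRegFrame->rdi = ((pRegFrame->rdi + 1) & fAddrMask)
 *                     | (pRegFrame->rdi & ~fAddrMask);
 * @endcode
 */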

/**
 * [REP] STOSB
 * [REP] STOSW
 * [REP] STOSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretSTOS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault,
                            PDISCPUSTATE pCpu, PIOMMMIORANGE pRange)
{
    /*
     * We do not support segment prefixes or REPNE.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get bytes/words/dwords/qwords count to copy.
     */
    uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
    RTGCUINTREG cTransfers = 1;
    if (pCpu->fPrefix & DISPREFIX_REP)
    {
#ifndef IN_RC
        if (    CPUMIsGuestIn64BitCode(pVCpu)
            &&  pRegFrame->rcx >= _4G)
            return VINF_EM_RAW_EMULATE_INSTR;
#endif

        cTransfers = pRegFrame->rcx & fAddrMask;
        if (!cTransfers)
            return VINF_SUCCESS;
    }

/** @todo r=bird: bounds checks! */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param1);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

#ifdef VBOX_WITH_STATISTICS
    if (pVM->iom.s.cStosMaxBytes < (cTransfers << SIZE_2_SHIFT(cb)))
        pVM->iom.s.cStosMaxBytes = cTransfers << SIZE_2_SHIFT(cb);
#endif


    RTGCPHYS Phys = GCPhysFault;
    int rc;
    if (   pRange->CTX_SUFF(pfnFillCallback)
        && cb <= 4 /* can only fill 32-bit values */)
    {
        /*
         * Use the fill callback.
         */
        /** @todo pfnFillCallback must return number of bytes successfully written!!! */
        if (offIncrement > 0)
        {
            /* addr++ variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), Phys,
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi + (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
        else
        {
            /* addr-- variant. */
            rc = pRange->CTX_SUFF(pfnFillCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
                                                   Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)),
                                                   pRegFrame->eax, cb, cTransfers);
            if (rc == VINF_SUCCESS)
            {
                /* Update registers. */
                pRegFrame->rdi = ((pRegFrame->rdi - (cTransfers << SIZE_2_SHIFT(cb))) & fAddrMask)
                               | (pRegFrame->rdi & ~fAddrMask);
                if (pCpu->fPrefix & DISPREFIX_REP)
                    pRegFrame->rcx &= ~fAddrMask;
            }
        }
    }
    else
    {
        /*
         * Use the write callback.
         */
        Assert(pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3);
        uint64_t u64Data = pRegFrame->rax;

        /* fill loop. */
        do
        {
            rc = iomMMIODoWrite(pVM, pVCpu, pRange, Phys, &u64Data, cb);
            if (rc != VINF_SUCCESS)
                break;

            Phys += offIncrement;
            pRegFrame->rdi = ((pRegFrame->rdi + offIncrement) & fAddrMask)
                           | (pRegFrame->rdi & ~fAddrMask);
            cTransfers--;
        } while (cTransfers);

        /* Update rcx on exit. */
        if (pCpu->fPrefix & DISPREFIX_REP)
            pRegFrame->rcx = (cTransfers & fAddrMask)
                           | (pRegFrame->rcx & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}
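
/*
 * Sketch of the decrementing fill above (made-up numbers): with DF set and EDI
 * pointing at the last dword of a four-element store, the fill callback is
 * handed the lowest address of the block so the device sees one contiguous
 * fill.
 *
 * @code
 *      RTGCPHYS    Phys       = 0x100c;    // fault address, last element
 *      RTGCUINTREG cTransfers = 4;
 *      unsigned    cb         = 4;
 *      RTGCPHYS    PhysFirst  = Phys - ((cTransfers - 1) << SIZE_2_SHIFT(cb)); // 0x1000
 * @endcode
 */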


/**
 * [REP] LODSB
 * [REP] LODSW
 * [REP] LODSD
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretLODS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * We do not support segment prefixes or REP*.
     */
    if (pCpu->fPrefix & (DISPREFIX_SEG | DISPREFIX_REP | DISPREFIX_REPNE))
        return VINF_IOM_R3_MMIO_READ_WRITE; /** @todo -> REM instead of HC */

    /*
     * Get data size.
     */
    unsigned cb = DISGetParamSize(pCpu, &pCpu->Param2);
    AssertMsg(cb > 0 && cb <= sizeof(uint64_t), ("cb=%d\n", cb));
    int offIncrement = pRegFrame->eflags.Bits.u1DF ? -(signed)cb : (signed)cb;

    /*
     * Perform read.
     */
    int rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &pRegFrame->rax, cb);
    if (rc == VINF_SUCCESS)
    {
        uint64_t const fAddrMask = iomDisModeToMask((DISCPUMODE)pCpu->uAddrMode);
        pRegFrame->rsi = ((pRegFrame->rsi + offIncrement) & fAddrMask)
                       | (pRegFrame->rsi & ~fAddrMask);
    }

    /*
     * Work statistics and return.
     */
    if (rc == VINF_SUCCESS)
        iomMMIOStatLength(pVM, cb);
    return rc;
}


/**
 * CMP [MMIO], reg|imm
 * CMP reg|imm, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretCMP(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                           PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    /*
     * Get the operands.
     */
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
        /* cmp reg, [MMIO]. */
        rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb);
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
        /* cmp [MMIO], reg|imm. */
        rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);
    else
    {
        AssertMsgFailed(("Disassembler CMP problem...\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* Emulate CMP and update guest flags. */
        uint32_t eflags = EMEmulateCmp(uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}
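
/*
 * EFLAGS merge sketch (the pattern shared by the CMP/AND/OR/XOR/TEST
 * interpreters in this file): only the six arithmetic flags are taken from
 * the emulated operation; everything else in the guest EFLAGS is preserved.
 *
 * @code
 *      uint32_t const fArith = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF
 *                            | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF;
 *      pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~fArith) | (eflags & fArith);
 * @endcode
 */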


/**
 * AND [MMIO], reg|imm
 * AND reg, [MMIO]
 * OR [MMIO], reg|imm
 * OR reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 * @param   pfnEmulate  Instruction emulation function.
 */
static int iomInterpretOrXorAnd(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                                PIOMMMIORANGE pRange, PFNEMULATEPARAM3 pfnEmulate)
{
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    bool fAndWrite;
    int rc;

#ifdef LOG_ENABLED
    const char *pszInstr;

    if (pCpu->pCurInstr->uOpcode == OP_XOR)
        pszInstr = "Xor";
    else if (pCpu->pCurInstr->uOpcode == OP_OR)
        pszInstr = "Or";
    else if (pCpu->pCurInstr->uOpcode == OP_AND)
        pszInstr = "And";
    else
        pszInstr = "OrXorAnd??";
#endif

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and reg, [MMIO]. */
        Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);
        fAndWrite = false;
        rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
        /* and [MMIO], reg|imm. */
        fAndWrite = true;
        if (    (pRange->CTX_SUFF(pfnReadCallback)  || !pRange->pfnReadCallbackR3)
            &&  (pRange->CTX_SUFF(pfnWriteCallback) || !pRange->pfnWriteCallbackR3))
            rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);
        else
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
    }
    else
    {
        AssertMsgFailed(("Disassembler AND problem...\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
        /* Emulate AND and update guest flags. */
        uint32_t eflags = pfnEmulate((uint32_t *)&uData1, uData2, cb);

        LogFlow(("iomInterpretOrXorAnd %s result %RX64\n", pszInstr, uData1));

        if (fAndWrite)
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);
        else
        {
            /* Store result to register. */
            bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData1);
            AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
        }
        if (rc == VINF_SUCCESS)
        {
            /* Update guest's eflags and finish. */
            pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                                  | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
            iomMMIOStatLength(pVM, cb);
        }
    }

    return rc;
}


/**
 * TEST [MMIO], reg|imm
 * TEST reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretTEST(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    int rc;

    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* test reg, [MMIO]. */
        rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* test [MMIO], reg|imm. */
        rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);
    }
    else
    {
        AssertMsgFailed(("Disassembler TEST problem...\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }

    if (rc == VINF_SUCCESS)
    {
#if HC_ARCH_BITS == 32
        /* Can't deal with 8 byte operands in our 32-bit emulation code. */
        if (cb > 4)
            return VINF_IOM_R3_MMIO_READ_WRITE;
#endif

        /* Emulate TEST (=AND without write back) and update guest EFLAGS. */
        uint32_t eflags = EMEmulateAnd((uint32_t *)&uData1, uData2, cb);
        pRegFrame->eflags.u32 = (pRegFrame->eflags.u32 & ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF))
                              | (eflags                &  (X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF));
        iomMMIOStatLength(pVM, cb);
    }

    return rc;
}


/**
 * BT [MMIO], reg|imm
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretBT(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                          PIOMMMIORANGE pRange)
{
    Assert(pRange->CTX_SUFF(pfnReadCallback) || !pRange->pfnReadCallbackR3);

    uint64_t uBit  = 0;
    uint64_t uData = 0;
    unsigned cbIgnored;

    if (!iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uBit, &cbIgnored))
    {
        AssertMsgFailed(("Disassembler BT problem...\n"));
        return VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    /* The size of the memory operand only matters here. */
    unsigned cbData = DISGetParamSize(pCpu, &pCpu->Param1);

    /* bt [MMIO], reg|imm. */
    int rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData, cbData);
    if (rc == VINF_SUCCESS)
    {
        /* Find the bit inside the faulting address */
        pRegFrame->eflags.Bits.u1CF = (uData >> uBit);
        iomMMIOStatLength(pVM, cbData);
    }

    return rc;
}
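
/*
 * CF extraction sketch: u1CF is a one-bit field, so the assignment above
 * implicitly keeps only bit 0 of the shifted value; spelled out:
 *
 * @code
 *      uint64_t uData = 0x04;                            // bit 2 set
 *      uint64_t uBit  = 2;
 *      unsigned fCF   = (unsigned)((uData >> uBit) & 1); // 1
 * @endcode
 */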

/**
 * XCHG [MMIO], reg
 * XCHG reg, [MMIO]
 *
 * Restricted implementation.
 *
 *
 * @returns VBox status code.
 *
 * @param   pVM         The virtual machine.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   pRegFrame   Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pCpu        Disassembler CPU state.
 * @param   pRange      Pointer to the MMIO range.
 */
static int iomInterpretXCHG(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, PDISCPUSTATE pCpu,
                            PIOMMMIORANGE pRange)
{
    /* Check for read & write handlers since IOMMMIOHandler doesn't cover this. */
    if (    (!pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3)
        ||  (!pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3))
        return VINF_IOM_R3_MMIO_READ_WRITE;

    int rc;
    unsigned cb = 0;
    uint64_t uData1 = 0;
    uint64_t uData2 = 0;
    if (iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &uData1, &cb))
    {
        /* xchg reg, [MMIO]. */
        rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);

            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param1, pRegFrame, uData2);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                Assert(rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE);
        }
        else
            Assert(rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ);
    }
    else if (iomGetRegImmData(pCpu, &pCpu->Param2, pRegFrame, &uData2, &cb))
    {
        /* xchg [MMIO], reg. */
        rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, &uData1, cb);
        if (rc == VINF_SUCCESS)
        {
            /* Store result to MMIO. */
            rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, &uData2, cb);
            if (rc == VINF_SUCCESS)
            {
                /* Store result to register. */
                bool fRc = iomSaveDataToReg(pCpu, &pCpu->Param2, pRegFrame, uData1);
                AssertMsg(fRc, ("Failed to store register value!\n")); NOREF(fRc);
            }
            else
                AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_WRITE || rc == VINF_PATM_HC_MMIO_PATCH_WRITE || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
        }
        else
            AssertMsg(rc == VINF_IOM_R3_MMIO_READ_WRITE || rc == VINF_IOM_R3_MMIO_READ || rc == VINF_PATM_HC_MMIO_PATCH_READ || rc == VINF_EM_RAW_EMULATE_IO_BLOCK, ("rc=%Rrc\n", rc));
    }
    else
    {
        AssertMsgFailed(("Disassembler XCHG problem...\n"));
        rc = VERR_IOM_MMIO_HANDLER_DISASM_ERROR;
    }
    return rc;
}


/**
 * \#PF Handler callback for MMIO ranges.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the virtual CPU structure of the caller.
 * @param   uErrorCode  CPU Error code.  This is UINT32_MAX when we don't have
 *                      any error code (the EPT misconfig hack).
 * @param   pCtxCore    Trap register frame.
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
static int iomMMIOHandler(PVM pVM, PVMCPU pVCpu, uint32_t uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault, void *pvUser)
{
    int rc = IOM_LOCK_SHARED(pVM);
#ifndef IN_RING3
    if (rc == VERR_SEM_BUSY)
        return VINF_IOM_R3_MMIO_READ_WRITE;
#endif
    AssertRC(rc);

    STAM_PROFILE_START(&pVM->iom.s.StatRZMMIOHandler, a);
    Log(("iomMMIOHandler: GCPhys=%RGp uErr=%#x rip=%RGv\n", GCPhysFault, uErrorCode, (RTGCPTR)pCtxCore->rip));

    PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
    Assert(pRange);
    Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
    iomMmioRetainRange(pRange);
#ifndef VBOX_WITH_STATISTICS
    IOM_UNLOCK_SHARED(pVM);

#else
    /*
     * Locate the statistics.
     */
    PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhysFault, pRange);
    if (!pStats)
    {
        iomMmioReleaseRange(pVM, pRange);
# ifdef IN_RING3
        return VERR_NO_MEMORY;
# else
        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        return VINF_IOM_R3_MMIO_READ_WRITE;
# endif
    }
#endif

#ifndef IN_RING3
    /*
     * Should we defer the request right away?  This isn't usually the case,
     * so do the simple test first, then try to deal with uErrorCode being N/A.
     */
    if (RT_UNLIKELY(   (   !pRange->CTX_SUFF(pfnWriteCallback)
                        || !pRange->CTX_SUFF(pfnReadCallback))
                    && (  uErrorCode == UINT32_MAX
                        ? pRange->pfnWriteCallbackR3 || pRange->pfnReadCallbackR3
                        : uErrorCode & X86_TRAP_PF_RW
                          ? !pRange->CTX_SUFF(pfnWriteCallback) && pRange->pfnWriteCallbackR3
                          : !pRange->CTX_SUFF(pfnReadCallback)  && pRange->pfnReadCallbackR3
                       )
                   )
       )
    {
        if (uErrorCode & X86_TRAP_PF_RW)
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
        else
            STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));

        STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
        iomMmioReleaseRange(pVM, pRange);
        return VINF_IOM_R3_MMIO_READ_WRITE;
    }
#endif /* !IN_RING3 */

    /*
     * Retain the range and do locking.
     */
    PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
    rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
    if (rc != VINF_SUCCESS)
    {
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }

    /*
     * Disassemble the instruction and interpret it.
     */
    PDISCPUSTATE pDis  = &pVCpu->iom.s.DisState;
    unsigned     cbOp;
    rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
    if (RT_FAILURE(rc))
    {
        PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
        iomMmioReleaseRange(pVM, pRange);
        return rc;
    }
    switch (pDis->pCurInstr->uOpcode)
    {
        case OP_MOV:
        case OP_MOVZX:
        case OP_MOVSX:
        {
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstMov, b);
            AssertMsg(uErrorCode == UINT32_MAX || DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse) == !!(uErrorCode & X86_TRAP_PF_RW), ("flags1=%#llx/%RTbool flags2=%#llx/%RTbool ErrCd=%#x\n", pDis->Param1.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse), pDis->Param2.fUse, DISUSE_IS_EFFECTIVE_ADDR(pDis->Param2.fUse), uErrorCode));
            if (uErrorCode != UINT32_MAX    /* EPT+MMIO optimization */
                ? uErrorCode & X86_TRAP_PF_RW
                : DISUSE_IS_EFFECTIVE_ADDR(pDis->Param1.fUse))
                rc = iomInterpretMOVxXWrite(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
            else
                rc = iomInterpretMOVxXRead(pVM, pVCpu, pCtxCore, pDis, pRange, GCPhysFault);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstMov, b);
            break;
        }


#ifdef IOM_WITH_MOVS_SUPPORT
        case OP_MOVSB:
        case OP_MOVSWD:
        {
            if (uErrorCode == UINT32_MAX)
                rc = VINF_IOM_R3_MMIO_READ_WRITE;
            else
            {
                STAM_PROFILE_ADV_START(&pVM->iom.s.StatRZInstMovs, c);
                PSTAMPROFILE pStat = NULL;
                rc = iomInterpretMOVS(pVM, !!(uErrorCode & X86_TRAP_PF_RW), pCtxCore, GCPhysFault, pDis, pRange, &pStat);
                STAM_PROFILE_ADV_STOP_EX(&pVM->iom.s.StatRZInstMovs, pStat, c);
            }
            break;
        }
#endif

        case OP_STOSB:
        case OP_STOSWD:
            Assert(uErrorCode & X86_TRAP_PF_RW);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstStos, d);
            rc = iomInterpretSTOS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstStos, d);
            break;

        case OP_LODSB:
        case OP_LODSWD:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstLods, e);
            rc = iomInterpretLODS(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstLods, e);
            break;

        case OP_CMP:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstCmp, f);
            rc = iomInterpretCMP(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstCmp, f);
            break;

        case OP_AND:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstAnd, g);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateAnd);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstAnd, g);
            break;

        case OP_OR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstOr, k);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateOr);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstOr, k);
            break;

        case OP_XOR:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXor, m);
            rc = iomInterpretOrXorAnd(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange, EMEmulateXor);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXor, m);
            break;

        case OP_TEST:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstTest, h);
            rc = iomInterpretTEST(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstTest, h);
            break;

        case OP_BT:
            Assert(!(uErrorCode & X86_TRAP_PF_RW) || uErrorCode == UINT32_MAX);
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstBt, l);
            rc = iomInterpretBT(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstBt, l);
            break;

        case OP_XCHG:
            STAM_PROFILE_START(&pVM->iom.s.StatRZInstXchg, i);
            rc = iomInterpretXCHG(pVM, pVCpu, pCtxCore, GCPhysFault, pDis, pRange);
            STAM_PROFILE_STOP(&pVM->iom.s.StatRZInstXchg, i);
            break;


        /*
         * The instruction isn't supported. Hand it on to ring-3.
         */
        default:
            STAM_COUNTER_INC(&pVM->iom.s.StatRZInstOther);
            rc = VINF_IOM_R3_MMIO_READ_WRITE;
            break;
    }

    /*
     * On success advance EIP.
     */
    if (rc == VINF_SUCCESS)
        pCtxCore->rip += cbOp;
    else
    {
        STAM_COUNTER_INC(&pVM->iom.s.StatRZMMIOFailures);
#if defined(VBOX_WITH_STATISTICS) && !defined(IN_RING3)
        switch (rc)
        {
            case VINF_IOM_R3_MMIO_READ:
            case VINF_IOM_R3_MMIO_READ_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
                break;
            case VINF_IOM_R3_MMIO_WRITE:
                STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
                break;
        }
#endif
    }

    STAM_PROFILE_STOP(&pVM->iom.s.StatRZMMIOHandler, a);
    PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
    iomMmioReleaseRange(pVM, pRange);
    return rc;
}

/**
 * \#PF Handler callback for MMIO ranges.
 *
 * @returns VBox status code (appropriate for GC return).
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the cross context CPU context for the
 *                      calling EMT.
 * @param   uErrorCode  CPU Error code.
 * @param   pCtxCore    Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   GCPhysFault The GC physical address corresponding to pvFault.
 * @param   pvUser      Pointer to the MMIO ring-3 range entry.
 */
DECLEXPORT(int) iomMmioPfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPTR pvFault,
                                 RTGCPHYS GCPhysFault, void *pvUser)
{
    LogFlow(("iomMmioPfHandler: GCPhys=%RGp uErr=%#x pvFault=%RGv rip=%RGv\n",
             GCPhysFault, (uint32_t)uErrorCode, pvFault, (RTGCPTR)pCtxCore->rip));
    VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pvUser);
    return VBOXSTRICTRC_VAL(rcStrict);
}
1753
1754/**
1755 * Physical access handler for MMIO ranges.
1756 *
1757 * @returns VBox status code (appropriate for GC return).
1758 * @param pVM Pointer to the VM.
1759 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1760 * @param uErrorCode CPU Error code.
1761 * @param pCtxCore Trap register frame.
1762 * @param GCPhysFault The GC physical address.
1763 */
1764VMMDECL(VBOXSTRICTRC) IOMMMIOPhysHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pCtxCore, RTGCPHYS GCPhysFault)
1765{
1766 /*
1767 * We don't have a range here, so look it up before calling the common function.
1768 */
1769 int rc2 = IOM_LOCK_SHARED(pVM); NOREF(rc2);
1770#ifndef IN_RING3
1771 if (rc2 == VERR_SEM_BUSY)
1772 return VINF_IOM_R3_MMIO_READ_WRITE;
1773#endif
1774 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhysFault);
1775 if (RT_UNLIKELY(!pRange))
1776 {
1777 IOM_UNLOCK_SHARED(pVM);
1778 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1779 }
1780 iomMmioRetainRange(pRange);
1781 IOM_UNLOCK_SHARED(pVM);
1782
1783 VBOXSTRICTRC rcStrict = iomMMIOHandler(pVM, pVCpu, (uint32_t)uErrorCode, pCtxCore, GCPhysFault, pRange);
1784
1785 iomMmioReleaseRange(pVM, pRange);
1786 return VBOXSTRICTRC_VAL(rcStrict);
1787}
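
/*
 * The lookup/retain/release idiom used above recurs throughout this file; a
 * minimal sketch of it (editor's illustration, mirroring the body above):
 *
 *     int rc2 = IOM_LOCK_SHARED(pVM);                  // shared lock just for the lookup
 *     PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
 *     if (pRange)
 *         iomMmioRetainRange(pRange);                  // the reference keeps the range alive...
 *     IOM_UNLOCK_SHARED(pVM);                          // ...after the lock is dropped
 *     if (pRange)
 *     {
 *         ... access the range ...
 *         iomMmioReleaseRange(pVM, pRange);            // drop the reference when done
 *     }
 */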
1788
1789
1790/**
1791 * Physical access handler callback for MMIO ranges.
1792 *
1793 * @returns VINF_SUCCESS if the handler has carried out the operation.
1794 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
1795 * @param pVM Pointer to the VM.
1796 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1797 * @param GCPhysFault The physical address the guest is reading from or writing to.
1798 * @param pvPhys The HC mapping of that address.
1799 * @param pvBuf What the guest is reading/writing.
1800 * @param cbBuf How much it's reading/writing.
1801 * @param enmAccessType The access type.
1802 * @param enmOrigin Who is making the access.
1803 * @param pvUser Pointer to the MMIO range entry.
1804 */
1805PGM_ALL_CB2_DECL(int) iomMmioHandler(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhysFault, void *pvPhys, void *pvBuf, size_t cbBuf,
1806 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
1807{
1808 PIOMMMIORANGE pRange = (PIOMMMIORANGE)pvUser;
1809 STAM_COUNTER_INC(&pVM->iom.s.StatR3MMIOHandler);
1810
1811 AssertMsg(cbBuf >= 1 && cbBuf <= 16, ("%zu\n", cbBuf));
1812 AssertPtr(pRange);
1813 NOREF(pvPhys); NOREF(enmOrigin);
1814
1815 /*
1816 * Validate the range.
1817 */
1818 int rc = IOM_LOCK_SHARED(pVM);
1819 AssertRC(rc);
1820 Assert(pRange == iomMmioGetRange(pVM, pVCpu, GCPhysFault));
1821
1822 /*
1823 * Perform locking.
1824 */
1825 iomMmioRetainRange(pRange);
1826 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1827 IOM_UNLOCK_SHARED(pVM);
1828 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
1829 if (rc != VINF_SUCCESS)
1830 {
1831 iomMmioReleaseRange(pVM, pRange);
1832 return rc;
1833 }
1834
1835 /*
1836 * Perform the access.
1837 */
1838 if (enmAccessType == PGMACCESSTYPE_READ)
1839 rc = iomMMIODoRead(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1840 else
1841 rc = iomMMIODoWrite(pVM, pVCpu, pRange, GCPhysFault, pvBuf, (unsigned)cbBuf);
1842
1843 AssertRC(rc);
1844 iomMmioReleaseRange(pVM, pRange);
1845 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1846 return rc;
1847}
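
/*
 * Editor's note on the locking above: the second PDMCritSectEnter argument is
 * the status to return when the device critical section is contended and the
 * current context cannot block (ring-0/raw-mode).  Ring-3 always blocks and
 * gets VINF_SUCCESS, so the early return only triggers where a retry in
 * ring-3 is possible:
 *
 *     rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ_WRITE);
 *     if (rc != VINF_SUCCESS)   // busy in R0/RC -> VINF_IOM_R3_MMIO_READ_WRITE
 *         return rc;            // the caller redoes the access in ring-3
 */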
1848
1849
1850/**
1851 * Reads a MMIO register.
1852 *
1853 * @returns VBox status code.
1854 *
1855 * @param pVM Pointer to the VM.
1856 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1857 * @param GCPhys The physical address to read.
1858 * @param pu32Value Where to store the value read.
1859 * @param cbValue The size of the register to read in bytes. 1, 2 or 4 bytes.
1860 */
1861VMMDECL(VBOXSTRICTRC) IOMMMIORead(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t *pu32Value, size_t cbValue)
1862{
1863 /* Take the IOM lock before performing any MMIO. */
1864 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
1865#ifndef IN_RING3
1866 if (rc == VERR_SEM_BUSY)
1867 return VINF_IOM_R3_MMIO_READ;
1868#endif
1869 AssertRC(VBOXSTRICTRC_VAL(rc));
1870#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
1871 IEMNotifyMMIORead(pVM, GCPhys, cbValue);
1872#endif
1873
1874 /*
1875 * Lookup the current context range node and statistics.
1876 */
1877 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
1878 if (!pRange)
1879 {
1880 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
1881 IOM_UNLOCK_SHARED(pVM);
1882 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
1883 }
1884 iomMmioRetainRange(pRange);
1885#ifndef VBOX_WITH_STATISTICS
1886 IOM_UNLOCK_SHARED(pVM);
1887
1888#else /* VBOX_WITH_STATISTICS */
1889 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
1890 if (!pStats)
1891 {
1892 iomMmioReleaseRange(pVM, pRange);
1893# ifdef IN_RING3
1894 return VERR_NO_MEMORY;
1895# else
1896 return VINF_IOM_R3_MMIO_READ;
1897# endif
1898 }
1899 STAM_COUNTER_INC(&pStats->Accesses);
1900#endif /* VBOX_WITH_STATISTICS */
1901
1902 if (pRange->CTX_SUFF(pfnReadCallback))
1903 {
1904 /*
1905 * Perform locking.
1906 */
1907 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
1908 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_READ);
1909 if (rc != VINF_SUCCESS)
1910 {
1911 iomMmioReleaseRange(pVM, pRange);
1912 return rc;
1913 }
1914
1915 /*
1916 * Perform the read and deal with the result.
1917 */
1918 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a);
1919 if ( (cbValue == 4 && !(GCPhys & 3))
1920 || (pRange->fFlags & IOMMMIO_FLAGS_READ_MODE) == IOMMMIO_FLAGS_READ_PASSTHRU
1921 || (cbValue == 8 && !(GCPhys & 7)) )
1922 rc = pRange->CTX_SUFF(pfnReadCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser), GCPhys,
1923 pu32Value, (unsigned)cbValue);
1924 else
1925 rc = iomMMIODoComplicatedRead(pVM, pRange, GCPhys, pu32Value, (unsigned)cbValue);
1926 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1927 switch (VBOXSTRICTRC_VAL(rc))
1928 {
1929 case VINF_SUCCESS:
1930 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1931 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1932 iomMmioReleaseRange(pVM, pRange);
1933 return rc;
1934#ifndef IN_RING3
1935 case VINF_IOM_R3_MMIO_READ:
1936 case VINF_IOM_R3_MMIO_READ_WRITE:
1937 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1938#endif
1939 default:
1940 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1941 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1942 iomMmioReleaseRange(pVM, pRange);
1943 return rc;
1944
1945 case VINF_IOM_MMIO_UNUSED_00:
1946 iomMMIODoRead00s(pu32Value, cbValue);
1947 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1948 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1949 iomMmioReleaseRange(pVM, pRange);
1950 return VINF_SUCCESS;
1951
1952 case VINF_IOM_MMIO_UNUSED_FF:
1953 iomMMIODoReadFFs(pu32Value, cbValue);
1954 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, *pu32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
1955 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
1956 iomMmioReleaseRange(pVM, pRange);
1957 return VINF_SUCCESS;
1958 }
1959 /* not reached */
1960 }
1961#ifndef IN_RING3
1962 if (pRange->pfnReadCallbackR3)
1963 {
1964 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Read,ToR3));
1965 iomMmioReleaseRange(pVM, pRange);
1966 return VINF_IOM_R3_MMIO_READ;
1967 }
1968#endif
1969
1970 /*
1971 * Unassigned memory - this is actually not supposed to happen...
1972 */
1973 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfRead), a); /** @todo STAM_PROFILE_ADD_ZERO_PERIOD */
1974 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfRead), a);
1975 iomMMIODoReadFFs(pu32Value, cbValue);
1976 Log4(("IOMMMIORead: GCPhys=%RGp *pu32=%08RX32 cb=%d rc=VINF_SUCCESS\n", GCPhys, *pu32Value, cbValue));
1977 iomMmioReleaseRange(pVM, pRange);
1978 return VINF_SUCCESS;
1979}
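
/*
 * Usage sketch for IOMMMIORead (editor's illustration; GCPhysReg is a
 * hypothetical register address):
 *
 *     uint32_t u32Value = 0;
 *     VBOXSTRICTRC rcStrict = IOMMMIORead(pVM, pVCpu, GCPhysReg, &u32Value, sizeof(u32Value));
 *     if (rcStrict == VINF_SUCCESS)
 *         Log(("MMIO reg %RGp -> %#RX32\n", GCPhysReg, u32Value));
 *     else
 *         ... VINF_IOM_R3_MMIO_READ in R0/RC, or an EM status to pass on ...
 */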
1980
1981
1982/**
1983 * Writes to a MMIO register.
1984 *
1985 * @returns VBox status code.
1986 *
1987 * @param pVM Pointer to the VM.
1988 * @param pVCpu Pointer to the virtual CPU structure of the caller.
1989 * @param GCPhys The physical address to write to.
1990 * @param u32Value The value to write.
1991 * @param cbValue The size of the register to write in bytes. 1, 2 or 4 bytes.
1992 */
1993VMMDECL(VBOXSTRICTRC) IOMMMIOWrite(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
1994{
1995 /* Take the IOM lock before performing any MMIO. */
1996 VBOXSTRICTRC rc = IOM_LOCK_SHARED(pVM);
1997#ifndef IN_RING3
1998 if (rc == VERR_SEM_BUSY)
1999 return VINF_IOM_R3_MMIO_WRITE;
2000#endif
2001 AssertRC(VBOXSTRICTRC_VAL(rc));
2002#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
2003 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);
2004#endif
2005
2006 /*
2007 * Lookup the current context range node.
2008 */
2009 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2010 if (!pRange)
2011 {
2012 AssertMsgFailed(("Handlers and page tables are out of sync or something! GCPhys=%RGp cbValue=%d\n", GCPhys, cbValue));
2013 IOM_UNLOCK_SHARED(pVM);
2014 return VERR_IOM_MMIO_RANGE_NOT_FOUND;
2015 }
2016 iomMmioRetainRange(pRange);
2017#ifndef VBOX_WITH_STATISTICS
2018 IOM_UNLOCK_SHARED(pVM);
2019
2020#else /* VBOX_WITH_STATISTICS */
2021 PIOMMMIOSTATS pStats = iomMmioGetStats(pVM, pVCpu, GCPhys, pRange);
2022 if (!pStats)
2023 {
2024 iomMmioReleaseRange(pVM, pRange);
2025# ifdef IN_RING3
2026 return VERR_NO_MEMORY;
2027# else
2028 return VINF_IOM_R3_MMIO_WRITE;
2029# endif
2030 }
2031 STAM_COUNTER_INC(&pStats->Accesses);
2032#endif /* VBOX_WITH_STATISTICS */
2033
2034 if (pRange->CTX_SUFF(pfnWriteCallback))
2035 {
2036 /*
2037 * Perform locking.
2038 */
2039 PPDMDEVINS pDevIns = pRange->CTX_SUFF(pDevIns);
2040 rc = PDMCritSectEnter(pDevIns->CTX_SUFF(pCritSectRo), VINF_IOM_R3_MMIO_WRITE);
2041 if (rc != VINF_SUCCESS)
2042 {
2043 iomMmioReleaseRange(pVM, pRange);
2044 return rc;
2045 }
2046
2047 /*
2048 * Perform the write.
2049 */
2050 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2051 if ( (cbValue == 4 && !(GCPhys & 3))
2052 || (pRange->fFlags & IOMMMIO_FLAGS_WRITE_MODE) == IOMMMIO_FLAGS_WRITE_PASSTHRU
2053 || (cbValue == 8 && !(GCPhys & 7)) )
2054 rc = pRange->CTX_SUFF(pfnWriteCallback)(pRange->CTX_SUFF(pDevIns), pRange->CTX_SUFF(pvUser),
2055 GCPhys, &u32Value, (unsigned)cbValue);
2056 else
2057 rc = iomMMIODoComplicatedWrite(pVM, pRange, GCPhys, &u32Value, (unsigned)cbValue);
2058 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2059#ifndef IN_RING3
2060 if ( rc == VINF_IOM_R3_MMIO_WRITE
2061 || rc == VINF_IOM_R3_MMIO_READ_WRITE)
2062 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2063#endif
2064 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VBOXSTRICTRC_VAL(rc)));
2065 iomMmioReleaseRange(pVM, pRange);
2066 PDMCritSectLeave(pDevIns->CTX_SUFF(pCritSectRo));
2067 return rc;
2068 }
2069#ifndef IN_RING3
2070 if (pRange->pfnWriteCallbackR3)
2071 {
2072 STAM_COUNTER_INC(&pStats->CTX_MID_Z(Write,ToR3));
2073 iomMmioReleaseRange(pVM, pRange);
2074 return VINF_IOM_R3_MMIO_WRITE;
2075 }
2076#endif
2077
2078 /*
2079 * No write handler, nothing to do.
2080 */
2081 STAM_PROFILE_START(&pStats->CTX_SUFF_Z(ProfWrite), a);
2082 STAM_PROFILE_STOP(&pStats->CTX_SUFF_Z(ProfWrite), a);
2083 Log4(("IOMMMIOWrite: GCPhys=%RGp u32=%08RX32 cb=%d rc=%Rrc\n", GCPhys, u32Value, cbValue, VINF_SUCCESS));
2084 iomMmioReleaseRange(pVM, pRange);
2085 return VINF_SUCCESS;
2086}
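
/*
 * Usage sketch for IOMMMIOWrite (editor's illustration; GCPhysReg is a
 * hypothetical register address):
 *
 *     VBOXSTRICTRC rcStrict = IOMMMIOWrite(pVM, pVCpu, GCPhysReg, UINT32_C(0xdeadbeef), sizeof(uint32_t));
 *     if (rcStrict != VINF_SUCCESS)
 *         ... VINF_IOM_R3_MMIO_WRITE in R0/RC, or an EM status to pass on ...
 */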
2087
2088
2089/**
2090 * [REP*] INSB/INSW/INSD
2091 * ES:EDI,DX[,ECX]
2092 *
2093 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2094 *
2095 * @returns Strict VBox status code. Informational status codes other than the one documented
2096 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2097 * @retval VINF_SUCCESS Success.
2098 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2099 * status code must be passed on to EM.
2100 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2101 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2102 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2103 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2104 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2105 *
2106 * @param pVM The virtual machine.
2107 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2108 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2109 * @param uPort IO Port
2110 * @param uPrefix IO instruction prefix
2111 * @param enmAddrMode The address mode.
2112 * @param cbTransfer Size of transfer unit
2113 */
2114VMMDECL(VBOXSTRICTRC) IOMInterpretINSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2115 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2116{
2117 STAM_COUNTER_INC(&pVM->iom.s.StatInstIns);
2118
2119 /*
2120 * We do not support REPNE or a decrementing destination
2121 * pointer (DF set). Segment prefixes are deliberately ignored, as per the instruction specification.
2122 */
2123 if ( (uPrefix & DISPREFIX_REPNE)
2124 || pRegFrame->eflags.Bits.u1DF)
2125 return VINF_EM_RAW_EMULATE_INSTR;
2126
2127 /*
2128 * Get bytes/words/dwords count to transfer.
2129 */
2130 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2131 RTGCUINTREG cTransfers = 1;
2132 if (uPrefix & DISPREFIX_REP)
2133 {
2134#ifndef IN_RC
2135 if ( CPUMIsGuestIn64BitCode(pVCpu)
2136 && pRegFrame->rcx >= _4G)
2137 return VINF_EM_RAW_EMULATE_INSTR;
2138#endif
2139 cTransfers = pRegFrame->rcx & fAddrMask;
2140 if (!cTransfers)
2141 return VINF_SUCCESS;
2142 }
2143
2144 /* Convert destination address es:edi. */
2145 RTGCPTR GCPtrDst;
2146 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_ES, pRegFrame, pRegFrame->rdi & fAddrMask,
2147 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2148 &GCPtrDst);
2149 if (RT_FAILURE(rc2))
2150 {
2151 Log(("INS destination address conversion failed -> fallback, rc2=%d\n", rc2));
2152 return VINF_EM_RAW_EMULATE_INSTR;
2153 }
2154
2155 /* Access verification first; we can't recover from traps inside this instruction, as the port read cannot be repeated. */
2156 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2157 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrDst, cTransfers * cbTransfer,
2158 X86_PTE_RW | ((cpl == 3) ? X86_PTE_US : 0));
2159 if (rc2 != VINF_SUCCESS)
2160 {
2161 Log(("INS will generate a trap -> fallback, rc2=%d\n", rc2));
2162 return VINF_EM_RAW_EMULATE_INSTR;
2163 }
2164
2165 Log(("IOM: rep ins%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2166 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2167 if (cTransfers > 1)
2168 {
2169 /* If the device supports string transfers, ask it to do as
2170 * much as it wants. The rest is done with single-word transfers. */
2171 const RTGCUINTREG cTransfersOrg = cTransfers;
2172 rcStrict = IOMIOPortReadString(pVM, pVCpu, uPort, &GCPtrDst, &cTransfers, cbTransfer);
2173 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2174 pRegFrame->rdi = ((pRegFrame->rdi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2175 | (pRegFrame->rdi & ~fAddrMask);
2176 }
2177
2178#ifdef IN_RC
2179 MMGCRamRegisterTrapHandler(pVM);
2180#endif
2181 while (cTransfers && rcStrict == VINF_SUCCESS)
2182 {
2183 uint32_t u32Value;
2184 rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &u32Value, cbTransfer);
2185 if (!IOM_SUCCESS(rcStrict))
2186 break;
2187 rc2 = iomRamWrite(pVCpu, pRegFrame, GCPtrDst, &u32Value, cbTransfer);
2188 Assert(rc2 == VINF_SUCCESS); NOREF(rc2);
2189 GCPtrDst = (RTGCPTR)((RTGCUINTPTR)GCPtrDst + cbTransfer);
2190 pRegFrame->rdi = ((pRegFrame->rdi + cbTransfer) & fAddrMask)
2191 | (pRegFrame->rdi & ~fAddrMask);
2192 cTransfers--;
2193 }
2194#ifdef IN_RC
2195 MMGCRamDeregisterTrapHandler(pVM);
2196#endif
2197
2198 /* Update rcx on exit. */
2199 if (uPrefix & DISPREFIX_REP)
2200 pRegFrame->rcx = (cTransfers & fAddrMask)
2201 | (pRegFrame->rcx & ~fAddrMask);
2202
2203 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_READ || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2204 return rcStrict;
2205}
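
/*
 * Worked example of the fAddrMask arithmetic above (editor's note): with a
 * 16-bit address size the mask is 0xffff, so only the low word of RDI steps
 * and wraps while the upper bits are preserved:
 *
 *     rdi (before)           = 0x000000012345FFFF    (cbTransfer = 1)
 *     (rdi + 1) & fAddrMask  = 0x0000                (DI wraps to zero)
 *     rdi & ~fAddrMask       = 0x0000000123450000    (upper bits kept)
 *     rdi (after)            = 0x0000000123450000
 */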
2206
2207
2208#if !defined(VBOX_WITH_FIRST_IEM_STEP) || defined(IN_RC) /* Latter for IOMRCIOPortHandler */
2209/**
2210 * [REP*] INSB/INSW/INSD
2211 * ES:EDI,DX[,ECX]
2212 *
2213 * @returns Strict VBox status code. Informational status codes other than the one documented
2214 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2215 * @retval VINF_SUCCESS Success.
2216 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2217 * status code must be passed on to EM.
2218 * @retval VINF_IOM_R3_IOPORT_READ Defer the read to ring-3. (R0/GC only)
2219 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the read to the REM.
2220 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2221 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2222 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2223 *
2224 * @param pVM The virtual machine.
2225 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2226 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2227 * @param pCpu Disassembler CPU state.
2228 */
2229VMMDECL(VBOXSTRICTRC) IOMInterpretINS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2230{
2231 /*
2232 * Get port number directly from the register (no need to bother the
2233 * disassembler). And get the I/O register size from the opcode / prefix.
2234 */
2235 RTIOPORT Port = pRegFrame->edx & 0xffff;
2236 unsigned cb = 0;
2237 if (pCpu->pCurInstr->uOpcode == OP_INSB)
2238 cb = 1;
2239 else
2240 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
2241
2242 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2243 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2244 {
2245 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2246 return rcStrict;
2247 }
2248
2249 return IOMInterpretINSEx(pVM, pVCpu, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2250}
2251#endif /* !IEM || RC */
2252
2253
2254/**
2255 * [REP*] OUTSB/OUTSW/OUTSD
2256 * DS:ESI,DX[,ECX]
2257 *
2258 * @remark Assumes caller checked the access privileges (IOMInterpretCheckPortIOAccess)
2259 *
2260 * @returns Strict VBox status code. Informational status codes other than the one documented
2261 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2262 * @retval VINF_SUCCESS Success.
2263 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2264 * status code must be passed on to EM.
2265 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2266 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2267 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2268 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2269 *
2270 * @param pVM The virtual machine.
2271 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2272 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2273 * @param uPort IO Port
2274 * @param uPrefix IO instruction prefix
2275 * @param enmAddrMode The address mode.
2276 * @param cbTransfer Size of transfer unit
2277 */
2278VMMDECL(VBOXSTRICTRC) IOMInterpretOUTSEx(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uPort, uint32_t uPrefix,
2279 DISCPUMODE enmAddrMode, uint32_t cbTransfer)
2280{
2281 STAM_COUNTER_INC(&pVM->iom.s.StatInstOuts);
2282
2283 /*
2284 * We do not support segment prefixes, REPNE or a
2285 * decrementing source pointer (DF set).
2286 */
2287 if ( (uPrefix & (DISPREFIX_SEG | DISPREFIX_REPNE))
2288 || pRegFrame->eflags.Bits.u1DF)
2289 return VINF_EM_RAW_EMULATE_INSTR;
2290
2291 /*
2292 * Get bytes/words/dwords count to transfer.
2293 */
2294 uint64_t const fAddrMask = iomDisModeToMask(enmAddrMode);
2295 RTGCUINTREG cTransfers = 1;
2296 if (uPrefix & DISPREFIX_REP)
2297 {
2298#ifndef IN_RC
2299 if ( CPUMIsGuestIn64BitCode(pVCpu)
2300 && pRegFrame->rcx >= _4G)
2301 return VINF_EM_RAW_EMULATE_INSTR;
2302#endif
2303 cTransfers = pRegFrame->rcx & fAddrMask;
2304 if (!cTransfers)
2305 return VINF_SUCCESS;
2306 }
2307
2308 /* Convert source address ds:esi. */
2309 RTGCPTR GCPtrSrc;
2310 int rc2 = SELMToFlatEx(pVCpu, DISSELREG_DS, pRegFrame, pRegFrame->rsi & fAddrMask,
2311 SELMTOFLAT_FLAGS_HYPER | SELMTOFLAT_FLAGS_NO_PL,
2312 &GCPtrSrc);
2313 if (RT_FAILURE(rc2))
2314 {
2315 Log(("OUTS source address conversion failed -> fallback, rc2=%Rrc\n", rc2));
2316 return VINF_EM_RAW_EMULATE_INSTR;
2317 }
2318
2319 /* Access verification first; we currently can't recover properly from traps inside this instruction */
2320 uint32_t const cpl = CPUMGetGuestCPL(pVCpu);
2321 rc2 = PGMVerifyAccess(pVCpu, (RTGCUINTPTR)GCPtrSrc, cTransfers * cbTransfer,
2322 (cpl == 3) ? X86_PTE_US : 0);
2323 if (rc2 != VINF_SUCCESS)
2324 {
2325 Log(("OUTS will generate a trap -> fallback, rc2=%Rrc\n", rc2));
2326 return VINF_EM_RAW_EMULATE_INSTR;
2327 }
2328
2329 Log(("IOM: rep outs%d port %#x count %d\n", cbTransfer * 8, uPort, cTransfers));
2330 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2331 if (cTransfers > 1)
2332 {
2333 /*
2334 * If the device supports string transfers, ask it to do as
2335 * much as it wants. The rest is done with single-word transfers.
2336 */
2337 const RTGCUINTREG cTransfersOrg = cTransfers;
2338 rcStrict = IOMIOPortWriteString(pVM, pVCpu, uPort, &GCPtrSrc, &cTransfers, cbTransfer);
2339 AssertRC(VBOXSTRICTRC_VAL(rcStrict)); Assert(cTransfers <= cTransfersOrg);
2340 pRegFrame->rsi = ((pRegFrame->rsi + (cTransfersOrg - cTransfers) * cbTransfer) & fAddrMask)
2341 | (pRegFrame->rsi & ~fAddrMask);
2342 }
2343
2344#ifdef IN_RC
2345 MMGCRamRegisterTrapHandler(pVM);
2346#endif
2347
2348 while (cTransfers && rcStrict == VINF_SUCCESS)
2349 {
2350 uint32_t u32Value = 0;
2351 rcStrict = iomRamRead(pVCpu, &u32Value, GCPtrSrc, cbTransfer);
2352 if (rcStrict != VINF_SUCCESS)
2353 break;
2354 rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, u32Value, cbTransfer);
2355 if (!IOM_SUCCESS(rcStrict))
2356 break;
2357 GCPtrSrc = (RTGCPTR)((RTGCUINTPTR)GCPtrSrc + cbTransfer);
2358 pRegFrame->rsi = ((pRegFrame->rsi + cbTransfer) & fAddrMask)
2359 | (pRegFrame->rsi & ~fAddrMask);
2360 cTransfers--;
2361 }
2362
2363#ifdef IN_RC
2364 MMGCRamDeregisterTrapHandler(pVM);
2365#endif
2366
2367 /* Update rcx on exit. */
2368 if (uPrefix & DISPREFIX_REP)
2369 pRegFrame->rcx = (cTransfers & fAddrMask)
2370 | (pRegFrame->rcx & ~fAddrMask);
2371
2372 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IOM_R3_IOPORT_WRITE || (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST) || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2373 return rcStrict;
2374}
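
/*
 * Editor's note on the rcx update above: when a REP transfer is interrupted,
 * the remaining count is written back so the instruction can simply be
 * restarted.  E.g. a REP OUTS of 10 units deferred to ring-3 after 4 leaves
 * cTransfers = 6, and:
 *
 *     pRegFrame->rcx = (6 & fAddrMask) | (pRegFrame->rcx & ~fAddrMask);
 */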
2375
2376
2377#if !defined(VBOX_WITH_FIRST_IEM_STEP) || defined(IN_RC) /* Latter for IOMRCIOPortHandler */
2378/**
2379 * [REP*] OUTSB/OUTSW/OUTSD
2380 * DS:ESI,DX[,ECX]
2381 *
2382 * @returns Strict VBox status code. Informational status codes other than the one documented
2383 * here are to be treated as internal failure. Use IOM_SUCCESS() to check for success.
2384 * @retval VINF_SUCCESS Success.
2385 * @retval VINF_EM_FIRST-VINF_EM_LAST Success with some exceptions (see IOM_SUCCESS()), the
2386 * status code must be passed on to EM.
2387 * @retval VINF_IOM_R3_IOPORT_WRITE Defer the write to ring-3. (R0/GC only)
2388 * @retval VINF_EM_RAW_EMULATE_INSTR Defer the write to the REM.
2389 * @retval VINF_EM_RAW_GUEST_TRAP The exception was left pending. (TRPMRaiseXcptErr)
2390 * @retval VINF_TRPM_XCPT_DISPATCHED The exception was raised and dispatched for raw-mode execution. (TRPMRaiseXcptErr)
2391 * @retval VINF_EM_RESCHEDULE_REM The exception was dispatched and cannot be executed in raw-mode. (TRPMRaiseXcptErr)
2392 *
2393 * @param pVM The virtual machine.
2394 * @param pVCpu Pointer to the virtual CPU structure of the caller.
2395 * @param pRegFrame Pointer to CPUMCTXCORE guest registers structure.
2396 * @param pCpu Disassembler CPU state.
2397 */
2398VMMDECL(VBOXSTRICTRC) IOMInterpretOUTS(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pCpu)
2399{
2400 /*
2401 * Get port number from the first parameter.
2402 * And get the I/O register size from the opcode / prefix.
2403 */
2404 uint64_t Port = 0;
2405 unsigned cb = 0;
2406 bool fRc = iomGetRegImmData(pCpu, &pCpu->Param1, pRegFrame, &Port, &cb);
2407 AssertMsg(fRc, ("Failed to get reg/imm port number!\n")); NOREF(fRc);
2408 if (pCpu->pCurInstr->uOpcode == OP_OUTSB)
2409 cb = 1;
2410 else
2411 cb = (pCpu->uOpMode == DISCPUMODE_16BIT) ? 2 : 4; /* dword in both 32 & 64 bits mode */
2412
2413 VBOXSTRICTRC rcStrict = IOMInterpretCheckPortIOAccess(pVM, pRegFrame, Port, cb);
2414 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
2415 {
2416 AssertMsg(rcStrict == VINF_EM_RAW_GUEST_TRAP || rcStrict == VINF_TRPM_XCPT_DISPATCHED || RT_FAILURE(rcStrict), ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2417 return rcStrict;
2418 }
2419
2420 return IOMInterpretOUTSEx(pVM, pVCpu, pRegFrame, Port, pCpu->fPrefix, (DISCPUMODE)pCpu->uAddrMode, cb);
2421}
2422#endif /* !IEM || RC */
2423
2424#ifndef IN_RC
2425
2426/**
2427 * Mapping an MMIO2 page in place of an MMIO page for direct access.
2428 *
2429 * (This is a special optimization used by the VGA device.)
2430 *
2431 * @returns VBox status code. This API may return VINF_SUCCESS even if no
2432 * remapping is made.
2433 *
2434 * @param pVM The virtual machine.
2435 * @param GCPhys The address of the MMIO page to be changed.
2436 * @param GCPhysRemapped The address of the MMIO2 page.
2437 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2438 * for the time being.
2439 */
2440VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags)
2441{
2442# ifndef IEM_VERIFICATION_MODE_FULL
2443 /* Currently only called from the VGA device during MMIO. */
2444 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags));
2445 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2446 PVMCPU pVCpu = VMMGetCpu(pVM);
2447
2448 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2449 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2450 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2451 && !HMIsNestedPagingActive(pVM)))
2452 return VINF_SUCCESS; /* ignore */
2453
2454 int rc = IOM_LOCK_SHARED(pVM);
2455 if (RT_FAILURE(rc))
2456 return VINF_SUCCESS; /* better luck the next time around */
2457
2458 /*
2459 * Lookup the context range node the page belongs to.
2460 */
2461 PIOMMMIORANGE pRange = iomMmioGetRange(pVM, pVCpu, GCPhys);
2462 AssertMsgReturn(pRange,
2463 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2464
2465 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2466 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2467
2468 /*
2469 * Do the aliasing; page align the addresses since PGM is picky.
2470 */
2471 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2472 GCPhysRemapped &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2473
2474 rc = PGMHandlerPhysicalPageAlias(pVM, pRange->GCPhys, GCPhys, GCPhysRemapped);
2475
2476 IOM_UNLOCK_SHARED(pVM);
2477 AssertRCReturn(rc, rc);
2478
2479 /*
2480 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2481 * can simply prefetch it.
2482 *
2483 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2484 */
2485# if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */
2486# ifdef VBOX_STRICT
2487 uint64_t fFlags;
2488 RTHCPHYS HCPhys;
2489 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2490 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2491# endif
2492# endif
2493 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2494 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2495# endif /* !IEM_VERIFICATION_MODE_FULL */
2496 return VINF_SUCCESS;
2497}
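
/*
 * Usage sketch (editor's illustration of the VGA-style remapping; both
 * addresses are hypothetical):
 *
 *     rc = IOMMMIOMapMMIO2Page(pVM, GCPhysVRamMmioPage, GCPhysMmio2Backing,
 *                              X86_PTE_RW | X86_PTE_P);
 *     // On success, guest accesses to that page hit the MMIO2 backing
 *     // directly instead of faulting into the MMIO handler.
 */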
2498
2499
2500# ifndef IEM_VERIFICATION_MODE_FULL
2501/**
2502 * Mapping a HC page in place of an MMIO page for direct access.
2503 *
2504 * (This is a special optimization used by the APIC in the VT-x case.)
2505 *
2506 * @returns VBox status code.
2507 *
2508 * @param pVM Pointer to the VM.
2509 * @param pVCpu Pointer to the VMCPU.
2510 * @param GCPhys The address of the MMIO page to be changed.
2511 * @param HCPhys The address of the host physical page.
2512 * @param fPageFlags Page flags to set. Must be (X86_PTE_RW | X86_PTE_P)
2513 * for the time being.
2514 */
2515VMMDECL(int) IOMMMIOMapMMIOHCPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, RTHCPHYS HCPhys, uint64_t fPageFlags)
2516{
2517 /* Currently only called from VT-x code during a page fault. */
2518 Log(("IOMMMIOMapMMIOHCPage %RGp -> %RGp flags=%RX64\n", GCPhys, HCPhys, fPageFlags));
2519
2520 AssertReturn(fPageFlags == (X86_PTE_RW | X86_PTE_P), VERR_INVALID_PARAMETER);
2521 Assert(HMIsEnabled(pVM));
2522
2523 /*
2524 * Lookup the context range node the page belongs to.
2525 */
2526#ifdef VBOX_STRICT
2527 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2528 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2529 AssertMsgReturn(pRange,
2530 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2531 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2532 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2533#endif
2534
2535 /*
2536 * Do the aliasing; page align the addresses since PGM is picky.
2537 */
2538 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
2539 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;
2540
2541 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);
2542 AssertRCReturn(rc, rc);
2543
2544 /*
2545 * Modify the shadow page table. Since it's an MMIO page it won't be present and we
2546 * can simply prefetch it.
2547 *
2548 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.
2549 */
2550 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);
2551 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2552 return VINF_SUCCESS;
2553}
2554#endif /* !IEM_VERIFICATION_MODE_FULL */
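
/*
 * Usage sketch (editor's illustration; VT-x APIC-access optimization with
 * hypothetical addresses):
 *
 *     rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, HCPhysApicPage,
 *                               X86_PTE_RW | X86_PTE_P);
 */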
2555
2556
2557/**
2558 * Reset a previously modified MMIO region; restore the access flags.
2559 *
2560 * @returns VBox status code.
2561 *
2562 * @param pVM The virtual machine.
2563 * @param GCPhys Physical address that's part of the MMIO region to be reset.
2564 */
2565VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)
2566{
2567 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));
2568
2569 PVMCPU pVCpu = VMMGetCpu(pVM);
2570
2571 /* This currently only works in real mode, protected mode without paging or with nested paging. */
2572 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */
2573 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)
2574 && !HMIsNestedPagingActive(pVM)))
2575 return VINF_SUCCESS; /* ignore */
2576
2577 /*
2578 * Lookup the context range node the page belongs to.
2579 */
2580#ifdef VBOX_STRICT
2581 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */
2582 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);
2583 AssertMsgReturn(pRange,
2584 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);
2585 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);
2586 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);
2587#endif
2588
2589 /*
2590 * Call PGM to do the work.
2591 *
2592 * After the call, all the pages should be non-present... unless there is
2593 * a page pool flush pending (unlikely).
2594 */
2595 int rc = PGMHandlerPhysicalReset(pVM, GCPhys);
2596 AssertRC(rc);
2597
2598#ifdef VBOX_STRICT
2599 if (!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3))
2600 {
2601 uint32_t cb = pRange->cb;
2602 GCPhys = pRange->GCPhys;
2603 while (cb)
2604 {
2605 uint64_t fFlags;
2606 RTHCPHYS HCPhys;
2607 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys);
2608 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
2609 cb -= PAGE_SIZE;
2610 GCPhys += PAGE_SIZE;
2611 }
2612 }
2613#endif
2614 return rc;
2615}
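
/*
 * Editor's note: this is the counterpart to the page-alias APIs above.  A
 * device that remapped pages, e.g. with IOMMMIOMapMMIO2Page, restores
 * trapping for the whole region with a single call on any address inside it:
 *
 *     rc = IOMMMIOResetRegion(pVM, GCPhysVRamMmioPage);
 */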
2616
2617#endif /* !IN_RC */
2618