1/* $Id: IEMAllCImplStrInstr.cpp.h 42453 2012-07-30 15:23:18Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50#else
51# error "Bad ADDR_SIZE."
52#endif
53#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
54
55
56/**
57 * Implements 'REPE CMPS'.
58 */
59IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
60{
61 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
62
63 /*
64 * Setup.
65 */
66 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
67 if (uCounterReg == 0)
68 {
69 iemRegAddToRip(pIemCpu, cbInstr);
70 return VINF_SUCCESS;
71 }
72
73 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
74 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
75 if (rcStrict != VINF_SUCCESS)
76 return rcStrict;
77
78 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
79 if (rcStrict != VINF_SUCCESS)
80 return rcStrict;
81
82 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
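 /* Example: with OP_SIZE 32, DF clear gives cbIncr = +4 (forward), DF set gives -4 (backward). */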
83 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
84 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
85 uint32_t uEFlags = pCtx->eflags.u;
86
87 /*
88 * The loop.
89 */
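 /* Note: REPE semantics - the outer loop below continues while rCX is non-zero and the last compare left ZF set (elements equal). */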
90 do
91 {
92 /*
93 * Do segmentation and virtual page stuff.
94 */
95#if ADDR_SIZE != 64
96 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
97 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
98#else
99 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
100 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
101#endif
102 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
103 if (cLeftSrc1Page > uCounterReg)
104 cLeftSrc1Page = uCounterReg;
105 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
106 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
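 /* Worked example: with PAGE_SIZE 4096, a source address at offset 0xffc into its page and OP_SIZE 16, cLeftSrc1Page = (4096 - 0xffc) / 2 = 2 items left before the page boundary. */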
107
108 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
109 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
110#if ADDR_SIZE != 64
111 && uSrc1AddrReg < pSrc1Hid->u32Limit
112 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
113 && uSrc2AddrReg < pCtx->es.u32Limit
114 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
115#endif
116 )
117 {
118 RTGCPHYS GCPhysSrc1Mem;
119 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
120 if (rcStrict != VINF_SUCCESS)
121 return rcStrict;
122
123 RTGCPHYS GCPhysSrc2Mem;
124 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
125 if (rcStrict != VINF_SUCCESS)
126 return rcStrict;
127
128 /*
129 * If we can map the page without trouble, do block processing
130 * until the end of the current page.
131 */
132 PGMPAGEMAPLOCK PgLockSrc2Mem;
133 OP_TYPE const *puSrc2Mem;
134 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
135 if (rcStrict == VINF_SUCCESS)
136 {
137 PGMPAGEMAPLOCK PgLockSrc1Mem;
138 OP_TYPE const *puSrc1Mem;
139 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
140 if (rcStrict == VINF_SUCCESS)
141 {
142 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
143 {
144 /* All matches, only compare the last item to get the right eflags. */
145 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
146 uSrc1AddrReg += cLeftPage * cbIncr;
147 uSrc2AddrReg += cLeftPage * cbIncr;
148 uCounterReg -= cLeftPage;
149 }
150 else
151 {
152 /* Some mismatch, compare each item (and keep volatile
153 memory in mind). */
154 uint32_t off = 0;
155 do
156 {
157 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
158 off++;
159 } while ( off < cLeftPage
160 && (uEFlags & X86_EFL_ZF));
161 uSrc1AddrReg += cbIncr * off;
162 uSrc2AddrReg += cbIncr * off;
163 uCounterReg -= off;
164 }
165
166 /* Update the registers before looping. */
167 pCtx->ADDR_rCX = uCounterReg;
168 pCtx->ADDR_rSI = uSrc1AddrReg;
169 pCtx->ADDR_rDI = uSrc2AddrReg;
170 pCtx->eflags.u = uEFlags;
171
172 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
173 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
174 continue;
175 }
176 }
177 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
178 }
179
180 /*
181 * Fallback - slow processing till the end of the current page.
182 * In the cross-page boundary case we will end up here with cLeftPage
183 * as 0; we then execute the loop body exactly once.
184 */
185 do
186 {
187 OP_TYPE uValue1;
188 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
189 if (rcStrict != VINF_SUCCESS)
190 return rcStrict;
191 OP_TYPE uValue2;
192 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
193 if (rcStrict != VINF_SUCCESS)
194 return rcStrict;
195 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
196
197 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
198 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
199 pCtx->ADDR_rCX = --uCounterReg;
200 pCtx->eflags.u = uEFlags;
201 cLeftPage--;
202 } while ( (int32_t)cLeftPage > 0
203 && (uEFlags & X86_EFL_ZF));
204 } while ( uCounterReg != 0
205 && (uEFlags & X86_EFL_ZF));
206
207 /*
208 * Done.
209 */
210 iemRegAddToRip(pIemCpu, cbInstr);
211 return VINF_SUCCESS;
212}
213
214
215/**
216 * Implements 'REPNE CMPS'.
217 */
218IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
219{
220 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
221
222 /*
223 * Setup.
224 */
225 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
226 if (uCounterReg == 0)
227 {
228 iemRegAddToRip(pIemCpu, cbInstr);
229 return VINF_SUCCESS;
230 }
231
232 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
233 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
234 if (rcStrict != VINF_SUCCESS)
235 return rcStrict;
236
237 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
238 if (rcStrict != VINF_SUCCESS)
239 return rcStrict;
240
241 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
242 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
243 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
244 uint32_t uEFlags = pCtx->eflags.u;
245
246 /*
247 * The loop.
248 */
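 /* Note: REPNE semantics - the outer loop below continues while rCX is non-zero and the last compare left ZF clear (elements unequal). */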
249 do
250 {
251 /*
252 * Do segmentation and virtual page stuff.
253 */
254#if ADDR_SIZE != 64
255 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
256 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
257#else
258 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
259 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
260#endif
261 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
262 if (cLeftSrc1Page > uCounterReg)
263 cLeftSrc1Page = uCounterReg;
264 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
265 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
266
267 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
268 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
269#if ADDR_SIZE != 64
270 && uSrc1AddrReg < pSrc1Hid->u32Limit
271 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
272 && uSrc2AddrReg < pCtx->es.u32Limit
273 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
274#endif
275 )
276 {
277 RTGCPHYS GCPhysSrc1Mem;
278 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
279 if (rcStrict != VINF_SUCCESS)
280 return rcStrict;
281
282 RTGCPHYS GCPhysSrc2Mem;
283 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
284 if (rcStrict != VINF_SUCCESS)
285 return rcStrict;
286
287 /*
288 * If we can map the page without trouble, do block processing
289 * until the end of the current page.
290 */
291 OP_TYPE const *puSrc2Mem;
292 PGMPAGEMAPLOCK PgLockSrc2Mem;
293 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
294 if (rcStrict == VINF_SUCCESS)
295 {
296 OP_TYPE const *puSrc1Mem;
297 PGMPAGEMAPLOCK PgLockSrc1Mem;
298 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
299 if (rcStrict == VINF_SUCCESS)
300 {
301 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
302 {
303 /* The blocks differ as a whole; compare only the last item to get the right eflags. */
304 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
305 uSrc1AddrReg += cLeftPage * cbIncr;
306 uSrc2AddrReg += cLeftPage * cbIncr;
307 uCounterReg -= cLeftPage;
308 }
309 else
310 {
311 /* The blocks are identical, so compare item by item (and keep volatile
312 memory in mind); the first compare will already set ZF. */
313 uint32_t off = 0;
314 do
315 {
316 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
317 off++;
318 } while ( off < cLeftPage
319 && !(uEFlags & X86_EFL_ZF));
320 uSrc1AddrReg += cbIncr * off;
321 uSrc2AddrReg += cbIncr * off;
322 uCounterReg -= off;
323 }
324
325 /* Update the registers before looping. */
326 pCtx->ADDR_rCX = uCounterReg;
327 pCtx->ADDR_rSI = uSrc1AddrReg;
328 pCtx->ADDR_rDI = uSrc2AddrReg;
329 pCtx->eflags.u = uEFlags;
330
331 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
332 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
333 continue;
334 }
335 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
336 }
337 }
338
339 /*
340 * Fallback - slow processing till the end of the current page.
341 * In the cross-page boundary case we will end up here with cLeftPage
342 * as 0; we then execute the loop body exactly once.
343 */
344 do
345 {
346 OP_TYPE uValue1;
347 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
348 if (rcStrict != VINF_SUCCESS)
349 return rcStrict;
350 OP_TYPE uValue2;
351 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
352 if (rcStrict != VINF_SUCCESS)
353 return rcStrict;
354 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
355
356 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
357 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
358 pCtx->ADDR_rCX = --uCounterReg;
359 pCtx->eflags.u = uEFlags;
360 cLeftPage--;
361 } while ( (int32_t)cLeftPage > 0
362 && !(uEFlags & X86_EFL_ZF));
363 } while ( uCounterReg != 0
364 && !(uEFlags & X86_EFL_ZF));
365
366 /*
367 * Done.
368 */
369 iemRegAddToRip(pIemCpu, cbInstr);
370 return VINF_SUCCESS;
371}
372
373
374/**
375 * Implements 'REPE SCAS'.
376 */
377IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
378{
379 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
380
381 /*
382 * Setup.
383 */
384 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
385 if (uCounterReg == 0)
386 {
387 iemRegAddToRip(pIemCpu, cbInstr);
388 return VINF_SUCCESS;
389 }
390
391 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
392 if (rcStrict != VINF_SUCCESS)
393 return rcStrict;
394
395 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
396 OP_TYPE const uValueReg = pCtx->OP_rAX;
397 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
398 uint32_t uEFlags = pCtx->eflags.u;
399
400 /*
401 * The loop.
402 */
403 do
404 {
405 /*
406 * Do segmentation and virtual page stuff.
407 */
408#if ADDR_SIZE != 64
409 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
410#else
411 uint64_t uVirtAddr = uAddrReg;
412#endif
413 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
414 if (cLeftPage > uCounterReg)
415 cLeftPage = uCounterReg;
416 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
417 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
418#if ADDR_SIZE != 64
419 && uAddrReg < pCtx->es.u32Limit
420 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
421#endif
422 )
423 {
424 RTGCPHYS GCPhysMem;
425 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
426 if (rcStrict != VINF_SUCCESS)
427 return rcStrict;
428
429 /*
430 * If we can map the page without trouble, do block processing
431 * until the end of the current page.
432 */
433 PGMPAGEMAPLOCK PgLockMem;
434 OP_TYPE const *puMem;
435 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
436 if (rcStrict == VINF_SUCCESS)
437 {
438 /* Search till we find a mismatching item. */
439 OP_TYPE uTmpValue;
440 bool fQuit;
441 uint32_t i = 0;
442 do
443 {
444 uTmpValue = puMem[i++];
445 fQuit = uTmpValue != uValueReg;
446 } while (i < cLeftPage && !fQuit);
447
448 /* Update the regs. */
449 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
450 pCtx->ADDR_rCX = uCounterReg -= i;
451 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
452 pCtx->eflags.u = uEFlags;
453 Assert((!(uEFlags & X86_EFL_ZF) == (i < cLeftPage)) || (i == cLeftPage)); /* i == cLeftPage when the mismatch is the last item on the page. */
454 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
455 if (fQuit)
456 break;
457
458
459 /* If unaligned, we drop thru and do the page crossing access
460 below. Otherwise, do the next page. */
461 if (!(uVirtAddr & (OP_SIZE - 1)))
462 continue;
463 if (uCounterReg == 0)
464 break;
465 cLeftPage = 0;
466 }
467 }
468
469 /*
470 * Fallback - slow processing till the end of the current page.
471 * In the cross-page boundary case we will end up here with cLeftPage
472 * as 0; we then execute the loop body exactly once.
473 */
474 do
475 {
476 OP_TYPE uTmpValue;
477 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
478 if (rcStrict != VINF_SUCCESS)
479 return rcStrict;
480 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
481
482 pCtx->ADDR_rDI = uAddrReg += cbIncr;
483 pCtx->ADDR_rCX = --uCounterReg;
484 pCtx->eflags.u = uEFlags;
485 cLeftPage--;
486 } while ( (int32_t)cLeftPage > 0
487 && (uEFlags & X86_EFL_ZF));
488 } while ( uCounterReg != 0
489 && (uEFlags & X86_EFL_ZF));
490
491 /*
492 * Done.
493 */
494 iemRegAddToRip(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Implements 'REPNE SCAS'.
501 */
502IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
503{
504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
505
506 /*
507 * Setup.
508 */
509 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
510 if (uCounterReg == 0)
511 {
512 iemRegAddToRip(pIemCpu, cbInstr);
513 return VINF_SUCCESS;
514 }
515
516 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
517 if (rcStrict != VINF_SUCCESS)
518 return rcStrict;
519
520 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
521 OP_TYPE const uValueReg = pCtx->OP_rAX;
522 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
523 uint32_t uEFlags = pCtx->eflags.u;
524
525 /*
526 * The loop.
527 */
528 do
529 {
530 /*
531 * Do segmentation and virtual page stuff.
532 */
533#if ADDR_SIZE != 64
534 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
535#else
536 uint64_t uVirtAddr = uAddrReg;
537#endif
538 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
539 if (cLeftPage > uCounterReg)
540 cLeftPage = uCounterReg;
541 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
542 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
543#if ADDR_SIZE != 64
544 && uAddrReg < pCtx->es.u32Limit
545 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
546#endif
547 )
548 {
549 RTGCPHYS GCPhysMem;
550 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
551 if (rcStrict != VINF_SUCCESS)
552 return rcStrict;
553
554 /*
555 * If we can map the page without trouble, do block processing
556 * until the end of the current page.
557 */
558 PGMPAGEMAPLOCK PgLockMem;
559 OP_TYPE const *puMem;
560 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
561 if (rcStrict == VINF_SUCCESS)
562 {
563 /* Search till we find a mismatching item. */
564 OP_TYPE uTmpValue;
565 bool fQuit;
566 uint32_t i = 0;
567 do
568 {
569 uTmpValue = puMem[i++];
570 fQuit = uTmpValue == uValueReg;
571 } while (i < cLeftPage && !fQuit);
572
573 /* Update the regs. */
574 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
575 pCtx->ADDR_rCX = uCounterReg -= i;
576 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
577 pCtx->eflags.u = uEFlags;
578 Assert((!(uEFlags & X86_EFL_ZF) != (i < cLeftPage)) || (i == cLeftPage));
579 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
580 if (fQuit)
581 break;
582
583
584 /* If unaligned, we drop thru and do the page crossing access
585 below. Otherwise, do the next page. */
586 if (!(uVirtAddr & (OP_SIZE - 1)))
587 continue;
588 if (uCounterReg == 0)
589 break;
590 cLeftPage = 0;
591 }
592 }
593
594 /*
595 * Fallback - slow processing till the end of the current page.
596 * In the cross-page boundary case we will end up here with cLeftPage
597 * as 0; we then execute the loop body exactly once.
598 */
599 do
600 {
601 OP_TYPE uTmpValue;
602 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
603 if (rcStrict != VINF_SUCCESS)
604 return rcStrict;
605 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
606
607 pCtx->ADDR_rDI = uAddrReg += cbIncr;
608 pCtx->ADDR_rCX = --uCounterReg;
609 pCtx->eflags.u = uEFlags;
610 cLeftPage--;
611 } while ( (int32_t)cLeftPage > 0
612 && !(uEFlags & X86_EFL_ZF));
613 } while ( uCounterReg != 0
614 && !(uEFlags & X86_EFL_ZF));
615
616 /*
617 * Done.
618 */
619 iemRegAddToRip(pIemCpu, cbInstr);
620 return VINF_SUCCESS;
621}
622
623
624
625
626/**
627 * Implements 'REP MOVS'.
628 */
629IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
630{
631 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
632
633 /*
634 * Setup.
635 */
636 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
637 if (uCounterReg == 0)
638 {
639 iemRegAddToRip(pIemCpu, cbInstr);
640 return VINF_SUCCESS;
641 }
642
643 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
644 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
645 if (rcStrict != VINF_SUCCESS)
646 return rcStrict;
647
648 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
649 if (rcStrict != VINF_SUCCESS)
650 return rcStrict;
651
652 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
653 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
654 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
655
656 /*
657 * The loop.
658 */
659 do
660 {
661 /*
662 * Do segmentation and virtual page stuff.
663 */
664#if ADDR_SIZE != 64
665 ADDR2_TYPE uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg;
666 ADDR2_TYPE uVirtDstAddr = (uint32_t)pCtx->es.u64Base + uDstAddrReg;
667#else
668 uint64_t uVirtSrcAddr = uSrcAddrReg;
669 uint64_t uVirtDstAddr = uDstAddrReg;
670#endif
671 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
672 if (cLeftSrcPage > uCounterReg)
673 cLeftSrcPage = uCounterReg;
674 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
675 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
676
677 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
678 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
679#if ADDR_SIZE != 64
680 && uSrcAddrReg < pSrcHid->u32Limit
681 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
682 && uDstAddrReg < pCtx->es.u32Limit
683 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
684#endif
685 )
686 {
687 RTGCPHYS GCPhysSrcMem;
688 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
689 if (rcStrict != VINF_SUCCESS)
690 return rcStrict;
691
692 RTGCPHYS GCPhysDstMem;
693 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
694 if (rcStrict != VINF_SUCCESS)
695 return rcStrict;
696
697 /*
698 * If we can map the page without trouble, do block processing
699 * until the end of the current page.
700 */
701 PGMPAGEMAPLOCK PgLockDstMem;
702 OP_TYPE *puDstMem;
703 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
704 if (rcStrict == VINF_SUCCESS)
705 {
706 PGMPAGEMAPLOCK PgLockSrcMem;
707 OP_TYPE const *puSrcMem;
708 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
709 if (rcStrict == VINF_SUCCESS)
710 {
711 /* Perform the operation. */
712 memcpy(puDstMem, puSrcMem, cLeftPage * (OP_SIZE / 8));
713
714 /* Update the registers. */
715 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
716 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
717 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
718
719 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
720 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
721 continue;
722 }
723 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
724 }
725 }
726
727 /*
728 * Fallback - slow processing till the end of the current page.
729 * In the cross-page boundary case we will end up here with cLeftPage
730 * as 0; we then execute the loop body exactly once.
731 */
732 do
733 {
734 OP_TYPE uValue;
735 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
736 if (rcStrict != VINF_SUCCESS)
737 return rcStrict;
738 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
739 if (rcStrict != VINF_SUCCESS)
740 return rcStrict;
741
742 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
743 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
744 pCtx->ADDR_rCX = --uCounterReg;
745 cLeftPage--;
746 } while ((int32_t)cLeftPage > 0);
747 } while (uCounterReg != 0);
748
749 /*
750 * Done.
751 */
752 iemRegAddToRip(pIemCpu, cbInstr);
753 return VINF_SUCCESS;
754}
755
756
757/**
758 * Implements 'REP STOS'.
759 */
760IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
761{
762 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
763
764 /*
765 * Setup.
766 */
767 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
768 if (uCounterReg == 0)
769 {
770 iemRegAddToRip(pIemCpu, cbInstr);
771 return VINF_SUCCESS;
772 }
773
774 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
775 if (rcStrict != VINF_SUCCESS)
776 return rcStrict;
777
778 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
779 OP_TYPE const uValue = pCtx->OP_rAX;
780 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
781
782 /*
783 * The loop.
784 */
785 do
786 {
787 /*
788 * Do segmentation and virtual page stuff.
789 */
790#if ADDR_SIZE != 64
791 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
792#else
793 uint64_t uVirtAddr = uAddrReg;
794#endif
795 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
796 if (cLeftPage > uCounterReg)
797 cLeftPage = uCounterReg;
798 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
799 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
800#if ADDR_SIZE != 64
801 && uAddrReg < pCtx->es.u32Limit
802 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
803#endif
804 )
805 {
806 RTGCPHYS GCPhysMem;
807 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
808 if (rcStrict != VINF_SUCCESS)
809 return rcStrict;
810
811 /*
812 * If we can map the page without trouble, do block processing
813 * until the end of the current page.
814 */
815 PGMPAGEMAPLOCK PgLockMem;
816 OP_TYPE *puMem;
817 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
818 if (rcStrict == VINF_SUCCESS)
819 {
820 /* Update the regs first so we can loop on cLeftPage. */
821 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
822 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
823
824 /* Do the memsetting. */
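 /* Byte-sized fills can go straight to memset(); wider element sizes use the plain store loop below (an ASMMemFill32 fast path is left commented out). */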
825#if OP_SIZE == 8
826 memset(puMem, uValue, cLeftPage);
827/*#elif OP_SIZE == 32
828 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
829#else
830 while (cLeftPage-- > 0)
831 *puMem++ = uValue;
832#endif
833
834 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
835
836 /* If unaligned, we drop thru and do the page crossing access
837 below. Otherwise, do the next page. */
838 if (!(uVirtAddr & (OP_SIZE - 1)))
839 continue;
840 if (uCounterReg == 0)
841 break;
842 cLeftPage = 0;
843 }
844 }
845
846 /*
847 * Fallback - slow processing till the end of the current page.
848 * In the cross-page boundary case we will end up here with cLeftPage
849 * as 0; we then execute the loop body exactly once.
850 */
851 do
852 {
853 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
854 if (rcStrict != VINF_SUCCESS)
855 return rcStrict;
856 pCtx->ADDR_rDI = uAddrReg += cbIncr;
857 pCtx->ADDR_rCX = --uCounterReg;
858 cLeftPage--;
859 } while ((int32_t)cLeftPage > 0);
860 } while (uCounterReg != 0);
861
862 /*
863 * Done.
864 */
865 iemRegAddToRip(pIemCpu, cbInstr);
866 return VINF_SUCCESS;
867}
868
869
870/**
871 * Implements 'REP LODS'.
872 */
873IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
874{
875 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
876
877 /*
878 * Setup.
879 */
880 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
881 if (uCounterReg == 0)
882 {
883 iemRegAddToRip(pIemCpu, cbInstr);
884 return VINF_SUCCESS;
885 }
886
887 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
888 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
889 if (rcStrict != VINF_SUCCESS)
890 return rcStrict;
891
892 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
893 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
894
895 /*
896 * The loop.
897 */
898 do
899 {
900 /*
901 * Do segmentation and virtual page stuff.
902 */
903#if ADDR_SIZE != 64
904 ADDR2_TYPE uVirtAddr = (uint32_t)pSrcHid->u64Base + uAddrReg;
905#else
906 uint64_t uVirtAddr = uAddrReg;
907#endif
908 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
909 if (cLeftPage > uCounterReg)
910 cLeftPage = uCounterReg;
911 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
912 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
913#if ADDR_SIZE != 64
914 && uAddrReg < pSrcHid->u32Limit
915 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
916#endif
917 )
918 {
919 RTGCPHYS GCPhysMem;
920 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
921 if (rcStrict != VINF_SUCCESS)
922 return rcStrict;
923
924 /*
925 * If we can map the page without trouble, we can get away with
926 * just reading the last value on the page.
927 */
928 PGMPAGEMAPLOCK PgLockMem;
929 OP_TYPE const *puMem;
930 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
931 if (rcStrict == VINF_SUCCESS)
932 {
933 /* Only fetch the last item; the rest doesn't matter in direct access mode. */
934#if OP_SIZE == 32
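 /* A 32-bit register write zero-extends into the full 64-bit register, so store to rax rather than just eax here. */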
935 pCtx->rax = puMem[cLeftPage - 1];
936#else
937 pCtx->OP_rAX = puMem[cLeftPage - 1];
938#endif
939 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
940 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
941 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
942
943 /* If unaligned, we drop thru and do the page crossing access
944 below. Otherwise, do the next page. */
945 if (!(uVirtAddr & (OP_SIZE - 1)))
946 continue;
947 if (uCounterReg == 0)
948 break;
949 cLeftPage = 0;
950 }
951 }
952
953 /*
954 * Fallback - slow processing till the end of the current page.
955 * In the cross-page boundary case we will end up here with cLeftPage
956 * as 0; we then execute the loop body exactly once.
957 */
958 do
959 {
960 OP_TYPE uTmpValue;
961 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
962 if (rcStrict != VINF_SUCCESS)
963 return rcStrict;
964#if OP_SIZE == 32
965 pCtx->rax = uTmpValue;
966#else
967 pCtx->OP_rAX = uTmpValue;
968#endif
969 pCtx->ADDR_rSI = uAddrReg += cbIncr;
970 pCtx->ADDR_rCX = --uCounterReg;
971 cLeftPage--;
972 } while ((int32_t)cLeftPage > 0);
973 if (rcStrict != VINF_SUCCESS)
974 break;
975 } while (uCounterReg != 0);
976
977 /*
978 * Done.
979 */
980 iemRegAddToRip(pIemCpu, cbInstr);
981 return VINF_SUCCESS;
982}
983
984
985#if OP_SIZE != 64
986
987/**
988 * Implements 'INS' (no rep).
989 */
990IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE))
991{
992 PVM pVM = IEMCPU_TO_VM(pIemCpu);
993 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
994 VBOXSTRICTRC rcStrict;
995
996 /*
997 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
998 * segmentation and finally any #PF due to virtual address translation.
999 * ASSUMES nothing is read from the I/O port before traps are taken.
1000 */
1001 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1002 if (rcStrict != VINF_SUCCESS)
1003 return rcStrict;
1004
1005 OP_TYPE *puMem;
1006 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1007 if (rcStrict != VINF_SUCCESS)
1008 return rcStrict;
1009
1010 uint32_t u32Value;
1011 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1012 rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8);
1013 else
1014 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1015 if (IOM_SUCCESS(rcStrict))
1016 {
1017 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1018 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1019 {
1020 if (!pCtx->eflags.Bits.u1DF)
1021 pCtx->ADDR_rDI += OP_SIZE / 8;
1022 else
1023 pCtx->ADDR_rDI -= OP_SIZE / 8;
1024 iemRegAddToRip(pIemCpu, cbInstr);
1025 }
1026 /* iemMemMap already checked the permissions, so this can only be real errors
1027 or access handlers meddling. The access handler case is going to
1028 cause misbehavior if the instruction is re-interpreted or the like, so
1029 we fail with an internal error here instead.
1030 else
1031 AssertLogRelFailedReturn(VERR_IEM_IPE_1);
1032 }
1033 return rcStrict;
1034}
1035
1036
1037/**
1038 * Implements 'REP INS'.
1039 */
1040IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1041{
1042 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1043 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1044
1045 /*
1046 * Setup.
1047 */
1048 uint16_t const u16Port = pCtx->dx;
1049 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1050 if (rcStrict != VINF_SUCCESS)
1051 return rcStrict;
1052
1053 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1054 if (uCounterReg == 0)
1055 {
1056 iemRegAddToRip(pIemCpu, cbInstr);
1057 return VINF_SUCCESS;
1058 }
1059
1060 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
1061 if (rcStrict != VINF_SUCCESS)
1062 return rcStrict;
1063
1064 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1065 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1066
1067 /*
1068 * The loop.
1069 */
1070 do
1071 {
1072 /*
1073 * Do segmentation and virtual page stuff.
1074 */
1075#if ADDR_SIZE != 64
1076 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
1077#else
1078 uint64_t uVirtAddr = uAddrReg;
1079#endif
1080 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1081 if (cLeftPage > uCounterReg)
1082 cLeftPage = uCounterReg;
1083 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1084 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1085#if ADDR_SIZE != 64
1086 && uAddrReg < pCtx->es.u32Limit
1087 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
1088#endif
1089 )
1090 {
1091 RTGCPHYS GCPhysMem;
1092 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1093 if (rcStrict != VINF_SUCCESS)
1094 return rcStrict;
1095
1096 /*
1097 * If we can map the page without trouble, we would've liked to use
1098 * a string I/O method to do the work, but the current IOM
1099 * interface doesn't match our current approach. So, do a regular
1100 * loop instead.
1101 */
1102 /** @todo Change the I/O manager interface to make use of
1103 * mapped buffers instead of leaving those bits to the
1104 * device implementation? */
1105 PGMPAGEMAPLOCK PgLockMem;
1106 OP_TYPE *puMem;
1107 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1108 if (rcStrict == VINF_SUCCESS)
1109 {
1110 uint32_t off = 0;
1111 while (off < cLeftPage)
1112 {
1113 uint32_t u32Value;
1114 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1115 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1116 else
1117 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1118 if (IOM_SUCCESS(rcStrict))
1119 {
1120 puMem[off] = (OP_TYPE)u32Value;
1121 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1122 pCtx->ADDR_rCX = --uCounterReg;
1123 }
1124 if (rcStrict != VINF_SUCCESS)
1125 {
1126 if (IOM_SUCCESS(rcStrict))
1127 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1128 if (uCounterReg == 0)
1129 iemRegAddToRip(pIemCpu, cbInstr);
1130 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1131 return rcStrict;
1132 }
1133 off++;
1134 }
1135 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1136
1137 /* If unaligned, we drop thru and do the page crossing access
1138 below. Otherwise, do the next page. */
1139 if (!(uVirtAddr & (OP_SIZE - 1)))
1140 continue;
1141 if (uCounterReg == 0)
1142 break;
1143 cLeftPage = 0;
1144 }
1145 }
1146
1147 /*
1148 * Fallback - slow processing till the end of the current page.
1149 * In the cross-page boundary case we will end up here with cLeftPage
1150 * as 0; we then execute the loop body exactly once.
1151 *
1152 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1153 * I/O port, otherwise it wouldn't really be restartable.
1154 */
1155 /** @todo investigate what the CPU actually does with \#PF/\#GP
1156 * during INS. */
1157 do
1158 {
1159 OP_TYPE *puMem;
1160 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1161 if (rcStrict != VINF_SUCCESS)
1162 return rcStrict;
1163
1164 uint32_t u32Value;
1165 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1166 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1167 else
1168 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1169 if (!IOM_SUCCESS(rcStrict))
1170 return rcStrict;
1171
1172 *puMem = (OP_TYPE)u32Value;
1173 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1174 AssertLogRelReturn(rcStrict2 == VINF_SUCCESS, VERR_IEM_IPE_1); /* See non-rep version. */
1175
1176 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1177 pCtx->ADDR_rCX = --uCounterReg;
1178
1179 cLeftPage--;
1180 if (rcStrict != VINF_SUCCESS)
1181 {
1182 if (IOM_SUCCESS(rcStrict))
1183 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1184 if (uCounterReg == 0)
1185 iemRegAddToRip(pIemCpu, cbInstr);
1186 return rcStrict;
1187 }
1188 } while ((int32_t)cLeftPage > 0);
1189 } while (uCounterReg != 0);
1190
1191 /*
1192 * Done.
1193 */
1194 iemRegAddToRip(pIemCpu, cbInstr);
1195 return VINF_SUCCESS;
1196}
1197
1198
1199/**
1200 * Implements 'OUTS' (no rep).
1201 */
1202IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1203{
1204 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1205 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1206 VBOXSTRICTRC rcStrict;
1207
1208 /*
1209 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1210 * segmentation and finally any #PF due to virtual address translation.
1211 * ASSUMES nothing is read from the I/O port before traps are taken.
1212 */
1213 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1214 if (rcStrict != VINF_SUCCESS)
1215 return rcStrict;
1216
1217 OP_TYPE uValue;
1218 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1219 if (rcStrict == VINF_SUCCESS)
1220 {
1221 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1222 rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8);
1223 else
1224 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1225 if (IOM_SUCCESS(rcStrict))
1226 {
1227 if (!pCtx->eflags.Bits.u1DF)
1228 pCtx->ADDR_rSI += OP_SIZE / 8;
1229 else
1230 pCtx->ADDR_rSI -= OP_SIZE / 8;
1231 iemRegAddToRip(pIemCpu, cbInstr);
1232 if (rcStrict != VINF_SUCCESS)
1233 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1234 }
1235 }
1236 return rcStrict;
1237}
1238
1239
1240/**
1241 * Implements 'REP OUTS'.
1242 */
1243IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1244{
1245 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1246 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1247
1248 /*
1249 * Setup.
1250 */
1251 uint16_t const u16Port = pCtx->dx;
1252 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1253 if (rcStrict != VINF_SUCCESS)
1254 return rcStrict;
1255
1256 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1257 if (uCounterReg == 0)
1258 {
1259 iemRegAddToRip(pIemCpu, cbInstr);
1260 return VINF_SUCCESS;
1261 }
1262
1263 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1264 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg);
1265 if (rcStrict != VINF_SUCCESS)
1266 return rcStrict;
1267
1268 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1269 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1270
1271 /*
1272 * The loop.
1273 */
1274 do
1275 {
1276 /*
1277 * Do segmentation and virtual page stuff.
1278 */
1279#if ADDR_SIZE != 64
1280 ADDR2_TYPE uVirtAddr = (uint32_t)pHid->u64Base + uAddrReg;
1281#else
1282 uint64_t uVirtAddr = uAddrReg;
1283#endif
1284 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1285 if (cLeftPage > uCounterReg)
1286 cLeftPage = uCounterReg;
1287 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1288 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1289#if ADDR_SIZE != 64
1290 && uAddrReg < pHid->u32Limit
1291 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit
1292#endif
1293 )
1294 {
1295 RTGCPHYS GCPhysMem;
1296 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1297 if (rcStrict != VINF_SUCCESS)
1298 return rcStrict;
1299
1300 /*
1301 * If we can map the page without trouble, we would've liked to use
1302 * a string I/O method to do the work, but the current IOM
1303 * interface doesn't match our current approach. So, do a regular
1304 * loop instead.
1305 */
1306 /** @todo Change the I/O manager interface to make use of
1307 * mapped buffers instead of leaving those bits to the
1308 * device implementation? */
1309 PGMPAGEMAPLOCK PgLockMem;
1310 OP_TYPE const *puMem;
1311 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1312 if (rcStrict == VINF_SUCCESS)
1313 {
1314 uint32_t off = 0;
1315 while (off < cLeftPage)
1316 {
1317 uint32_t u32Value = *puMem++;
1318 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1319 rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8);
1320 else
1321 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
1322 if (IOM_SUCCESS(rcStrict))
1323 {
1324 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1325 pCtx->ADDR_rCX = --uCounterReg;
1326 }
1327 if (rcStrict != VINF_SUCCESS)
1328 {
1329 if (IOM_SUCCESS(rcStrict))
1330 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1331 if (uCounterReg == 0)
1332 iemRegAddToRip(pIemCpu, cbInstr);
1333 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1334 return rcStrict;
1335 }
1336 off++;
1337 }
1338 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1339
1340 /* If unaligned, we drop thru and do the page crossing access
1341 below. Otherwise, do the next page. */
1342 if (!(uVirtAddr & (OP_SIZE - 1)))
1343 continue;
1344 if (uCounterReg == 0)
1345 break;
1346 cLeftPage = 0;
1347 }
1348 }
1349
1350 /*
1351 * Fallback - slow processing till the end of the current page.
1352 * In the cross-page boundary case we will end up here with cLeftPage
1353 * as 0; we then execute the loop body exactly once.
1354 *
1355 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1356 * I/O port, otherwise it wouldn't really be restartable.
1357 */
1358 /** @todo investigate what the CPU actually does with \#PF/\#GP
1359 * during OUTS. */
1360 do
1361 {
1362 OP_TYPE uValue;
1363 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1364 if (rcStrict != VINF_SUCCESS)
1365 return rcStrict;
1366
1367 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1368 rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8);
1369 else
1370 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1371 if (IOM_SUCCESS(rcStrict))
1372 {
1373 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1374 pCtx->ADDR_rCX = --uCounterReg;
1375 cLeftPage--;
1376 }
1377 if (rcStrict != VINF_SUCCESS)
1378 {
1379 if (IOM_SUCCESS(rcStrict))
1380 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1381 if (uCounterReg == 0)
1382 iemRegAddToRip(pIemCpu, cbInstr);
1383 return rcStrict;
1384 }
1385 } while ((int32_t)cLeftPage > 0);
1386 } while (uCounterReg != 0);
1387
1388 /*
1389 * Done.
1390 */
1391 iemRegAddToRip(pIemCpu, cbInstr);
1392 return VINF_SUCCESS;
1393}
1394
1395#endif /* OP_SIZE != 64 */
1396
1397
1398#undef OP_rAX
1399#undef OP_SIZE
1400#undef ADDR_SIZE
1401#undef ADDR_rDI
1402#undef ADDR_rSI
1403#undef ADDR_rCX
1404#undef ADDR_rIP
1405#undef ADDR2_TYPE
1406#undef ADDR_TYPE
1407#undef ADDR2_TYPE
1408