VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@56416

Last change on this file since 56416 was 56416, checked in by vboxsync, 10 years ago

IEM: Use the modified IOM string I/O APIs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 51.8 KB
1/* $Id: IEMAllCImplStrInstr.cpp.h 56416 2015-06-14 12:26:09Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50# define IS_64_BIT_CODE(a_pIemCpu) (true)
51#else
52# error "Bad ADDR_SIZE."
53#endif
54#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
55
56#if ADDR_SIZE == 64 || OP_SIZE == 64
57# define IS_64_BIT_CODE(a_pIemCpu) (true)
58#elif ADDR_SIZE == 32
59# define IS_64_BIT_CODE(a_pIemCpu) ((a_pIemCpu)->enmCpuMode == IEMMODE_64BIT)
60#else
61# define IS_64_BIT_CODE(a_pIemCpu) (false)
62#endif
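/*
 * This file is a code template: it is included several times with different
 * OP_SIZE/ADDR_SIZE combinations. A minimal sketch of one instantiation
 * (illustration only; the actual including file lives elsewhere in IEM):
 *
 *   #define OP_SIZE   16
 *   #define ADDR_SIZE 32
 *   #include "IEMAllCImplStrInstr.cpp.h"
 *
 * With these settings OP_rAX expands to ax, OP_TYPE to uint16_t, ADDR_rSI to
 * esi, ADDR_TYPE to uint32_t, and RT_CONCAT4 pastes together function names
 * such as iemCImpl_repe_cmps_op16_addr32. The #undef block at the end of the
 * file resets the macros so the next inclusion can redefine them.
 */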
63
64
65/**
66 * Implements 'REPE CMPS'.
67 */
68IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
69{
70 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
71
72 /*
73 * Setup.
74 */
75 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
76 if (uCounterReg == 0)
77 {
78 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
79 return VINF_SUCCESS;
80 }
81
82 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
83 uint64_t uSrc1Base;
84 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
85 if (rcStrict != VINF_SUCCESS)
86 return rcStrict;
87
88 uint64_t uSrc2Base;
89 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
90 if (rcStrict != VINF_SUCCESS)
91 return rcStrict;
92
93 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
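/* cbIncr encodes the direction flag: e.g. with OP_SIZE == 32, CLD (DF clear)
   yields cbIncr = +4 and STD (DF set) yields cbIncr = -4, so rSI/rDI step one
   operand up or down per item processed. */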
94 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
95 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
96 uint32_t uEFlags = pCtx->eflags.u;
97
98 /*
99 * The loop.
100 */
101 do
102 {
103 /*
104 * Do segmentation and virtual page stuff.
105 */
106 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
107 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
108 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
109 if (cLeftSrc1Page > uCounterReg)
110 cLeftSrc1Page = uCounterReg;
111 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
112 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
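/* Worked example of the chunking above, assuming the usual 4096-byte
   PAGE_SIZE: with OP_SIZE == 16 and uVirtSrc1Addr == 0x8FFA the page offset
   is 0xFFA, so cLeftSrc1Page = (4096 - 0xFFA) / 2 = 3 items fit before the
   page boundary; cLeftPage is that, further clamped by rCX and by the other
   operand's own page remainder. */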
113
114 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
115 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
116 && ( IS_64_BIT_CODE(pIemCpu)
117 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
118 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
119 && uSrc2AddrReg < pCtx->es.u32Limit
120 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
121 )
122 )
123 {
124 RTGCPHYS GCPhysSrc1Mem;
125 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
126 if (rcStrict != VINF_SUCCESS)
127 return rcStrict;
128
129 RTGCPHYS GCPhysSrc2Mem;
130 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
131 if (rcStrict != VINF_SUCCESS)
132 return rcStrict;
133
134 /*
135 * If we can map the page without trouble, do block processing
136 * until the end of the current page.
137 */
138 PGMPAGEMAPLOCK PgLockSrc2Mem;
139 OP_TYPE const *puSrc2Mem;
140 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
141 if (rcStrict == VINF_SUCCESS)
142 {
143 PGMPAGEMAPLOCK PgLockSrc1Mem;
144 OP_TYPE const *puSrc1Mem;
145 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
146 if (rcStrict == VINF_SUCCESS)
147 {
148 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
149 {
150 /* All matches, only compare the last item to get the right eflags. */
151 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
152 uSrc1AddrReg += cLeftPage * cbIncr;
153 uSrc2AddrReg += cLeftPage * cbIncr;
154 uCounterReg -= cLeftPage;
155 }
156 else
157 {
158 /* Some mismatch, compare each item (and keep volatile
159 memory in mind). */
160 uint32_t off = 0;
161 do
162 {
163 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
164 off++;
165 } while ( off < cLeftPage
166 && (uEFlags & X86_EFL_ZF));
167 uSrc1AddrReg += cbIncr * off;
168 uSrc2AddrReg += cbIncr * off;
169 uCounterReg -= off;
170 }
171
172 /* Update the registers before looping. */
173 pCtx->ADDR_rCX = uCounterReg;
174 pCtx->ADDR_rSI = uSrc1AddrReg;
175 pCtx->ADDR_rDI = uSrc2AddrReg;
176 pCtx->eflags.u = uEFlags;
177
178 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
179 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
180 continue;
181 }
182 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
183 }
184 }
185
186 /*
187 * Fallback - slow processing till the end of the current page.
188 * In the cross page boundary case we will end up here with cLeftPage
189 * as 0, in which case we execute a single round.
190 */
191 do
192 {
193 OP_TYPE uValue1;
194 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
195 if (rcStrict != VINF_SUCCESS)
196 return rcStrict;
197 OP_TYPE uValue2;
198 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
199 if (rcStrict != VINF_SUCCESS)
200 return rcStrict;
201 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
202
203 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
204 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
205 pCtx->ADDR_rCX = --uCounterReg;
206 pCtx->eflags.u = uEFlags;
207 cLeftPage--;
208 } while ( (int32_t)cLeftPage > 0
209 && (uEFlags & X86_EFL_ZF));
210 } while ( uCounterReg != 0
211 && (uEFlags & X86_EFL_ZF));
212
213 /*
214 * Done.
215 */
216 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
217 return VINF_SUCCESS;
218}
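/*
 * The fast path above in isolation - a minimal user-land sketch (hypothetical
 * helper, not IEM code): memcmp() the whole block first, and only walk item
 * by item when the first mismatch must be located exactly.
 *
 *   #include <stddef.h>
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   // How many uint16_t items a REPE CMPSW would consume within this block.
 *   static size_t repeCmpsBlockU16(const uint16_t *pa, const uint16_t *pb, size_t cItems)
 *   {
 *       if (!memcmp(pa, pb, cItems * sizeof(uint16_t)))
 *           return cItems;      // all equal: the whole block is consumed
 *       size_t off = 0;
 *       while (pa[off] == pb[off])
 *           off++;              // a mismatch is guaranteed by the memcmp above
 *       return off + 1;         // the mismatching item is itself still consumed
 *   }
 */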
219
220
221/**
222 * Implements 'REPNE CMPS'.
223 */
224IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
225{
226 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
227
228 /*
229 * Setup.
230 */
231 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
232 if (uCounterReg == 0)
233 {
234 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
235 return VINF_SUCCESS;
236 }
237
238 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
239 uint64_t uSrc1Base;
240 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
241 if (rcStrict != VINF_SUCCESS)
242 return rcStrict;
243
244 uint64_t uSrc2Base;
245 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
246 if (rcStrict != VINF_SUCCESS)
247 return rcStrict;
248
249 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
250 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
251 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
252 uint32_t uEFlags = pCtx->eflags.u;
253
254 /*
255 * The loop.
256 */
257 do
258 {
259 /*
260 * Do segmentation and virtual page stuff.
261 */
262 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
263 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
264 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
265 if (cLeftSrc1Page > uCounterReg)
266 cLeftSrc1Page = uCounterReg;
267 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
268 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
269
270 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
271 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
272 && ( IS_64_BIT_CODE(pIemCpu)
273 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
274 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
275 && uSrc2AddrReg < pCtx->es.u32Limit
276 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
277 )
278 )
279 {
280 RTGCPHYS GCPhysSrc1Mem;
281 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
282 if (rcStrict != VINF_SUCCESS)
283 return rcStrict;
284
285 RTGCPHYS GCPhysSrc2Mem;
286 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
287 if (rcStrict != VINF_SUCCESS)
288 return rcStrict;
289
290 /*
291 * If we can map the page without trouble, do block processing
292 * until the end of the current page.
293 */
294 OP_TYPE const *puSrc2Mem;
295 PGMPAGEMAPLOCK PgLockSrc2Mem;
296 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
297 if (rcStrict == VINF_SUCCESS)
298 {
299 OP_TYPE const *puSrc1Mem;
300 PGMPAGEMAPLOCK PgLockSrc1Mem;
301 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
302 if (rcStrict == VINF_SUCCESS)
303 {
304 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
305 {
306 /* All items match, so REPNE stops at the very first one; compare it to get the right eflags. */
307 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[0], puSrc2Mem[0], &uEFlags);
308 uSrc1AddrReg += cbIncr;
309 uSrc2AddrReg += cbIncr;
310 uCounterReg -= 1;
311 }
312 else
313 {
314 /* Some mismatch, compare each item (and keep volatile
315 memory in mind). */
316 uint32_t off = 0;
317 do
318 {
319 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
320 off++;
321 } while ( off < cLeftPage
322 && !(uEFlags & X86_EFL_ZF));
323 uSrc1AddrReg += cbIncr * off;
324 uSrc2AddrReg += cbIncr * off;
325 uCounterReg -= off;
326 }
327
328 /* Update the registers before looping. */
329 pCtx->ADDR_rCX = uCounterReg;
330 pCtx->ADDR_rSI = uSrc1AddrReg;
331 pCtx->ADDR_rDI = uSrc2AddrReg;
332 pCtx->eflags.u = uEFlags;
333
334 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
335 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
336 continue;
337 }
338 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
339 }
340 }
341
342 /*
343 * Fallback - slow processing till the end of the current page.
344 * In the cross page boundary case we will end up here with cLeftPage
345 * as 0, in which case we execute a single round.
346 */
347 do
348 {
349 OP_TYPE uValue1;
350 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
351 if (rcStrict != VINF_SUCCESS)
352 return rcStrict;
353 OP_TYPE uValue2;
354 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
355 if (rcStrict != VINF_SUCCESS)
356 return rcStrict;
357 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
358
359 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
360 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
361 pCtx->ADDR_rCX = --uCounterReg;
362 pCtx->eflags.u = uEFlags;
363 cLeftPage--;
364 } while ( (int32_t)cLeftPage > 0
365 && !(uEFlags & X86_EFL_ZF));
366 } while ( uCounterReg != 0
367 && !(uEFlags & X86_EFL_ZF));
368
369 /*
370 * Done.
371 */
372 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
373 return VINF_SUCCESS;
374}
375
376
377/**
378 * Implements 'REPE SCAS'.
379 */
380IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
381{
382 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
383
384 /*
385 * Setup.
386 */
387 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
388 if (uCounterReg == 0)
389 {
390 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
391 return VINF_SUCCESS;
392 }
393
394 uint64_t uBaseAddr;
395 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
396 if (rcStrict != VINF_SUCCESS)
397 return rcStrict;
398
399 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
400 OP_TYPE const uValueReg = pCtx->OP_rAX;
401 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
402 uint32_t uEFlags = pCtx->eflags.u;
403
404 /*
405 * The loop.
406 */
407 do
408 {
409 /*
410 * Do segmentation and virtual page stuff.
411 */
412 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
413 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
414 if (cLeftPage > uCounterReg)
415 cLeftPage = uCounterReg;
416 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
417 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
418 && ( IS_64_BIT_CODE(pIemCpu)
419 || ( uAddrReg < pCtx->es.u32Limit
420 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
421 )
422 )
423 {
424 RTGCPHYS GCPhysMem;
425 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
426 if (rcStrict != VINF_SUCCESS)
427 return rcStrict;
428
429 /*
430 * If we can map the page without trouble, do block processing
431 * until the end of the current page.
432 */
433 PGMPAGEMAPLOCK PgLockMem;
434 OP_TYPE const *puMem;
435 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
436 if (rcStrict == VINF_SUCCESS)
437 {
438 /* Search till we find a mismatching item. */
439 OP_TYPE uTmpValue;
440 bool fQuit;
441 uint32_t i = 0;
442 do
443 {
444 uTmpValue = puMem[i++];
445 fQuit = uTmpValue != uValueReg;
446 } while (i < cLeftPage && !fQuit);
447
448 /* Update the regs. */
449 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
450 pCtx->ADDR_rCX = uCounterReg -= i;
451 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
452 pCtx->eflags.u = uEFlags;
453 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
454 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
455 if (fQuit)
456 break;
457
458
459 /* If unaligned, we drop thru and do the page crossing access
460 below. Otherwise, do the next page. */
461 if (!(uVirtAddr & (OP_SIZE - 1)))
462 continue;
463 if (uCounterReg == 0)
464 break;
465 cLeftPage = 0;
466 }
467 }
468
469 /*
470 * Fallback - slow processing till the end of the current page.
471 * In the cross page boundary case we will end up here with cLeftPage
472 * as 0, in which case we execute a single round.
473 */
474 do
475 {
476 OP_TYPE uTmpValue;
477 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
478 if (rcStrict != VINF_SUCCESS)
479 return rcStrict;
480 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
481
482 pCtx->ADDR_rDI = uAddrReg += cbIncr;
483 pCtx->ADDR_rCX = --uCounterReg;
484 pCtx->eflags.u = uEFlags;
485 cLeftPage--;
486 } while ( (int32_t)cLeftPage > 0
487 && (uEFlags & X86_EFL_ZF));
488 } while ( uCounterReg != 0
489 && (uEFlags & X86_EFL_ZF));
490
491 /*
492 * Done.
493 */
494 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Implements 'REPNE SCAS'.
501 */
502IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
503{
504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
505
506 /*
507 * Setup.
508 */
509 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
510 if (uCounterReg == 0)
511 {
512 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
513 return VINF_SUCCESS;
514 }
515
516 uint64_t uBaseAddr;
517 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
518 if (rcStrict != VINF_SUCCESS)
519 return rcStrict;
520
521 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
522 OP_TYPE const uValueReg = pCtx->OP_rAX;
523 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
524 uint32_t uEFlags = pCtx->eflags.u;
525
526 /*
527 * The loop.
528 */
529 do
530 {
531 /*
532 * Do segmentation and virtual page stuff.
533 */
534 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
535 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
536 if (cLeftPage > uCounterReg)
537 cLeftPage = uCounterReg;
538 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
539 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
540 && ( IS_64_BIT_CODE(pIemCpu)
541 || ( uAddrReg < pCtx->es.u32Limit
542 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
543 )
544 )
545 {
546 RTGCPHYS GCPhysMem;
547 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
548 if (rcStrict != VINF_SUCCESS)
549 return rcStrict;
550
551 /*
552 * If we can map the page without trouble, do block processing
553 * until the end of the current page.
554 */
555 PGMPAGEMAPLOCK PgLockMem;
556 OP_TYPE const *puMem;
557 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
558 if (rcStrict == VINF_SUCCESS)
559 {
560 /* Search till we find a mismatching item. */
561 OP_TYPE uTmpValue;
562 bool fQuit;
563 uint32_t i = 0;
564 do
565 {
566 uTmpValue = puMem[i++];
567 fQuit = uTmpValue == uValueReg;
568 } while (i < cLeftPage && !fQuit);
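/* The classic guest-side use of this path is strlen() via REPNE SCASB with
   AL = 0: with ES:(r)DI -> "abc\0" and rCX = ~0, the scan above latches fQuit
   at the NUL (i == 4, uTmpValue == 0, ZF set), and the guest recovers the
   length from how far rCX was decremented. */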
569
570 /* Update the regs. */
571 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
572 pCtx->ADDR_rCX = uCounterReg -= i;
573 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
574 pCtx->eflags.u = uEFlags;
575 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
576 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
577 if (fQuit)
578 break;
579
580
581 /* If unaligned, we drop thru and do the page crossing access
582 below. Otherwise, do the next page. */
583 if (!(uVirtAddr & (OP_SIZE - 1)))
584 continue;
585 if (uCounterReg == 0)
586 break;
587 cLeftPage = 0;
588 }
589 }
590
591 /*
592 * Fallback - slow processing till the end of the current page.
593 * In the cross page boundary case we will end up here with cLeftPage
594 * as 0, in which case we execute a single round.
595 */
596 do
597 {
598 OP_TYPE uTmpValue;
599 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
600 if (rcStrict != VINF_SUCCESS)
601 return rcStrict;
602 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
603 pCtx->ADDR_rDI = uAddrReg += cbIncr;
604 pCtx->ADDR_rCX = --uCounterReg;
605 pCtx->eflags.u = uEFlags;
606 cLeftPage--;
607 } while ( (int32_t)cLeftPage > 0
608 && !(uEFlags & X86_EFL_ZF));
609 } while ( uCounterReg != 0
610 && !(uEFlags & X86_EFL_ZF));
611
612 /*
613 * Done.
614 */
615 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
616 return VINF_SUCCESS;
617}
618
619
620
621
622/**
623 * Implements 'REP MOVS'.
624 */
625IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
626{
627 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
628
629 /*
630 * Setup.
631 */
632 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
633 if (uCounterReg == 0)
634 {
635 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
636 return VINF_SUCCESS;
637 }
638
639 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
640 uint64_t uSrcBase;
641 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uSrcBase);
642 if (rcStrict != VINF_SUCCESS)
643 return rcStrict;
644
645 uint64_t uDstBase;
646 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uDstBase);
647 if (rcStrict != VINF_SUCCESS)
648 return rcStrict;
649
650 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
651 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
652 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
653
654 /*
655 * Be careful with handle bypassing.
656 */
657 if (pIemCpu->fBypassHandlers)
658 {
659 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
660 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
661 }
662
663 /*
664 * If we're reading back what we write, we have to let the verification code
665 * know about it, to prevent a false positive.
666 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
667 */
668#ifdef IEM_VERIFICATION_MODE_FULL
669 if ( IEM_VERIFICATION_ENABLED(pIemCpu)
670 && (cbIncr > 0
671 ? uSrcAddrReg <= uDstAddrReg
672 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
673 : uDstAddrReg <= uSrcAddrReg
674 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
675 pIemCpu->fOverlappingMovs = true;
676#endif
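/* Worked example of the overlap test above with cbIncr = +2 (16-bit items,
   DF clear): uSrcAddrReg = 0x1000, uDstAddrReg = 0x1006 and uCounterReg = 8
   give a source range [0x1000,0x1010) that covers 0x1006, so fOverlappingMovs
   is set; with uDstAddrReg = 0x1010 the ranges merely abut and the flag stays
   clear. */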
677
678 /*
679 * The loop.
680 */
681 do
682 {
683 /*
684 * Do segmentation and virtual page stuff.
685 */
686 ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
687 ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
688 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
689 if (cLeftSrcPage > uCounterReg)
690 cLeftSrcPage = uCounterReg;
691 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
692 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
693
694 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
695 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
696 && ( IS_64_BIT_CODE(pIemCpu)
697 || ( uSrcAddrReg < pSrcHid->u32Limit
698 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
699 && uDstAddrReg < pCtx->es.u32Limit
700 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
701 )
702 )
703 {
704 RTGCPHYS GCPhysSrcMem;
705 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
706 if (rcStrict != VINF_SUCCESS)
707 return rcStrict;
708
709 RTGCPHYS GCPhysDstMem;
710 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
711 if (rcStrict != VINF_SUCCESS)
712 return rcStrict;
713
714 /*
715 * If we can map the page without trouble, do block processing
716 * until the end of the current page.
717 */
718 PGMPAGEMAPLOCK PgLockDstMem;
719 OP_TYPE *puDstMem;
720 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
721 if (rcStrict == VINF_SUCCESS)
722 {
723 PGMPAGEMAPLOCK PgLockSrcMem;
724 OP_TYPE const *puSrcMem;
725 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
726 if (rcStrict == VINF_SUCCESS)
727 {
728 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
729 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
730
731 /* Perform the operation exactly (don't use memcpy to avoid
732 having to consider how its implementation would affect
733 any overlapping source and destination area). */
734 OP_TYPE const *puSrcCur = puSrcMem;
735 OP_TYPE *puDstCur = puDstMem;
736 uint32_t cTodo = cLeftPage;
737 while (cTodo-- > 0)
738 *puDstCur++ = *puSrcCur++;
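/* The item-by-item copy is what makes overlap work: e.g. a forward REP MOVSB
   with rSI = 0x2000, rDI = 0x2001, rCX = 4 must replicate the byte at 0x2000
   into 0x2001..0x2004, each store feeding the next load - a pattern guests
   rely on and which memcpy() would be free to break. */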
739
740 /* Update the registers. */
741 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
742 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
743 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
744
745 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
746 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
747 continue;
748 }
749 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
750 }
751 }
752
753 /*
754 * Fallback - slow processing till the end of the current page.
755 * In the cross page boundary case we will end up here with cLeftPage
756 * as 0, in which case we execute a single round.
757 */
758 do
759 {
760 OP_TYPE uValue;
761 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
762 if (rcStrict != VINF_SUCCESS)
763 return rcStrict;
764 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
765 if (rcStrict != VINF_SUCCESS)
766 return rcStrict;
767
768 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
769 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
770 pCtx->ADDR_rCX = --uCounterReg;
771 cLeftPage--;
772 } while ((int32_t)cLeftPage > 0);
773 } while (uCounterReg != 0);
774
775 /*
776 * Done.
777 */
778 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
779 return VINF_SUCCESS;
780}
781
782
783/**
784 * Implements 'REP STOS'.
785 */
786IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
787{
788 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
789
790 /*
791 * Setup.
792 */
793 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
794 if (uCounterReg == 0)
795 {
796 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
797 return VINF_SUCCESS;
798 }
799
800 uint64_t uBaseAddr;
801 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
802 if (rcStrict != VINF_SUCCESS)
803 return rcStrict;
804
805 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
806 OP_TYPE const uValue = pCtx->OP_rAX;
807 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
808
809 /*
810 * Be careful with handle bypassing.
811 */
812 /** @todo Permit doing a page if correctly aligned. */
813 if (pIemCpu->fBypassHandlers)
814 {
815 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
816 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
817 }
818
819 /*
820 * The loop.
821 */
822 do
823 {
824 /*
825 * Do segmentation and virtual page stuff.
826 */
827 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
828 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
829 if (cLeftPage > uCounterReg)
830 cLeftPage = uCounterReg;
831 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
832 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
833 && ( IS_64_BIT_CODE(pIemCpu)
834 || ( uAddrReg < pCtx->es.u32Limit
835 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
836 )
837 )
838 {
839 RTGCPHYS GCPhysMem;
840 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
841 if (rcStrict != VINF_SUCCESS)
842 return rcStrict;
843
844 /*
845 * If we can map the page without trouble, do block processing
846 * until the end of the current page.
847 */
848 PGMPAGEMAPLOCK PgLockMem;
849 OP_TYPE *puMem;
850 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
851 if (rcStrict == VINF_SUCCESS)
852 {
853 /* Update the regs first so we can loop on cLeftPage. */
854 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
855 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
856
857 /* Do the memsetting. */
858#if OP_SIZE == 8
859 memset(puMem, uValue, cLeftPage);
860/*#elif OP_SIZE == 32
861 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
862#else
863 while (cLeftPage-- > 0)
864 *puMem++ = uValue;
865#endif
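/* memset() is only usable for the byte variant because it replicates a single
   byte: e.g. a REP STOSW fill with AX = 0xAB41 cannot be expressed as
   memset(), hence the explicit store loop (or a 32-bit filler like the
   commented-out ASMMemFill32) for the wider operand sizes. */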
866
867 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
868
869 /* If unaligned, we drop thru and do the page crossing access
870 below. Otherwise, do the next page. */
871 if (!(uVirtAddr & (OP_SIZE - 1)))
872 continue;
873 if (uCounterReg == 0)
874 break;
875 cLeftPage = 0;
876 }
877 }
878
879 /*
880 * Fallback - slow processing till the end of the current page.
881 * In the cross page boundary case we will end up here with cLeftPage
882 * as 0, in which case we execute a single round.
883 */
884 do
885 {
886 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
887 if (rcStrict != VINF_SUCCESS)
888 return rcStrict;
889 pCtx->ADDR_rDI = uAddrReg += cbIncr;
890 pCtx->ADDR_rCX = --uCounterReg;
891 cLeftPage--;
892 } while ((int32_t)cLeftPage > 0);
893 } while (uCounterReg != 0);
894
895 /*
896 * Done.
897 */
898 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
899 return VINF_SUCCESS;
900}
901
902
903/**
904 * Implements 'REP LODS'.
905 */
906IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
907{
908 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
909
910 /*
911 * Setup.
912 */
913 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
914 if (uCounterReg == 0)
915 {
916 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
917 return VINF_SUCCESS;
918 }
919
920 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
921 uint64_t uBaseAddr;
922 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uBaseAddr);
923 if (rcStrict != VINF_SUCCESS)
924 return rcStrict;
925
926 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
927 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
928
929 /*
930 * The loop.
931 */
932 do
933 {
934 /*
935 * Do segmentation and virtual page stuff.
936 */
937 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
938 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
939 if (cLeftPage > uCounterReg)
940 cLeftPage = uCounterReg;
941 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
942 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
943 && ( IS_64_BIT_CODE(pIemCpu)
944 || ( uAddrReg < pSrcHid->u32Limit
945 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
946 )
947 )
948 {
949 RTGCPHYS GCPhysMem;
950 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
951 if (rcStrict != VINF_SUCCESS)
952 return rcStrict;
953
954 /*
955 * If we can map the page without trouble, we can get away with
956 * just reading the last value on the page.
957 */
958 PGMPAGEMAPLOCK PgLockMem;
959 OP_TYPE const *puMem;
960 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
961 if (rcStrict == VINF_SUCCESS)
962 {
963 /* Only get the last item; the rest isn't observable in direct access mode. */
964#if OP_SIZE == 32
965 pCtx->rax = puMem[cLeftPage - 1];
966#else
967 pCtx->OP_rAX = puMem[cLeftPage - 1];
968#endif
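/* Two details are folded together here. Architecturally only the final load
   of a REP LODS is observable in rAX, so reading the last item suffices in
   direct access mode. And the OP_SIZE == 32 case assigns pCtx->rax rather
   than eax to mirror the x86-64 rule that a 32-bit register write
   zero-extends into the upper half of the 64-bit register. */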
969 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
970 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
971 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
972
973 /* If unaligned, we drop thru and do the page crossing access
974 below. Otherwise, do the next page. */
975 if (!(uVirtAddr & (OP_SIZE - 1)))
976 continue;
977 if (uCounterReg == 0)
978 break;
979 cLeftPage = 0;
980 }
981 }
982
983 /*
984 * Fallback - slow processing till the end of the current page.
985 * In the cross page boundary case we will end up here with cLeftPage
986 * as 0, in which case we execute a single round.
987 */
988 do
989 {
990 OP_TYPE uTmpValue;
991 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
992 if (rcStrict != VINF_SUCCESS)
993 return rcStrict;
994#if OP_SIZE == 32
995 pCtx->rax = uTmpValue;
996#else
997 pCtx->OP_rAX = uTmpValue;
998#endif
999 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1000 pCtx->ADDR_rCX = --uCounterReg;
1001 cLeftPage--;
1002 } while ((int32_t)cLeftPage > 0);
1003 if (rcStrict != VINF_SUCCESS)
1004 break;
1005 } while (uCounterReg != 0);
1006
1007 /*
1008 * Done.
1009 */
1010 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1011 return VINF_SUCCESS;
1012}
1013
1014
1015#if OP_SIZE != 64
1016
1017/**
1018 * Implements 'INS' (no rep)
1019 */
1020IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1021{
1022 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1023 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1024 VBOXSTRICTRC rcStrict;
1025
1026 /*
1027 * Be careful with handle bypassing.
1028 */
1029 if (pIemCpu->fBypassHandlers)
1030 {
1031 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1032 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1033 }
1034
1035 /*
1036 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1037 * segmentation and finally any #PF due to virtual address translation.
1038 * ASSUMES nothing is read from the I/O port before traps are taken.
1039 */
1040 if (!fIoChecked)
1041 {
1042 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1043 if (rcStrict != VINF_SUCCESS)
1044 return rcStrict;
1045 }
1046
1047 OP_TYPE *puMem;
1048 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1049 if (rcStrict != VINF_SUCCESS)
1050 return rcStrict;
1051
1052 uint32_t u32Value = 0;
1053 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1054 rcStrict = IOMIOPortRead(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, &u32Value, OP_SIZE / 8);
1055 else
1056 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1057 if (IOM_SUCCESS(rcStrict))
1058 {
1059 *puMem = (OP_TYPE)u32Value;
1060 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1061 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1062 {
1063 if (!pCtx->eflags.Bits.u1DF)
1064 pCtx->ADDR_rDI += OP_SIZE / 8;
1065 else
1066 pCtx->ADDR_rDI -= OP_SIZE / 8;
1067 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1068 }
1069 /* iemMemMap already checked permissions, so this may only be real errors
1070 or access handlers meddling. The access handler case is going to
1071 cause misbehavior if the instruction is re-interpreted or similar. So,
1072 we fail with an internal error here instead. */
1073 else
1074 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)), VERR_IEM_IPE_1);
1075 }
1076 return rcStrict;
1077}
1078
1079
1080/**
1081 * Implements 'REP INS'.
1082 */
1083IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1084{
1085 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1086 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1087 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1088
1089 /*
1090 * Setup.
1091 */
1092 uint16_t const u16Port = pCtx->dx;
1093 VBOXSTRICTRC rcStrict;
1094 if (!fIoChecked)
1095 {
1096 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1097 if (rcStrict != VINF_SUCCESS)
1098 return rcStrict;
1099 }
1100
1101 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1102 if (uCounterReg == 0)
1103 {
1104 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1105 return VINF_SUCCESS;
1106 }
1107
1108 uint64_t uBaseAddr;
1109 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
1110 if (rcStrict != VINF_SUCCESS)
1111 return rcStrict;
1112
1113 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1114 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1115
1116 /*
1117 * Be careful with handle bypassing.
1118 */
1119 if (pIemCpu->fBypassHandlers)
1120 {
1121 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1122 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1123 }
1124
1125 /*
1126 * The loop.
1127 */
1128 do
1129 {
1130 /*
1131 * Do segmentation and virtual page stuff.
1132 */
1133 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1134 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1135 if (cLeftPage > uCounterReg)
1136 cLeftPage = uCounterReg;
1137 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1138 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1139 && ( IS_64_BIT_CODE(pIemCpu)
1140 || ( uAddrReg < pCtx->es.u32Limit
1141 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
1142 )
1143 && !IEM_VERIFICATION_ENABLED(pIemCpu)
1144 )
1145 {
1146 RTGCPHYS GCPhysMem;
1147 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1148 if (rcStrict != VINF_SUCCESS)
1149 return rcStrict;
1150
1151 /*
1152 * If we can map the page without trouble, use the IOM
1153 * string I/O interface to do the work.
1154 */
1155 PGMPAGEMAPLOCK PgLockMem;
1156 OP_TYPE *puMem;
1157 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1158 if (rcStrict == VINF_SUCCESS)
1159 {
1160 uint32_t cTransfers = cLeftPage;
1161 rcStrict = IOMIOPortReadString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1162
1163 uint32_t cActualTransfers = cLeftPage - cTransfers;
1164 Assert(cActualTransfers <= cLeftPage);
1165 pCtx->ADDR_rDI = uAddrReg += cbIncr * cActualTransfers;
1166 pCtx->ADDR_rCX = uCounterReg -= cActualTransfers;
1167 puMem += cActualTransfers;
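/* Transfer accounting, as inferred from this call site: IOMIOPortReadString
   is handed cTransfers = cLeftPage and decrements it once per completed
   transfer, so on return it holds what is left. E.g. cLeftPage = 8 with 5
   items actually read leaves cTransfers = 3, giving
   cActualTransfers = 8 - 3 = 5, by which rDI and rCX are then advanced. */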
1168
1169 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1170
1171 if (rcStrict != VINF_SUCCESS)
1172 {
1173 if (IOM_SUCCESS(rcStrict))
1174 {
1175 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1176 if (uCounterReg == 0)
1177 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1178 }
1179 return rcStrict;
1180 }
1181
1182 /* If unaligned, we drop thru and do the page crossing access
1183 below. Otherwise, do the next page. */
1184 if (!(uVirtAddr & (OP_SIZE - 1)))
1185 continue;
1186 if (uCounterReg == 0)
1187 break;
1188 cLeftPage = 0;
1189 }
1190 }
1191
1192 /*
1193 * Fallback - slow processing till the end of the current page.
1194 * In the cross page boundary case we will end up here with cLeftPage
1195 * as 0, in which case we execute a single round.
1196 *
1197 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1198 * I/O port, otherwise it wouldn't really be restartable.
1199 */
1200 /** @todo investigate what the CPU actually does with \#PF/\#GP
1201 * during INS. */
1202 do
1203 {
1204 OP_TYPE *puMem;
1205 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1206 if (rcStrict != VINF_SUCCESS)
1207 return rcStrict;
1208
1209 uint32_t u32Value = 0;
1210 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1211 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1212 else
1213 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1214 if (!IOM_SUCCESS(rcStrict))
1215 return rcStrict;
1216
1217 *puMem = (OP_TYPE)u32Value;
1218 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1219 AssertLogRelMsgReturn(rcStrict2 == VINF_SUCCESS, ("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1220 VERR_IEM_IPE_1); /* See non-rep version. */
1221
1222 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1223 pCtx->ADDR_rCX = --uCounterReg;
1224
1225 cLeftPage--;
1226 if (rcStrict != VINF_SUCCESS)
1227 {
1228 if (uCounterReg == 0)
1229 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1230 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1231 return rcStrict;
1232 }
1233 } while ((int32_t)cLeftPage > 0);
1234 } while (uCounterReg != 0);
1235
1236 /*
1237 * Done.
1238 */
1239 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1240 return VINF_SUCCESS;
1241}
1242
1243
1244/**
1245 * Implements 'OUTS' (no rep)
1246 */
1247IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1248{
1249 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1250 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1251 VBOXSTRICTRC rcStrict;
1252
1253 /*
1254 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1255 * segmentation and finally any #PF due to virtual address translation.
1256 * ASSUMES nothing is read from the I/O port before traps are taken.
1257 */
1258 if (!fIoChecked)
1259 {
1260 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1261 if (rcStrict != VINF_SUCCESS)
1262 return rcStrict;
1263 }
1264
1265 OP_TYPE uValue;
1266 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1267 if (rcStrict == VINF_SUCCESS)
1268 {
1269 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1270 rcStrict = IOMIOPortWrite(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, uValue, OP_SIZE / 8);
1271 else
1272 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1273 if (IOM_SUCCESS(rcStrict))
1274 {
1275 if (!pCtx->eflags.Bits.u1DF)
1276 pCtx->ADDR_rSI += OP_SIZE / 8;
1277 else
1278 pCtx->ADDR_rSI -= OP_SIZE / 8;
1279 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1280 if (rcStrict != VINF_SUCCESS)
1281 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1282 }
1283 }
1284 return rcStrict;
1285}
1286
1287
1288/**
1289 * Implements 'REP OUTS'.
1290 */
1291IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1292{
1293 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1294 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1295 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1296
1297 /*
1298 * Setup.
1299 */
1300 uint16_t const u16Port = pCtx->dx;
1301 VBOXSTRICTRC rcStrict;
1302 if (!fIoChecked)
1303 {
1304 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1305 if (rcStrict != VINF_SUCCESS)
1306 return rcStrict;
1307 }
1308
1309 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1310 if (uCounterReg == 0)
1311 {
1312 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1313 return VINF_SUCCESS;
1314 }
1315
1316 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1317 uint64_t uBaseAddr;
1318 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg, &uBaseAddr);
1319 if (rcStrict != VINF_SUCCESS)
1320 return rcStrict;
1321
1322 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1323 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1324
1325 /*
1326 * The loop.
1327 */
1328 do
1329 {
1330 /*
1331 * Do segmentation and virtual page stuff.
1332 */
1333 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1334 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1335 if (cLeftPage > uCounterReg)
1336 cLeftPage = uCounterReg;
1337 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1338 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1339 && ( IS_64_BIT_CODE(pIemCpu)
1340 || ( uAddrReg < pHid->u32Limit
1341 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
1342 )
1343 && !IEM_VERIFICATION_ENABLED(pIemCpu)
1344 )
1345 {
1346 RTGCPHYS GCPhysMem;
1347 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1348 if (rcStrict != VINF_SUCCESS)
1349 return rcStrict;
1350
1351 /*
1352 * If we can map the page without trouble, we use the IOM
1353 * string I/O interface to do the job.
1354 */
1355 PGMPAGEMAPLOCK PgLockMem;
1356 OP_TYPE const *puMem;
1357 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1358 if (rcStrict == VINF_SUCCESS)
1359 {
1360 uint32_t cTransfers = cLeftPage;
1361 rcStrict = IOMIOPortWriteString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1362
1363 uint32_t cActualTransfers = cLeftPage - cTransfers;
1364 Assert(cActualTransfers <= cLeftPage);
1365 pCtx->ADDR_rSI = uAddrReg += cbIncr * cActualTransfers;
1366 pCtx->ADDR_rCX = uCounterReg -= cActualTransfers;
1367 puMem += cActualTransfers;
1368
1369 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1370
1371 if (rcStrict != VINF_SUCCESS)
1372 {
1373 if (IOM_SUCCESS(rcStrict))
1374 {
1375 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1376 if (uCounterReg == 0)
1377 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1378 }
1379 return rcStrict;
1380 }
1381
1382 /* If unaligned, we drop thru and do the page crossing access
1383 below. Otherwise, do the next page. */
1384 if (!(uVirtAddr & (OP_SIZE - 1)))
1385 continue;
1386 if (uCounterReg == 0)
1387 break;
1388 cLeftPage = 0;
1389 }
1390 }
1391
1392 /*
1393 * Fallback - slow processing till the end of the current page.
1394 * In the cross page boundary case we will end up here with cLeftPage
1395 * as 0, in which case we execute a single round.
1396 *
1397 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1398 * I/O port, otherwise it wouldn't really be restartable.
1399 */
1400 /** @todo investigate what the CPU actually does with \#PF/\#GP
1401 * during OUTS. */
1402 do
1403 {
1404 OP_TYPE uValue;
1405 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1406 if (rcStrict != VINF_SUCCESS)
1407 return rcStrict;
1408
1409 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1410 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1411 else
1412 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1413 if (IOM_SUCCESS(rcStrict))
1414 {
1415 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1416 pCtx->ADDR_rCX = --uCounterReg;
1417 cLeftPage--;
1418 }
1419 if (rcStrict != VINF_SUCCESS)
1420 {
1421 if (IOM_SUCCESS(rcStrict))
1422 {
1423 if (uCounterReg == 0)
1424 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1425 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1426 }
1427 return rcStrict;
1428 }
1429 } while ((int32_t)cLeftPage > 0);
1430 } while (uCounterReg != 0);
1431
1432 /*
1433 * Done.
1434 */
1435 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1436 return VINF_SUCCESS;
1437}
1438
1439#endif /* OP_SIZE != 64 */
1440
1441
1442#undef OP_rAX
1443#undef OP_SIZE
1444#undef ADDR_SIZE
1445#undef ADDR_rDI
1446#undef ADDR_rSI
1447#undef ADDR_rCX
1448#undef ADDR_rIP
1449#undef ADDR2_TYPE
1450#undef ADDR_TYPE
1452#undef IS_64_BIT_CODE