VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@ 60888

Last change on this file since 60888 was 60888, checked in by vboxsync, 9 years ago

IEM: Must not forget to get updated hidden ES selector values for string instructions, as in raw-mode these may be totally out of whack. Adjusted string instruction yielding for verification mode. Fixed nested IEMExecOne calls during verification.

1/* $Id: IEMAllCImplStrInstr.cpp.h 60888 2016-05-09 11:58:46Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
5
6/*
7 * Copyright (C) 2011-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50# define IS_64_BIT_CODE(a_pIemCpu) (true)
51#else
52# error "Bad ADDR_SIZE."
53#endif
54#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
55
56#if ADDR_SIZE == 64 || OP_SIZE == 64
57# define IS_64_BIT_CODE(a_pIemCpu) (true)
58#elif ADDR_SIZE == 32
59# define IS_64_BIT_CODE(a_pIemCpu) ((a_pIemCpu)->enmCpuMode == IEMMODE_64BIT)
60#else
61# define IS_64_BIT_CODE(a_pIemCpu) (false)
62#endif
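/*
 * Illustrative sketch (assumed usage; the exact include site is not shown in
 * this file): this header is a template which the includer is expected to
 * pull in several times with OP_SIZE and ADDR_SIZE predefined, roughly:
 *
 *     #define OP_SIZE   16
 *     #define ADDR_SIZE 32
 *     #include "IEMAllCImplStrInstr.cpp.h"
 *
 * With those values OP_TYPE expands to uint16_t, OP_rAX to ax, ADDR_TYPE to
 * uint32_t and ADDR_rCX to ecx, yielding instantiations such as
 * iemCImpl_rep_movs_op16_addr32. All of these macros are #undef'ed again at
 * the bottom of the file so that the next inclusion starts clean.
 */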
63
64/** @def IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
65 * Used in the outer (page-by-page) loop to check for reasons for returning
66 * before completing the instruction. In raw-mode we temporarily enable
67 * interrupts to let the host interrupt us. We cannot let big string operations
68 * hog the CPU, especially not in raw-mode.
69 */
70#ifdef IN_RC
71# define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_pIemCpu, a_fEflags) \
72 do { \
73 if (RT_LIKELY( ( !VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \
74 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \
75 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK) ) \
76 || IEM_VERIFICATION_ENABLED(a_pIemCpu) )) \
77 { \
78 RTCCUINTREG fSavedFlags = ASMGetFlags(); \
79 if (!(fSavedFlags & X86_EFL_IF)) \
80 { \
81 ASMSetFlags(fSavedFlags | X86_EFL_IF); \
82 ASMNopPause(); \
83 ASMSetFlags(fSavedFlags); \
84 } \
85 } \
86 else \
87 { \
88 LogFlow(("%s: Leaving early (outer)! ffcpu=%#x ffvm=%#x\n", \
89 __FUNCTION__, (a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
90 return VINF_SUCCESS; \
91 } \
92 } while (0)
93#else
94# define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_pIemCpu, a_fEflags) \
95 do { \
96 if (RT_LIKELY( ( !VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \
97 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \
98 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK) ) \
99 || IEM_VERIFICATION_ENABLED(a_pIemCpu) )) \
100 { /* probable */ } \
101 else \
102 { \
103 LogFlow(("%s: Leaving early (outer)! ffcpu=%#x ffvm=%#x\n", \
104 __FUNCTION__, (a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
105 return VINF_SUCCESS; \
106 } \
107 } while (0)
108#endif
109
110/** @def IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
111 * This is used in some of the inner loops to make sure we're responding quickly
112 * to outside requests. For I/O instructions this also makes absolutely sure we
113 * don't miss out on important stuff that happened while processing a word.
114 */
115#define IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_pIemCpu, a_fExitExpr) \
116 do { \
117 if (RT_LIKELY( ( !VMCPU_FF_IS_PENDING(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \
118 && !VM_FF_IS_PENDING(a_pVM, VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK)) \
119 || (a_fExitExpr) \
120 || IEM_VERIFICATION_ENABLED(a_pIemCpu) )) \
121 { /* very likely */ } \
122 else \
123 { \
124 LogFlow(("%s: Leaving early (inner)! ffcpu=%#x ffvm=%#x\n", \
125 __FUNCTION__, (a_pVCpu)->fLocalForcedActions, (a_pVM)->fGlobalForcedActions)); \
126 return VINF_SUCCESS; \
127 } \
128 } while (0)
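/* Descriptive note: the per-item fallback loops below invoke this with
   a_fExitExpr set to an expression like (uCounterReg == 0), so a pending
   high-priority force flag only triggers the early VINF_SUCCESS return while
   the exit expression is still false, i.e. while there is still work left
   that can be resumed later. */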
129
130
131/**
132 * Implements 'REPE CMPS'.
133 */
134IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
135{
136 PVM pVM = IEMCPU_TO_VM(pIemCpu);
137 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
138 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
139
140 /*
141 * Setup.
142 */
143 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
144 if (uCounterReg == 0)
145 {
146 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
147 return VINF_SUCCESS;
148 }
149
150 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
151 uint64_t uSrc1Base;
152 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
153 if (rcStrict != VINF_SUCCESS)
154 return rcStrict;
155
156 uint64_t uSrc2Base;
157 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uSrc2Base);
158 if (rcStrict != VINF_SUCCESS)
159 return rcStrict;
160
161 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
162 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
163 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
164 uint32_t uEFlags = pCtx->eflags.u;
165
166 /*
167 * The loop.
168 */
169 for (;;)
170 {
171 /*
172 * Do segmentation and virtual page stuff.
173 */
174 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
175 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
176 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
177 if (cLeftSrc1Page > uCounterReg)
178 cLeftSrc1Page = uCounterReg;
179 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
180 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
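 /* Worked example (illustrative numbers): with 4 KiB pages, OP_SIZE == 32 and
    a source offset of 0xff8 into its page, (PAGE_SIZE - 0xff8) / 4 leaves two
    items before the boundary, so at most two dwords are handled by the block
    path below before the addresses are re-translated. */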
181
182 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
183 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
184 && ( IS_64_BIT_CODE(pIemCpu)
185 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
186 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
187 && uSrc2AddrReg < pCtx->es.u32Limit
188 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
189 )
190 )
191 {
192 RTGCPHYS GCPhysSrc1Mem;
193 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
194 if (rcStrict != VINF_SUCCESS)
195 return rcStrict;
196
197 RTGCPHYS GCPhysSrc2Mem;
198 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
199 if (rcStrict != VINF_SUCCESS)
200 return rcStrict;
201
202 /*
203 * If we can map the page without trouble, do a block processing
204 * until the end of the current page.
205 */
206 PGMPAGEMAPLOCK PgLockSrc2Mem;
207 OP_TYPE const *puSrc2Mem;
208 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
209 if (rcStrict == VINF_SUCCESS)
210 {
211 PGMPAGEMAPLOCK PgLockSrc1Mem;
212 OP_TYPE const *puSrc1Mem;
213 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
214 if (rcStrict == VINF_SUCCESS)
215 {
216 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
217 {
218 /* All matches, only compare the last item to get the right eflags. */
219 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
220 uSrc1AddrReg += cLeftPage * cbIncr;
221 uSrc2AddrReg += cLeftPage * cbIncr;
222 uCounterReg -= cLeftPage;
223 }
224 else
225 {
226 /* Some mismatch, compare each item (and keep volatile
227 memory in mind). */
228 uint32_t off = 0;
229 do
230 {
231 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
232 off++;
233 } while ( off < cLeftPage
234 && (uEFlags & X86_EFL_ZF));
235 uSrc1AddrReg += cbIncr * off;
236 uSrc2AddrReg += cbIncr * off;
237 uCounterReg -= off;
238 }
239
240 /* Update the registers before looping. */
241 pCtx->ADDR_rCX = uCounterReg;
242 pCtx->ADDR_rSI = uSrc1AddrReg;
243 pCtx->ADDR_rDI = uSrc2AddrReg;
244 pCtx->eflags.u = uEFlags;
245
246 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
247 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
248 if ( uCounterReg == 0
249 || !(uEFlags & X86_EFL_ZF))
250 break;
251 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, uEFlags);
252 continue;
253 }
254 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
255 }
256 }
257
258 /*
259 * Fallback - slow processing till the end of the current page.
260 * In the cross-page boundary case we will end up here with cLeftPage
261 * as 0, in which case we execute the loop once.
262 */
263 do
264 {
265 OP_TYPE uValue1;
266 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
267 if (rcStrict != VINF_SUCCESS)
268 return rcStrict;
269 OP_TYPE uValue2;
270 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
271 if (rcStrict != VINF_SUCCESS)
272 return rcStrict;
273 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
274
275 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
276 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
277 pCtx->ADDR_rCX = --uCounterReg;
278 pCtx->eflags.u = uEFlags;
279 cLeftPage--;
280 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
281 } while ( (int32_t)cLeftPage > 0
282 && (uEFlags & X86_EFL_ZF));
283
284 /*
285 * Next page? Must check for interrupts and stuff here.
286 */
287 if ( uCounterReg == 0
288 || !(uEFlags & X86_EFL_ZF))
289 break;
290 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, uEFlags);
291 }
292
293 /*
294 * Done.
295 */
296 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
297 return VINF_SUCCESS;
298}
299
300
301/**
302 * Implements 'REPNE CMPS'.
303 */
304IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
305{
306 PVM pVM = IEMCPU_TO_VM(pIemCpu);
307 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
308 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
309
310 /*
311 * Setup.
312 */
313 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
314 if (uCounterReg == 0)
315 {
316 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
317 return VINF_SUCCESS;
318 }
319
320 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
321 uint64_t uSrc1Base;
322 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
323 if (rcStrict != VINF_SUCCESS)
324 return rcStrict;
325
326 uint64_t uSrc2Base;
327 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uSrc2Base);
328 if (rcStrict != VINF_SUCCESS)
329 return rcStrict;
330
331 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
332 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
333 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
334 uint32_t uEFlags = pCtx->eflags.u;
335
336 /*
337 * The loop.
338 */
339 for (;;)
340 {
341 /*
342 * Do segmentation and virtual page stuff.
343 */
344 ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
345 ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
346 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
347 if (cLeftSrc1Page > uCounterReg)
348 cLeftSrc1Page = uCounterReg;
349 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
350 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
351
352 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
353 && cbIncr > 0 /** @todo Optimize reverse direction string ops. */
354 && ( IS_64_BIT_CODE(pIemCpu)
355 || ( uSrc1AddrReg < pSrc1Hid->u32Limit
356 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
357 && uSrc2AddrReg < pCtx->es.u32Limit
358 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
359 )
360 )
361 {
362 RTGCPHYS GCPhysSrc1Mem;
363 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
364 if (rcStrict != VINF_SUCCESS)
365 return rcStrict;
366
367 RTGCPHYS GCPhysSrc2Mem;
368 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
369 if (rcStrict != VINF_SUCCESS)
370 return rcStrict;
371
372 /*
373 * If we can map the page without trouble, do a block processing
374 * until the end of the current page.
375 */
376 OP_TYPE const *puSrc2Mem;
377 PGMPAGEMAPLOCK PgLockSrc2Mem;
378 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
379 if (rcStrict == VINF_SUCCESS)
380 {
381 OP_TYPE const *puSrc1Mem;
382 PGMPAGEMAPLOCK PgLockSrc1Mem;
383 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
384 if (rcStrict == VINF_SUCCESS)
385 {
386 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
387 {
388 /* Assume no item matches; only compare the last item to get the right eflags. */
389 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
390 uSrc1AddrReg += cLeftPage * cbIncr;
391 uSrc2AddrReg += cLeftPage * cbIncr;
392 uCounterReg -= cLeftPage;
393 }
394 else
395 {
396 /* A match exists in the block, so compare item by item until ZF is set
397 (and keep volatile memory in mind). */
398 uint32_t off = 0;
399 do
400 {
401 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
402 off++;
403 } while ( off < cLeftPage
404 && !(uEFlags & X86_EFL_ZF));
405 uSrc1AddrReg += cbIncr * off;
406 uSrc2AddrReg += cbIncr * off;
407 uCounterReg -= off;
408 }
409
410 /* Update the registers before looping. */
411 pCtx->ADDR_rCX = uCounterReg;
412 pCtx->ADDR_rSI = uSrc1AddrReg;
413 pCtx->ADDR_rDI = uSrc2AddrReg;
414 pCtx->eflags.u = uEFlags;
415
416 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
417 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
418 if ( uCounterReg == 0
419 || (uEFlags & X86_EFL_ZF))
420 break;
421 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, uEFlags);
422 continue;
423 }
424 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
425 }
426 }
427
428 /*
429 * Fallback - slow processing till the end of the current page.
430 * In the cross-page boundary case we will end up here with cLeftPage
431 * as 0, in which case we execute the loop once.
432 */
433 do
434 {
435 OP_TYPE uValue1;
436 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
437 if (rcStrict != VINF_SUCCESS)
438 return rcStrict;
439 OP_TYPE uValue2;
440 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
441 if (rcStrict != VINF_SUCCESS)
442 return rcStrict;
443 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
444
445 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
446 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
447 pCtx->ADDR_rCX = --uCounterReg;
448 pCtx->eflags.u = uEFlags;
449 cLeftPage--;
450 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
451 } while ( (int32_t)cLeftPage > 0
452 && !(uEFlags & X86_EFL_ZF));
453
454 /*
455 * Next page? Must check for interrupts and stuff here.
456 */
457 if ( uCounterReg == 0
458 || (uEFlags & X86_EFL_ZF))
459 break;
460 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, uEFlags);
461 }
462
463 /*
464 * Done.
465 */
466 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
467 return VINF_SUCCESS;
468}
469
470
471/**
472 * Implements 'REPE SCAS'.
473 */
474IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
475{
476 PVM pVM = IEMCPU_TO_VM(pIemCpu);
477 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
478 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
479
480 /*
481 * Setup.
482 */
483 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
484 if (uCounterReg == 0)
485 {
486 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
487 return VINF_SUCCESS;
488 }
489
490 uint64_t uBaseAddr;
491 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);
492 if (rcStrict != VINF_SUCCESS)
493 return rcStrict;
494
495 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
496 OP_TYPE const uValueReg = pCtx->OP_rAX;
497 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
498 uint32_t uEFlags = pCtx->eflags.u;
499
500 /*
501 * The loop.
502 */
503 for (;;)
504 {
505 /*
506 * Do segmentation and virtual page stuff.
507 */
508 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
509 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
510 if (cLeftPage > uCounterReg)
511 cLeftPage = uCounterReg;
512 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
513 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
514 && ( IS_64_BIT_CODE(pIemCpu)
515 || ( uAddrReg < pCtx->es.u32Limit
516 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
517 )
518 )
519 {
520 RTGCPHYS GCPhysMem;
521 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
522 if (rcStrict != VINF_SUCCESS)
523 return rcStrict;
524
525 /*
526 * If we can map the page without trouble, do a block processing
527 * until the end of the current page.
528 */
529 PGMPAGEMAPLOCK PgLockMem;
530 OP_TYPE const *puMem;
531 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
532 if (rcStrict == VINF_SUCCESS)
533 {
534 /* Search till we find a mismatching item. */
535 OP_TYPE uTmpValue;
536 bool fQuit;
537 uint32_t i = 0;
538 do
539 {
540 uTmpValue = puMem[i++];
541 fQuit = uTmpValue != uValueReg;
542 } while (i < cLeftPage && !fQuit);
543
544 /* Update the regs. */
545 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
546 pCtx->ADDR_rCX = uCounterReg -= i;
547 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
548 pCtx->eflags.u = uEFlags;
549 Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
550 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
551 if ( fQuit
552 || uCounterReg == 0)
553 break;
554
555 /* If unaligned, we drop thru and do the page crossing access
556 below. Otherwise, do the next page. */
557 if (!(uVirtAddr & (OP_SIZE - 1)))
558 {
559 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, uEFlags);
560 continue;
561 }
562 cLeftPage = 0;
563 }
564 }
565
566 /*
567 * Fallback - slow processing till the end of the current page.
568 * In the cross-page boundary case we will end up here with cLeftPage
569 * as 0, in which case we execute the loop once.
570 */
571 do
572 {
573 OP_TYPE uTmpValue;
574 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
575 if (rcStrict != VINF_SUCCESS)
576 return rcStrict;
577 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
578
579 pCtx->ADDR_rDI = uAddrReg += cbIncr;
580 pCtx->ADDR_rCX = --uCounterReg;
581 pCtx->eflags.u = uEFlags;
582 cLeftPage--;
583 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
584 } while ( (int32_t)cLeftPage > 0
585 && (uEFlags & X86_EFL_ZF));
586
587 /*
588 * Next page? Must check for interrupts and stuff here.
589 */
590 if ( uCounterReg == 0
591 || !(uEFlags & X86_EFL_ZF))
592 break;
593 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, uEFlags);
594 }
595
596 /*
597 * Done.
598 */
599 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
600 return VINF_SUCCESS;
601}
602
603
604/**
605 * Implements 'REPNE SCAS'.
606 */
607IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
608{
609 PVM pVM = IEMCPU_TO_VM(pIemCpu);
610 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
611 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
612
613 /*
614 * Setup.
615 */
616 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
617 if (uCounterReg == 0)
618 {
619 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
620 return VINF_SUCCESS;
621 }
622
623 uint64_t uBaseAddr;
624 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);
625 if (rcStrict != VINF_SUCCESS)
626 return rcStrict;
627
628 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
629 OP_TYPE const uValueReg = pCtx->OP_rAX;
630 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
631 uint32_t uEFlags = pCtx->eflags.u;
632
633 /*
634 * The loop.
635 */
636 for (;;)
637 {
638 /*
639 * Do segmentation and virtual page stuff.
640 */
641 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
642 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
643 if (cLeftPage > uCounterReg)
644 cLeftPage = uCounterReg;
645 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
646 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
647 && ( IS_64_BIT_CODE(pIemCpu)
648 || ( uAddrReg < pCtx->es.u32Limit
649 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
650 )
651 )
652 {
653 RTGCPHYS GCPhysMem;
654 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
655 if (rcStrict != VINF_SUCCESS)
656 return rcStrict;
657
658 /*
659 * If we can map the page without trouble, do a block processing
660 * until the end of the current page.
661 */
662 PGMPAGEMAPLOCK PgLockMem;
663 OP_TYPE const *puMem;
664 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
665 if (rcStrict == VINF_SUCCESS)
666 {
667 /* Search till we find a mismatching item. */
668 OP_TYPE uTmpValue;
669 bool fQuit;
670 uint32_t i = 0;
671 do
672 {
673 uTmpValue = puMem[i++];
674 fQuit = uTmpValue == uValueReg;
675 } while (i < cLeftPage && !fQuit);
676
677 /* Update the regs. */
678 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
679 pCtx->ADDR_rCX = uCounterReg -= i;
680 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
681 pCtx->eflags.u = uEFlags;
682 Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
683 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
684 if ( fQuit
685 || uCounterReg == 0)
686 break;
687
688 /* If unaligned, we drop thru and do the page crossing access
689 below. Otherwise, do the next page. */
690 if (!(uVirtAddr & (OP_SIZE - 1)))
691 {
692 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, uEFlags);
693 continue;
694 }
695 cLeftPage = 0;
696 }
697 }
698
699 /*
700 * Fallback - slow processing till the end of the current page.
701 * In the cross-page boundary case we will end up here with cLeftPage
702 * as 0, in which case we execute the loop once.
703 */
704 do
705 {
706 OP_TYPE uTmpValue;
707 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
708 if (rcStrict != VINF_SUCCESS)
709 return rcStrict;
710 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
711 pCtx->ADDR_rDI = uAddrReg += cbIncr;
712 pCtx->ADDR_rCX = --uCounterReg;
713 pCtx->eflags.u = uEFlags;
714 cLeftPage--;
715 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
716 } while ( (int32_t)cLeftPage > 0
717 && !(uEFlags & X86_EFL_ZF));
718
719 /*
720 * Next page? Must check for interrupts and stuff here.
721 */
722 if ( uCounterReg == 0
723 || (uEFlags & X86_EFL_ZF))
724 break;
725 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, uEFlags);
726 }
727
728 /*
729 * Done.
730 */
731 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
732 return VINF_SUCCESS;
733}
734
735
736
737
738/**
739 * Implements 'REP MOVS'.
740 */
741IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
742{
743 PVM pVM = IEMCPU_TO_VM(pIemCpu);
744 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
745 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
746
747 /*
748 * Setup.
749 */
750 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
751 if (uCounterReg == 0)
752 {
753 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
754 return VINF_SUCCESS;
755 }
756
757 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
758 uint64_t uSrcBase;
759 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uSrcBase);
760 if (rcStrict != VINF_SUCCESS)
761 return rcStrict;
762
763 uint64_t uDstBase;
764 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uDstBase);
765 if (rcStrict != VINF_SUCCESS)
766 return rcStrict;
767
768 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
769 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
770 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
771
772 /*
773 * Be careful with handle bypassing.
774 */
775 if (pIemCpu->fBypassHandlers)
776 {
777 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
778 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
779 }
780
781 /*
782 * If we're reading back what we write, we have to let the verification code
783 * know about it, to prevent a false positive.
784 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
785 */
786#ifdef IEM_VERIFICATION_MODE_FULL
787 if ( IEM_VERIFICATION_ENABLED(pIemCpu)
788 && (cbIncr > 0
789 ? uSrcAddrReg <= uDstAddrReg
790 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
791 : uDstAddrReg <= uSrcAddrReg
792 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
793 pIemCpu->fOverlappingMovs = true;
794#endif
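 /* E.g. (illustrative): a forward byte copy (cbIncr = 1) with rSI = 0x1000,
    rDI = 0x1002 and rCX = 8 re-reads bytes it has just written
    (0x1002..0x1007); that is exactly the situation the check above reports
    to the verifier. */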
795
796 /*
797 * The loop.
798 */
799 for (;;)
800 {
801 /*
802 * Do segmentation and virtual page stuff.
803 */
804 ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
805 ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
806 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
807 if (cLeftSrcPage > uCounterReg)
808 cLeftSrcPage = uCounterReg;
809 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
810 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
811
812 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
813 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
814 && ( IS_64_BIT_CODE(pIemCpu)
815 || ( uSrcAddrReg < pSrcHid->u32Limit
816 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
817 && uDstAddrReg < pCtx->es.u32Limit
818 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
819 )
820 )
821 {
822 RTGCPHYS GCPhysSrcMem;
823 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
824 if (rcStrict != VINF_SUCCESS)
825 return rcStrict;
826
827 RTGCPHYS GCPhysDstMem;
828 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
829 if (rcStrict != VINF_SUCCESS)
830 return rcStrict;
831
832 /*
833 * If we can map the page without trouble, do a block processing
834 * until the end of the current page.
835 */
836 PGMPAGEMAPLOCK PgLockDstMem;
837 OP_TYPE *puDstMem;
838 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
839 if (rcStrict == VINF_SUCCESS)
840 {
841 PGMPAGEMAPLOCK PgLockSrcMem;
842 OP_TYPE const *puSrcMem;
843 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
844 if (rcStrict == VINF_SUCCESS)
845 {
846 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
847 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
848
849 /* Perform the operation exactly (don't use memcpy to avoid
850 having to consider how its implementation would affect
851 any overlapping source and destination area). */
852 OP_TYPE const *puSrcCur = puSrcMem;
853 OP_TYPE *puDstCur = puDstMem;
854 uint32_t cTodo = cLeftPage;
855 while (cTodo-- > 0)
856 *puDstCur++ = *puSrcCur++;
857
858 /* Update the registers. */
859 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
860 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
861 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
862
863 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
864 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
865
866 if (uCounterReg == 0)
867 break;
868 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, pCtx->eflags.u);
869 continue;
870 }
871 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
872 }
873 }
874
875 /*
876 * Fallback - slow processing till the end of the current page.
877 * In the cross-page boundary case we will end up here with cLeftPage
878 * as 0, in which case we execute the loop once.
879 */
880 do
881 {
882 OP_TYPE uValue;
883 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
884 if (rcStrict != VINF_SUCCESS)
885 return rcStrict;
886 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
887 if (rcStrict != VINF_SUCCESS)
888 return rcStrict;
889
890 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
891 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
892 pCtx->ADDR_rCX = --uCounterReg;
893 cLeftPage--;
894 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, uCounterReg == 0);
895 } while ((int32_t)cLeftPage > 0);
896
897 /*
898 * Next page. Must check for interrupts and stuff here.
899 */
900 if (uCounterReg == 0)
901 break;
902 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, pCtx->eflags.u);
903 }
904
905 /*
906 * Done.
907 */
908 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
909 return VINF_SUCCESS;
910}
911
912
913/**
914 * Implements 'REP STOS'.
915 */
916IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
917{
918 PVM pVM = IEMCPU_TO_VM(pIemCpu);
919 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
920 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
921
922 /*
923 * Setup.
924 */
925 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
926 if (uCounterReg == 0)
927 {
928 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
929 return VINF_SUCCESS;
930 }
931
932 uint64_t uBaseAddr;
933 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);
934 if (rcStrict != VINF_SUCCESS)
935 return rcStrict;
936
937 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
938 OP_TYPE const uValue = pCtx->OP_rAX;
939 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
940
941 /*
942 * Be careful with handle bypassing.
943 */
944 /** @todo Permit doing a page if correctly aligned. */
945 if (pIemCpu->fBypassHandlers)
946 {
947 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
948 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
949 }
950
951 /*
952 * The loop.
953 */
954 for (;;)
955 {
956 /*
957 * Do segmentation and virtual page stuff.
958 */
959 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
960 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
961 if (cLeftPage > uCounterReg)
962 cLeftPage = uCounterReg;
963 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
964 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
965 && ( IS_64_BIT_CODE(pIemCpu)
966 || ( uAddrReg < pCtx->es.u32Limit
967 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
968 )
969 )
970 {
971 RTGCPHYS GCPhysMem;
972 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
973 if (rcStrict != VINF_SUCCESS)
974 return rcStrict;
975
976 /*
977 * If we can map the page without trouble, do a block processing
978 * until the end of the current page.
979 */
980 PGMPAGEMAPLOCK PgLockMem;
981 OP_TYPE *puMem;
982 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
983 if (rcStrict == VINF_SUCCESS)
984 {
985 /* Update the regs first so we can loop on cLeftPage. */
986 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
987 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
988
989 /* Do the memsetting. */
990#if OP_SIZE == 8
991 memset(puMem, uValue, cLeftPage);
992/*#elif OP_SIZE == 32
993 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
994#else
995 while (cLeftPage-- > 0)
996 *puMem++ = uValue;
997#endif
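 /* Descriptive note: memset() can only replicate a single byte, so it is used
    for the 8-bit operand size only; wider operand sizes take the explicit
    store loop above (the ASMMemFill32 variant is left commented out). */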
998
999 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1000
1001 if (uCounterReg == 0)
1002 break;
1003
1004 /* If unaligned, we drop thru and do the page crossing access
1005 below. Otherwise, do the next page. */
1006 if (!(uVirtAddr & (OP_SIZE - 1)))
1007 {
1008 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, pCtx->eflags.u);
1009 continue;
1010 }
1011 cLeftPage = 0;
1012 }
1013 }
1014
1015 /*
1016 * Fallback - slow processing till the end of the current page.
1017 * In the cross-page boundary case we will end up here with cLeftPage
1018 * as 0, in which case we execute the loop once.
1019 */
1020 do
1021 {
1022 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
1023 if (rcStrict != VINF_SUCCESS)
1024 return rcStrict;
1025 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1026 pCtx->ADDR_rCX = --uCounterReg;
1027 cLeftPage--;
1028 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
1029 } while ((int32_t)cLeftPage > 0);
1030
1031 /*
1032 * Next page. Must check for interrupts and stuff here.
1033 */
1034 if (uCounterReg == 0)
1035 break;
1036 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, pCtx->eflags.u);
1037 }
1038
1039 /*
1040 * Done.
1041 */
1042 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1043 return VINF_SUCCESS;
1044}
1045
1046
1047/**
1048 * Implements 'REP LODS'.
1049 */
1050IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
1051{
1052 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1053 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1054 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1055
1056 /*
1057 * Setup.
1058 */
1059 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1060 if (uCounterReg == 0)
1061 {
1062 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1063 return VINF_SUCCESS;
1064 }
1065
1066 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
1067 uint64_t uBaseAddr;
1068 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uBaseAddr);
1069 if (rcStrict != VINF_SUCCESS)
1070 return rcStrict;
1071
1072 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1073 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1074
1075 /*
1076 * The loop.
1077 */
1078 for (;;)
1079 {
1080 /*
1081 * Do segmentation and virtual page stuff.
1082 */
1083 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1084 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1085 if (cLeftPage > uCounterReg)
1086 cLeftPage = uCounterReg;
1087 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1088 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1089 && ( IS_64_BIT_CODE(pIemCpu)
1090 || ( uAddrReg < pSrcHid->u32Limit
1091 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
1092 )
1093 )
1094 {
1095 RTGCPHYS GCPhysMem;
1096 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1097 if (rcStrict != VINF_SUCCESS)
1098 return rcStrict;
1099
1100 /*
1101 * If we can map the page without trouble, we can get away with
1102 * just reading the last value on the page.
1103 */
1104 PGMPAGEMAPLOCK PgLockMem;
1105 OP_TYPE const *puMem;
1106 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1107 if (rcStrict == VINF_SUCCESS)
1108 {
1109 /* Only get the last item, the rest doesn't matter in direct access mode. */
1110#if OP_SIZE == 32
1111 pCtx->rax = puMem[cLeftPage - 1];
1112#else
1113 pCtx->OP_rAX = puMem[cLeftPage - 1];
1114#endif
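 /* Descriptive note: for the 32-bit operand size the code writes the whole
    rax field, because a 32-bit register write zero-extends into the upper
    half on x86-64; assigning only the eax sub-field would not model that. */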
1115 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
1116 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
1117 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1118
1119 if (uCounterReg == 0)
1120 break;
1121
1122 /* If unaligned, we drop thru and do the page crossing access
1123 below. Otherwise, do the next page. */
1124 if (!(uVirtAddr & (OP_SIZE - 1)))
1125 {
1126 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, pCtx->eflags.u);
1127 continue;
1128 }
1129 cLeftPage = 0;
1130 }
1131 }
1132
1133 /*
1134 * Fallback - slow processing till the end of the current page.
1135 * In the cross-page boundary case we will end up here with cLeftPage
1136 * as 0, in which case we execute the loop once.
1137 */
1138 do
1139 {
1140 OP_TYPE uTmpValue;
1141 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
1142 if (rcStrict != VINF_SUCCESS)
1143 return rcStrict;
1144#if OP_SIZE == 32
1145 pCtx->rax = uTmpValue;
1146#else
1147 pCtx->OP_rAX = uTmpValue;
1148#endif
1149 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1150 pCtx->ADDR_rCX = --uCounterReg;
1151 cLeftPage--;
1152 /* Skipping IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN here, assuming rcStrict does the job. */
1153 } while ((int32_t)cLeftPage > 0);
1154
1155 if (rcStrict != VINF_SUCCESS)
1156 break;
1157
1158 /*
1159 * Next page. Must check for interrupts and stuff here.
1160 */
1161 if (uCounterReg == 0)
1162 break;
1163 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, pCtx->eflags.u);
1164 }
1165
1166 /*
1167 * Done.
1168 */
1169 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1170 return VINF_SUCCESS;
1171}
1172
1173
1174#if OP_SIZE != 64
1175
1176# if !defined(IN_RING3) && !defined(IEMCIMPL_INS_INLINES)
1177# define IEMCIMPL_INS_INLINES 1
1178
1179/**
1180 * Check if we should postpone committing an INS instruction to ring-3, or if we
1181 * should rather panic.
1182 *
1183 * @returns true if we should postpone it, false if it's better to panic.
1184 * @param rcStrictMem The status code returned by the memory write.
1185 */
1186DECLINLINE(bool) iemCImpl_ins_shouldPostponeCommitToRing3(VBOXSTRICTRC rcStrictMem)
1187{
1188 /*
1189 * The following requires executing the write in ring-3.
1190 * See PGMPhysWrite for status code explanations.
1191 */
1192 if ( rcStrictMem == VINF_IOM_R3_MMIO_WRITE
1193 || rcStrictMem == VINF_IOM_R3_MMIO_READ_WRITE
1194 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR
1195# ifdef IN_RC
1196 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
1197 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
1198 || rcStrictMem == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
1199 || rcStrictMem == VINF_CSAM_PENDING_ACTION
1200 || rcStrictMem == VINF_PATM_CHECK_PATCH_PAGE
1201# endif
1202 )
1203 return true;
1204
1205 /* For the other status codes, the pass-up handling should already have
1206 caught them. So, anything getting down here is a real problem worth
1207 meditating over. */
1208 return false;
1209}
1210
1211
1212/**
1213 * Merges an iemCImpl_ins_shouldPostponeCommitToRing3() status with the I/O port
1214 * status.
1215 *
1216 * @returns status code.
1217 * @param rcStrictPort The status returned by the I/O port read.
1218 * @param rcStrictMem The status code returned by the memory write.
1219 */
1220DECLINLINE(VBOXSTRICTRC) iemCImpl_ins_mergePostponedCommitStatuses(VBOXSTRICTRC rcStrictPort, VBOXSTRICTRC rcStrictMem)
1221{
1222 /* Turns out we don't need a lot of merging, since we'll be redoing the
1223 write anyway. (CSAM, PATM status codes, perhaps, but that's about it.) */
1224 return rcStrictPort == VINF_SUCCESS ? VINF_EM_RAW_TO_R3 : rcStrictPort;
1225}
1226
1227# endif /* !IN_RING3 || !IEMCIMPL_INS_INLINES */
1228
1229
1230/**
1231 * Implements 'INS' (no rep)
1232 */
1233IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1234{
1235 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1236 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1237 VBOXSTRICTRC rcStrict;
1238
1239 /*
1240 * Be careful with handle bypassing.
1241 */
1242 if (pIemCpu->fBypassHandlers)
1243 {
1244 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1245 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1246 }
1247
1248 /*
1249 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1250 * segmentation and finally any #PF due to virtual address translation.
1251 * ASSUMES nothing is read from the I/O port before traps are taken.
1252 */
1253 if (!fIoChecked)
1254 {
1255 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1256 if (rcStrict != VINF_SUCCESS)
1257 return rcStrict;
1258 }
1259
1260 OP_TYPE *puMem;
1261 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1262 if (rcStrict != VINF_SUCCESS)
1263 return rcStrict;
1264
1265 uint32_t u32Value = 0;
1266 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1267 rcStrict = IOMIOPortRead(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, &u32Value, OP_SIZE / 8);
1268 else
1269 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1270 if (IOM_SUCCESS(rcStrict))
1271 {
1272 *puMem = (OP_TYPE)u32Value;
1273 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1274 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1275 {
1276 if (!pCtx->eflags.Bits.u1DF)
1277 pCtx->ADDR_rDI += OP_SIZE / 8;
1278 else
1279 pCtx->ADDR_rDI -= OP_SIZE / 8;
1280 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1281 }
1282#ifndef IN_RING3
1283 /* iemMemMap already checked permissions, so this may only be real errors
1284 or access handlers meddling. In the access handler case, we must postpone
1285 the instruction committing to ring-3. */
1286 else if (iemCImpl_ins_shouldPostponeCommitToRing3(rcStrict2))
1287 {
1288 pIemCpu->PendingCommit.cbInstr = cbInstr;
1289 pIemCpu->PendingCommit.uValue = u32Value;
1290 pIemCpu->PendingCommit.enmFn = RT_CONCAT4(IEMCOMMIT_INS_OP,OP_SIZE,_ADDR,ADDR_SIZE);
1291 pIemCpu->cPendingCommit++;
1292 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
1293 Log(("%s: Postponing to ring-3; cbInstr=%#x u32Value=%#x rcStrict2=%Rrc rcStrict=%Rrc\n", __FUNCTION__,
1294 cbInstr, u32Value, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict)));
1295 rcStrict = iemCImpl_ins_mergePostponedCommitStatuses(rcStrict, rcStrict2);
1296 }
1297#endif
1298 else
1299 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)), RT_FAILURE_NP(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1300 }
1301 return rcStrict;
1302}
1303
1304
1305# ifdef IN_RING3
1306/**
1307 * Called in ring-3 when raw-mode or ring-0 was forced to return while
1308 * committing the instruction (hit access handler).
1309 */
1310IEM_CIMPL_DEF_0(RT_CONCAT4(iemR3CImpl_commit_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1311{
1312 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1313 VBOXSTRICTRC rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, pCtx->ADDR_rDI, (OP_TYPE)pIemCpu->PendingCommit.uValue);
1314 if (rcStrict == VINF_SUCCESS)
1315 {
1316 if (!pCtx->eflags.Bits.u1DF)
1317 pCtx->ADDR_rDI += OP_SIZE / 8;
1318 else
1319 pCtx->ADDR_rDI -= OP_SIZE / 8;
1320 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1321 }
1322 return rcStrict;
1323}
1324# endif /* IN_RING3 */
1325
1326
1327/**
1328 * Implements 'REP INS'.
1329 */
1330IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
1331{
1332 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1333 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1334 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1335
1336 /*
1337 * Setup.
1338 */
1339 uint16_t const u16Port = pCtx->dx;
1340 VBOXSTRICTRC rcStrict;
1341 if (!fIoChecked)
1342 {
1343 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1344 if (rcStrict != VINF_SUCCESS)
1345 return rcStrict;
1346 }
1347
1348 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1349 if (uCounterReg == 0)
1350 {
1351 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1352 return VINF_SUCCESS;
1353 }
1354
1355 uint64_t uBaseAddr;
1356 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, iemSRegUpdateHid(pIemCpu, &pCtx->es), X86_SREG_ES, &uBaseAddr);
1357 if (rcStrict != VINF_SUCCESS)
1358 return rcStrict;
1359
1360 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1361 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1362
1363 /*
1364 * Be careful with handle bypassing.
1365 */
1366 if (pIemCpu->fBypassHandlers)
1367 {
1368 Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
1369 return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
1370 }
1371
1372 /*
1373 * The loop.
1374 */
1375 for (;;)
1376 {
1377 /*
1378 * Do segmentation and virtual page stuff.
1379 */
1380 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1381 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1382 if (cLeftPage > uCounterReg)
1383 cLeftPage = uCounterReg;
1384 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1385 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1386 && ( IS_64_BIT_CODE(pIemCpu)
1387 || ( uAddrReg < pCtx->es.u32Limit
1388 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
1389 )
1390 && !IEM_VERIFICATION_ENABLED(pIemCpu)
1391 )
1392 {
1393 RTGCPHYS GCPhysMem;
1394 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1395 if (rcStrict != VINF_SUCCESS)
1396 return rcStrict;
1397
1398 /*
1399 * If we can map the page without trouble, use the IOM
1400 * string I/O interface to do the work.
1401 */
1402 PGMPAGEMAPLOCK PgLockMem;
1403 OP_TYPE *puMem;
1404 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1405 if (rcStrict == VINF_SUCCESS)
1406 {
1407 uint32_t cTransfers = cLeftPage;
1408 rcStrict = IOMIOPortReadString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1409
1410 uint32_t cActualTransfers = cLeftPage - cTransfers;
1411 Assert(cActualTransfers <= cLeftPage);
1412 pCtx->ADDR_rDI = uAddrReg += cbIncr * cActualTransfers;
1413 pCtx->ADDR_rCX = uCounterReg -= cActualTransfers;
1414 puMem += cActualTransfers;
1415
1416 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1417
1418 if (rcStrict != VINF_SUCCESS)
1419 {
1420 if (IOM_SUCCESS(rcStrict))
1421 {
1422 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1423 if (uCounterReg == 0)
1424 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1425 }
1426 return rcStrict;
1427 }
1428
1429 /* If unaligned, we drop thru and do the page crossing access
1430 below. Otherwise, do the next page. */
1431 if (uCounterReg == 0)
1432 break;
1433 if (!(uVirtAddr & (OP_SIZE - 1)))
1434 {
1435 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, pCtx->eflags.u);
1436 continue;
1437 }
1438 cLeftPage = 0;
1439 }
1440 }
1441
1442 /*
1443 * Fallback - slow processing till the end of the current page.
1444 * In the cross-page boundary case we will end up here with cLeftPage
1445 * as 0, in which case we execute the loop once.
1446 *
1447 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1448 * I/O port, otherwise it wouldn't really be restartable.
1449 */
1450 /** @todo investigate what the CPU actually does with \#PF/\#GP
1451 * during INS. */
1452 do
1453 {
1454 OP_TYPE *puMem;
1455 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1456 if (rcStrict != VINF_SUCCESS)
1457 return rcStrict;
1458
1459 uint32_t u32Value = 0;
1460 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1461 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
1462 else
1463 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1464 if (!IOM_SUCCESS(rcStrict))
1465 return rcStrict;
1466
1467 *puMem = (OP_TYPE)u32Value;
1468 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1469 if (rcStrict2 == VINF_SUCCESS)
1470 { /* likely */ }
1471#ifndef IN_RING3
1472 /* iemMemMap already checked permissions, so this may only be real errors
1473 or access handlers meddling. In the access handler case, we must postpone
1474 the instruction committing to ring-3. */
1475 else if (iemCImpl_ins_shouldPostponeCommitToRing3(rcStrict2))
1476 {
1477 pIemCpu->PendingCommit.cbInstr = cbInstr;
1478 pIemCpu->PendingCommit.uValue = u32Value;
1479 pIemCpu->PendingCommit.enmFn = RT_CONCAT4(IEMCOMMIT_REP_INS_OP,OP_SIZE,_ADDR,ADDR_SIZE);
1480 pIemCpu->cPendingCommit++;
1481 VMCPU_FF_SET(IEMCPU_TO_VMCPU(pIemCpu), VMCPU_FF_IEM);
1482 Log(("%s: Postponing to ring-3; cbInstr=%#x u32Value=%#x rcStrict2=%Rrc rcStrict=%Rrc\n", __FUNCTION__,
1483 cbInstr, u32Value, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict)));
1484 return iemCImpl_ins_mergePostponedCommitStatuses(rcStrict, rcStrict2);
1485 }
1486#endif
1487 else
1488 AssertLogRelMsgFailedReturn(("rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)),
1489 RT_FAILURE(rcStrict2) ? rcStrict2 : VERR_IEM_IPE_1);
1490
1491 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1492 pCtx->ADDR_rCX = --uCounterReg;
1493
1494 cLeftPage--;
1495 if (rcStrict != VINF_SUCCESS)
1496 {
1497 if (uCounterReg == 0)
1498 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1499 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1500 return rcStrict;
1501 }
1502 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, uCounterReg == 0);
1503 } while ((int32_t)cLeftPage > 0);
1504
1505
1506 /*
1507 * Next page. Must check for interrupts and stuff here.
1508 */
1509 if (uCounterReg == 0)
1510 break;
1511 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, pCtx->eflags.u);
1512 }
1513
1514 /*
1515 * Done.
1516 */
1517 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1518 return VINF_SUCCESS;
1519}
1520
1521# ifdef IN_RING3
1522/**
1523 * Called in ring-3 when raw-mode or ring-0 was forced to return while
1524 * committing the instruction (hit access handler).
1525 */
1526IEM_CIMPL_DEF_0(RT_CONCAT4(iemR3CImpl_commit_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1527{
1528 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1529 VBOXSTRICTRC rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, pCtx->ADDR_rDI, (OP_TYPE)pIemCpu->PendingCommit.uValue);
1530 if (rcStrict == VINF_SUCCESS)
1531 {
1532 if (!pCtx->eflags.Bits.u1DF)
1533 pCtx->ADDR_rDI += OP_SIZE / 8;
1534 else
1535 pCtx->ADDR_rDI -= OP_SIZE / 8;
1536 pCtx->ADDR_rCX -= 1;
1537 if (pCtx->ADDR_rCX == 0)
1538 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1539 }
1540 return rcStrict;
1541}
1542# endif /* IN_RING3 */
1543
1544
1545/**
1546 * Implements 'OUTS' (no rep)
1547 */
1548IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1549{
1550 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1551 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1552 VBOXSTRICTRC rcStrict;
1553
1554 /*
1555 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1556 * segmentation and finally any #PF due to virtual address translation.
1557 * ASSUMES nothing is read from the I/O port before traps are taken.
1558 */
1559 if (!fIoChecked)
1560 {
1561 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1562 if (rcStrict != VINF_SUCCESS)
1563 return rcStrict;
1564 }
1565
1566 OP_TYPE uValue;
1567 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1568 if (rcStrict == VINF_SUCCESS)
1569 {
1570 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1571 rcStrict = IOMIOPortWrite(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, uValue, OP_SIZE / 8);
1572 else
1573 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1574 if (IOM_SUCCESS(rcStrict))
1575 {
1576 if (!pCtx->eflags.Bits.u1DF)
1577 pCtx->ADDR_rSI += OP_SIZE / 8;
1578 else
1579 pCtx->ADDR_rSI -= OP_SIZE / 8;
1580 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1581 if (rcStrict != VINF_SUCCESS)
1582 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1583 }
1584 }
1585 return rcStrict;
1586}
1587
1588
1589/**
1590 * Implements 'REP OUTS'.
1591 */
1592IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
1593{
1594 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1595 PVMCPU pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
1596 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1597
1598 /*
1599 * Setup.
1600 */
1601 uint16_t const u16Port = pCtx->dx;
1602 VBOXSTRICTRC rcStrict;
1603 if (!fIoChecked)
1604 {
1605 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1606 if (rcStrict != VINF_SUCCESS)
1607 return rcStrict;
1608 }
1609
1610 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1611 if (uCounterReg == 0)
1612 {
1613 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1614 return VINF_SUCCESS;
1615 }
1616
1617 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1618 uint64_t uBaseAddr;
1619 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg, &uBaseAddr);
1620 if (rcStrict != VINF_SUCCESS)
1621 return rcStrict;
1622
1623 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1624 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1625
1626 /*
1627 * The loop.
1628 */
1629 for (;;)
1630 {
1631 /*
1632 * Do segmentation and virtual page stuff.
1633 */
1634 ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
1635 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1636 if (cLeftPage > uCounterReg)
1637 cLeftPage = uCounterReg;
1638 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1639 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1640 && ( IS_64_BIT_CODE(pIemCpu)
1641 || ( uAddrReg < pHid->u32Limit
1642 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
1643 )
1644 && !IEM_VERIFICATION_ENABLED(pIemCpu)
1645 )
1646 {
1647 RTGCPHYS GCPhysMem;
1648 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1649 if (rcStrict != VINF_SUCCESS)
1650 return rcStrict;
1651
1652 /*
1653 * If we can map the page without trouble, we use the IOM
1654 * string I/O interface to do the job.
1655 */
1656 PGMPAGEMAPLOCK PgLockMem;
1657 OP_TYPE const *puMem;
1658 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1659 if (rcStrict == VINF_SUCCESS)
1660 {
1661 uint32_t cTransfers = cLeftPage;
1662 rcStrict = IOMIOPortWriteString(pVM, pVCpu, u16Port, puMem, &cTransfers, OP_SIZE / 8);
1663
1664 uint32_t cActualTransfers = cLeftPage - cTransfers;
1665 Assert(cActualTransfers <= cLeftPage);
1666 pCtx->ADDR_rSI = uAddrReg += cbIncr * cActualTransfers;
1667 pCtx->ADDR_rCX = uCounterReg -= cActualTransfers;
1668 puMem += cActualTransfers;
1669
1670 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1671
1672 if (rcStrict != VINF_SUCCESS)
1673 {
1674 if (IOM_SUCCESS(rcStrict))
1675 {
1676 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1677 if (uCounterReg == 0)
1678 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1679 }
1680 return rcStrict;
1681 }
1682
1683 if (uCounterReg == 0)
1684 break;
1685
1686 /* If unaligned, we drop thru and do the page crossing access
1687 below. Otherwise, do the next page. */
1688 if (!(uVirtAddr & (OP_SIZE - 1)))
1689 {
1690 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, pCtx->eflags.u);
1691 continue;
1692 }
1693 cLeftPage = 0;
1694 }
1695 }
1696
1697 /*
1698 * Fallback - slow processing till the end of the current page.
1699 * In the cross-page boundary case we will end up here with cLeftPage
1700 * as 0, in which case we execute the loop once.
1701 *
1702 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1703 * I/O port, otherwise it wouldn't really be restartable.
1704 */
1705 /** @todo investigate what the CPU actually does with \#PF/\#GP
1706 * during OUTS. */
1707 do
1708 {
1709 OP_TYPE uValue;
1710 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1711 if (rcStrict != VINF_SUCCESS)
1712 return rcStrict;
1713
1714 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1715 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
1716 else
1717 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1718 if (IOM_SUCCESS(rcStrict))
1719 {
1720 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1721 pCtx->ADDR_rCX = --uCounterReg;
1722 cLeftPage--;
1723 }
1724 if (rcStrict != VINF_SUCCESS)
1725 {
1726 if (IOM_SUCCESS(rcStrict))
1727 {
1728 if (uCounterReg == 0)
1729 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1730 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1731 }
1732 return rcStrict;
1733 }
1734 IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, uCounterReg == 0);
1735 } while ((int32_t)cLeftPage > 0);
1736
1737
1738 /*
1739 * Next page. Must check for interrupts and stuff here.
1740 */
1741 if (uCounterReg == 0)
1742 break;
1743 IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(pVM, pVCpu, pIemCpu, pCtx->eflags.u);
1744 }
1745
1746 /*
1747 * Done.
1748 */
1749 iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
1750 return VINF_SUCCESS;
1751}
1752
1753#endif /* OP_SIZE != 64-bit */
1754
1755
1756#undef OP_rAX
1757#undef OP_SIZE
1758#undef ADDR_SIZE
1759#undef ADDR_rDI
1760#undef ADDR_rSI
1761#undef ADDR_rCX
1762#undef ADDR_rIP
1763#undef ADDR2_TYPE
1764#undef ADDR_TYPE
1765#undef ADDR2_TYPE
1766#undef IS_64_BIT_CODE
1767#undef IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN
1768#undef IEM_CHECK_FF_HIGH_PRIORITY_POST_REPSTR_MAYBE_RETURN
1769