VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h@42621

Last change on this file was 42621, checked in by vboxsync on 2012-08-06:

IEM: Implemented SIDT, SGDT, SLDT and STR. Fixed LLDT and LTR mode checks. Fixed hidden selector handling bugs in selector loading and mode switching code. Fixed clobbering of high EBP bits in ENTER (16-bit stack). Changed MOVS to not use memcpy and to flag the verifier if the src/dst areas are overlapping in any important way.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 50.7 KB
1/* $Id: IEMAllCImplStrInstr.cpp.h 42621 2012-08-06 13:39:55Z vboxsync $ */
2/** @file
3 * IEM - String Instruction Implementation Code Template.
4 */
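/*
 * Note: This is a code template.  It gets compiled once for each combination
 * of operand size (OP_SIZE) and address size (ADDR_SIZE): the includer
 * defines both macros before each inclusion, and the template #undefs them
 * again at the end so it can be included repeatedly.  A rough, illustrative
 * sketch of an instantiation (not the literal includer code):
 *
 *     #define OP_SIZE   8
 *     #define ADDR_SIZE 16
 *     #include "IEMAllCImplStrInstr.cpp.h"   // emits iemCImpl_rep_movs_op8_addr16, ...
 *
 *     #define OP_SIZE   16
 *     #define ADDR_SIZE 32
 *     #include "IEMAllCImplStrInstr.cpp.h"   // emits iemCImpl_rep_movs_op16_addr32, ...
 */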
5
6/*
7 * Copyright (C) 2011-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.215389.xyz. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Defined Constants And Macros *
21*******************************************************************************/
22#if OP_SIZE == 8
23# define OP_rAX al
24#elif OP_SIZE == 16
25# define OP_rAX ax
26#elif OP_SIZE == 32
27# define OP_rAX eax
28#elif OP_SIZE == 64
29# define OP_rAX rax
30#else
31# error "Bad OP_SIZE."
32#endif
33#define OP_TYPE RT_CONCAT3(uint,OP_SIZE,_t)
34
35#if ADDR_SIZE == 16
36# define ADDR_rDI di
37# define ADDR_rSI si
38# define ADDR_rCX cx
39# define ADDR2_TYPE uint32_t
40#elif ADDR_SIZE == 32
41# define ADDR_rDI edi
42# define ADDR_rSI esi
43# define ADDR_rCX ecx
44# define ADDR2_TYPE uint32_t
45#elif ADDR_SIZE == 64
46# define ADDR_rDI rdi
47# define ADDR_rSI rsi
48# define ADDR_rCX rcx
49# define ADDR2_TYPE uint64_t
50#else
51# error "Bad ADDR_SIZE."
52#endif
53#define ADDR_TYPE RT_CONCAT3(uint,ADDR_SIZE,_t)
54
55
56/**
57 * Implements 'REPE CMPS'.
58 */
59IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
60{
61 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
62
63 /*
64 * Setup.
65 */
66 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
67 if (uCounterReg == 0)
68 {
69 iemRegAddToRip(pIemCpu, cbInstr);
70 return VINF_SUCCESS;
71 }
72
73 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
74 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
75 if (rcStrict != VINF_SUCCESS)
76 return rcStrict;
77
78 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
79 if (rcStrict != VINF_SUCCESS)
80 return rcStrict;
81
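    /* The direction flag selects whether rSI/rDI are incremented or
       decremented by one operand size after each item. */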
82 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
83 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
84 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
85 uint32_t uEFlags = pCtx->eflags.u;
86
87 /*
88 * The loop.
89 */
90 do
91 {
92 /*
93 * Do segmentation and virtual page stuff.
94 */
95#if ADDR_SIZE != 64
96 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
97 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
98#else
99 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
100 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
101#endif
102 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
103 if (cLeftSrc1Page > uCounterReg)
104 cLeftSrc1Page = uCounterReg;
105 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
106 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
107
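        /* Only take the block fast path when it is safe: at least one whole
           item is left on both pages, the direction is forward (reverse block
           processing is not implemented yet) and, outside 64-bit mode, the
           whole block stays within the segment limits. */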
108 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
109 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
110#if ADDR_SIZE != 64
111 && uSrc1AddrReg < pSrc1Hid->u32Limit
112 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
113 && uSrc2AddrReg < pCtx->es.u32Limit
114 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
115#endif
116 )
117 {
118 RTGCPHYS GCPhysSrc1Mem;
119 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
120 if (rcStrict != VINF_SUCCESS)
121 return rcStrict;
122
123 RTGCPHYS GCPhysSrc2Mem;
124 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
125 if (rcStrict != VINF_SUCCESS)
126 return rcStrict;
127
128 /*
129 * If we can map the page without trouble, do a block processing
130 * until the end of the current page.
131 */
132 PGMPAGEMAPLOCK PgLockSrc2Mem;
133 OP_TYPE const *puSrc2Mem;
134 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
135 if (rcStrict == VINF_SUCCESS)
136 {
137 PGMPAGEMAPLOCK PgLockSrc1Mem;
138 OP_TYPE const *puSrc1Mem;
139 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
140 if (rcStrict == VINF_SUCCESS)
141 {
142 if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
143 {
144 /* All matches, only compare the last item to get the right eflags. */
145 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
146 uSrc1AddrReg += cLeftPage * cbIncr;
147 uSrc2AddrReg += cLeftPage * cbIncr;
148 uCounterReg -= cLeftPage;
149 }
150 else
151 {
152 /* Some mismatch, compare each item (and keep volatile
153 memory in mind). */
154 uint32_t off = 0;
155 do
156 {
157 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
158 off++;
159 } while ( off < cLeftPage
160 && (uEFlags & X86_EFL_ZF));
161 uSrc1AddrReg += cbIncr * off;
162 uSrc2AddrReg += cbIncr * off;
163 uCounterReg -= off;
164 }
165
166 /* Update the registers before looping. */
167 pCtx->ADDR_rCX = uCounterReg;
168 pCtx->ADDR_rSI = uSrc1AddrReg;
169 pCtx->ADDR_rDI = uSrc2AddrReg;
170 pCtx->eflags.u = uEFlags;
171
172 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
173 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
174 continue;
175 }
176 }
177 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
178 }
179
180 /*
181 * Fallback - slow processing till the end of the current page.
182 * In the cross-page boundary case we will end up here with cLeftPage
183 * as 0; we then execute one round of the loop.
184 */
185 do
186 {
187 OP_TYPE uValue1;
188 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
189 if (rcStrict != VINF_SUCCESS)
190 return rcStrict;
191 OP_TYPE uValue2;
192 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
193 if (rcStrict != VINF_SUCCESS)
194 return rcStrict;
195 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
196
197 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
198 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
199 pCtx->ADDR_rCX = --uCounterReg;
200 pCtx->eflags.u = uEFlags;
201 cLeftPage--;
202 } while ( (int32_t)cLeftPage > 0
203 && (uEFlags & X86_EFL_ZF));
204 } while ( uCounterReg != 0
205 && (uEFlags & X86_EFL_ZF));
206
207 /*
208 * Done.
209 */
210 iemRegAddToRip(pIemCpu, cbInstr);
211 return VINF_SUCCESS;
212}
213
214
215/**
216 * Implements 'REPNE CMPS'.
217 */
218IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
219{
220 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
221
222 /*
223 * Setup.
224 */
225 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
226 if (uCounterReg == 0)
227 {
228 iemRegAddToRip(pIemCpu, cbInstr);
229 return VINF_SUCCESS;
230 }
231
232 PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
233 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg);
234 if (rcStrict != VINF_SUCCESS)
235 return rcStrict;
236
237 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
238 if (rcStrict != VINF_SUCCESS)
239 return rcStrict;
240
241 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
242 ADDR_TYPE uSrc1AddrReg = pCtx->ADDR_rSI;
243 ADDR_TYPE uSrc2AddrReg = pCtx->ADDR_rDI;
244 uint32_t uEFlags = pCtx->eflags.u;
245
246 /*
247 * The loop.
248 */
249 do
250 {
251 /*
252 * Do segmentation and virtual page stuff.
253 */
254#if ADDR_SIZE != 64
255 ADDR2_TYPE uVirtSrc1Addr = (uint32_t)pSrc1Hid->u64Base + uSrc1AddrReg;
256 ADDR2_TYPE uVirtSrc2Addr = (uint32_t)pCtx->es.u64Base + uSrc2AddrReg;
257#else
258 uint64_t uVirtSrc1Addr = uSrc1AddrReg;
259 uint64_t uVirtSrc2Addr = uSrc2AddrReg;
260#endif
261 uint32_t cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
262 if (cLeftSrc1Page > uCounterReg)
263 cLeftSrc1Page = uCounterReg;
264 uint32_t cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
265 uint32_t cLeftPage = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);
266
267 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
268 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
269#if ADDR_SIZE != 64
270 && uSrc1AddrReg < pSrc1Hid->u32Limit
271 && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
272 && uSrc2AddrReg < pCtx->es.u32Limit
273 && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
274#endif
275 )
276 {
277 RTGCPHYS GCPhysSrc1Mem;
278 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
279 if (rcStrict != VINF_SUCCESS)
280 return rcStrict;
281
282 RTGCPHYS GCPhysSrc2Mem;
283 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
284 if (rcStrict != VINF_SUCCESS)
285 return rcStrict;
286
287 /*
288 * If we can map the page without trouble, do a block processing
289 * until the end of the current page.
290 */
291 OP_TYPE const *puSrc2Mem;
292 PGMPAGEMAPLOCK PgLockSrc2Mem;
293 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
294 if (rcStrict == VINF_SUCCESS)
295 {
296 OP_TYPE const *puSrc1Mem;
297 PGMPAGEMAPLOCK PgLockSrc1Mem;
298 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
299 if (rcStrict == VINF_SUCCESS)
300 {
301 if (memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
302 {
303 /* All matches, only compare the last item to get the right eflags. */
304 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
305 uSrc1AddrReg += cLeftPage * cbIncr;
306 uSrc2AddrReg += cLeftPage * cbIncr;
307 uCounterReg -= cLeftPage;
308 }
309 else
310 {
311 /* Some mismatch, compare each item (and keep volatile
312 memory in mind). */
313 uint32_t off = 0;
314 do
315 {
316 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
317 off++;
318 } while ( off < cLeftPage
319 && !(uEFlags & X86_EFL_ZF));
320 uSrc1AddrReg += cbIncr * off;
321 uSrc2AddrReg += cbIncr * off;
322 uCounterReg -= off;
323 }
324
325 /* Update the registers before looping. */
326 pCtx->ADDR_rCX = uCounterReg;
327 pCtx->ADDR_rSI = uSrc1AddrReg;
328 pCtx->ADDR_rDI = uSrc2AddrReg;
329 pCtx->eflags.u = uEFlags;
330
331 iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
332 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
333 continue;
334 }
335 iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
336 }
337 }
338
339 /*
340 * Fallback - slow processing till the end of the current page.
341 * In the cross-page boundary case we will end up here with cLeftPage
342 * as 0; we then execute one round of the loop.
343 */
344 do
345 {
346 OP_TYPE uValue1;
347 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
348 if (rcStrict != VINF_SUCCESS)
349 return rcStrict;
350 OP_TYPE uValue2;
351 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
352 if (rcStrict != VINF_SUCCESS)
353 return rcStrict;
354 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);
355
356 pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
357 pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
358 pCtx->ADDR_rCX = --uCounterReg;
359 pCtx->eflags.u = uEFlags;
360 cLeftPage--;
361 } while ( (int32_t)cLeftPage > 0
362 && !(uEFlags & X86_EFL_ZF));
363 } while ( uCounterReg != 0
364 && !(uEFlags & X86_EFL_ZF));
365
366 /*
367 * Done.
368 */
369 iemRegAddToRip(pIemCpu, cbInstr);
370 return VINF_SUCCESS;
371}
372
373
374/**
375 * Implements 'REPE SCAS'.
376 */
377IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
378{
379 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
380
381 /*
382 * Setup.
383 */
384 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
385 if (uCounterReg == 0)
386 {
387 iemRegAddToRip(pIemCpu, cbInstr);
388 return VINF_SUCCESS;
389 }
390
391 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
392 if (rcStrict != VINF_SUCCESS)
393 return rcStrict;
394
395 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
396 OP_TYPE const uValueReg = pCtx->OP_rAX;
397 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
398 uint32_t uEFlags = pCtx->eflags.u;
399
400 /*
401 * The loop.
402 */
403 do
404 {
405 /*
406 * Do segmentation and virtual page stuff.
407 */
408#if ADDR_SIZE != 64
409 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
410#else
411 uint64_t uVirtAddr = uAddrReg;
412#endif
413 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
414 if (cLeftPage > uCounterReg)
415 cLeftPage = uCounterReg;
416 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
417 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
418#if ADDR_SIZE != 64
419 && uAddrReg < pCtx->es.u32Limit
420 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
421#endif
422 )
423 {
424 RTGCPHYS GCPhysMem;
425 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
426 if (rcStrict != VINF_SUCCESS)
427 return rcStrict;
428
429 /*
430 * If we can map the page without trouble, do a block processing
431 * until the end of the current page.
432 */
433 PGMPAGEMAPLOCK PgLockMem;
434 OP_TYPE const *puMem;
435 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
436 if (rcStrict == VINF_SUCCESS)
437 {
438 /* Search till we find a mismatching item. */
439 OP_TYPE uTmpValue;
440 bool fQuit;
441 uint32_t i = 0;
442 do
443 {
444 uTmpValue = puMem[i++];
445 fQuit = uTmpValue != uValueReg;
446 } while (i < cLeftPage && !fQuit);
447
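            /* Redo the last compare using the arithmetic helper so that all
               of EFLAGS (not just ZF) end up the way a real SCAS of that item
               would leave them. */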
448 /* Update the regs. */
449 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
450 pCtx->ADDR_rCX = uCounterReg -= i;
451 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
452 pCtx->eflags.u = uEFlags;
453 Assert(!(uEFlags & X86_EFL_ZF) == (i < cLeftPage));
454 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
455 if (fQuit)
456 break;
457
458
459 /* If unaligned, we drop thru and do the page crossing access
460 below. Otherwise, do the next page. */
461 if (!(uVirtAddr & (OP_SIZE - 1)))
462 continue;
463 if (uCounterReg == 0)
464 break;
465 cLeftPage = 0;
466 }
467 }
468
469 /*
470 * Fallback - slow processing till the end of the current page.
471 * In the cross-page boundary case we will end up here with cLeftPage
472 * as 0; we then execute one round of the loop.
473 */
474 do
475 {
476 OP_TYPE uTmpValue;
477 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
478 if (rcStrict != VINF_SUCCESS)
479 return rcStrict;
480 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
481
482 pCtx->ADDR_rDI = uAddrReg += cbIncr;
483 pCtx->ADDR_rCX = --uCounterReg;
484 pCtx->eflags.u = uEFlags;
485 cLeftPage--;
486 } while ( (int32_t)cLeftPage > 0
487 && (uEFlags & X86_EFL_ZF));
488 } while ( uCounterReg != 0
489 && (uEFlags & X86_EFL_ZF));
490
491 /*
492 * Done.
493 */
494 iemRegAddToRip(pIemCpu, cbInstr);
495 return VINF_SUCCESS;
496}
497
498
499/**
500 * Implements 'REPNE SCAS'.
501 */
502IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
503{
504 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
505
506 /*
507 * Setup.
508 */
509 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
510 if (uCounterReg == 0)
511 {
512 iemRegAddToRip(pIemCpu, cbInstr);
513 return VINF_SUCCESS;
514 }
515
516 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
517 if (rcStrict != VINF_SUCCESS)
518 return rcStrict;
519
520 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
521 OP_TYPE const uValueReg = pCtx->OP_rAX;
522 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
523 uint32_t uEFlags = pCtx->eflags.u;
524
525 /*
526 * The loop.
527 */
528 do
529 {
530 /*
531 * Do segmentation and virtual page stuff.
532 */
533#if ADDR_SIZE != 64
534 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
535#else
536 uint64_t uVirtAddr = uAddrReg;
537#endif
538 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
539 if (cLeftPage > uCounterReg)
540 cLeftPage = uCounterReg;
541 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
542 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
543#if ADDR_SIZE != 64
544 && uAddrReg < pCtx->es.u32Limit
545 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
546#endif
547 )
548 {
549 RTGCPHYS GCPhysMem;
550 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
551 if (rcStrict != VINF_SUCCESS)
552 return rcStrict;
553
554 /*
555 * If we can map the page without trouble, do a block processing
556 * until the end of the current page.
557 */
558 PGMPAGEMAPLOCK PgLockMem;
559 OP_TYPE const *puMem;
560 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
561 if (rcStrict == VINF_SUCCESS)
562 {
563 /* Search till we find a matching item. */
564 OP_TYPE uTmpValue;
565 bool fQuit;
566 uint32_t i = 0;
567 do
568 {
569 uTmpValue = puMem[i++];
570 fQuit = uTmpValue == uValueReg;
571 } while (i < cLeftPage && !fQuit);
572
573 /* Update the regs. */
574 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
575 pCtx->ADDR_rCX = uCounterReg -= i;
576 pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
577 pCtx->eflags.u = uEFlags;
578 Assert((!(uEFlags & X86_EFL_ZF) != (i < cLeftPage)) || (i == cLeftPage));
579 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
580 if (fQuit)
581 break;
582
583
584 /* If unaligned, we drop thru and do the page crossing access
585 below. Otherwise, do the next page. */
586 if (!(uVirtAddr & (OP_SIZE - 1)))
587 continue;
588 if (uCounterReg == 0)
589 break;
590 cLeftPage = 0;
591 }
592 }
593
594 /*
595 * Fallback - slow processing till the end of the current page.
596 * In the cross-page boundary case we will end up here with cLeftPage
597 * as 0; we then execute one round of the loop.
598 */
599 do
600 {
601 OP_TYPE uTmpValue;
602 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
603 if (rcStrict != VINF_SUCCESS)
604 return rcStrict;
605 RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
606
607 pCtx->ADDR_rDI = uAddrReg += cbIncr;
608 pCtx->ADDR_rCX = --uCounterReg;
609 pCtx->eflags.u = uEFlags;
610 cLeftPage--;
611 } while ( (int32_t)cLeftPage > 0
612 && !(uEFlags & X86_EFL_ZF));
613 } while ( uCounterReg != 0
614 && !(uEFlags & X86_EFL_ZF));
615
616 /*
617 * Done.
618 */
619 iemRegAddToRip(pIemCpu, cbInstr);
620 return VINF_SUCCESS;
621}
622
623
624
625
626/**
627 * Implements 'REP MOVS'.
628 */
629IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
630{
631 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
632
633 /*
634 * Setup.
635 */
636 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
637 if (uCounterReg == 0)
638 {
639 iemRegAddToRip(pIemCpu, cbInstr);
640 return VINF_SUCCESS;
641 }
642
643 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
644 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
645 if (rcStrict != VINF_SUCCESS)
646 return rcStrict;
647
648 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
649 if (rcStrict != VINF_SUCCESS)
650 return rcStrict;
651
652 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
653 ADDR_TYPE uSrcAddrReg = pCtx->ADDR_rSI;
654 ADDR_TYPE uDstAddrReg = pCtx->ADDR_rDI;
655
656 /*
657 * If we're reading back what we write, we have to flag it for the
658 * verification code to prevent a false positive.
659 * Note! This doesn't take aliasing or wrapping into account - lazy bird.
660 */
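    /* The check below is a rough interval overlap test between the source
       and destination ranges of the copy; as noted above it ignores segment
       aliasing and address wrap-around. */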
661 if ( IEM_VERIFICATION_ENABLED(pIemCpu)
662 && (cbIncr > 0
663 ? uSrcAddrReg <= uDstAddrReg
664 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
665 : uDstAddrReg <= uSrcAddrReg
666 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
667 pIemCpu->fOverlappingMovs = true;
668
669 /*
670 * The loop.
671 */
672 do
673 {
674 /*
675 * Do segmentation and virtual page stuff.
676 */
677#if ADDR_SIZE != 64
678 ADDR2_TYPE uVirtSrcAddr = (uint32_t)pSrcHid->u64Base + uSrcAddrReg;
679 ADDR2_TYPE uVirtDstAddr = (uint32_t)pCtx->es.u64Base + uDstAddrReg;
680#else
681 uint64_t uVirtSrcAddr = uSrcAddrReg;
682 uint64_t uVirtDstAddr = uDstAddrReg;
683#endif
684 uint32_t cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
685 if (cLeftSrcPage > uCounterReg)
686 cLeftSrcPage = uCounterReg;
687 uint32_t cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
688 uint32_t cLeftPage = RT_MIN(cLeftSrcPage, cLeftDstPage);
689
690 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
691 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
692#if ADDR_SIZE != 64
693 && uSrcAddrReg < pSrcHid->u32Limit
694 && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
695 && uDstAddrReg < pCtx->es.u32Limit
696 && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
697#endif
698 )
699 {
700 RTGCPHYS GCPhysSrcMem;
701 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
702 if (rcStrict != VINF_SUCCESS)
703 return rcStrict;
704
705 RTGCPHYS GCPhysDstMem;
706 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
707 if (rcStrict != VINF_SUCCESS)
708 return rcStrict;
709
710 /*
711 * If we can map the page without trouble, do a block processing
712 * until the end of the current page.
713 */
714 PGMPAGEMAPLOCK PgLockDstMem;
715 OP_TYPE *puDstMem;
716 rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
717 if (rcStrict == VINF_SUCCESS)
718 {
719 PGMPAGEMAPLOCK PgLockSrcMem;
720 OP_TYPE const *puSrcMem;
721 rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
722 if (rcStrict == VINF_SUCCESS)
723 {
724 Assert( (GCPhysSrcMem >> PAGE_SHIFT) != (GCPhysDstMem >> PAGE_SHIFT)
725 || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));
726
727 /* Perform the operation exactly (don't use memcpy to avoid
728 having to consider how its implementation would affect
729 any overlapping source and destination area). */
730 OP_TYPE const *puSrcCur = puSrcMem;
731 OP_TYPE *puDstCur = puDstMem;
732 uint32_t cTodo = cLeftPage;
733 while (cTodo-- > 0)
734 *puDstCur++ = *puSrcCur++;
735
736 /* Update the registers. */
737 pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
738 pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
739 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
740
741 iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
742 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
743 continue;
744 }
745 iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
746 }
747 }
748
749 /*
750 * Fallback - slow processing till the end of the current page.
751 * In the cross-page boundary case we will end up here with cLeftPage
752 * as 0; we then execute one round of the loop.
753 */
754 do
755 {
756 OP_TYPE uValue;
757 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
758 if (rcStrict != VINF_SUCCESS)
759 return rcStrict;
760 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
761 if (rcStrict != VINF_SUCCESS)
762 return rcStrict;
763
764 pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
765 pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
766 pCtx->ADDR_rCX = --uCounterReg;
767 cLeftPage--;
768 } while ((int32_t)cLeftPage > 0);
769 } while (uCounterReg != 0);
770
771 /*
772 * Done.
773 */
774 iemRegAddToRip(pIemCpu, cbInstr);
775 return VINF_SUCCESS;
776}
777
778
779/**
780 * Implements 'REP STOS'.
781 */
782IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
783{
784 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
785
786 /*
787 * Setup.
788 */
789 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
790 if (uCounterReg == 0)
791 {
792 iemRegAddToRip(pIemCpu, cbInstr);
793 return VINF_SUCCESS;
794 }
795
796 VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
797 if (rcStrict != VINF_SUCCESS)
798 return rcStrict;
799
800 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
801 OP_TYPE const uValue = pCtx->OP_rAX;
802 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
803
804 /*
805 * The loop.
806 */
807 do
808 {
809 /*
810 * Do segmentation and virtual page stuff.
811 */
812#if ADDR_SIZE != 64
813 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
814#else
815 uint64_t uVirtAddr = uAddrReg;
816#endif
817 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
818 if (cLeftPage > uCounterReg)
819 cLeftPage = uCounterReg;
820 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
821 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
822#if ADDR_SIZE != 64
823 && uAddrReg < pCtx->es.u32Limit
824 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
825#endif
826 )
827 {
828 RTGCPHYS GCPhysMem;
829 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
830 if (rcStrict != VINF_SUCCESS)
831 return rcStrict;
832
833 /*
834 * If we can map the page without trouble, do a block processing
835 * until the end of the current page.
836 */
837 PGMPAGEMAPLOCK PgLockMem;
838 OP_TYPE *puMem;
839 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
840 if (rcStrict == VINF_SUCCESS)
841 {
842 /* Update the regs first so we can loop on cLeftPage. */
843 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
844 pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;
845
846 /* Do the memsetting. */
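            /* memset() fills bytes, so it can only be used directly for the
               8-bit variant; the wider variants use a plain store loop (a
               32-bit ASMMemFill32 path is sketched below but disabled). */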
847#if OP_SIZE == 8
848 memset(puMem, uValue, cLeftPage);
849/*#elif OP_SIZE == 32
850 ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
851#else
852 while (cLeftPage-- > 0)
853 *puMem++ = uValue;
854#endif
855
856 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
857
858 /* If unaligned, we drop thru and do the page crossing access
859 below. Otherwise, do the next page. */
860 if (!(uVirtAddr & (OP_SIZE - 1)))
861 continue;
862 if (uCounterReg == 0)
863 break;
864 cLeftPage = 0;
865 }
866 }
867
868 /*
869 * Fallback - slow processing till the end of the current page.
870 * In the cross-page boundary case we will end up here with cLeftPage
871 * as 0; we then execute one round of the loop.
872 */
873 do
874 {
875 rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
876 if (rcStrict != VINF_SUCCESS)
877 return rcStrict;
878 pCtx->ADDR_rDI = uAddrReg += cbIncr;
879 pCtx->ADDR_rCX = --uCounterReg;
880 cLeftPage--;
881 } while ((int32_t)cLeftPage > 0);
882 } while (uCounterReg != 0);
883
884 /*
885 * Done.
886 */
887 iemRegAddToRip(pIemCpu, cbInstr);
888 return VINF_SUCCESS;
889}
890
891
892/**
893 * Implements 'REP LODS'.
894 */
895IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
896{
897 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
898
899 /*
900 * Setup.
901 */
902 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
903 if (uCounterReg == 0)
904 {
905 iemRegAddToRip(pIemCpu, cbInstr);
906 return VINF_SUCCESS;
907 }
908
909 PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
910 VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg);
911 if (rcStrict != VINF_SUCCESS)
912 return rcStrict;
913
914 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
915 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
916
917 /*
918 * The loop.
919 */
920 do
921 {
922 /*
923 * Do segmentation and virtual page stuff.
924 */
925#if ADDR_SIZE != 64
926 ADDR2_TYPE uVirtAddr = (uint32_t)pSrcHid->u64Base + uAddrReg;
927#else
928 uint64_t uVirtAddr = uAddrReg;
929#endif
930 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
931 if (cLeftPage > uCounterReg)
932 cLeftPage = uCounterReg;
933 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
934 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
935#if ADDR_SIZE != 64
936 && uAddrReg < pSrcHid->u32Limit
937 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
938#endif
939 )
940 {
941 RTGCPHYS GCPhysMem;
942 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
943 if (rcStrict != VINF_SUCCESS)
944 return rcStrict;
945
946 /*
947 * If we can map the page without trouble, we can get away with
948 * just reading the last value on the page.
949 */
950 PGMPAGEMAPLOCK PgLockMem;
951 OP_TYPE const *puMem;
952 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
953 if (rcStrict == VINF_SUCCESS)
954 {
955 /* Only fetch the last item; the rest doesn't matter in direct access mode. */
956#if OP_SIZE == 32
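            /* Storing the 32-bit result to rax mirrors the implicit
               zero-extension of EAX into RAX performed by the CPU. */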
957 pCtx->rax = puMem[cLeftPage - 1];
958#else
959 pCtx->OP_rAX = puMem[cLeftPage - 1];
960#endif
961 pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
962 pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
963 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
964
965 /* If unaligned, we drop thru and do the page crossing access
966 below. Otherwise, do the next page. */
967 if (!(uVirtAddr & (OP_SIZE - 1)))
968 continue;
969 if (uCounterReg == 0)
970 break;
971 cLeftPage = 0;
972 }
973 }
974
975 /*
976 * Fallback - slow processing till the end of the current page.
977 * In the cross-page boundary case we will end up here with cLeftPage
978 * as 0; we then execute one round of the loop.
979 */
980 do
981 {
982 OP_TYPE uTmpValue;
983 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
984 if (rcStrict != VINF_SUCCESS)
985 return rcStrict;
986#if OP_SIZE == 32
987 pCtx->rax = uTmpValue;
988#else
989 pCtx->OP_rAX = uTmpValue;
990#endif
991 pCtx->ADDR_rSI = uAddrReg += cbIncr;
992 pCtx->ADDR_rCX = --uCounterReg;
993 cLeftPage--;
994 } while ((int32_t)cLeftPage > 0);
995 if (rcStrict != VINF_SUCCESS)
996 break;
997 } while (uCounterReg != 0);
998
999 /*
1000 * Done.
1001 */
1002 iemRegAddToRip(pIemCpu, cbInstr);
1003 return VINF_SUCCESS;
1004}
1005
1006
1007#if OP_SIZE != 64
1008
1009/**
1010 * Implements 'INS' (no rep)
1011 */
1012IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1013{
1014 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1015 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1016 VBOXSTRICTRC rcStrict;
1017
1018 /*
1019 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1020 * segmentation and finally any #PF due to virtual address translation.
1021 * ASSUMES nothing is read from the I/O port before traps are taken.
1022 */
1023 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1024 if (rcStrict != VINF_SUCCESS)
1025 return rcStrict;
1026
1027 OP_TYPE *puMem;
1028 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
1029 if (rcStrict != VINF_SUCCESS)
1030 return rcStrict;
1031
1032 uint32_t u32Value;
1033 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1034 rcStrict = IOMIOPortRead(pVM, pCtx->dx, &u32Value, OP_SIZE / 8);
1035 else
1036 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
1037 if (IOM_SUCCESS(rcStrict))
1038 {
1039 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1040 if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
1041 {
1042 if (!pCtx->eflags.Bits.u1DF)
1043 pCtx->ADDR_rDI += OP_SIZE / 8;
1044 else
1045 pCtx->ADDR_rDI -= OP_SIZE / 8;
1046 iemRegAddToRip(pIemCpu, cbInstr);
1047 }
1048 /* iemMemMap already checked the permissions, so this can only be real
1049 errors or access handlers meddling. The access handler case is going
1050 to cause misbehavior if the instruction is re-interpreted or similar,
1051 so we fail with an internal error here instead. */
1052 else
1053 AssertLogRelFailedReturn(VERR_IEM_IPE_1);
1054 }
1055 return rcStrict;
1056}
1057
1058
1059/**
1060 * Implements 'REP INS'.
1061 */
1062IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE))
1063{
1064 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1065 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1066
1067 /*
1068 * Setup.
1069 */
1070 uint16_t const u16Port = pCtx->dx;
1071 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1072 if (rcStrict != VINF_SUCCESS)
1073 return rcStrict;
1074
1075 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1076 if (uCounterReg == 0)
1077 {
1078 iemRegAddToRip(pIemCpu, cbInstr);
1079 return VINF_SUCCESS;
1080 }
1081
1082 rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES);
1083 if (rcStrict != VINF_SUCCESS)
1084 return rcStrict;
1085
1086 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1087 ADDR_TYPE uAddrReg = pCtx->ADDR_rDI;
1088
1089 /*
1090 * The loop.
1091 */
1092 do
1093 {
1094 /*
1095 * Do segmentation and virtual page stuff.
1096 */
1097#if ADDR_SIZE != 64
1098 ADDR2_TYPE uVirtAddr = (uint32_t)pCtx->es.u64Base + uAddrReg;
1099#else
1100 uint64_t uVirtAddr = uAddrReg;
1101#endif
1102 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1103 if (cLeftPage > uCounterReg)
1104 cLeftPage = uCounterReg;
1105 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1106 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1107#if ADDR_SIZE != 64
1108 && uAddrReg < pCtx->es.u32Limit
1109 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit
1110#endif
1111 )
1112 {
1113 RTGCPHYS GCPhysMem;
1114 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
1115 if (rcStrict != VINF_SUCCESS)
1116 return rcStrict;
1117
1118 /*
1119 * If we can map the page without trouble, we would've liked to use
1120 * a string I/O method to do the work, but the current IOM
1121 * interface doesn't match our current approach. So, do a regular
1122 * loop instead.
1123 */
1124 /** @todo Change the I/O manager interface to make use of
1125 * mapped buffers instead of leaving those bits to the
1126 * device implementation? */
1127 PGMPAGEMAPLOCK PgLockMem;
1128 OP_TYPE *puMem;
1129 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
1130 if (rcStrict == VINF_SUCCESS)
1131 {
1132 uint32_t off = 0;
1133 while (off < cLeftPage)
1134 {
1135 uint32_t u32Value;
1136 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1137 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1138 else
1139 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1140 if (IOM_SUCCESS(rcStrict))
1141 {
1142 puMem[off] = (OP_TYPE)u32Value;
1143 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1144 pCtx->ADDR_rCX = --uCounterReg;
1145 }
1146 if (rcStrict != VINF_SUCCESS)
1147 {
1148 if (IOM_SUCCESS(rcStrict))
1149 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1150 if (uCounterReg == 0)
1151 iemRegAddToRip(pIemCpu, cbInstr);
1152 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1153 return rcStrict;
1154 }
1155 off++;
1156 }
1157 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
1158
1159 /* If unaligned, we drop thru and do the page crossing access
1160 below. Otherwise, do the next page. */
1161 if (!(uVirtAddr & (OP_SIZE - 1)))
1162 continue;
1163 if (uCounterReg == 0)
1164 break;
1165 cLeftPage = 0;
1166 }
1167 }
1168
1169 /*
1170 * Fallback - slow processing till the end of the current page.
1171 * In the cross-page boundary case we will end up here with cLeftPage
1172 * as 0; we then execute one round of the loop.
1173 *
1174 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1175 * I/O port, otherwise it wouldn't really be restartable.
1176 */
1177 /** @todo investigate what the CPU actually does with \#PF/\#GP
1178 * during INS. */
1179 do
1180 {
1181 OP_TYPE *puMem;
1182 rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
1183 if (rcStrict != VINF_SUCCESS)
1184 return rcStrict;
1185
1186 uint32_t u32Value;
1187 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1188 rcStrict = IOMIOPortRead(pVM, u16Port, &u32Value, OP_SIZE / 8);
1189 else
1190 rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
1191 if (!IOM_SUCCESS(rcStrict))
1192 return rcStrict;
1193
1194 *puMem = (OP_TYPE)u32Value;
1195 VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
1196 AssertLogRelReturn(rcStrict2 == VINF_SUCCESS, VERR_IEM_IPE_1); /* See non-rep version. */
1197
1198 pCtx->ADDR_rDI = uAddrReg += cbIncr;
1199 pCtx->ADDR_rCX = --uCounterReg;
1200
1201 cLeftPage--;
1202 if (rcStrict != VINF_SUCCESS)
1203 {
1204 if (IOM_SUCCESS(rcStrict))
1205 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1206 if (uCounterReg == 0)
1207 iemRegAddToRip(pIemCpu, cbInstr);
1208 return rcStrict;
1209 }
1210 } while ((int32_t)cLeftPage > 0);
1211 } while (uCounterReg != 0);
1212
1213 /*
1214 * Done.
1215 */
1216 iemRegAddToRip(pIemCpu, cbInstr);
1217 return VINF_SUCCESS;
1218}
1219
1220
1221/**
1222 * Implements 'OUTS' (no rep)
1223 */
1224IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1225{
1226 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1227 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1228 VBOXSTRICTRC rcStrict;
1229
1230 /*
1231 * ASSUMES the #GP for I/O permission is taken first, then any #GP for
1232 * segmentation and finally any #PF due to virtual address translation.
1233 * ASSUMES nothing is read from the I/O port before traps are taken.
1234 */
1235 rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
1236 if (rcStrict != VINF_SUCCESS)
1237 return rcStrict;
1238
1239 OP_TYPE uValue;
1240 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
1241 if (rcStrict == VINF_SUCCESS)
1242 {
1243 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1244 rcStrict = IOMIOPortWrite(pVM, pCtx->dx, uValue, OP_SIZE / 8);
1245 else
1246 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
1247 if (IOM_SUCCESS(rcStrict))
1248 {
1249 if (!pCtx->eflags.Bits.u1DF)
1250 pCtx->ADDR_rSI += OP_SIZE / 8;
1251 else
1252 pCtx->ADDR_rSI -= OP_SIZE / 8;
1253 iemRegAddToRip(pIemCpu, cbInstr);
1254 if (rcStrict != VINF_SUCCESS)
1255 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1256 }
1257 }
1258 return rcStrict;
1259}
1260
1261
1262/**
1263 * Implements 'REP OUTS'.
1264 */
1265IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
1266{
1267 PVM pVM = IEMCPU_TO_VM(pIemCpu);
1268 PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
1269
1270 /*
1271 * Setup.
1272 */
1273 uint16_t const u16Port = pCtx->dx;
1274 VBOXSTRICTRC rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
1275 if (rcStrict != VINF_SUCCESS)
1276 return rcStrict;
1277
1278 ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
1279 if (uCounterReg == 0)
1280 {
1281 iemRegAddToRip(pIemCpu, cbInstr);
1282 return VINF_SUCCESS;
1283 }
1284
1285 PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
1286 rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg);
1287 if (rcStrict != VINF_SUCCESS)
1288 return rcStrict;
1289
1290 int8_t const cbIncr = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
1291 ADDR_TYPE uAddrReg = pCtx->ADDR_rSI;
1292
1293 /*
1294 * The loop.
1295 */
1296 do
1297 {
1298 /*
1299 * Do segmentation and virtual page stuff.
1300 */
1301#if ADDR_SIZE != 64
1302 ADDR2_TYPE uVirtAddr = (uint32_t)pHid->u64Base + uAddrReg;
1303#else
1304 uint64_t uVirtAddr = uAddrReg;
1305#endif
1306 uint32_t cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
1307 if (cLeftPage > uCounterReg)
1308 cLeftPage = uCounterReg;
1309 if ( cLeftPage > 0 /* can be null if unaligned, do one fallback round. */
1310 && cbIncr > 0 /** @todo Implement reverse direction string ops. */
1311#if ADDR_SIZE != 64
1312 && uAddrReg < pHid->u32Limit
1313 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit
1314#endif
1315 )
1316 {
1317 RTGCPHYS GCPhysMem;
1318 rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
1319 if (rcStrict != VINF_SUCCESS)
1320 return rcStrict;
1321
1322 /*
1323 * If we can map the page without trouble, we would've liked to use
1324 * a string I/O method to do the work, but the current IOM
1325 * interface doesn't match our current approach. So, do a regular
1326 * loop instead.
1327 */
1328 /** @todo Change the I/O manager interface to make use of
1329 * mapped buffers instead of leaving those bits to the
1330 * device implementation? */
1331 PGMPAGEMAPLOCK PgLockMem;
1332 OP_TYPE const *puMem;
1333 rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
1334 if (rcStrict == VINF_SUCCESS)
1335 {
1336 uint32_t off = 0;
1337 while (off < cLeftPage)
1338 {
1339 uint32_t u32Value = *puMem++;
1340 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1341 rcStrict = IOMIOPortWrite(pVM, u16Port, u32Value, OP_SIZE / 8);
1342 else
1343 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
1344 if (IOM_SUCCESS(rcStrict))
1345 {
1346 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1347 pCtx->ADDR_rCX = --uCounterReg;
1348 }
1349 if (rcStrict != VINF_SUCCESS)
1350 {
1351 if (IOM_SUCCESS(rcStrict))
1352 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1353 if (uCounterReg == 0)
1354 iemRegAddToRip(pIemCpu, cbInstr);
1355 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1356 return rcStrict;
1357 }
1358 off++;
1359 }
1360 iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
1361
1362 /* If unaligned, we drop thru and do the page crossing access
1363 below. Otherwise, do the next page. */
1364 if (!(uVirtAddr & (OP_SIZE - 1)))
1365 continue;
1366 if (uCounterReg == 0)
1367 break;
1368 cLeftPage = 0;
1369 }
1370 }
1371
1372 /*
1373 * Fallback - slow processing till the end of the current page.
1374 * In the cross-page boundary case we will end up here with cLeftPage
1375 * as 0; we then execute one round of the loop.
1376 *
1377 * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
1378 * I/O port, otherwise it wouldn't really be restartable.
1379 */
1380 /** @todo investigate what the CPU actually does with \#PF/\#GP
1381 * during OUTS. */
1382 do
1383 {
1384 OP_TYPE uValue;
1385 rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
1386 if (rcStrict != VINF_SUCCESS)
1387 return rcStrict;
1388
1389 if (!IEM_VERIFICATION_ENABLED(pIemCpu))
1390 rcStrict = IOMIOPortWrite(pVM, u16Port, uValue, OP_SIZE / 8);
1391 else
1392 rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
1393 if (IOM_SUCCESS(rcStrict))
1394 {
1395 pCtx->ADDR_rSI = uAddrReg += cbIncr;
1396 pCtx->ADDR_rCX = --uCounterReg;
1397 cLeftPage--;
1398 }
1399 if (rcStrict != VINF_SUCCESS)
1400 {
1401 if (IOM_SUCCESS(rcStrict))
1402 rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
1403 if (uCounterReg == 0)
1404 iemRegAddToRip(pIemCpu, cbInstr);
1405 return rcStrict;
1406 }
1407 } while ((int32_t)cLeftPage > 0);
1408 } while (uCounterReg != 0);
1409
1410 /*
1411 * Done.
1412 */
1413 iemRegAddToRip(pIemCpu, cbInstr);
1414 return VINF_SUCCESS;
1415}
1416
1417#endif /* OP_SIZE != 64 */
1418
1419
1420#undef OP_rAX
1421#undef OP_SIZE
1422#undef ADDR_SIZE
1423#undef ADDR_rDI
1424#undef ADDR_rSI
1425#undef ADDR_rCX
1426#undef ADDR_rIP
1427#undef ADDR2_TYPE
1428#undef ADDR_TYPE
1429#undef ADDR2_TYPE
1430