VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/op_helper.c@15901

Last change on this file since 15901 was 15901, checked in by vboxsync, 16 years ago

REM: restored 0xffff'ing of mask, made IF reading in VME fully correct

File size: 194.5 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
86
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
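/* Note: RCL/RCR rotate through CF, so the effective rotate count is taken
   modulo 17 for 16-bit operands (16 data bits + CF) and modulo 9 for 8-bit
   operands; the two tables above precompute that reduction for the rotate
   helpers. */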
102
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
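/* x87 constants loaded by the FLDZ/FLD1/FLDPI/FLDLG2/FLDLN2/FLDL2E/FLDL2T
   helpers further down in this file; those helpers index this table directly. */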
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
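/* VBox-specific PUSHF/POPF emulation for virtual-8086 mode with CR4.VME set:
   the guest IF is virtualized through VIF/VIP.  POPF at IOPL < 3 stores the
   popped IF into VIF (raising #GP if that would enable interrupts while VIP
   is pending, or if TF would be set), and PUSHF reports VIF in the IF bit. */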
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176 return eflags & 0xffff;
177}
178
179void helper_dump_state()
180{
181 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
182 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
183 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
184 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
185 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
186 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
187 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
188}
189#endif
190
191/* return non-zero on error */
192#ifndef VBOX
193static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
194#else /* VBOX */
195DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
196#endif /* VBOX */
197 int selector)
198{
199 SegmentCache *dt;
200 int index;
201 target_ulong ptr;
202
203#ifdef VBOX
204 /* Trying to load a selector with CPL=1? */
205 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
206 {
207 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
208 selector = selector & 0xfffc;
209 }
210#endif
211
212 if (selector & 0x4)
213 dt = &env->ldt;
214 else
215 dt = &env->gdt;
216 index = selector & ~7;
217 if ((index + 7) > dt->limit)
218 return -1;
219 ptr = dt->base + index;
220 *e1_ptr = ldl_kernel(ptr);
221 *e2_ptr = ldl_kernel(ptr + 4);
222 return 0;
223}
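/* load_segment() fetches the two descriptor dwords (e1 = low dword, e2 = high
   dword) from the GDT or LDT selected by the TI bit of the selector.  It does
   no privilege or type checking; that is left to the callers. */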
224
225#ifndef VBOX
226static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
227#else /* VBOX */
228DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
229#endif /* VBOX */
230{
231 unsigned int limit;
232 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
233 if (e2 & DESC_G_MASK)
234 limit = (limit << 12) | 0xfff;
235 return limit;
236}
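/* Example: with DESC_G set and a raw 20-bit limit of 0xfffff the effective
   limit becomes (0xfffff << 12) | 0xfff = 0xffffffff, i.e. 4 GiB - 1. */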
237
238#ifndef VBOX
239static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
240#else /* VBOX */
241DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
242#endif /* VBOX */
243{
244 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
245}
246
247#ifndef VBOX
248static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
249#else /* VBOX */
250DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
251#endif /* VBOX */
252{
253 sc->base = get_seg_base(e1, e2);
254 sc->limit = get_seg_limit(e1, e2);
255 sc->flags = e2;
256}
257
258/* init the segment cache in vm86 mode. */
259#ifndef VBOX
260static inline void load_seg_vm(int seg, int selector)
261#else /* VBOX */
262DECLINLINE(void) load_seg_vm(int seg, int selector)
263#endif /* VBOX */
264{
265 selector &= 0xffff;
266#ifdef VBOX
267 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
268
269 if (seg == R_CS)
270 flags |= DESC_CS_MASK;
271
272 cpu_x86_load_seg_cache(env, seg, selector,
273 (selector << 4), 0xffff, flags);
274#else
275 cpu_x86_load_seg_cache(env, seg, selector,
276 (selector << 4), 0xffff, 0);
277#endif
278}
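/* vm86 segment semantics: base = selector << 4 with a 64KiB limit.  The VBOX
   variant additionally marks the cached descriptor as a present, writable
   data segment (code segment for CS), presumably so that later descriptor
   checks in the recompiler accept it. */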
279
280#ifndef VBOX
281static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
282#else /* VBOX */
283DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
284#endif /* VBOX */
285 uint32_t *esp_ptr, int dpl)
286{
287#ifndef VBOX
288 int type, index, shift;
289#else
290 unsigned int type, index, shift;
291#endif
292
293#if 0
294 {
295 int i;
296 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
297 for(i=0;i<env->tr.limit;i++) {
298 printf("%02x ", env->tr.base[i]);
299 if ((i & 7) == 7) printf("\n");
300 }
301 printf("\n");
302 }
303#endif
304
305 if (!(env->tr.flags & DESC_P_MASK))
306 cpu_abort(env, "invalid tss");
307 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
308 if ((type & 7) != 1)
309 cpu_abort(env, "invalid tss type");
310 shift = type >> 3;
311 index = (dpl * 4 + 2) << shift;
312 if (index + (4 << shift) - 1 > env->tr.limit)
313 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
314 if (shift == 0) {
315 *esp_ptr = lduw_kernel(env->tr.base + index);
316 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
317 } else {
318 *esp_ptr = ldl_kernel(env->tr.base + index);
319 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
320 }
321}
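/* Ring-transition stack pointers sit at fixed TSS offsets: a 32-bit TSS keeps
   the ESP/SS pair for privilege level 'dpl' at offset 8*dpl + 4, a 16-bit TSS
   keeps the SP/SS word pair at 4*dpl + 2; that is what the shifted index
   computation above encodes. */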
322
323/* XXX: merge with load_seg() */
324static void tss_load_seg(int seg_reg, int selector)
325{
326 uint32_t e1, e2;
327 int rpl, dpl, cpl;
328
329#ifdef VBOX
330 e1 = e2 = 0;
331 cpl = env->hflags & HF_CPL_MASK;
332 /* Trying to load a selector with CPL=1? */
333 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
334 {
335 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
336 selector = selector & 0xfffc;
337 }
338#endif
339
340 if ((selector & 0xfffc) != 0) {
341 if (load_segment(&e1, &e2, selector) != 0)
342 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
343 if (!(e2 & DESC_S_MASK))
344 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
345 rpl = selector & 3;
346 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
347 cpl = env->hflags & HF_CPL_MASK;
348 if (seg_reg == R_CS) {
349 if (!(e2 & DESC_CS_MASK))
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 /* XXX: is it correct ? */
352 if (dpl != rpl)
353 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
354 if ((e2 & DESC_C_MASK) && dpl > rpl)
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 } else if (seg_reg == R_SS) {
357 /* SS must be writable data */
358 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
359 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
360 if (dpl != cpl || dpl != rpl)
361 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
362 } else {
363 /* not readable code */
364 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
365 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
366 /* if data or non conforming code, checks the rights */
367 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
368 if (dpl < cpl || dpl < rpl)
369 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
370 }
371 }
372 if (!(e2 & DESC_P_MASK))
373 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
374 cpu_x86_load_seg_cache(env, seg_reg, selector,
375 get_seg_base(e1, e2),
376 get_seg_limit(e1, e2),
377 e2);
378 } else {
379 if (seg_reg == R_SS || seg_reg == R_CS)
380 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
381 }
382}
383
384#define SWITCH_TSS_JMP 0
385#define SWITCH_TSS_IRET 1
386#define SWITCH_TSS_CALL 2
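/* A task switch is triggered by a JMP or CALL to a TSS or task gate, or by an
   IRET with EFLAGS.NT set.  The source matters: JMP and IRET clear the old
   TSS's busy bit, JMP and CALL mark the new TSS busy, and only CALL stores
   the old TSS selector as back link and sets NT in the new task's EFLAGS. */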
387
388/* XXX: restore CPU state in registers (PowerPC case) */
389static void switch_tss(int tss_selector,
390 uint32_t e1, uint32_t e2, int source,
391 uint32_t next_eip)
392{
393 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
394 target_ulong tss_base;
395 uint32_t new_regs[8], new_segs[6];
396 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
397 uint32_t old_eflags, eflags_mask;
398 SegmentCache *dt;
399#ifndef VBOX
400 int index;
401#else
402 unsigned int index;
403#endif
404 target_ulong ptr;
405
406 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
407#ifdef DEBUG_PCALL
408 if (loglevel & CPU_LOG_PCALL)
409 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
410#endif
411
412#if defined(VBOX) && defined(DEBUG)
413 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
414#endif
415
416 /* if task gate, we read the TSS segment and we load it */
417 if (type == 5) {
418 if (!(e2 & DESC_P_MASK))
419 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
420 tss_selector = e1 >> 16;
421 if (tss_selector & 4)
422 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
423 if (load_segment(&e1, &e2, tss_selector) != 0)
424 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
425 if (e2 & DESC_S_MASK)
426 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
427 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
428 if ((type & 7) != 1)
429 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
430 }
431
432 if (!(e2 & DESC_P_MASK))
433 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
434
435 if (type & 8)
436 tss_limit_max = 103;
437 else
438 tss_limit_max = 43;
439 tss_limit = get_seg_limit(e1, e2);
440 tss_base = get_seg_base(e1, e2);
441 if ((tss_selector & 4) != 0 ||
442 tss_limit < tss_limit_max)
443 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
444 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
445 if (old_type & 8)
446 old_tss_limit_max = 103;
447 else
448 old_tss_limit_max = 43;
449
450 /* read all the registers from the new TSS */
451 if (type & 8) {
452 /* 32 bit */
453 new_cr3 = ldl_kernel(tss_base + 0x1c);
454 new_eip = ldl_kernel(tss_base + 0x20);
455 new_eflags = ldl_kernel(tss_base + 0x24);
456 for(i = 0; i < 8; i++)
457 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
458 for(i = 0; i < 6; i++)
459 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
460 new_ldt = lduw_kernel(tss_base + 0x60);
461 new_trap = ldl_kernel(tss_base + 0x64);
462 } else {
463 /* 16 bit */
464 new_cr3 = 0;
465 new_eip = lduw_kernel(tss_base + 0x0e);
466 new_eflags = lduw_kernel(tss_base + 0x10);
467 for(i = 0; i < 8; i++)
468 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
469 for(i = 0; i < 4; i++)
470 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2)) | 0xffff0000; /* ES,CS,SS,DS are 2 bytes apart in a 16-bit TSS */
471 new_ldt = lduw_kernel(tss_base + 0x2a);
472 new_segs[R_FS] = 0;
473 new_segs[R_GS] = 0;
474 new_trap = 0;
475 }
476
477 /* NOTE: we must avoid memory exceptions during the task switch,
478 so we make dummy accesses before */
479 /* XXX: it can still fail in some cases, so a bigger hack is
480 necessary to validate the TLB after having done the accesses */
481
482 v1 = ldub_kernel(env->tr.base);
483 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
484 stb_kernel(env->tr.base, v1);
485 stb_kernel(env->tr.base + old_tss_limit_max, v2);
486
487 /* clear busy bit (it is restartable) */
488 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
489 target_ulong ptr;
490 uint32_t e2;
491 ptr = env->gdt.base + (env->tr.selector & ~7);
492 e2 = ldl_kernel(ptr + 4);
493 e2 &= ~DESC_TSS_BUSY_MASK;
494 stl_kernel(ptr + 4, e2);
495 }
496 old_eflags = compute_eflags();
497 if (source == SWITCH_TSS_IRET)
498 old_eflags &= ~NT_MASK;
499
500 /* save the current state in the old TSS */
501 if (type & 8) {
502 /* 32 bit */
503 stl_kernel(env->tr.base + 0x20, next_eip);
504 stl_kernel(env->tr.base + 0x24, old_eflags);
505 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
506 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
507 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
508 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
509 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
510 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
511 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
512 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
513 for(i = 0; i < 6; i++)
514 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
515#if defined(VBOX) && defined(DEBUG)
516 printf("TSS 32 bits switch\n");
517 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
518#endif
519 } else {
520 /* 16 bit */
521 stw_kernel(env->tr.base + 0x0e, next_eip);
522 stw_kernel(env->tr.base + 0x10, old_eflags);
523 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
524 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
525 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
526 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
527 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
528 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
529 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
530 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
531 for(i = 0; i < 4; i++)
532 stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector); /* 2-byte stride in the 16-bit TSS */
533 }
534
535 /* now if an exception occurs, it will occur in the next task
536 context */
537
538 if (source == SWITCH_TSS_CALL) {
539 stw_kernel(tss_base, env->tr.selector);
540 new_eflags |= NT_MASK;
541 }
542
543 /* set busy bit */
544 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
545 target_ulong ptr;
546 uint32_t e2;
547 ptr = env->gdt.base + (tss_selector & ~7);
548 e2 = ldl_kernel(ptr + 4);
549 e2 |= DESC_TSS_BUSY_MASK;
550 stl_kernel(ptr + 4, e2);
551 }
552
553 /* set the new CPU state */
554 /* from this point, any exception which occurs can give problems */
555 env->cr[0] |= CR0_TS_MASK;
556 env->hflags |= HF_TS_MASK;
557 env->tr.selector = tss_selector;
558 env->tr.base = tss_base;
559 env->tr.limit = tss_limit;
560 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
561
562 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
563 cpu_x86_update_cr3(env, new_cr3);
564 }
565
566 /* load all registers without an exception, then reload them with
567 possible exception */
568 env->eip = new_eip;
569 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
570 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
571 if (!(type & 8))
572 eflags_mask &= 0xffff;
573 load_eflags(new_eflags, eflags_mask);
574 /* XXX: what to do in 16 bit case ? */
575 EAX = new_regs[0];
576 ECX = new_regs[1];
577 EDX = new_regs[2];
578 EBX = new_regs[3];
579 ESP = new_regs[4];
580 EBP = new_regs[5];
581 ESI = new_regs[6];
582 EDI = new_regs[7];
583 if (new_eflags & VM_MASK) {
584 for(i = 0; i < 6; i++)
585 load_seg_vm(i, new_segs[i]);
586 /* in vm86, CPL is always 3 */
587 cpu_x86_set_cpl(env, 3);
588 } else {
589 /* CPL is set to the RPL of CS */
590 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
591 /* first just load the selectors, as loading the descriptors may trigger exceptions */
592 for(i = 0; i < 6; i++)
593 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
594 }
595
596 env->ldt.selector = new_ldt & ~4;
597 env->ldt.base = 0;
598 env->ldt.limit = 0;
599 env->ldt.flags = 0;
600
601 /* load the LDT */
602 if (new_ldt & 4)
603 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
604
605 if ((new_ldt & 0xfffc) != 0) {
606 dt = &env->gdt;
607 index = new_ldt & ~7;
608 if ((index + 7) > dt->limit)
609 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
610 ptr = dt->base + index;
611 e1 = ldl_kernel(ptr);
612 e2 = ldl_kernel(ptr + 4);
613 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
614 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
615 if (!(e2 & DESC_P_MASK))
616 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
617 load_seg_cache_raw_dt(&env->ldt, e1, e2);
618 }
619
620 /* load the segments */
621 if (!(new_eflags & VM_MASK)) {
622 tss_load_seg(R_CS, new_segs[R_CS]);
623 tss_load_seg(R_SS, new_segs[R_SS]);
624 tss_load_seg(R_ES, new_segs[R_ES]);
625 tss_load_seg(R_DS, new_segs[R_DS]);
626 tss_load_seg(R_FS, new_segs[R_FS]);
627 tss_load_seg(R_GS, new_segs[R_GS]);
628 }
629
630 /* check that EIP is in the CS segment limits */
631 if (new_eip > env->segs[R_CS].limit) {
632 /* XXX: different exception if CALL ? */
633 raise_exception_err(EXCP0D_GPF, 0);
634 }
635}
636
637/* check if Port I/O is allowed in TSS */
638#ifndef VBOX
639static inline void check_io(int addr, int size)
640{
641 int io_offset, val, mask;
642
643#else /* VBOX */
644DECLINLINE(void) check_io(int addr, int size)
645{
646 int val, mask;
647 unsigned int io_offset;
648#endif /* VBOX */
649 /* TSS must be a valid 32 bit one */
650 if (!(env->tr.flags & DESC_P_MASK) ||
651 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
652 env->tr.limit < 103)
653 goto fail;
654 io_offset = lduw_kernel(env->tr.base + 0x66);
655 io_offset += (addr >> 3);
656 /* Note: the check needs two bytes */
657 if ((io_offset + 1) > env->tr.limit)
658 goto fail;
659 val = lduw_kernel(env->tr.base + io_offset);
660 val >>= (addr & 7);
661 mask = (1 << size) - 1;
662 /* all bits must be zero to allow the I/O */
663 if ((val & mask) != 0) {
664 fail:
665 raise_exception_err(EXCP0D_GPF, 0);
666 }
667}
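/* The I/O permission bitmap starts at the 16-bit offset stored at byte 0x66
   of the 32-bit TSS.  One bit per port; every bit covering the access (1, 2
   or 4 consecutive ports) must be clear, otherwise #GP(0) is raised. */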
668
669#ifdef VBOX
670/* Keep in sync with gen_check_external_event() */
671void helper_check_external_event()
672{
673 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
674 | CPU_INTERRUPT_EXTERNAL_TIMER
675 | CPU_INTERRUPT_EXTERNAL_DMA))
676 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
677 && (env->eflags & IF_MASK)
678 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
679 {
680 helper_external_event();
681 }
682
683}
684
685void helper_sync_seg(uint32_t reg)
686{
687 assert(env->segs[reg].newselector != 0);
688 sync_seg(env, reg, env->segs[reg].newselector);
689}
690#endif
691
692void helper_check_iob(uint32_t t0)
693{
694 check_io(t0, 1);
695}
696
697void helper_check_iow(uint32_t t0)
698{
699 check_io(t0, 2);
700}
701
702void helper_check_iol(uint32_t t0)
703{
704 check_io(t0, 4);
705}
706
707void helper_outb(uint32_t port, uint32_t data)
708{
709 cpu_outb(env, port, data & 0xff);
710}
711
712target_ulong helper_inb(uint32_t port)
713{
714 return cpu_inb(env, port);
715}
716
717void helper_outw(uint32_t port, uint32_t data)
718{
719 cpu_outw(env, port, data & 0xffff);
720}
721
722target_ulong helper_inw(uint32_t port)
723{
724 return cpu_inw(env, port);
725}
726
727void helper_outl(uint32_t port, uint32_t data)
728{
729 cpu_outl(env, port, data);
730}
731
732target_ulong helper_inl(uint32_t port)
733{
734 return cpu_inl(env, port);
735}
736
737#ifndef VBOX
738static inline unsigned int get_sp_mask(unsigned int e2)
739#else /* VBOX */
740DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
741#endif /* VBOX */
742{
743 if (e2 & DESC_B_MASK)
744 return 0xffffffff;
745 else
746 return 0xffff;
747}
748
749#ifdef TARGET_X86_64
750#define SET_ESP(val, sp_mask)\
751do {\
752 if ((sp_mask) == 0xffff)\
753 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
754 else if ((sp_mask) == 0xffffffffLL)\
755 ESP = (uint32_t)(val);\
756 else\
757 ESP = (val);\
758} while (0)
759#else
760#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
761#endif
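/* SET_ESP writes back only the bits selected by the stack-size mask: a 16-bit
   stack (B=0) updates SP and preserves the upper half of ESP, while a 32-bit
   stack zero-extends the value on 64-bit targets. */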
762
763/* in 64-bit machines, this can overflow. So this segment addition macro
764 * can be used to trim the value to 32-bit whenever needed */
765#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
766
767/* XXX: add a is_user flag to have proper security support */
768#define PUSHW(ssp, sp, sp_mask, val)\
769{\
770 sp -= 2;\
771 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
772}
773
774#define PUSHL(ssp, sp, sp_mask, val)\
775{\
776 sp -= 4;\
777 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
778}
779
780#define POPW(ssp, sp, sp_mask, val)\
781{\
782 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
783 sp += 2;\
784}
785
786#define POPL(ssp, sp, sp_mask, val)\
787{\
788 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
789 sp += 4;\
790}
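/* These PUSH/POP macros work on a flat kernel view of the stack: ssp is the
   SS segment base and sp is masked with the stack-size mask so that 16-bit
   stacks wrap correctly inside their 64KiB segment. */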
791
792/* protected mode interrupt */
793static void do_interrupt_protected(int intno, int is_int, int error_code,
794 unsigned int next_eip, int is_hw)
795{
796 SegmentCache *dt;
797 target_ulong ptr, ssp;
798 int type, dpl, selector, ss_dpl, cpl;
799 int has_error_code, new_stack, shift;
800 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
801 uint32_t old_eip, sp_mask;
802
803#ifdef VBOX
804 ss = ss_e1 = ss_e2 = 0;
805# ifdef VBOX_WITH_VMI
806 if ( intno == 6
807 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
808 {
809 env->exception_index = EXCP_PARAV_CALL;
810 cpu_loop_exit();
811 }
812# endif
813 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
814 cpu_loop_exit();
815#endif
816
817 has_error_code = 0;
818 if (!is_int && !is_hw) {
819 switch(intno) {
820 case 8:
821 case 10:
822 case 11:
823 case 12:
824 case 13:
825 case 14:
826 case 17:
827 has_error_code = 1;
828 break;
829 }
830 }
831 if (is_int)
832 old_eip = next_eip;
833 else
834 old_eip = env->eip;
835
836 dt = &env->idt;
837#ifndef VBOX
838 if (intno * 8 + 7 > dt->limit)
839#else
840 if ((unsigned)intno * 8 + 7 > dt->limit)
841#endif
842 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
843 ptr = dt->base + intno * 8;
844 e1 = ldl_kernel(ptr);
845 e2 = ldl_kernel(ptr + 4);
846 /* check gate type */
847 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
848 switch(type) {
849 case 5: /* task gate */
850 /* must do that check here to return the correct error code */
851 if (!(e2 & DESC_P_MASK))
852 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
853 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
854 if (has_error_code) {
855 int type;
856 uint32_t mask;
857 /* push the error code */
858 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
859 shift = type >> 3;
860 if (env->segs[R_SS].flags & DESC_B_MASK)
861 mask = 0xffffffff;
862 else
863 mask = 0xffff;
864 esp = (ESP - (2 << shift)) & mask;
865 ssp = env->segs[R_SS].base + esp;
866 if (shift)
867 stl_kernel(ssp, error_code);
868 else
869 stw_kernel(ssp, error_code);
870 SET_ESP(esp, mask);
871 }
872 return;
873 case 6: /* 286 interrupt gate */
874 case 7: /* 286 trap gate */
875 case 14: /* 386 interrupt gate */
876 case 15: /* 386 trap gate */
877 break;
878 default:
879 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
880 break;
881 }
882 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
883 cpl = env->hflags & HF_CPL_MASK;
884 /* check privilege if software int */
885 if (is_int && dpl < cpl)
886 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
887 /* check valid bit */
888 if (!(e2 & DESC_P_MASK))
889 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
890 selector = e1 >> 16;
891 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
892 if ((selector & 0xfffc) == 0)
893 raise_exception_err(EXCP0D_GPF, 0);
894
895 if (load_segment(&e1, &e2, selector) != 0)
896 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
897 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
898 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
899 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
900 if (dpl > cpl)
901 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
902 if (!(e2 & DESC_P_MASK))
903 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
904 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
905 /* to inner privilege */
906 get_ss_esp_from_tss(&ss, &esp, dpl);
907 if ((ss & 0xfffc) == 0)
908 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
909 if ((ss & 3) != dpl)
910 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
911 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
912 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
913 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
914 if (ss_dpl != dpl)
915 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
916 if (!(ss_e2 & DESC_S_MASK) ||
917 (ss_e2 & DESC_CS_MASK) ||
918 !(ss_e2 & DESC_W_MASK))
919 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
920 if (!(ss_e2 & DESC_P_MASK))
921#ifdef VBOX /* See page 3-477 of 253666.pdf */
922 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
923#else
924 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925#endif
926 new_stack = 1;
927 sp_mask = get_sp_mask(ss_e2);
928 ssp = get_seg_base(ss_e1, ss_e2);
929#if defined(VBOX) && defined(DEBUG)
930 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
931#endif
932 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
933 /* to same privilege */
934 if (env->eflags & VM_MASK)
935 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
936 new_stack = 0;
937 sp_mask = get_sp_mask(env->segs[R_SS].flags);
938 ssp = env->segs[R_SS].base;
939 esp = ESP;
940 dpl = cpl;
941 } else {
942 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
943 new_stack = 0; /* avoid warning */
944 sp_mask = 0; /* avoid warning */
945 ssp = 0; /* avoid warning */
946 esp = 0; /* avoid warning */
947 }
948
949 shift = type >> 3;
950
951#if 0
952 /* XXX: check that enough room is available */
953 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
954 if (env->eflags & VM_MASK)
955 push_size += 8;
956 push_size <<= shift;
957#endif
958 if (shift == 1) {
959 if (new_stack) {
960 if (env->eflags & VM_MASK) {
961 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
962 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
963 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
964 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
965 }
966 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
967 PUSHL(ssp, esp, sp_mask, ESP);
968 }
969 PUSHL(ssp, esp, sp_mask, compute_eflags());
970 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
971 PUSHL(ssp, esp, sp_mask, old_eip);
972 if (has_error_code) {
973 PUSHL(ssp, esp, sp_mask, error_code);
974 }
975 } else {
976 if (new_stack) {
977 if (env->eflags & VM_MASK) {
978 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
979 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
980 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
981 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
982 }
983 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
984 PUSHW(ssp, esp, sp_mask, ESP);
985 }
986 PUSHW(ssp, esp, sp_mask, compute_eflags());
987 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
988 PUSHW(ssp, esp, sp_mask, old_eip);
989 if (has_error_code) {
990 PUSHW(ssp, esp, sp_mask, error_code);
991 }
992 }
993
994 if (new_stack) {
995 if (env->eflags & VM_MASK) {
996 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
997 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
998 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
999 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1000 }
1001 ss = (ss & ~3) | dpl;
1002 cpu_x86_load_seg_cache(env, R_SS, ss,
1003 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1004 }
1005 SET_ESP(esp, sp_mask);
1006
1007 selector = (selector & ~3) | dpl;
1008 cpu_x86_load_seg_cache(env, R_CS, selector,
1009 get_seg_base(e1, e2),
1010 get_seg_limit(e1, e2),
1011 e2);
1012 cpu_x86_set_cpl(env, dpl);
1013 env->eip = offset;
1014
1015 /* interrupt gates clear the IF flag */
1016 if ((type & 1) == 0) {
1017 env->eflags &= ~IF_MASK;
1018 }
1019 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1020}
1021#ifdef VBOX
1022
1023/* check if VME interrupt redirection is enabled in TSS */
1024DECLINLINE(bool) is_vme_irq_redirected(int intno)
1025{
1026 unsigned int io_offset, intredir_offset;
1027 unsigned char val, mask;
1028
1029 /* TSS must be a valid 32 bit one */
1030 if (!(env->tr.flags & DESC_P_MASK) ||
1031 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1032 env->tr.limit < 103)
1033 goto fail;
1034 io_offset = lduw_kernel(env->tr.base + 0x66);
1035 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1036 if (io_offset < 0x68 + 0x20)
1037 io_offset = 0x68 + 0x20;
1038 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1039 intredir_offset = io_offset - 0x20;
1040
1041 intredir_offset += (intno >> 3);
1042 if ((intredir_offset) > env->tr.limit)
1043 goto fail;
1044
1045 val = ldub_kernel(env->tr.base + intredir_offset);
1046 mask = 1 << (unsigned char)(intno & 7);
1047
1048 /* bit set means no redirection. */
1049 if ((val & mask) != 0) {
1050 return false;
1051 }
1052 return true;
1053
1054fail:
1055 raise_exception_err(EXCP0D_GPF, 0);
1056 return true;
1057}
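/* The VME interrupt redirection bitmap is the 32 bytes immediately below the
   I/O permission bitmap in the TSS, one bit per software interrupt vector.
   A clear bit means INT n is handled by the virtual-8086 handler through the
   real-mode IVT; a set bit sends it to the protected-mode IDT. */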
1058
1059/* V86 mode software interrupt with CR4.VME=1 */
1060static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1061{
1062 target_ulong ptr, ssp;
1063 int selector;
1064 uint32_t offset, esp;
1065 uint32_t old_cs, old_eflags;
1066 uint32_t iopl;
1067
1068 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1069
1070 if (!is_vme_irq_redirected(intno))
1071 {
1072 if (iopl == 3)
1073 {
1074 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1075 return;
1076 }
1077 else
1078 raise_exception_err(EXCP0D_GPF, 0);
1079 }
1080
1081 /* virtual mode idt is at linear address 0 */
1082 ptr = 0 + intno * 4;
1083 offset = lduw_kernel(ptr);
1084 selector = lduw_kernel(ptr + 2);
1085 esp = ESP;
1086 ssp = env->segs[R_SS].base;
1087 old_cs = env->segs[R_CS].selector;
1088
1089 old_eflags = compute_eflags();
1090 if (iopl < 3)
1091 {
1092 /* copy VIF into IF and set IOPL to 3 */
1093 if (env->eflags & VIF_MASK)
1094 old_eflags |= IF_MASK;
1095 else
1096 old_eflags &= ~IF_MASK;
1097
1098 old_eflags |= (3 << IOPL_SHIFT);
1099 }
1100
1101 /* XXX: use SS segment size ? */
1102 PUSHW(ssp, esp, 0xffff, old_eflags);
1103 PUSHW(ssp, esp, 0xffff, old_cs);
1104 PUSHW(ssp, esp, 0xffff, next_eip);
1105
1106 /* update processor state */
1107 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1108 env->eip = offset;
1109 env->segs[R_CS].selector = selector;
1110 env->segs[R_CS].base = (selector << 4);
1111 env->eflags &= ~(TF_MASK | RF_MASK);
1112
1113 if (iopl < 3)
1114 env->eflags &= ~VIF_MASK;
1115 else
1116 env->eflags &= ~IF_MASK;
1117}
1118#endif /* VBOX */
1119
1120#ifdef TARGET_X86_64
1121
1122#define PUSHQ(sp, val)\
1123{\
1124 sp -= 8;\
1125 stq_kernel(sp, (val));\
1126}
1127
1128#define POPQ(sp, val)\
1129{\
1130 val = ldq_kernel(sp);\
1131 sp += 8;\
1132}
1133
1134#ifndef VBOX
1135static inline target_ulong get_rsp_from_tss(int level)
1136#else /* VBOX */
1137DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1138#endif /* VBOX */
1139{
1140 int index;
1141
1142#if 0
1143 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1144 env->tr.base, env->tr.limit);
1145#endif
1146
1147 if (!(env->tr.flags & DESC_P_MASK))
1148 cpu_abort(env, "invalid tss");
1149 index = 8 * level + 4;
1150 if ((index + 7) > env->tr.limit)
1151 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1152 return ldq_kernel(env->tr.base + index);
1153}
1154
1155/* 64 bit interrupt */
1156static void do_interrupt64(int intno, int is_int, int error_code,
1157 target_ulong next_eip, int is_hw)
1158{
1159 SegmentCache *dt;
1160 target_ulong ptr;
1161 int type, dpl, selector, cpl, ist;
1162 int has_error_code, new_stack;
1163 uint32_t e1, e2, e3, ss;
1164 target_ulong old_eip, esp, offset;
1165
1166#ifdef VBOX
1167 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1168 cpu_loop_exit();
1169#endif
1170
1171 has_error_code = 0;
1172 if (!is_int && !is_hw) {
1173 switch(intno) {
1174 case 8:
1175 case 10:
1176 case 11:
1177 case 12:
1178 case 13:
1179 case 14:
1180 case 17:
1181 has_error_code = 1;
1182 break;
1183 }
1184 }
1185 if (is_int)
1186 old_eip = next_eip;
1187 else
1188 old_eip = env->eip;
1189
1190 dt = &env->idt;
1191 if (intno * 16 + 15 > dt->limit)
1192 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1193 ptr = dt->base + intno * 16;
1194 e1 = ldl_kernel(ptr);
1195 e2 = ldl_kernel(ptr + 4);
1196 e3 = ldl_kernel(ptr + 8);
1197 /* check gate type */
1198 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1199 switch(type) {
1200 case 14: /* 386 interrupt gate */
1201 case 15: /* 386 trap gate */
1202 break;
1203 default:
1204 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1205 break;
1206 }
1207 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1208 cpl = env->hflags & HF_CPL_MASK;
1209 /* check privilege if software int */
1210 if (is_int && dpl < cpl)
1211 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1212 /* check valid bit */
1213 if (!(e2 & DESC_P_MASK))
1214 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1215 selector = e1 >> 16;
1216 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1217 ist = e2 & 7;
1218 if ((selector & 0xfffc) == 0)
1219 raise_exception_err(EXCP0D_GPF, 0);
1220
1221 if (load_segment(&e1, &e2, selector) != 0)
1222 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1223 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1224 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1225 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1226 if (dpl > cpl)
1227 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1228 if (!(e2 & DESC_P_MASK))
1229 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1230 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1231 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1232 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1233 /* to inner privilege */
1234 if (ist != 0)
1235 esp = get_rsp_from_tss(ist + 3);
1236 else
1237 esp = get_rsp_from_tss(dpl);
1238 esp &= ~0xfLL; /* align stack */
1239 ss = 0;
1240 new_stack = 1;
1241 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1242 /* to same privilege */
1243 if (env->eflags & VM_MASK)
1244 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1245 new_stack = 0;
1246 if (ist != 0)
1247 esp = get_rsp_from_tss(ist + 3);
1248 else
1249 esp = ESP;
1250 esp &= ~0xfLL; /* align stack */
1251 dpl = cpl;
1252 } else {
1253 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1254 new_stack = 0; /* avoid warning */
1255 esp = 0; /* avoid warning */
1256 }
1257
1258 PUSHQ(esp, env->segs[R_SS].selector);
1259 PUSHQ(esp, ESP);
1260 PUSHQ(esp, compute_eflags());
1261 PUSHQ(esp, env->segs[R_CS].selector);
1262 PUSHQ(esp, old_eip);
1263 if (has_error_code) {
1264 PUSHQ(esp, error_code);
1265 }
1266
1267 if (new_stack) {
1268 ss = 0 | dpl;
1269 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1270 }
1271 ESP = esp;
1272
1273 selector = (selector & ~3) | dpl;
1274 cpu_x86_load_seg_cache(env, R_CS, selector,
1275 get_seg_base(e1, e2),
1276 get_seg_limit(e1, e2),
1277 e2);
1278 cpu_x86_set_cpl(env, dpl);
1279 env->eip = offset;
1280
1281 /* interrupt gates clear the IF flag */
1282 if ((type & 1) == 0) {
1283 env->eflags &= ~IF_MASK;
1284 }
1285 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1286}
1287#endif
1288
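/* SYSCALL/SYSRET use the MSRs cached in env: STAR[47:32] supplies the kernel
   CS for SYSCALL (SS is that selector + 8), STAR[63:48] the user selectors
   for SYSRET, LSTAR/CSTAR hold the 64-bit and compatibility-mode entry
   points, and env->fmask (SFMASK) selects which RFLAGS bits SYSCALL clears. */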
1289#if defined(CONFIG_USER_ONLY)
1290void helper_syscall(int next_eip_addend)
1291{
1292 env->exception_index = EXCP_SYSCALL;
1293 env->exception_next_eip = env->eip + next_eip_addend;
1294 cpu_loop_exit();
1295}
1296#else
1297void helper_syscall(int next_eip_addend)
1298{
1299 int selector;
1300
1301 if (!(env->efer & MSR_EFER_SCE)) {
1302 raise_exception_err(EXCP06_ILLOP, 0);
1303 }
1304 selector = (env->star >> 32) & 0xffff;
1305#ifdef TARGET_X86_64
1306 if (env->hflags & HF_LMA_MASK) {
1307 int code64;
1308
1309 ECX = env->eip + next_eip_addend;
1310 env->regs[11] = compute_eflags();
1311
1312 code64 = env->hflags & HF_CS64_MASK;
1313
1314 cpu_x86_set_cpl(env, 0);
1315 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1316 0, 0xffffffff,
1317 DESC_G_MASK | DESC_P_MASK |
1318 DESC_S_MASK |
1319 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1320 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1321 0, 0xffffffff,
1322 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1323 DESC_S_MASK |
1324 DESC_W_MASK | DESC_A_MASK);
1325 env->eflags &= ~env->fmask;
1326 load_eflags(env->eflags, 0);
1327 if (code64)
1328 env->eip = env->lstar;
1329 else
1330 env->eip = env->cstar;
1331 } else
1332#endif
1333 {
1334 ECX = (uint32_t)(env->eip + next_eip_addend);
1335
1336 cpu_x86_set_cpl(env, 0);
1337 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1338 0, 0xffffffff,
1339 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1340 DESC_S_MASK |
1341 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1342 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1343 0, 0xffffffff,
1344 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1345 DESC_S_MASK |
1346 DESC_W_MASK | DESC_A_MASK);
1347 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1348 env->eip = (uint32_t)env->star;
1349 }
1350}
1351#endif
1352
1353void helper_sysret(int dflag)
1354{
1355 int cpl, selector;
1356
1357 if (!(env->efer & MSR_EFER_SCE)) {
1358 raise_exception_err(EXCP06_ILLOP, 0);
1359 }
1360 cpl = env->hflags & HF_CPL_MASK;
1361 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1362 raise_exception_err(EXCP0D_GPF, 0);
1363 }
1364 selector = (env->star >> 48) & 0xffff;
1365#ifdef TARGET_X86_64
1366 if (env->hflags & HF_LMA_MASK) {
1367 if (dflag == 2) {
1368 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1369 0, 0xffffffff,
1370 DESC_G_MASK | DESC_P_MASK |
1371 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1372 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1373 DESC_L_MASK);
1374 env->eip = ECX;
1375 } else {
1376 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1377 0, 0xffffffff,
1378 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1379 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1380 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1381 env->eip = (uint32_t)ECX;
1382 }
1383 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1384 0, 0xffffffff,
1385 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1386 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1387 DESC_W_MASK | DESC_A_MASK);
1388 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1389 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1390 cpu_x86_set_cpl(env, 3);
1391 } else
1392#endif
1393 {
1394 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1395 0, 0xffffffff,
1396 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1397 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1398 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1399 env->eip = (uint32_t)ECX;
1400 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1401 0, 0xffffffff,
1402 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1403 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1404 DESC_W_MASK | DESC_A_MASK);
1405 env->eflags |= IF_MASK;
1406 cpu_x86_set_cpl(env, 3);
1407 }
1408#ifdef USE_KQEMU
1409 if (kqemu_is_ok(env)) {
1410 if (env->hflags & HF_LMA_MASK)
1411 CC_OP = CC_OP_EFLAGS;
1412 env->exception_index = -1;
1413 cpu_loop_exit();
1414 }
1415#endif
1416}
1417
1418#ifdef VBOX
1419/**
1420 * Checks and processes external VMM events.
1421 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1422 */
1423void helper_external_event(void)
1424{
1425#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1426 uintptr_t uESP;
1427 __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
1428 AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
1429#endif
1430 /* Keep in sync with flags checked by gen_check_external_event() */
1431 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1432 {
1433 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1434 ~CPU_INTERRUPT_EXTERNAL_HARD);
1435 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1436 }
1437 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1438 {
1439 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1440 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1441 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1442 }
1443 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1444 {
1445 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1446 ~CPU_INTERRUPT_EXTERNAL_DMA);
1447 remR3DmaRun(env);
1448 }
1449 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1450 {
1451 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1452 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1453 remR3TimersRun(env);
1454 }
1455}
1456/* helper for recording call instruction addresses for later scanning */
1457void helper_record_call()
1458{
1459 if ( !(env->state & CPU_RAW_RING0)
1460 && (env->cr[0] & CR0_PG_MASK)
1461 && !(env->eflags & X86_EFL_IF))
1462 remR3RecordCall(env);
1463}
1464#endif /* VBOX */
1465
1466/* real mode interrupt */
1467static void do_interrupt_real(int intno, int is_int, int error_code,
1468 unsigned int next_eip)
1469{
1470 SegmentCache *dt;
1471 target_ulong ptr, ssp;
1472 int selector;
1473 uint32_t offset, esp;
1474 uint32_t old_cs, old_eip;
1475
1476 /* real mode (simpler !) */
1477 dt = &env->idt;
1478#ifndef VBOX
1479 if (intno * 4 + 3 > dt->limit)
1480#else
1481 if ((unsigned)intno * 4 + 3 > dt->limit)
1482#endif
1483 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1484 ptr = dt->base + intno * 4;
1485 offset = lduw_kernel(ptr);
1486 selector = lduw_kernel(ptr + 2);
1487 esp = ESP;
1488 ssp = env->segs[R_SS].base;
1489 if (is_int)
1490 old_eip = next_eip;
1491 else
1492 old_eip = env->eip;
1493 old_cs = env->segs[R_CS].selector;
1494 /* XXX: use SS segment size ? */
1495 PUSHW(ssp, esp, 0xffff, compute_eflags());
1496 PUSHW(ssp, esp, 0xffff, old_cs);
1497 PUSHW(ssp, esp, 0xffff, old_eip);
1498
1499 /* update processor state */
1500 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1501 env->eip = offset;
1502 env->segs[R_CS].selector = selector;
1503 env->segs[R_CS].base = (selector << 4);
1504 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1505}
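/* Real-mode interrupt delivery: fetch the 4-byte IVT entry at linear address
   vector * 4, push FLAGS, CS and IP as 16-bit words on SS:SP, then clear IF,
   TF, AC and RF as done above. */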
1506
1507/* fake user mode interrupt */
1508void do_interrupt_user(int intno, int is_int, int error_code,
1509 target_ulong next_eip)
1510{
1511 SegmentCache *dt;
1512 target_ulong ptr;
1513 int dpl, cpl, shift;
1514 uint32_t e2;
1515
1516 dt = &env->idt;
1517 if (env->hflags & HF_LMA_MASK) {
1518 shift = 4;
1519 } else {
1520 shift = 3;
1521 }
1522 ptr = dt->base + (intno << shift);
1523 e2 = ldl_kernel(ptr + 4);
1524
1525 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1526 cpl = env->hflags & HF_CPL_MASK;
1527 /* check privilege if software int */
1528 if (is_int && dpl < cpl)
1529 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1530
1531 /* Since we emulate only user space, we cannot do more than
1532 exiting the emulation with the suitable exception and error
1533 code */
1534 if (is_int)
1535 EIP = next_eip;
1536}
1537
1538/*
1539 * Begin execution of an interruption. is_int is TRUE if coming from
1540 * the int instruction. next_eip is the EIP value AFTER the interrupt
1541 * instruction. It is only relevant if is_int is TRUE.
1542 */
1543void do_interrupt(int intno, int is_int, int error_code,
1544 target_ulong next_eip, int is_hw)
1545{
1546 if (loglevel & CPU_LOG_INT) {
1547 if ((env->cr[0] & CR0_PE_MASK)) {
1548 static int count;
1549 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1550 count, intno, error_code, is_int,
1551 env->hflags & HF_CPL_MASK,
1552 env->segs[R_CS].selector, EIP,
1553 (int)env->segs[R_CS].base + EIP,
1554 env->segs[R_SS].selector, ESP);
1555 if (intno == 0x0e) {
1556 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1557 } else {
1558 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1559 }
1560 fprintf(logfile, "\n");
1561 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1562#if 0
1563 {
1564 int i;
1565 uint8_t *ptr;
1566 fprintf(logfile, " code=");
1567 ptr = env->segs[R_CS].base + env->eip;
1568 for(i = 0; i < 16; i++) {
1569 fprintf(logfile, " %02x", ldub(ptr + i));
1570 }
1571 fprintf(logfile, "\n");
1572 }
1573#endif
1574 count++;
1575 }
1576 }
1577 if (env->cr[0] & CR0_PE_MASK) {
1578#ifdef TARGET_X86_64
1579 if (env->hflags & HF_LMA_MASK) {
1580 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1581 } else
1582#endif
1583 {
1584#ifdef VBOX
1585 /* int xx *, v86 code and VME enabled? */
1586 if ( (env->eflags & VM_MASK)
1587 && (env->cr[4] & CR4_VME_MASK)
1588 && is_int
1589 && !is_hw
1590 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1591 )
1592 do_soft_interrupt_vme(intno, error_code, next_eip);
1593 else
1594#endif /* VBOX */
1595 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1596 }
1597 } else {
1598 do_interrupt_real(intno, is_int, error_code, next_eip);
1599 }
1600}
1601
1602/*
1603 * Check nested exceptions and change to double or triple fault if
1604 * needed. It should only be called if this is not an interrupt.
1605 * Returns the new exception number.
1606 */
1607static int check_exception(int intno, int *error_code)
1608{
1609 int first_contributory = env->old_exception == 0 ||
1610 (env->old_exception >= 10 &&
1611 env->old_exception <= 13);
1612 int second_contributory = intno == 0 ||
1613 (intno >= 10 && intno <= 13);
1614
1615 if (loglevel & CPU_LOG_INT)
1616 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1617 env->old_exception, intno);
1618
1619 if (env->old_exception == EXCP08_DBLE)
1620 cpu_abort(env, "triple fault");
1621
1622 if ((first_contributory && second_contributory)
1623 || (env->old_exception == EXCP0E_PAGE &&
1624 (second_contributory || (intno == EXCP0E_PAGE)))) {
1625 intno = EXCP08_DBLE;
1626 *error_code = 0;
1627 }
1628
1629 if (second_contributory || (intno == EXCP0E_PAGE) ||
1630 (intno == EXCP08_DBLE))
1631 env->old_exception = intno;
1632
1633 return intno;
1634}
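/* Vectors 0 and 10-13 (#DE, #TS, #NP, #SS, #GP) form the "contributory"
   class: two contributory faults, or a page fault followed by a contributory
   fault or another page fault, escalate to #DF; a further fault while
   delivering #DF is a triple fault, handled above as cpu_abort(). */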
1635
1636/*
1637 * Signal an interruption. It is executed in the main CPU loop.
1638 * is_int is TRUE if coming from the int instruction. next_eip is the
1639 * EIP value AFTER the interrupt instruction. It is only relevant if
1640 * is_int is TRUE.
1641 */
1642void raise_interrupt(int intno, int is_int, int error_code,
1643 int next_eip_addend)
1644{
1645#if defined(VBOX) && defined(DEBUG)
1646 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1647#endif
1648 if (!is_int) {
1649 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1650 intno = check_exception(intno, &error_code);
1651 } else {
1652 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1653 }
1654
1655 env->exception_index = intno;
1656 env->error_code = error_code;
1657 env->exception_is_int = is_int;
1658 env->exception_next_eip = env->eip + next_eip_addend;
1659 cpu_loop_exit();
1660}
1661
1662/* shortcuts to generate exceptions */
1663
1664void (raise_exception_err)(int exception_index, int error_code)
1665{
1666 raise_interrupt(exception_index, 0, error_code, 0);
1667}
1668
1669void raise_exception(int exception_index)
1670{
1671 raise_interrupt(exception_index, 0, 0, 0);
1672}
1673
1674/* SMM support */
1675
1676#if defined(CONFIG_USER_ONLY)
1677
1678void do_smm_enter(void)
1679{
1680}
1681
1682void helper_rsm(void)
1683{
1684}
1685
1686#else
1687
1688#ifdef TARGET_X86_64
1689#define SMM_REVISION_ID 0x00020064
1690#else
1691#define SMM_REVISION_ID 0x00020000
1692#endif
1693
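/* On SMI the CPU starts executing at SMBASE + 0x8000 in a real-mode-like
   environment, and the register state is written to the save area at the top
   of SMRAM (offsets 0x7e00..0x7fff from sm_state below, i.e. SMBASE +
   0xfe00..0xffff).  The revision id at offset 0x7efc advertises SMBASE
   relocation in bit 17, which helper_rsm checks before reloading env->smbase. */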
1694void do_smm_enter(void)
1695{
1696 target_ulong sm_state;
1697 SegmentCache *dt;
1698 int i, offset;
1699
1700 if (loglevel & CPU_LOG_INT) {
1701 fprintf(logfile, "SMM: enter\n");
1702 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1703 }
1704
1705 env->hflags |= HF_SMM_MASK;
1706 cpu_smm_update(env);
1707
1708 sm_state = env->smbase + 0x8000;
1709
1710#ifdef TARGET_X86_64
1711 for(i = 0; i < 6; i++) {
1712 dt = &env->segs[i];
1713 offset = 0x7e00 + i * 16;
1714 stw_phys(sm_state + offset, dt->selector);
1715 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1716 stl_phys(sm_state + offset + 4, dt->limit);
1717 stq_phys(sm_state + offset + 8, dt->base);
1718 }
1719
1720 stq_phys(sm_state + 0x7e68, env->gdt.base);
1721 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1722
1723 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1724 stq_phys(sm_state + 0x7e78, env->ldt.base);
1725 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1726 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1727
1728 stq_phys(sm_state + 0x7e88, env->idt.base);
1729 stl_phys(sm_state + 0x7e84, env->idt.limit);
1730
1731 stw_phys(sm_state + 0x7e90, env->tr.selector);
1732 stq_phys(sm_state + 0x7e98, env->tr.base);
1733 stl_phys(sm_state + 0x7e94, env->tr.limit);
1734 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1735
1736 stq_phys(sm_state + 0x7ed0, env->efer);
1737
1738 stq_phys(sm_state + 0x7ff8, EAX);
1739 stq_phys(sm_state + 0x7ff0, ECX);
1740 stq_phys(sm_state + 0x7fe8, EDX);
1741 stq_phys(sm_state + 0x7fe0, EBX);
1742 stq_phys(sm_state + 0x7fd8, ESP);
1743 stq_phys(sm_state + 0x7fd0, EBP);
1744 stq_phys(sm_state + 0x7fc8, ESI);
1745 stq_phys(sm_state + 0x7fc0, EDI);
1746 for(i = 8; i < 16; i++)
1747 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1748 stq_phys(sm_state + 0x7f78, env->eip);
1749 stl_phys(sm_state + 0x7f70, compute_eflags());
1750 stl_phys(sm_state + 0x7f68, env->dr[6]);
1751 stl_phys(sm_state + 0x7f60, env->dr[7]);
1752
1753 stl_phys(sm_state + 0x7f48, env->cr[4]);
1754 stl_phys(sm_state + 0x7f50, env->cr[3]);
1755 stl_phys(sm_state + 0x7f58, env->cr[0]);
1756
1757 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1758 stl_phys(sm_state + 0x7f00, env->smbase);
1759#else
1760 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1761 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1762 stl_phys(sm_state + 0x7ff4, compute_eflags());
1763 stl_phys(sm_state + 0x7ff0, env->eip);
1764 stl_phys(sm_state + 0x7fec, EDI);
1765 stl_phys(sm_state + 0x7fe8, ESI);
1766 stl_phys(sm_state + 0x7fe4, EBP);
1767 stl_phys(sm_state + 0x7fe0, ESP);
1768 stl_phys(sm_state + 0x7fdc, EBX);
1769 stl_phys(sm_state + 0x7fd8, EDX);
1770 stl_phys(sm_state + 0x7fd4, ECX);
1771 stl_phys(sm_state + 0x7fd0, EAX);
1772 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1773 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1774
1775 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1776 stl_phys(sm_state + 0x7f64, env->tr.base);
1777 stl_phys(sm_state + 0x7f60, env->tr.limit);
1778 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1779
1780 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1781 stl_phys(sm_state + 0x7f80, env->ldt.base);
1782 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1783 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1784
1785 stl_phys(sm_state + 0x7f74, env->gdt.base);
1786 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1787
1788 stl_phys(sm_state + 0x7f58, env->idt.base);
1789 stl_phys(sm_state + 0x7f54, env->idt.limit);
1790
1791 for(i = 0; i < 6; i++) {
1792 dt = &env->segs[i];
1793 if (i < 3)
1794 offset = 0x7f84 + i * 12;
1795 else
1796 offset = 0x7f2c + (i - 3) * 12;
1797 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1798 stl_phys(sm_state + offset + 8, dt->base);
1799 stl_phys(sm_state + offset + 4, dt->limit);
1800 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1801 }
1802 stl_phys(sm_state + 0x7f14, env->cr[4]);
1803
1804 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1805 stl_phys(sm_state + 0x7ef8, env->smbase);
1806#endif
1807 /* init SMM cpu state */
1808
1809#ifdef TARGET_X86_64
1810 cpu_load_efer(env, 0);
1811#endif
1812 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1813 env->eip = 0x00008000;
1814 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1815 0xffffffff, 0);
1816 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1817 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1818 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1819 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1820 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1821
1822 cpu_x86_update_cr0(env,
1823 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1824 cpu_x86_update_cr4(env, 0);
1825 env->dr[7] = 0x00000400;
1826 CC_OP = CC_OP_EFLAGS;
1827}
1828
1829void helper_rsm(void)
1830{
1831#ifdef VBOX
1832 cpu_abort(env, "helper_rsm");
1833#else /* !VBOX */
1836 target_ulong sm_state;
1837 int i, offset;
1838 uint32_t val;
1839
1840 sm_state = env->smbase + 0x8000;
1841#ifdef TARGET_X86_64
1842 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1843
1844 for(i = 0; i < 6; i++) {
1845 offset = 0x7e00 + i * 16;
1846 cpu_x86_load_seg_cache(env, i,
1847 lduw_phys(sm_state + offset),
1848 ldq_phys(sm_state + offset + 8),
1849 ldl_phys(sm_state + offset + 4),
1850 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1851 }
1852
1853 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1854 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1855
1856 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1857 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1858 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1859 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1860
1861 env->idt.base = ldq_phys(sm_state + 0x7e88);
1862 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1863
1864 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1865 env->tr.base = ldq_phys(sm_state + 0x7e98);
1866 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1867 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1868
1869 EAX = ldq_phys(sm_state + 0x7ff8);
1870 ECX = ldq_phys(sm_state + 0x7ff0);
1871 EDX = ldq_phys(sm_state + 0x7fe8);
1872 EBX = ldq_phys(sm_state + 0x7fe0);
1873 ESP = ldq_phys(sm_state + 0x7fd8);
1874 EBP = ldq_phys(sm_state + 0x7fd0);
1875 ESI = ldq_phys(sm_state + 0x7fc8);
1876 EDI = ldq_phys(sm_state + 0x7fc0);
1877 for(i = 8; i < 16; i++)
1878 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1879 env->eip = ldq_phys(sm_state + 0x7f78);
1880 load_eflags(ldl_phys(sm_state + 0x7f70),
1881 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1882 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1883 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1884
1885 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1886 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1887 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1888
1889 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1890 if (val & 0x20000) {
1891 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1892 }
1893#else
1894 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1895 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1896 load_eflags(ldl_phys(sm_state + 0x7ff4),
1897 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1898 env->eip = ldl_phys(sm_state + 0x7ff0);
1899 EDI = ldl_phys(sm_state + 0x7fec);
1900 ESI = ldl_phys(sm_state + 0x7fe8);
1901 EBP = ldl_phys(sm_state + 0x7fe4);
1902 ESP = ldl_phys(sm_state + 0x7fe0);
1903 EBX = ldl_phys(sm_state + 0x7fdc);
1904 EDX = ldl_phys(sm_state + 0x7fd8);
1905 ECX = ldl_phys(sm_state + 0x7fd4);
1906 EAX = ldl_phys(sm_state + 0x7fd0);
1907 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1908 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1909
1910 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1911 env->tr.base = ldl_phys(sm_state + 0x7f64);
1912 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1913 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1914
1915 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1916 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1917 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1918 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1919
1920 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1921 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1922
1923 env->idt.base = ldl_phys(sm_state + 0x7f58);
1924 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1925
1926 for(i = 0; i < 6; i++) {
1927 if (i < 3)
1928 offset = 0x7f84 + i * 12;
1929 else
1930 offset = 0x7f2c + (i - 3) * 12;
1931 cpu_x86_load_seg_cache(env, i,
1932 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1933 ldl_phys(sm_state + offset + 8),
1934 ldl_phys(sm_state + offset + 4),
1935 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1936 }
1937 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1938
1939 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1940 if (val & 0x20000) {
1941 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1942 }
1943#endif
1944 CC_OP = CC_OP_EFLAGS;
1945 env->hflags &= ~HF_SMM_MASK;
1946 cpu_smm_update(env);
1947
1948 if (loglevel & CPU_LOG_INT) {
1949 fprintf(logfile, "SMM: after RSM\n");
1950 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1951 }
1952#endif /* !VBOX */
1953}
1954
1955#endif /* !CONFIG_USER_ONLY */
1956
1957
1958/* division, flags are undefined */
1959
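/* DIV/IDIV helpers. The dividend is AX, DX:AX or EDX:EAX depending on the
   operand size; the quotient is written to AL/AX/EAX and the remainder to
   AH/DX/EDX. A zero divisor or a quotient that does not fit in the
   destination raises #DE (EXCP00_DIVZ). */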
1960void helper_divb_AL(target_ulong t0)
1961{
1962 unsigned int num, den, q, r;
1963
1964 num = (EAX & 0xffff);
1965 den = (t0 & 0xff);
1966 if (den == 0) {
1967 raise_exception(EXCP00_DIVZ);
1968 }
1969 q = (num / den);
1970 if (q > 0xff)
1971 raise_exception(EXCP00_DIVZ);
1972 q &= 0xff;
1973 r = (num % den) & 0xff;
1974 EAX = (EAX & ~0xffff) | (r << 8) | q;
1975}
1976
1977void helper_idivb_AL(target_ulong t0)
1978{
1979 int num, den, q, r;
1980
1981 num = (int16_t)EAX;
1982 den = (int8_t)t0;
1983 if (den == 0) {
1984 raise_exception(EXCP00_DIVZ);
1985 }
1986 q = (num / den);
1987 if (q != (int8_t)q)
1988 raise_exception(EXCP00_DIVZ);
1989 q &= 0xff;
1990 r = (num % den) & 0xff;
1991 EAX = (EAX & ~0xffff) | (r << 8) | q;
1992}
1993
1994void helper_divw_AX(target_ulong t0)
1995{
1996 unsigned int num, den, q, r;
1997
1998 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1999 den = (t0 & 0xffff);
2000 if (den == 0) {
2001 raise_exception(EXCP00_DIVZ);
2002 }
2003 q = (num / den);
2004 if (q > 0xffff)
2005 raise_exception(EXCP00_DIVZ);
2006 q &= 0xffff;
2007 r = (num % den) & 0xffff;
2008 EAX = (EAX & ~0xffff) | q;
2009 EDX = (EDX & ~0xffff) | r;
2010}
2011
2012void helper_idivw_AX(target_ulong t0)
2013{
2014 int num, den, q, r;
2015
2016 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2017 den = (int16_t)t0;
2018 if (den == 0) {
2019 raise_exception(EXCP00_DIVZ);
2020 }
2021 q = (num / den);
2022 if (q != (int16_t)q)
2023 raise_exception(EXCP00_DIVZ);
2024 q &= 0xffff;
2025 r = (num % den) & 0xffff;
2026 EAX = (EAX & ~0xffff) | q;
2027 EDX = (EDX & ~0xffff) | r;
2028}
2029
2030void helper_divl_EAX(target_ulong t0)
2031{
2032 unsigned int den, r;
2033 uint64_t num, q;
2034
2035 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2036 den = t0;
2037 if (den == 0) {
2038 raise_exception(EXCP00_DIVZ);
2039 }
2040 q = (num / den);
2041 r = (num % den);
2042 if (q > 0xffffffff)
2043 raise_exception(EXCP00_DIVZ);
2044 EAX = (uint32_t)q;
2045 EDX = (uint32_t)r;
2046}
2047
2048void helper_idivl_EAX(target_ulong t0)
2049{
2050 int den, r;
2051 int64_t num, q;
2052
2053 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2054 den = t0;
2055 if (den == 0) {
2056 raise_exception(EXCP00_DIVZ);
2057 }
2058 q = (num / den);
2059 r = (num % den);
2060 if (q != (int32_t)q)
2061 raise_exception(EXCP00_DIVZ);
2062 EAX = (uint32_t)q;
2063 EDX = (uint32_t)r;
2064}
2065
2066/* bcd */
2067
2068/* XXX: AAM with a zero immediate divides by zero and should raise #DE; that case is not handled here. */
2069void helper_aam(int base)
2070{
2071 int al, ah;
2072 al = EAX & 0xff;
2073 ah = al / base;
2074 al = al % base;
2075 EAX = (EAX & ~0xffff) | al | (ah << 8);
2076 CC_DST = al;
2077}
2078
2079void helper_aad(int base)
2080{
2081 int al, ah;
2082 al = EAX & 0xff;
2083 ah = (EAX >> 8) & 0xff;
2084 al = ((ah * base) + al) & 0xff;
2085 EAX = (EAX & ~0xffff) | al;
2086 CC_DST = al;
2087}
2088
2089void helper_aaa(void)
2090{
2091 int icarry;
2092 int al, ah, af;
2093 int eflags;
2094
2095 eflags = cc_table[CC_OP].compute_all();
2096 af = eflags & CC_A;
2097 al = EAX & 0xff;
2098 ah = (EAX >> 8) & 0xff;
2099
2100 icarry = (al > 0xf9);
2101 if (((al & 0x0f) > 9 ) || af) {
2102 al = (al + 6) & 0x0f;
2103 ah = (ah + 1 + icarry) & 0xff;
2104 eflags |= CC_C | CC_A;
2105 } else {
2106 eflags &= ~(CC_C | CC_A);
2107 al &= 0x0f;
2108 }
2109 EAX = (EAX & ~0xffff) | al | (ah << 8);
2110 CC_SRC = eflags;
2111 FORCE_RET();
2112}
2113
2114void helper_aas(void)
2115{
2116 int icarry;
2117 int al, ah, af;
2118 int eflags;
2119
2120 eflags = cc_table[CC_OP].compute_all();
2121 af = eflags & CC_A;
2122 al = EAX & 0xff;
2123 ah = (EAX >> 8) & 0xff;
2124
2125 icarry = (al < 6);
2126 if (((al & 0x0f) > 9 ) || af) {
2127 al = (al - 6) & 0x0f;
2128 ah = (ah - 1 - icarry) & 0xff;
2129 eflags |= CC_C | CC_A;
2130 } else {
2131 eflags &= ~(CC_C | CC_A);
2132 al &= 0x0f;
2133 }
2134 EAX = (EAX & ~0xffff) | al | (ah << 8);
2135 CC_SRC = eflags;
2136 FORCE_RET();
2137}
2138
2139void helper_daa(void)
2140{
2141 int al, af, cf;
2142 int eflags;
2143
2144 eflags = cc_table[CC_OP].compute_all();
2145 cf = eflags & CC_C;
2146 af = eflags & CC_A;
2147 al = EAX & 0xff;
2148
2149 eflags = 0;
2150 if (((al & 0x0f) > 9 ) || af) {
2151 al = (al + 6) & 0xff;
2152 eflags |= CC_A;
2153 }
2154 if ((al > 0x9f) || cf) {
2155 al = (al + 0x60) & 0xff;
2156 eflags |= CC_C;
2157 }
2158 EAX = (EAX & ~0xff) | al;
2159 /* well, speed is not an issue here, so we compute the flags by hand */
2160 eflags |= (al == 0) << 6; /* zf */
2161 eflags |= parity_table[al]; /* pf */
2162 eflags |= (al & 0x80); /* sf */
2163 CC_SRC = eflags;
2164 FORCE_RET();
2165}
2166
2167void helper_das(void)
2168{
2169 int al, al1, af, cf;
2170 int eflags;
2171
2172 eflags = cc_table[CC_OP].compute_all();
2173 cf = eflags & CC_C;
2174 af = eflags & CC_A;
2175 al = EAX & 0xff;
2176
2177 eflags = 0;
2178 al1 = al;
2179 if (((al & 0x0f) > 9 ) || af) {
2180 eflags |= CC_A;
2181 if (al < 6 || cf)
2182 eflags |= CC_C;
2183 al = (al - 6) & 0xff;
2184 }
2185 if ((al1 > 0x99) || cf) {
2186 al = (al - 0x60) & 0xff;
2187 eflags |= CC_C;
2188 }
2189 EAX = (EAX & ~0xff) | al;
2190 /* well, speed is not an issue here, so we compute the flags by hand */
2191 eflags |= (al == 0) << 6; /* zf */
2192 eflags |= parity_table[al]; /* pf */
2193 eflags |= (al & 0x80); /* sf */
2194 CC_SRC = eflags;
2195 FORCE_RET();
2196}
2197
2198void helper_into(int next_eip_addend)
2199{
2200 int eflags;
2201 eflags = cc_table[CC_OP].compute_all();
2202 if (eflags & CC_O) {
2203 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2204 }
2205}
2206
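/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at a0. On a match the
   memory is replaced by ECX:EBX and ZF is set; otherwise the memory value is
   loaded into EDX:EAX and ZF is cleared. The store happens in both cases,
   mirroring the unconditional write of the real instruction. The 64-bit
   CMPXCHG16B variant below additionally requires a 16-byte aligned operand. */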
2207void helper_cmpxchg8b(target_ulong a0)
2208{
2209 uint64_t d;
2210 int eflags;
2211
2212 eflags = cc_table[CC_OP].compute_all();
2213 d = ldq(a0);
2214 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2215 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2216 eflags |= CC_Z;
2217 } else {
2218 /* always do the store */
2219 stq(a0, d);
2220 EDX = (uint32_t)(d >> 32);
2221 EAX = (uint32_t)d;
2222 eflags &= ~CC_Z;
2223 }
2224 CC_SRC = eflags;
2225}
2226
2227#ifdef TARGET_X86_64
2228void helper_cmpxchg16b(target_ulong a0)
2229{
2230 uint64_t d0, d1;
2231 int eflags;
2232
2233 if ((a0 & 0xf) != 0)
2234 raise_exception(EXCP0D_GPF);
2235 eflags = cc_table[CC_OP].compute_all();
2236 d0 = ldq(a0);
2237 d1 = ldq(a0 + 8);
2238 if (d0 == EAX && d1 == EDX) {
2239 stq(a0, EBX);
2240 stq(a0 + 8, ECX);
2241 eflags |= CC_Z;
2242 } else {
2243 /* always do the store */
2244 stq(a0, d0);
2245 stq(a0 + 8, d1);
2246 EDX = d1;
2247 EAX = d0;
2248 eflags &= ~CC_Z;
2249 }
2250 CC_SRC = eflags;
2251}
2252#endif
2253
2254void helper_single_step(void)
2255{
2256 env->dr[6] |= 0x4000;
2257 raise_exception(EXCP01_SSTP);
2258}
2259
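/* CPUID: the VBOX build forwards the query to the VMM via remR3CpuId so the
   guest sees the leaves configured for the VM; the QEMU path below serves a
   built-in leaf table, clamping out-of-range indices to the highest supported
   basic leaf. */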
2260void helper_cpuid(void)
2261{
2262#ifndef VBOX
2263 uint32_t index;
2264
2265 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2266
2267 index = (uint32_t)EAX;
2268 /* test if maximum index reached */
2269 if (index & 0x80000000) {
2270 if (index > env->cpuid_xlevel)
2271 index = env->cpuid_level;
2272 } else {
2273 if (index > env->cpuid_level)
2274 index = env->cpuid_level;
2275 }
2276
2277 switch(index) {
2278 case 0:
2279 EAX = env->cpuid_level;
2280 EBX = env->cpuid_vendor1;
2281 EDX = env->cpuid_vendor2;
2282 ECX = env->cpuid_vendor3;
2283 break;
2284 case 1:
2285 EAX = env->cpuid_version;
2286 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2287 ECX = env->cpuid_ext_features;
2288 EDX = env->cpuid_features;
2289 break;
2290 case 2:
2291 /* cache info: needed for Pentium Pro compatibility */
2292 EAX = 1;
2293 EBX = 0;
2294 ECX = 0;
2295 EDX = 0x2c307d;
2296 break;
2297 case 4:
2298 /* cache info: needed for Core compatibility */
2299 switch (ECX) {
2300 case 0: /* L1 dcache info */
2301 EAX = 0x0000121;
2302 EBX = 0x1c0003f;
2303 ECX = 0x000003f;
2304 EDX = 0x0000001;
2305 break;
2306 case 1: /* L1 icache info */
2307 EAX = 0x0000122;
2308 EBX = 0x1c0003f;
2309 ECX = 0x000003f;
2310 EDX = 0x0000001;
2311 break;
2312 case 2: /* L2 cache info */
2313 EAX = 0x0000143;
2314 EBX = 0x3c0003f;
2315 ECX = 0x0000fff;
2316 EDX = 0x0000001;
2317 break;
2318 default: /* end of info */
2319 EAX = 0;
2320 EBX = 0;
2321 ECX = 0;
2322 EDX = 0;
2323 break;
2324 }
2325
2326 break;
2327 case 5:
2328 /* mwait info: needed for Core compatibility */
2329 EAX = 0; /* Smallest monitor-line size in bytes */
2330 EBX = 0; /* Largest monitor-line size in bytes */
2331 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2332 EDX = 0;
2333 break;
2334 case 6:
2335 /* Thermal and Power Leaf */
2336 EAX = 0;
2337 EBX = 0;
2338 ECX = 0;
2339 EDX = 0;
2340 break;
2341 case 9:
2342 /* Direct Cache Access Information Leaf */
2343 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2344 EBX = 0;
2345 ECX = 0;
2346 EDX = 0;
2347 break;
2348 case 0xA:
2349 /* Architectural Performance Monitoring Leaf */
2350 EAX = 0;
2351 EBX = 0;
2352 ECX = 0;
2353 EDX = 0;
2354 break;
2355 case 0x80000000:
2356 EAX = env->cpuid_xlevel;
2357 EBX = env->cpuid_vendor1;
2358 EDX = env->cpuid_vendor2;
2359 ECX = env->cpuid_vendor3;
2360 break;
2361 case 0x80000001:
2362 EAX = env->cpuid_features;
2363 EBX = 0;
2364 ECX = env->cpuid_ext3_features;
2365 EDX = env->cpuid_ext2_features;
2366 break;
2367 case 0x80000002:
2368 case 0x80000003:
2369 case 0x80000004:
2370 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2371 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2372 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2373 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2374 break;
2375 case 0x80000005:
2376 /* cache info (L1 cache) */
2377 EAX = 0x01ff01ff;
2378 EBX = 0x01ff01ff;
2379 ECX = 0x40020140;
2380 EDX = 0x40020140;
2381 break;
2382 case 0x80000006:
2383 /* cache info (L2 cache) */
2384 EAX = 0;
2385 EBX = 0x42004200;
2386 ECX = 0x02008140;
2387 EDX = 0;
2388 break;
2389 case 0x80000008:
2390 /* virtual & phys address size in low 2 bytes. */
2391/* XXX: This value must match the one used in the MMU code. */
2392 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2393 /* 64 bit processor */
2394#if defined(USE_KQEMU)
2395 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2396#else
2397/* XXX: The physical address space is limited to 42 bits in exec.c. */
2398 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2399#endif
2400 } else {
2401#if defined(USE_KQEMU)
2402 EAX = 0x00000020; /* 32 bits physical */
2403#else
2404 if (env->cpuid_features & CPUID_PSE36)
2405 EAX = 0x00000024; /* 36 bits physical */
2406 else
2407 EAX = 0x00000020; /* 32 bits physical */
2408#endif
2409 }
2410 EBX = 0;
2411 ECX = 0;
2412 EDX = 0;
2413 break;
2414 case 0x8000000A:
2415 EAX = 0x00000001;
2416 EBX = 0;
2417 ECX = 0;
2418 EDX = 0;
2419 break;
2420 default:
2421 /* reserved values: zero */
2422 EAX = 0;
2423 EBX = 0;
2424 ECX = 0;
2425 EDX = 0;
2426 break;
2427 }
2428#else /* VBOX */
2429 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2430#endif /* VBOX */
2431}
2432
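/* ENTER with a non-zero nesting level: copy level-1 frame pointers from the
   caller's frame (addressed through EBP) onto the new stack and finally push
   the new frame pointer t1; data32 selects 32-bit or 16-bit stack slots. */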
2433void helper_enter_level(int level, int data32, target_ulong t1)
2434{
2435 target_ulong ssp;
2436 uint32_t esp_mask, esp, ebp;
2437
2438 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2439 ssp = env->segs[R_SS].base;
2440 ebp = EBP;
2441 esp = ESP;
2442 if (data32) {
2443 /* 32 bit */
2444 esp -= 4;
2445 while (--level) {
2446 esp -= 4;
2447 ebp -= 4;
2448 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2449 }
2450 esp -= 4;
2451 stl(ssp + (esp & esp_mask), t1);
2452 } else {
2453 /* 16 bit */
2454 esp -= 2;
2455 while (--level) {
2456 esp -= 2;
2457 ebp -= 2;
2458 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2459 }
2460 esp -= 2;
2461 stw(ssp + (esp & esp_mask), t1);
2462 }
2463}
2464
2465#ifdef TARGET_X86_64
2466void helper_enter64_level(int level, int data64, target_ulong t1)
2467{
2468 target_ulong esp, ebp;
2469 ebp = EBP;
2470 esp = ESP;
2471
2472 if (data64) {
2473 /* 64 bit */
2474 esp -= 8;
2475 while (--level) {
2476 esp -= 8;
2477 ebp -= 8;
2478 stq(esp, ldq(ebp));
2479 }
2480 esp -= 8;
2481 stq(esp, t1);
2482 } else {
2483 /* 16 bit */
2484 esp -= 2;
2485 while (--level) {
2486 esp -= 2;
2487 ebp -= 2;
2488 stw(esp, lduw(ebp));
2489 }
2490 esp -= 2;
2491 stw(esp, t1);
2492 }
2493}
2494#endif
2495
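/* LLDT: load the LDT register. A null selector just invalidates LDTR;
   otherwise the selector must reference the GDT (TI bit clear) and index a
   present LDT descriptor (a 16-byte entry in long mode, whose third dword
   supplies base bits 63:32), else #GP/#NP is raised with the selector as the
   error code. */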
2496void helper_lldt(int selector)
2497{
2498 SegmentCache *dt;
2499 uint32_t e1, e2;
2500#ifndef VBOX
2501 int index, entry_limit;
2502#else
2503 unsigned int index, entry_limit;
2504#endif
2505 target_ulong ptr;
2506
2507#ifdef VBOX
2508 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2509 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2510#endif
2511
2512 selector &= 0xffff;
2513 if ((selector & 0xfffc) == 0) {
2514 /* XXX: NULL selector case: invalid LDT */
2515 env->ldt.base = 0;
2516 env->ldt.limit = 0;
2517 } else {
2518 if (selector & 0x4)
2519 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2520 dt = &env->gdt;
2521 index = selector & ~7;
2522#ifdef TARGET_X86_64
2523 if (env->hflags & HF_LMA_MASK)
2524 entry_limit = 15;
2525 else
2526#endif
2527 entry_limit = 7;
2528 if ((index + entry_limit) > dt->limit)
2529 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2530 ptr = dt->base + index;
2531 e1 = ldl_kernel(ptr);
2532 e2 = ldl_kernel(ptr + 4);
2533 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2534 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2535 if (!(e2 & DESC_P_MASK))
2536 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2537#ifdef TARGET_X86_64
2538 if (env->hflags & HF_LMA_MASK) {
2539 uint32_t e3;
2540 e3 = ldl_kernel(ptr + 8);
2541 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2542 env->ldt.base |= (target_ulong)e3 << 32;
2543 } else
2544#endif
2545 {
2546 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2547 }
2548 }
2549 env->ldt.selector = selector;
2550#ifdef VBOX
2551 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2552 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2553#endif
2554}
2555
2556void helper_ltr(int selector)
2557{
2558 SegmentCache *dt;
2559 uint32_t e1, e2;
2560#ifndef VBOX
2561 int index, type, entry_limit;
2562#else
2563 unsigned int index;
2564 int type, entry_limit;
2565#endif
2566 target_ulong ptr;
2567
2568#ifdef VBOX
2569 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2570 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2571 env->tr.flags, (RTSEL)(selector & 0xffff)));
2572#endif
2573 selector &= 0xffff;
2574 if ((selector & 0xfffc) == 0) {
2575 /* NULL selector case: invalid TR */
2576 env->tr.base = 0;
2577 env->tr.limit = 0;
2578 env->tr.flags = 0;
2579 } else {
2580 if (selector & 0x4)
2581 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2582 dt = &env->gdt;
2583 index = selector & ~7;
2584#ifdef TARGET_X86_64
2585 if (env->hflags & HF_LMA_MASK)
2586 entry_limit = 15;
2587 else
2588#endif
2589 entry_limit = 7;
2590 if ((index + entry_limit) > dt->limit)
2591 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2592 ptr = dt->base + index;
2593 e1 = ldl_kernel(ptr);
2594 e2 = ldl_kernel(ptr + 4);
2595 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2596 if ((e2 & DESC_S_MASK) ||
2597 (type != 1 && type != 9))
2598 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2599 if (!(e2 & DESC_P_MASK))
2600 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2601#ifdef TARGET_X86_64
2602 if (env->hflags & HF_LMA_MASK) {
2603 uint32_t e3, e4;
2604 e3 = ldl_kernel(ptr + 8);
2605 e4 = ldl_kernel(ptr + 12);
2606 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2607 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2608 load_seg_cache_raw_dt(&env->tr, e1, e2);
2609 env->tr.base |= (target_ulong)e3 << 32;
2610 } else
2611#endif
2612 {
2613 load_seg_cache_raw_dt(&env->tr, e1, e2);
2614 }
2615 e2 |= DESC_TSS_BUSY_MASK;
2616 stl_kernel(ptr + 4, e2);
2617 }
2618 env->tr.selector = selector;
2619#ifdef VBOX
2620 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2621 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2622 env->tr.flags, (RTSEL)(selector & 0xffff)));
2623#endif
2624}
2625
2626/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2627void helper_load_seg(int seg_reg, int selector)
2628{
2629 uint32_t e1, e2;
2630 int cpl, dpl, rpl;
2631 SegmentCache *dt;
2632#ifndef VBOX
2633 int index;
2634#else
2635 unsigned int index;
2636#endif
2637 target_ulong ptr;
2638
2639 selector &= 0xffff;
2640 cpl = env->hflags & HF_CPL_MASK;
2641
2642#ifdef VBOX
2643 /* Trying to load a selector with RPL=1 while running raw ring 0 code? */
2644 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2645 {
2646 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2647 selector = selector & 0xfffc;
2648 }
2649#endif
2650 if ((selector & 0xfffc) == 0) {
2651 /* null selector case */
2652 if (seg_reg == R_SS
2653#ifdef TARGET_X86_64
2654 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2655#endif
2656 )
2657 raise_exception_err(EXCP0D_GPF, 0);
2658 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2659 } else {
2660
2661 if (selector & 0x4)
2662 dt = &env->ldt;
2663 else
2664 dt = &env->gdt;
2665 index = selector & ~7;
2666 if ((index + 7) > dt->limit)
2667 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2668 ptr = dt->base + index;
2669 e1 = ldl_kernel(ptr);
2670 e2 = ldl_kernel(ptr + 4);
2671
2672 if (!(e2 & DESC_S_MASK))
2673 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2674 rpl = selector & 3;
2675 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2676 if (seg_reg == R_SS) {
2677 /* must be writable segment */
2678 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2679 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2680 if (rpl != cpl || dpl != cpl)
2681 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2682 } else {
2683 /* must be readable segment */
2684 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2685 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2686
2687 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2688 /* if not conforming code, test rights */
2689 if (dpl < cpl || dpl < rpl)
2690 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2691 }
2692 }
2693
2694 if (!(e2 & DESC_P_MASK)) {
2695 if (seg_reg == R_SS)
2696 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2697 else
2698 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2699 }
2700
2701 /* set the access bit if not already set */
2702 if (!(e2 & DESC_A_MASK)) {
2703 e2 |= DESC_A_MASK;
2704 stl_kernel(ptr + 4, e2);
2705 }
2706
2707 cpu_x86_load_seg_cache(env, seg_reg, selector,
2708 get_seg_base(e1, e2),
2709 get_seg_limit(e1, e2),
2710 e2);
2711#if 0
2712 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2713 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2714#endif
2715 }
2716}
2717
2718/* protected mode jump */
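/* Far JMP targets are either a code segment (conforming or non-conforming,
   checked against CPL/RPL), a task gate or TSS descriptor handled through
   switch_tss(), or a call gate, which only redirects CS:EIP to the gate
   target without switching stacks. */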
2719void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2720 int next_eip_addend)
2721{
2722 int gate_cs, type;
2723 uint32_t e1, e2, cpl, dpl, rpl, limit;
2724 target_ulong next_eip;
2725
2726#ifdef VBOX
2727 e1 = e2 = 0;
2728#endif
2729 if ((new_cs & 0xfffc) == 0)
2730 raise_exception_err(EXCP0D_GPF, 0);
2731 if (load_segment(&e1, &e2, new_cs) != 0)
2732 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2733 cpl = env->hflags & HF_CPL_MASK;
2734 if (e2 & DESC_S_MASK) {
2735 if (!(e2 & DESC_CS_MASK))
2736 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2737 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2738 if (e2 & DESC_C_MASK) {
2739 /* conforming code segment */
2740 if (dpl > cpl)
2741 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2742 } else {
2743 /* non conforming code segment */
2744 rpl = new_cs & 3;
2745 if (rpl > cpl)
2746 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2747 if (dpl != cpl)
2748 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2749 }
2750 if (!(e2 & DESC_P_MASK))
2751 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2752 limit = get_seg_limit(e1, e2);
2753 if (new_eip > limit &&
2754 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2755 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2756 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2757 get_seg_base(e1, e2), limit, e2);
2758 EIP = new_eip;
2759 } else {
2760 /* jump to call or task gate */
2761 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2762 rpl = new_cs & 3;
2763 cpl = env->hflags & HF_CPL_MASK;
2764 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2765 switch(type) {
2766 case 1: /* 286 TSS */
2767 case 9: /* 386 TSS */
2768 case 5: /* task gate */
2769 if (dpl < cpl || dpl < rpl)
2770 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2771 next_eip = env->eip + next_eip_addend;
2772 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2773 CC_OP = CC_OP_EFLAGS;
2774 break;
2775 case 4: /* 286 call gate */
2776 case 12: /* 386 call gate */
2777 if ((dpl < cpl) || (dpl < rpl))
2778 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2779 if (!(e2 & DESC_P_MASK))
2780 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2781 gate_cs = e1 >> 16;
2782 new_eip = (e1 & 0xffff);
2783 if (type == 12)
2784 new_eip |= (e2 & 0xffff0000);
2785 if (load_segment(&e1, &e2, gate_cs) != 0)
2786 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2787 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2788 /* must be code segment */
2789 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2790 (DESC_S_MASK | DESC_CS_MASK)))
2791 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2792 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2793 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2794 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2795 if (!(e2 & DESC_P_MASK))
2796#ifdef VBOX /* See page 3-514 of 253666.pdf */
2797 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2798#else
2799 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2800#endif
2801 limit = get_seg_limit(e1, e2);
2802 if (new_eip > limit)
2803 raise_exception_err(EXCP0D_GPF, 0);
2804 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2805 get_seg_base(e1, e2), limit, e2);
2806 EIP = new_eip;
2807 break;
2808 default:
2809 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2810 break;
2811 }
2812 }
2813}
2814
2815/* real mode call */
2816void helper_lcall_real(int new_cs, target_ulong new_eip1,
2817 int shift, int next_eip)
2818{
2819 int new_eip;
2820 uint32_t esp, esp_mask;
2821 target_ulong ssp;
2822
2823 new_eip = new_eip1;
2824 esp = ESP;
2825 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2826 ssp = env->segs[R_SS].base;
2827 if (shift) {
2828 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2829 PUSHL(ssp, esp, esp_mask, next_eip);
2830 } else {
2831 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2832 PUSHW(ssp, esp, esp_mask, next_eip);
2833 }
2834
2835 SET_ESP(esp, esp_mask);
2836 env->eip = new_eip;
2837 env->segs[R_CS].selector = new_cs;
2838 env->segs[R_CS].base = (new_cs << 4);
2839}
2840
2841/* protected mode call */
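/* A far CALL through a call gate to a more privileged (numerically lower DPL)
   code segment switches to the stack taken from the TSS for that privilege
   level: param_count words/dwords are copied from the old stack, then the old
   SS:ESP and the return CS:EIP are pushed on the new one. */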
2842void helper_lcall_protected(int new_cs, target_ulong new_eip,
2843 int shift, int next_eip_addend)
2844{
2845 int new_stack, i;
2846 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2847 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2848 uint32_t val, limit, old_sp_mask;
2849 target_ulong ssp, old_ssp, next_eip;
2850
2851#ifdef VBOX
2852 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2853#endif
2854 next_eip = env->eip + next_eip_addend;
2855#ifdef DEBUG_PCALL
2856 if (loglevel & CPU_LOG_PCALL) {
2857 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2858 new_cs, (uint32_t)new_eip, shift);
2859 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2860 }
2861#endif
2862 if ((new_cs & 0xfffc) == 0)
2863 raise_exception_err(EXCP0D_GPF, 0);
2864 if (load_segment(&e1, &e2, new_cs) != 0)
2865 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2866 cpl = env->hflags & HF_CPL_MASK;
2867#ifdef DEBUG_PCALL
2868 if (loglevel & CPU_LOG_PCALL) {
2869 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2870 }
2871#endif
2872 if (e2 & DESC_S_MASK) {
2873 if (!(e2 & DESC_CS_MASK))
2874 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2875 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2876 if (e2 & DESC_C_MASK) {
2877 /* conforming code segment */
2878 if (dpl > cpl)
2879 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2880 } else {
2881 /* non conforming code segment */
2882 rpl = new_cs & 3;
2883 if (rpl > cpl)
2884 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2885 if (dpl != cpl)
2886 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2887 }
2888 if (!(e2 & DESC_P_MASK))
2889 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2890
2891#ifdef TARGET_X86_64
2892 /* XXX: check 16/32 bit cases in long mode */
2893 if (shift == 2) {
2894 target_ulong rsp;
2895 /* 64 bit case */
2896 rsp = ESP;
2897 PUSHQ(rsp, env->segs[R_CS].selector);
2898 PUSHQ(rsp, next_eip);
2899 /* from this point, not restartable */
2900 ESP = rsp;
2901 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2902 get_seg_base(e1, e2),
2903 get_seg_limit(e1, e2), e2);
2904 EIP = new_eip;
2905 } else
2906#endif
2907 {
2908 sp = ESP;
2909 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2910 ssp = env->segs[R_SS].base;
2911 if (shift) {
2912 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2913 PUSHL(ssp, sp, sp_mask, next_eip);
2914 } else {
2915 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2916 PUSHW(ssp, sp, sp_mask, next_eip);
2917 }
2918
2919 limit = get_seg_limit(e1, e2);
2920 if (new_eip > limit)
2921 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2922 /* from this point, not restartable */
2923 SET_ESP(sp, sp_mask);
2924 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2925 get_seg_base(e1, e2), limit, e2);
2926 EIP = new_eip;
2927 }
2928 } else {
2929 /* check gate type */
2930 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2931 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2932 rpl = new_cs & 3;
2933 switch(type) {
2934 case 1: /* available 286 TSS */
2935 case 9: /* available 386 TSS */
2936 case 5: /* task gate */
2937 if (dpl < cpl || dpl < rpl)
2938 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2939 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2940 CC_OP = CC_OP_EFLAGS;
2941 return;
2942 case 4: /* 286 call gate */
2943 case 12: /* 386 call gate */
2944 break;
2945 default:
2946 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2947 break;
2948 }
2949 shift = type >> 3;
2950
2951 if (dpl < cpl || dpl < rpl)
2952 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2953 /* check valid bit */
2954 if (!(e2 & DESC_P_MASK))
2955 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2956 selector = e1 >> 16;
2957 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2958 param_count = e2 & 0x1f;
2959 if ((selector & 0xfffc) == 0)
2960 raise_exception_err(EXCP0D_GPF, 0);
2961
2962 if (load_segment(&e1, &e2, selector) != 0)
2963 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2964 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2965 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2966 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2967 if (dpl > cpl)
2968 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2969 if (!(e2 & DESC_P_MASK))
2970 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2971
2972 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2973 /* to inner privilege */
2974 get_ss_esp_from_tss(&ss, &sp, dpl);
2975#ifdef DEBUG_PCALL
2976 if (loglevel & CPU_LOG_PCALL)
2977 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2978 ss, sp, param_count, ESP);
2979#endif
2980 if ((ss & 0xfffc) == 0)
2981 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2982 if ((ss & 3) != dpl)
2983 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2984 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2985 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2986 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2987 if (ss_dpl != dpl)
2988 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2989 if (!(ss_e2 & DESC_S_MASK) ||
2990 (ss_e2 & DESC_CS_MASK) ||
2991 !(ss_e2 & DESC_W_MASK))
2992 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2993 if (!(ss_e2 & DESC_P_MASK))
2994#ifdef VBOX /* See page 3-99 of 253666.pdf */
2995 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2996#else
2997 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2998#endif
2999
3000 // push_size = ((param_count * 2) + 8) << shift;
3001
3002 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3003 old_ssp = env->segs[R_SS].base;
3004
3005 sp_mask = get_sp_mask(ss_e2);
3006 ssp = get_seg_base(ss_e1, ss_e2);
3007 if (shift) {
3008 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3009 PUSHL(ssp, sp, sp_mask, ESP);
3010 for(i = param_count - 1; i >= 0; i--) {
3011 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3012 PUSHL(ssp, sp, sp_mask, val);
3013 }
3014 } else {
3015 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3016 PUSHW(ssp, sp, sp_mask, ESP);
3017 for(i = param_count - 1; i >= 0; i--) {
3018 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3019 PUSHW(ssp, sp, sp_mask, val);
3020 }
3021 }
3022 new_stack = 1;
3023 } else {
3024 /* to same privilege */
3025 sp = ESP;
3026 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3027 ssp = env->segs[R_SS].base;
3028 // push_size = (4 << shift);
3029 new_stack = 0;
3030 }
3031
3032 if (shift) {
3033 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3034 PUSHL(ssp, sp, sp_mask, next_eip);
3035 } else {
3036 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3037 PUSHW(ssp, sp, sp_mask, next_eip);
3038 }
3039
3040 /* from this point, not restartable */
3041
3042 if (new_stack) {
3043 ss = (ss & ~3) | dpl;
3044 cpu_x86_load_seg_cache(env, R_SS, ss,
3045 ssp,
3046 get_seg_limit(ss_e1, ss_e2),
3047 ss_e2);
3048 }
3049
3050 selector = (selector & ~3) | dpl;
3051 cpu_x86_load_seg_cache(env, R_CS, selector,
3052 get_seg_base(e1, e2),
3053 get_seg_limit(e1, e2),
3054 e2);
3055 cpu_x86_set_cpl(env, dpl);
3056 SET_ESP(sp, sp_mask);
3057 EIP = offset;
3058 }
3059#ifdef USE_KQEMU
3060 if (kqemu_is_ok(env)) {
3061 env->exception_index = -1;
3062 cpu_loop_exit();
3063 }
3064#endif
3065}
3066
3067/* real and vm86 mode iret */
3068void helper_iret_real(int shift)
3069{
3070 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3071 target_ulong ssp;
3072 int eflags_mask;
3073#ifdef VBOX
3074 bool fVME = false;
3075
3076 remR3TrapClear(env->pVM);
3077#endif /* VBOX */
3078
3079 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3080 sp = ESP;
3081 ssp = env->segs[R_SS].base;
3082 if (shift == 1) {
3083 /* 32 bits */
3084 POPL(ssp, sp, sp_mask, new_eip);
3085 POPL(ssp, sp, sp_mask, new_cs);
3086 new_cs &= 0xffff;
3087 POPL(ssp, sp, sp_mask, new_eflags);
3088 } else {
3089 /* 16 bits */
3090 POPW(ssp, sp, sp_mask, new_eip);
3091 POPW(ssp, sp, sp_mask, new_cs);
3092 POPW(ssp, sp, sp_mask, new_eflags);
3093 }
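    /* With CR4.VME set and IOPL < 3 in V86 mode, IRET is handled here instead
       of faulting: it must #GP if it would set TF or would enable interrupts
       while a virtual interrupt is pending, and the popped IF bit is copied
       into VIF rather than IF (see the fVME handling below). */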
3094#ifdef VBOX
3095 if ( (env->eflags & VM_MASK)
3096 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3097 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3098 {
3099 fVME = true;
3100 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3101 /* if TF will be set -> #GP */
3102 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3103 || (new_eflags & TF_MASK))
3104 raise_exception(EXCP0D_GPF);
3105 }
3106#endif /* VBOX */
3107 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3108 env->segs[R_CS].selector = new_cs;
3109 env->segs[R_CS].base = (new_cs << 4);
3110 env->eip = new_eip;
3111#ifdef VBOX
3112 if (fVME)
3113 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3114 else
3115#endif
3116 if (env->eflags & VM_MASK)
3117 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3118 else
3119 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3120 if (shift == 0)
3121 eflags_mask &= 0xffff;
3122 load_eflags(new_eflags, eflags_mask);
3123 env->hflags2 &= ~HF2_NMI_MASK;
3124#ifdef VBOX
3125 if (fVME)
3126 {
3127 if (new_eflags & IF_MASK)
3128 env->eflags |= VIF_MASK;
3129 else
3130 env->eflags &= ~VIF_MASK;
3131 }
3132#endif /* VBOX */
3133}
3134
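/* On a return to an outer privilege level, data segment registers that are
   still loaded with a more privileged (DPL < new CPL) data or non-conforming
   code segment are nulled, as the hardware does during IRET/RET. */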
3135#ifndef VBOX
3136static inline void validate_seg(int seg_reg, int cpl)
3137#else /* VBOX */
3138DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3139#endif /* VBOX */
3140{
3141 int dpl;
3142 uint32_t e2;
3143
3144 /* XXX: on x86_64, we do not want to nullify FS and GS because
3145 they may still contain a valid base. I would be interested to
3146 know how a real x86_64 CPU behaves */
3147 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3148 (env->segs[seg_reg].selector & 0xfffc) == 0)
3149 return;
3150
3151 e2 = env->segs[seg_reg].flags;
3152 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3153 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3154 /* data or non conforming code segment */
3155 if (dpl < cpl) {
3156 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3157 }
3158 }
3159}
3160
3161/* protected mode iret */
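/* Common code for far RET and IRET in protected mode: shift selects the
   operand size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit pops), is_iret adds the
   EFLAGS pop and the vm86 return path, and addend releases the extra stack
   bytes of a "ret imm16". */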
3162#ifndef VBOX
3163static inline void helper_ret_protected(int shift, int is_iret, int addend)
3164#else /* VBOX */
3165DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3166#endif /* VBOX */
3167{
3168 uint32_t new_cs, new_eflags, new_ss;
3169 uint32_t new_es, new_ds, new_fs, new_gs;
3170 uint32_t e1, e2, ss_e1, ss_e2;
3171 int cpl, dpl, rpl, eflags_mask, iopl;
3172 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3173
3174#ifdef VBOX
3175 ss_e1 = ss_e2 = e1 = e2 = 0;
3176#endif
3177
3178#ifdef TARGET_X86_64
3179 if (shift == 2)
3180 sp_mask = -1;
3181 else
3182#endif
3183 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3184 sp = ESP;
3185 ssp = env->segs[R_SS].base;
3186 new_eflags = 0; /* avoid warning */
3187#ifdef TARGET_X86_64
3188 if (shift == 2) {
3189 POPQ(sp, new_eip);
3190 POPQ(sp, new_cs);
3191 new_cs &= 0xffff;
3192 if (is_iret) {
3193 POPQ(sp, new_eflags);
3194 }
3195 } else
3196#endif
3197 if (shift == 1) {
3198 /* 32 bits */
3199 POPL(ssp, sp, sp_mask, new_eip);
3200 POPL(ssp, sp, sp_mask, new_cs);
3201 new_cs &= 0xffff;
3202 if (is_iret) {
3203 POPL(ssp, sp, sp_mask, new_eflags);
3204#if defined(VBOX) && defined(DEBUG)
3205 printf("iret: new CS %04X\n", new_cs);
3206 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3207 printf("iret: new EFLAGS %08X\n", new_eflags);
3208 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3209#endif
3210 if (new_eflags & VM_MASK)
3211 goto return_to_vm86;
3212 }
3213#ifdef VBOX
3214 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3215 {
3216#ifdef DEBUG
3217 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3218#endif
3219 new_cs = new_cs & 0xfffc;
3220 }
3221#endif
3222 } else {
3223 /* 16 bits */
3224 POPW(ssp, sp, sp_mask, new_eip);
3225 POPW(ssp, sp, sp_mask, new_cs);
3226 if (is_iret)
3227 POPW(ssp, sp, sp_mask, new_eflags);
3228 }
3229#ifdef DEBUG_PCALL
3230 if (loglevel & CPU_LOG_PCALL) {
3231 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3232 new_cs, new_eip, shift, addend);
3233 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3234 }
3235#endif
3236 if ((new_cs & 0xfffc) == 0)
3237 {
3238#if defined(VBOX) && defined(DEBUG)
3239 printf("(new_cs & 0xfffc) == 0\n");
3240#endif
3241 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3242 }
3243 if (load_segment(&e1, &e2, new_cs) != 0)
3244 {
3245#if defined(VBOX) && defined(DEBUG)
3246 printf("load_segment failed\n");
3247#endif
3248 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3249 }
3250 if (!(e2 & DESC_S_MASK) ||
3251 !(e2 & DESC_CS_MASK))
3252 {
3253#if defined(VBOX) && defined(DEBUG)
3254 printf("e2 mask %08x\n", e2);
3255#endif
3256 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3257 }
3258 cpl = env->hflags & HF_CPL_MASK;
3259 rpl = new_cs & 3;
3260 if (rpl < cpl)
3261 {
3262#if defined(VBOX) && defined(DEBUG)
3263 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3264#endif
3265 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3266 }
3267 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3268 if (e2 & DESC_C_MASK) {
3269 if (dpl > rpl)
3270 {
3271#if defined(VBOX) && defined(DEBUG)
3272 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3273#endif
3274 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3275 }
3276 } else {
3277 if (dpl != rpl)
3278 {
3279#if defined(VBOX) && defined(DEBUG)
3280 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3281#endif
3282 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3283 }
3284 }
3285 if (!(e2 & DESC_P_MASK))
3286 {
3287#if defined(VBOX) && defined(DEBUG)
3288 printf("DESC_P_MASK e2=%08x\n", e2);
3289#endif
3290 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3291 }
3292
3293 sp += addend;
3294 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3295 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3296 /* return to same privilege level */
3297 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3298 get_seg_base(e1, e2),
3299 get_seg_limit(e1, e2),
3300 e2);
3301 } else {
3302 /* return to different privilege level */
3303#ifdef TARGET_X86_64
3304 if (shift == 2) {
3305 POPQ(sp, new_esp);
3306 POPQ(sp, new_ss);
3307 new_ss &= 0xffff;
3308 } else
3309#endif
3310 if (shift == 1) {
3311 /* 32 bits */
3312 POPL(ssp, sp, sp_mask, new_esp);
3313 POPL(ssp, sp, sp_mask, new_ss);
3314 new_ss &= 0xffff;
3315 } else {
3316 /* 16 bits */
3317 POPW(ssp, sp, sp_mask, new_esp);
3318 POPW(ssp, sp, sp_mask, new_ss);
3319 }
3320#ifdef DEBUG_PCALL
3321 if (loglevel & CPU_LOG_PCALL) {
3322 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3323 new_ss, new_esp);
3324 }
3325#endif
3326 if ((new_ss & 0xfffc) == 0) {
3327#ifdef TARGET_X86_64
3328 /* NULL ss is allowed in long mode if cpl != 3 */
3329 /* XXX: test CS64 ? */
3330 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3331 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3332 0, 0xffffffff,
3333 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3334 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3335 DESC_W_MASK | DESC_A_MASK);
3336 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3337 } else
3338#endif
3339 {
3340 raise_exception_err(EXCP0D_GPF, 0);
3341 }
3342 } else {
3343 if ((new_ss & 3) != rpl)
3344 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3345 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3346 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3347 if (!(ss_e2 & DESC_S_MASK) ||
3348 (ss_e2 & DESC_CS_MASK) ||
3349 !(ss_e2 & DESC_W_MASK))
3350 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3351 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3352 if (dpl != rpl)
3353 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3354 if (!(ss_e2 & DESC_P_MASK))
3355 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3356 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3357 get_seg_base(ss_e1, ss_e2),
3358 get_seg_limit(ss_e1, ss_e2),
3359 ss_e2);
3360 }
3361
3362 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3363 get_seg_base(e1, e2),
3364 get_seg_limit(e1, e2),
3365 e2);
3366 cpu_x86_set_cpl(env, rpl);
3367 sp = new_esp;
3368#ifdef TARGET_X86_64
3369 if (env->hflags & HF_CS64_MASK)
3370 sp_mask = -1;
3371 else
3372#endif
3373 sp_mask = get_sp_mask(ss_e2);
3374
3375 /* validate data segments */
3376 validate_seg(R_ES, rpl);
3377 validate_seg(R_DS, rpl);
3378 validate_seg(R_FS, rpl);
3379 validate_seg(R_GS, rpl);
3380
3381 sp += addend;
3382 }
3383 SET_ESP(sp, sp_mask);
3384 env->eip = new_eip;
3385 if (is_iret) {
3386 /* NOTE: 'cpl' is the _old_ CPL */
3387 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3388 if (cpl == 0)
3389#ifdef VBOX
3390 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3391#else
3392 eflags_mask |= IOPL_MASK;
3393#endif
3394 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3395 if (cpl <= iopl)
3396 eflags_mask |= IF_MASK;
3397 if (shift == 0)
3398 eflags_mask &= 0xffff;
3399 load_eflags(new_eflags, eflags_mask);
3400 }
3401 return;
3402
3403 return_to_vm86:
3404 POPL(ssp, sp, sp_mask, new_esp);
3405 POPL(ssp, sp, sp_mask, new_ss);
3406 POPL(ssp, sp, sp_mask, new_es);
3407 POPL(ssp, sp, sp_mask, new_ds);
3408 POPL(ssp, sp, sp_mask, new_fs);
3409 POPL(ssp, sp, sp_mask, new_gs);
3410
3411 /* modify processor state */
3412 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3413 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3414 load_seg_vm(R_CS, new_cs & 0xffff);
3415 cpu_x86_set_cpl(env, 3);
3416 load_seg_vm(R_SS, new_ss & 0xffff);
3417 load_seg_vm(R_ES, new_es & 0xffff);
3418 load_seg_vm(R_DS, new_ds & 0xffff);
3419 load_seg_vm(R_FS, new_fs & 0xffff);
3420 load_seg_vm(R_GS, new_gs & 0xffff);
3421
3422 env->eip = new_eip & 0xffff;
3423 ESP = new_esp;
3424}
3425
3426void helper_iret_protected(int shift, int next_eip)
3427{
3428 int tss_selector, type;
3429 uint32_t e1, e2;
3430
3431#ifdef VBOX
3432 e1 = e2 = 0;
3433 remR3TrapClear(env->pVM);
3434#endif
3435
3436 /* specific case for TSS */
3437 if (env->eflags & NT_MASK) {
3438#ifdef TARGET_X86_64
3439 if (env->hflags & HF_LMA_MASK)
3440 raise_exception_err(EXCP0D_GPF, 0);
3441#endif
3442 tss_selector = lduw_kernel(env->tr.base + 0);
3443 if (tss_selector & 4)
3444 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3445 if (load_segment(&e1, &e2, tss_selector) != 0)
3446 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3447 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3448 /* NOTE: we check both segment and busy TSS */
3449 if (type != 3)
3450 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3451 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3452 } else {
3453 helper_ret_protected(shift, 1, 0);
3454 }
3455 env->hflags2 &= ~HF2_NMI_MASK;
3456#ifdef USE_KQEMU
3457 if (kqemu_is_ok(env)) {
3458 CC_OP = CC_OP_EFLAGS;
3459 env->exception_index = -1;
3460 cpu_loop_exit();
3461 }
3462#endif
3463}
3464
3465void helper_lret_protected(int shift, int addend)
3466{
3467 helper_ret_protected(shift, 0, addend);
3468#ifdef USE_KQEMU
3469 if (kqemu_is_ok(env)) {
3470 env->exception_index = -1;
3471 cpu_loop_exit();
3472 }
3473#endif
3474}
3475
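/* SYSENTER: #GP(0) if IA32_SYSENTER_CS is zero; otherwise load flat ring-0
   code and stack segments derived from that MSR (CS, SS = CS + 8) and jump to
   SYSENTER_EIP with SYSENTER_ESP. In long mode the code segment is loaded
   with the L bit set. */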
3476void helper_sysenter(void)
3477{
3478 if (env->sysenter_cs == 0) {
3479 raise_exception_err(EXCP0D_GPF, 0);
3480 }
3481 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3482 cpu_x86_set_cpl(env, 0);
3483
3484#ifdef TARGET_X86_64
3485 if (env->hflags & HF_LMA_MASK) {
3486 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3487 0, 0xffffffff,
3488 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3489 DESC_S_MASK |
3490 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3491 } else
3492#endif
3493 {
3494 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3495 0, 0xffffffff,
3496 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3497 DESC_S_MASK |
3498 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3499 }
3500 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3501 0, 0xffffffff,
3502 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3503 DESC_S_MASK |
3504 DESC_W_MASK | DESC_A_MASK);
3505 ESP = env->sysenter_esp;
3506 EIP = env->sysenter_eip;
3507}
3508
3509void helper_sysexit(int dflag)
3510{
3511 int cpl;
3512
3513 cpl = env->hflags & HF_CPL_MASK;
3514 if (env->sysenter_cs == 0 || cpl != 0) {
3515 raise_exception_err(EXCP0D_GPF, 0);
3516 }
3517 cpu_x86_set_cpl(env, 3);
3518#ifdef TARGET_X86_64
3519 if (dflag == 2) {
3520 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3521 0, 0xffffffff,
3522 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3523 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3524 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3525 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3526 0, 0xffffffff,
3527 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3528 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3529 DESC_W_MASK | DESC_A_MASK);
3530 } else
3531#endif
3532 {
3533 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3534 0, 0xffffffff,
3535 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3536 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3537 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3538 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3539 0, 0xffffffff,
3540 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3541 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3542 DESC_W_MASK | DESC_A_MASK);
3543 }
3544 ESP = ECX;
3545 EIP = EDX;
3546#ifdef USE_KQEMU
3547 if (kqemu_is_ok(env)) {
3548 env->exception_index = -1;
3549 cpu_loop_exit();
3550 }
3551#endif
3552}
3553
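/* Control register accessors. CR0/CR3/CR4 writes go through the
   cpu_x86_update_* helpers so dependent state (hflags, paging, TLB) stays in
   sync; CR8 is mapped to the APIC task-priority register unless
   virtual-interrupt masking (HF2_VINTR_MASK, set up by SVM) is active, in
   which case the shadow v_tpr value is used instead. */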
3554#if defined(CONFIG_USER_ONLY)
3555target_ulong helper_read_crN(int reg)
3556{
3557 return 0;
3558}
3559
3560void helper_write_crN(int reg, target_ulong t0)
3561{
3562}
3563#else
3564target_ulong helper_read_crN(int reg)
3565{
3566 target_ulong val;
3567
3568 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3569 switch(reg) {
3570 default:
3571 val = env->cr[reg];
3572 break;
3573 case 8:
3574 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3575 val = cpu_get_apic_tpr(env);
3576 } else {
3577 val = env->v_tpr;
3578 }
3579 break;
3580 }
3581 return val;
3582}
3583
3584void helper_write_crN(int reg, target_ulong t0)
3585{
3586 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3587 switch(reg) {
3588 case 0:
3589 cpu_x86_update_cr0(env, t0);
3590 break;
3591 case 3:
3592 cpu_x86_update_cr3(env, t0);
3593 break;
3594 case 4:
3595 cpu_x86_update_cr4(env, t0);
3596 break;
3597 case 8:
3598 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3599 cpu_set_apic_tpr(env, t0);
3600 }
3601 env->v_tpr = t0 & 0x0f;
3602 break;
3603 default:
3604 env->cr[reg] = t0;
3605 break;
3606 }
3607}
3608#endif
3609
3610void helper_lmsw(target_ulong t0)
3611{
3612 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3613 if already set to one. */
3614 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3615 helper_write_crN(0, t0);
3616}
3617
3618void helper_clts(void)
3619{
3620 env->cr[0] &= ~CR0_TS_MASK;
3621 env->hflags &= ~HF_TS_MASK;
3622}
3623
3624/* XXX: do more */
3625void helper_movl_drN_T0(int reg, target_ulong t0)
3626{
3627 env->dr[reg] = t0;
3628}
3629
3630void helper_invlpg(target_ulong addr)
3631{
3632 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3633 tlb_flush_page(env, addr);
3634}
3635
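/* RDTSC faults with #GP when CR4.TSD is set and CPL != 0; the 64-bit
   time-stamp counter plus env->tsc_offset is returned in EDX:EAX. The
   VBOX-only RDTSCP helper below additionally returns the TSC_AUX MSR in ECX. */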
3636void helper_rdtsc(void)
3637{
3638 uint64_t val;
3639
3640 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3641 raise_exception(EXCP0D_GPF);
3642 }
3643 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3644
3645 val = cpu_get_tsc(env) + env->tsc_offset;
3646 EAX = (uint32_t)(val);
3647 EDX = (uint32_t)(val >> 32);
3648}
3649
3650#ifdef VBOX
3651void helper_rdtscp(void)
3652{
3653 uint64_t val;
3654 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3655 raise_exception(EXCP0D_GPF);
3656 }
3657
3658 val = cpu_get_tsc(env);
3659 EAX = (uint32_t)(val);
3660 EDX = (uint32_t)(val >> 32);
3661 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3662}
3663#endif
3664
3665void helper_rdpmc(void)
3666{
3667 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3668 raise_exception(EXCP0D_GPF);
3669 }
3670 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3671
3672 /* currently unimplemented */
3673 raise_exception_err(EXCP06_ILLOP, 0);
3674}
3675
3676#if defined(CONFIG_USER_ONLY)
3677void helper_wrmsr(void)
3678{
3679}
3680
3681void helper_rdmsr(void)
3682{
3683}
3684#else
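/* WRMSR/RDMSR take the MSR index in ECX and the value in EDX:EAX. The VBOX
   build forwards accesses in the x2APIC MSR range to the APIC emulation and
   handles MSR_K8_TSC_AUX through cpu_wrmsr/cpu_rdmsr; other unknown MSRs are
   silently ignored (reads return zero) instead of raising #GP. */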
3685void helper_wrmsr(void)
3686{
3687 uint64_t val;
3688
3689 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3690
3691 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3692
3693 switch((uint32_t)ECX) {
3694 case MSR_IA32_SYSENTER_CS:
3695 env->sysenter_cs = val & 0xffff;
3696 break;
3697 case MSR_IA32_SYSENTER_ESP:
3698 env->sysenter_esp = val;
3699 break;
3700 case MSR_IA32_SYSENTER_EIP:
3701 env->sysenter_eip = val;
3702 break;
3703 case MSR_IA32_APICBASE:
3704 cpu_set_apic_base(env, val);
3705 break;
3706 case MSR_EFER:
3707 {
3708 uint64_t update_mask;
3709 update_mask = 0;
3710 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3711 update_mask |= MSR_EFER_SCE;
3712 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3713 update_mask |= MSR_EFER_LME;
3714 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3715 update_mask |= MSR_EFER_FFXSR;
3716 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3717 update_mask |= MSR_EFER_NXE;
3718 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3719 update_mask |= MSR_EFER_SVME;
3720 cpu_load_efer(env, (env->efer & ~update_mask) |
3721 (val & update_mask));
3722 }
3723 break;
3724 case MSR_STAR:
3725 env->star = val;
3726 break;
3727 case MSR_PAT:
3728 env->pat = val;
3729 break;
3730 case MSR_VM_HSAVE_PA:
3731 env->vm_hsave = val;
3732 break;
3733#ifdef TARGET_X86_64
3734 case MSR_LSTAR:
3735 env->lstar = val;
3736 break;
3737 case MSR_CSTAR:
3738 env->cstar = val;
3739 break;
3740 case MSR_FMASK:
3741 env->fmask = val;
3742 break;
3743 case MSR_FSBASE:
3744 env->segs[R_FS].base = val;
3745 break;
3746 case MSR_GSBASE:
3747 env->segs[R_GS].base = val;
3748 break;
3749 case MSR_KERNELGSBASE:
3750 env->kernelgsbase = val;
3751 break;
3752#endif
3753 default:
3754#ifndef VBOX
3755 /* XXX: exception ? */
3756 break;
3757#else /* VBOX */
3758 {
3759 uint32_t ecx = (uint32_t)ECX;
3760 /* In X2APIC specification this range is reserved for APIC control. */
3761 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3762 cpu_apic_wrmsr(env, ecx, val);
3763 /** @todo else exception? */
3764 break;
3765 }
3766 case MSR_K8_TSC_AUX:
3767 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3768 break;
3769#endif /* VBOX */
3770 }
3771}
3772
3773void helper_rdmsr(void)
3774{
3775 uint64_t val;
3776
3777 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3778
3779 switch((uint32_t)ECX) {
3780 case MSR_IA32_SYSENTER_CS:
3781 val = env->sysenter_cs;
3782 break;
3783 case MSR_IA32_SYSENTER_ESP:
3784 val = env->sysenter_esp;
3785 break;
3786 case MSR_IA32_SYSENTER_EIP:
3787 val = env->sysenter_eip;
3788 break;
3789 case MSR_IA32_APICBASE:
3790 val = cpu_get_apic_base(env);
3791 break;
3792 case MSR_EFER:
3793 val = env->efer;
3794 break;
3795 case MSR_STAR:
3796 val = env->star;
3797 break;
3798 case MSR_PAT:
3799 val = env->pat;
3800 break;
3801 case MSR_VM_HSAVE_PA:
3802 val = env->vm_hsave;
3803 break;
3804 case MSR_IA32_PERF_STATUS:
3805 /* tsc_increment_by_tick */
3806 val = 1000ULL;
3807 /* CPU multiplier */
3808 val |= (((uint64_t)4ULL) << 40);
3809 break;
3810#ifdef TARGET_X86_64
3811 case MSR_LSTAR:
3812 val = env->lstar;
3813 break;
3814 case MSR_CSTAR:
3815 val = env->cstar;
3816 break;
3817 case MSR_FMASK:
3818 val = env->fmask;
3819 break;
3820 case MSR_FSBASE:
3821 val = env->segs[R_FS].base;
3822 break;
3823 case MSR_GSBASE:
3824 val = env->segs[R_GS].base;
3825 break;
3826 case MSR_KERNELGSBASE:
3827 val = env->kernelgsbase;
3828 break;
3829#endif
3830#ifdef USE_KQEMU
3831 case MSR_QPI_COMMBASE:
3832 if (env->kqemu_enabled) {
3833 val = kqemu_comm_base;
3834 } else {
3835 val = 0;
3836 }
3837 break;
3838#endif
3839 default:
3840#ifndef VBOX
3841 /* XXX: exception ? */
3842 val = 0;
3843 break;
3844#else /* VBOX */
3845 {
3846 uint32_t ecx = (uint32_t)ECX;
3847 /* In X2APIC specification this range is reserved for APIC control. */
3848 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3849 val = cpu_apic_rdmsr(env, ecx);
3850 else
3851 val = 0; /** @todo else exception? */
3852 break;
3853 }
3854 case MSR_K8_TSC_AUX:
3855 val = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3856 break;
3857#endif /* VBOX */
3858 }
3859 EAX = (uint32_t)(val);
3860 EDX = (uint32_t)(val >> 32);
3861}
3862#endif
3863
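/* LSL/LAR/VERR/VERW never fault on a bad selector: they report success by
   setting ZF and failure by clearing it. LSL returns the expanded segment
   limit, LAR the access-rights bytes masked with 0x00f0ff00. */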
3864target_ulong helper_lsl(target_ulong selector1)
3865{
3866 unsigned int limit;
3867 uint32_t e1, e2, eflags, selector;
3868 int rpl, dpl, cpl, type;
3869
3870 selector = selector1 & 0xffff;
3871 eflags = cc_table[CC_OP].compute_all();
3872 if (load_segment(&e1, &e2, selector) != 0)
3873 goto fail;
3874 rpl = selector & 3;
3875 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3876 cpl = env->hflags & HF_CPL_MASK;
3877 if (e2 & DESC_S_MASK) {
3878 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3879 /* conforming */
3880 } else {
3881 if (dpl < cpl || dpl < rpl)
3882 goto fail;
3883 }
3884 } else {
3885 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3886 switch(type) {
3887 case 1:
3888 case 2:
3889 case 3:
3890 case 9:
3891 case 11:
3892 break;
3893 default:
3894 goto fail;
3895 }
3896 if (dpl < cpl || dpl < rpl) {
3897 fail:
3898 CC_SRC = eflags & ~CC_Z;
3899 return 0;
3900 }
3901 }
3902 limit = get_seg_limit(e1, e2);
3903 CC_SRC = eflags | CC_Z;
3904 return limit;
3905}
3906
3907target_ulong helper_lar(target_ulong selector1)
3908{
3909 uint32_t e1, e2, eflags, selector;
3910 int rpl, dpl, cpl, type;
3911
3912 selector = selector1 & 0xffff;
3913 eflags = cc_table[CC_OP].compute_all();
3914 if ((selector & 0xfffc) == 0)
3915 goto fail;
3916 if (load_segment(&e1, &e2, selector) != 0)
3917 goto fail;
3918 rpl = selector & 3;
3919 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3920 cpl = env->hflags & HF_CPL_MASK;
3921 if (e2 & DESC_S_MASK) {
3922 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3923 /* conforming */
3924 } else {
3925 if (dpl < cpl || dpl < rpl)
3926 goto fail;
3927 }
3928 } else {
3929 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3930 switch(type) {
3931 case 1:
3932 case 2:
3933 case 3:
3934 case 4:
3935 case 5:
3936 case 9:
3937 case 11:
3938 case 12:
3939 break;
3940 default:
3941 goto fail;
3942 }
3943 if (dpl < cpl || dpl < rpl) {
3944 fail:
3945 CC_SRC = eflags & ~CC_Z;
3946 return 0;
3947 }
3948 }
3949 CC_SRC = eflags | CC_Z;
3950 return e2 & 0x00f0ff00;
3951}
3952
3953void helper_verr(target_ulong selector1)
3954{
3955 uint32_t e1, e2, eflags, selector;
3956 int rpl, dpl, cpl;
3957
3958 selector = selector1 & 0xffff;
3959 eflags = cc_table[CC_OP].compute_all();
3960 if ((selector & 0xfffc) == 0)
3961 goto fail;
3962 if (load_segment(&e1, &e2, selector) != 0)
3963 goto fail;
3964 if (!(e2 & DESC_S_MASK))
3965 goto fail;
3966 rpl = selector & 3;
3967 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3968 cpl = env->hflags & HF_CPL_MASK;
3969 if (e2 & DESC_CS_MASK) {
3970 if (!(e2 & DESC_R_MASK))
3971 goto fail;
3972 if (!(e2 & DESC_C_MASK)) {
3973 if (dpl < cpl || dpl < rpl)
3974 goto fail;
3975 }
3976 } else {
3977 if (dpl < cpl || dpl < rpl) {
3978 fail:
3979 CC_SRC = eflags & ~CC_Z;
3980 return;
3981 }
3982 }
3983 CC_SRC = eflags | CC_Z;
3984}
3985
3986void helper_verw(target_ulong selector1)
3987{
3988 uint32_t e1, e2, eflags, selector;
3989 int rpl, dpl, cpl;
3990
3991 selector = selector1 & 0xffff;
3992 eflags = cc_table[CC_OP].compute_all();
3993 if ((selector & 0xfffc) == 0)
3994 goto fail;
3995 if (load_segment(&e1, &e2, selector) != 0)
3996 goto fail;
3997 if (!(e2 & DESC_S_MASK))
3998 goto fail;
3999 rpl = selector & 3;
4000 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4001 cpl = env->hflags & HF_CPL_MASK;
4002 if (e2 & DESC_CS_MASK) {
4003 goto fail;
4004 } else {
4005 if (dpl < cpl || dpl < rpl)
4006 goto fail;
4007 if (!(e2 & DESC_W_MASK)) {
4008 fail:
4009 CC_SRC = eflags & ~CC_Z;
4010 return;
4011 }
4012 }
4013 CC_SRC = eflags | CC_Z;
4014}
4015
4016/* x87 FPU helpers */
4017
4018static void fpu_set_exception(int mask)
4019{
4020 env->fpus |= mask;
4021 if (env->fpus & (~env->fpuc & FPUC_EM))
4022 env->fpus |= FPUS_SE | FPUS_B;
4023}
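
/* Illustrative sketch (not used by the emulation): an exception bit only becomes
 * pending (FPUS_SE | FPUS_B) when the corresponding mask bit in the control word
 * is clear.  E.g. with the zero-divide mask (FCW bit 2) cleared, helper_fdiv()
 * below makes the next waiting FP instruction report the fault through
 * fpu_raise_exception(). */
#if 0
static void example_unmasked_zero_divide(void)
{
    env->fpuc &= ~0x04;          /* unmask the zero-divide exception (ZM) */
    fpu_set_exception(FPUS_ZE);  /* ZE unmasked -> FPUS_SE and FPUS_B get set */
    helper_fwait();              /* pending summary bit -> fpu_raise_exception() */
}
#endif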
4024
4025#ifndef VBOX
4026static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4027#else /* VBOX */
4028DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4029#endif /* VBOX */
4030{
4031 if (b == 0.0)
4032 fpu_set_exception(FPUS_ZE);
4033 return a / b;
4034}
4035
4036void fpu_raise_exception(void)
4037{
4038 if (env->cr[0] & CR0_NE_MASK) {
4039 raise_exception(EXCP10_COPR);
4040 }
4041#if !defined(CONFIG_USER_ONLY)
4042 else {
4043 cpu_set_ferr(env);
4044 }
4045#endif
4046}
4047
4048void helper_flds_FT0(uint32_t val)
4049{
4050 union {
4051 float32 f;
4052 uint32_t i;
4053 } u;
4054 u.i = val;
4055 FT0 = float32_to_floatx(u.f, &env->fp_status);
4056}
4057
4058void helper_fldl_FT0(uint64_t val)
4059{
4060 union {
4061 float64 f;
4062 uint64_t i;
4063 } u;
4064 u.i = val;
4065 FT0 = float64_to_floatx(u.f, &env->fp_status);
4066}
4067
4068void helper_fildl_FT0(int32_t val)
4069{
4070 FT0 = int32_to_floatx(val, &env->fp_status);
4071}
4072
4073void helper_flds_ST0(uint32_t val)
4074{
4075 int new_fpstt;
4076 union {
4077 float32 f;
4078 uint32_t i;
4079 } u;
4080 new_fpstt = (env->fpstt - 1) & 7;
4081 u.i = val;
4082 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4083 env->fpstt = new_fpstt;
4084 env->fptags[new_fpstt] = 0; /* validate stack entry */
4085}
4086
4087void helper_fldl_ST0(uint64_t val)
4088{
4089 int new_fpstt;
4090 union {
4091 float64 f;
4092 uint64_t i;
4093 } u;
4094 new_fpstt = (env->fpstt - 1) & 7;
4095 u.i = val;
4096 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4097 env->fpstt = new_fpstt;
4098 env->fptags[new_fpstt] = 0; /* validate stack entry */
4099}
4100
4101void helper_fildl_ST0(int32_t val)
4102{
4103 int new_fpstt;
4104 new_fpstt = (env->fpstt - 1) & 7;
4105 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4106 env->fpstt = new_fpstt;
4107 env->fptags[new_fpstt] = 0; /* validate stack entry */
4108}
4109
4110void helper_fildll_ST0(int64_t val)
4111{
4112 int new_fpstt;
4113 new_fpstt = (env->fpstt - 1) & 7;
4114 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4115 env->fpstt = new_fpstt;
4116 env->fptags[new_fpstt] = 0; /* validate stack entry */
4117}
4118
4119#ifndef VBOX
4120uint32_t helper_fsts_ST0(void)
4121#else
4122RTCCUINTREG helper_fsts_ST0(void)
4123#endif
4124{
4125 union {
4126 float32 f;
4127 uint32_t i;
4128 } u;
4129 u.f = floatx_to_float32(ST0, &env->fp_status);
4130 return u.i;
4131}
4132
4133uint64_t helper_fstl_ST0(void)
4134{
4135 union {
4136 float64 f;
4137 uint64_t i;
4138 } u;
4139 u.f = floatx_to_float64(ST0, &env->fp_status);
4140 return u.i;
4141}
4142#ifndef VBOX
4143int32_t helper_fist_ST0(void)
4144#else
4145RTCCINTREG helper_fist_ST0(void)
4146#endif
4147{
4148 int32_t val;
4149 val = floatx_to_int32(ST0, &env->fp_status);
4150 if (val != (int16_t)val)
4151 val = -32768;
4152 return val;
4153}
4154
4155#ifndef VBOX
4156int32_t helper_fistl_ST0(void)
4157#else
4158RTCCINTREG helper_fistl_ST0(void)
4159#endif
4160{
4161 int32_t val;
4162 val = floatx_to_int32(ST0, &env->fp_status);
4163 return val;
4164}
4165
4166int64_t helper_fistll_ST0(void)
4167{
4168 int64_t val;
4169 val = floatx_to_int64(ST0, &env->fp_status);
4170 return val;
4171}
4172
4173#ifndef VBOX
4174int32_t helper_fistt_ST0(void)
4175#else
4176RTCCINTREG helper_fistt_ST0(void)
4177#endif
4178{
4179 int32_t val;
4180 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4181 if (val != (int16_t)val)
4182 val = -32768;
4183 return val;
4184}
4185
4186#ifndef VBOX
4187int32_t helper_fisttl_ST0(void)
4188#else
4189RTCCINTREG helper_fisttl_ST0(void)
4190#endif
4191{
4192 int32_t val;
4193 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4194 return val;
4195}
4196
4197int64_t helper_fisttll_ST0(void)
4198{
4199 int64_t val;
4200 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4201 return val;
4202}
4203
4204void helper_fldt_ST0(target_ulong ptr)
4205{
4206 int new_fpstt;
4207 new_fpstt = (env->fpstt - 1) & 7;
4208 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4209 env->fpstt = new_fpstt;
4210 env->fptags[new_fpstt] = 0; /* validate stack entry */
4211}
4212
4213void helper_fstt_ST0(target_ulong ptr)
4214{
4215 helper_fstt(ST0, ptr);
4216}
4217
4218void helper_fpush(void)
4219{
4220 fpush();
4221}
4222
4223void helper_fpop(void)
4224{
4225 fpop();
4226}
4227
4228void helper_fdecstp(void)
4229{
4230 env->fpstt = (env->fpstt - 1) & 7;
4231 env->fpus &= (~0x4700);
4232}
4233
4234void helper_fincstp(void)
4235{
4236 env->fpstt = (env->fpstt + 1) & 7;
4237 env->fpus &= (~0x4700);
4238}
4239
4240/* FPU move */
4241
4242void helper_ffree_STN(int st_index)
4243{
4244 env->fptags[(env->fpstt + st_index) & 7] = 1;
4245}
4246
4247void helper_fmov_ST0_FT0(void)
4248{
4249 ST0 = FT0;
4250}
4251
4252void helper_fmov_FT0_STN(int st_index)
4253{
4254 FT0 = ST(st_index);
4255}
4256
4257void helper_fmov_ST0_STN(int st_index)
4258{
4259 ST0 = ST(st_index);
4260}
4261
4262void helper_fmov_STN_ST0(int st_index)
4263{
4264 ST(st_index) = ST0;
4265}
4266
4267void helper_fxchg_ST0_STN(int st_index)
4268{
4269 CPU86_LDouble tmp;
4270 tmp = ST(st_index);
4271 ST(st_index) = ST0;
4272 ST0 = tmp;
4273}
4274
4275/* FPU operations */
4276
4277static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
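
/* floatx_compare()/floatx_compare_quiet() return -1 (less), 0 (equal), 1 (greater)
 * or 2 (unordered), so indexing the table with ret + 1 yields the x87 condition
 * codes: less -> C0 (0x0100), equal -> C3 (0x4000), greater -> none,
 * unordered -> C3|C2|C0 (0x4500).  The fcomi_ccval table further down applies the
 * same convention to EFLAGS (CF / ZF / ZF|PF|CF). */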
4278
4279void helper_fcom_ST0_FT0(void)
4280{
4281 int ret;
4282
4283 ret = floatx_compare(ST0, FT0, &env->fp_status);
4284 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4285 FORCE_RET();
4286}
4287
4288void helper_fucom_ST0_FT0(void)
4289{
4290 int ret;
4291
4292 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4293 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4294 FORCE_RET();
4295}
4296
4297static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4298
4299void helper_fcomi_ST0_FT0(void)
4300{
4301 int eflags;
4302 int ret;
4303
4304 ret = floatx_compare(ST0, FT0, &env->fp_status);
4305 eflags = cc_table[CC_OP].compute_all();
4306 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4307 CC_SRC = eflags;
4308 FORCE_RET();
4309}
4310
4311void helper_fucomi_ST0_FT0(void)
4312{
4313 int eflags;
4314 int ret;
4315
4316 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4317 eflags = cc_table[CC_OP].compute_all();
4318 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4319 CC_SRC = eflags;
4320 FORCE_RET();
4321}
4322
4323void helper_fadd_ST0_FT0(void)
4324{
4325 ST0 += FT0;
4326}
4327
4328void helper_fmul_ST0_FT0(void)
4329{
4330 ST0 *= FT0;
4331}
4332
4333void helper_fsub_ST0_FT0(void)
4334{
4335 ST0 -= FT0;
4336}
4337
4338void helper_fsubr_ST0_FT0(void)
4339{
4340 ST0 = FT0 - ST0;
4341}
4342
4343void helper_fdiv_ST0_FT0(void)
4344{
4345 ST0 = helper_fdiv(ST0, FT0);
4346}
4347
4348void helper_fdivr_ST0_FT0(void)
4349{
4350 ST0 = helper_fdiv(FT0, ST0);
4351}
4352
4353/* fp operations between STN and ST0 */
4354
4355void helper_fadd_STN_ST0(int st_index)
4356{
4357 ST(st_index) += ST0;
4358}
4359
4360void helper_fmul_STN_ST0(int st_index)
4361{
4362 ST(st_index) *= ST0;
4363}
4364
4365void helper_fsub_STN_ST0(int st_index)
4366{
4367 ST(st_index) -= ST0;
4368}
4369
4370void helper_fsubr_STN_ST0(int st_index)
4371{
4372 CPU86_LDouble *p;
4373 p = &ST(st_index);
4374 *p = ST0 - *p;
4375}
4376
4377void helper_fdiv_STN_ST0(int st_index)
4378{
4379 CPU86_LDouble *p;
4380 p = &ST(st_index);
4381 *p = helper_fdiv(*p, ST0);
4382}
4383
4384void helper_fdivr_STN_ST0(int st_index)
4385{
4386 CPU86_LDouble *p;
4387 p = &ST(st_index);
4388 *p = helper_fdiv(ST0, *p);
4389}
4390
4391/* misc FPU operations */
4392void helper_fchs_ST0(void)
4393{
4394 ST0 = floatx_chs(ST0);
4395}
4396
4397void helper_fabs_ST0(void)
4398{
4399 ST0 = floatx_abs(ST0);
4400}
4401
4402void helper_fld1_ST0(void)
4403{
4404 ST0 = f15rk[1];
4405}
4406
4407void helper_fldl2t_ST0(void)
4408{
4409 ST0 = f15rk[6];
4410}
4411
4412void helper_fldl2e_ST0(void)
4413{
4414 ST0 = f15rk[5];
4415}
4416
4417void helper_fldpi_ST0(void)
4418{
4419 ST0 = f15rk[2];
4420}
4421
4422void helper_fldlg2_ST0(void)
4423{
4424 ST0 = f15rk[3];
4425}
4426
4427void helper_fldln2_ST0(void)
4428{
4429 ST0 = f15rk[4];
4430}
4431
4432void helper_fldz_ST0(void)
4433{
4434 ST0 = f15rk[0];
4435}
4436
4437void helper_fldz_FT0(void)
4438{
4439 FT0 = f15rk[0];
4440}
4441
4442#ifndef VBOX
4443uint32_t helper_fnstsw(void)
4444#else
4445RTCCUINTREG helper_fnstsw(void)
4446#endif
4447{
4448 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4449}
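
/* Worked example: the status word returned above carries the top-of-stack index
 * in bits 11-13, so with fpstt == 3 and no exception or condition bits set the
 * result is 3 << 11 == 0x1800. */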
4450
4451#ifndef VBOX
4452uint32_t helper_fnstcw(void)
4453#else
4454RTCCUINTREG helper_fnstcw(void)
4455#endif
4456{
4457 return env->fpuc;
4458}
4459
4460static void update_fp_status(void)
4461{
4462 int rnd_type;
4463
4464 /* set rounding mode */
4465 switch(env->fpuc & RC_MASK) {
4466 default:
4467 case RC_NEAR:
4468 rnd_type = float_round_nearest_even;
4469 break;
4470 case RC_DOWN:
4471 rnd_type = float_round_down;
4472 break;
4473 case RC_UP:
4474 rnd_type = float_round_up;
4475 break;
4476 case RC_CHOP:
4477 rnd_type = float_round_to_zero;
4478 break;
4479 }
4480 set_float_rounding_mode(rnd_type, &env->fp_status);
4481#ifdef FLOATX80
4482 switch((env->fpuc >> 8) & 3) {
4483 case 0:
4484 rnd_type = 32;
4485 break;
4486 case 2:
4487 rnd_type = 64;
4488 break;
4489 case 3:
4490 default:
4491 rnd_type = 80;
4492 break;
4493 }
4494 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4495#endif
4496}
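
/* Illustrative use (not called by the translator): the control word packs the
 * rounding control in bits 10-11 and the precision control in bits 8-9.  The
 * reset value 0x037f loaded by helper_fninit() selects round-to-nearest and
 * 80-bit precision; OR-ing in 0x0c00 switches to truncation. */
#if 0
static void example_round_toward_zero(void)
{
    helper_fldcw(0x037f | 0x0c00);  /* RC == 11 (RC_CHOP) -> round toward zero */
}
#endif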
4497
4498void helper_fldcw(uint32_t val)
4499{
4500 env->fpuc = val;
4501 update_fp_status();
4502}
4503
4504void helper_fclex(void)
4505{
4506 env->fpus &= 0x7f00;
4507}
4508
4509void helper_fwait(void)
4510{
4511 if (env->fpus & FPUS_SE)
4512 fpu_raise_exception();
4513 FORCE_RET();
4514}
4515
4516void helper_fninit(void)
4517{
4518 env->fpus = 0;
4519 env->fpstt = 0;
4520 env->fpuc = 0x37f;
4521 env->fptags[0] = 1;
4522 env->fptags[1] = 1;
4523 env->fptags[2] = 1;
4524 env->fptags[3] = 1;
4525 env->fptags[4] = 1;
4526 env->fptags[5] = 1;
4527 env->fptags[6] = 1;
4528 env->fptags[7] = 1;
4529}
4530
4531/* BCD ops */
4532
4533void helper_fbld_ST0(target_ulong ptr)
4534{
4535 CPU86_LDouble tmp;
4536 uint64_t val;
4537 unsigned int v;
4538 int i;
4539
4540 val = 0;
4541 for(i = 8; i >= 0; i--) {
4542 v = ldub(ptr + i);
4543 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4544 }
4545 tmp = val;
4546 if (ldub(ptr + 9) & 0x80)
4547 tmp = -tmp;
4548 fpush();
4549 ST0 = tmp;
4550}
4551
4552void helper_fbst_ST0(target_ulong ptr)
4553{
4554 int v;
4555 target_ulong mem_ref, mem_end;
4556 int64_t val;
4557
4558 val = floatx_to_int64(ST0, &env->fp_status);
4559 mem_ref = ptr;
4560 mem_end = mem_ref + 9;
4561 if (val < 0) {
4562 stb(mem_end, 0x80);
4563 val = -val;
4564 } else {
4565 stb(mem_end, 0x00);
4566 }
4567 while (mem_ref < mem_end) {
4568 if (val == 0)
4569 break;
4570 v = val % 100;
4571 val = val / 100;
4572 v = ((v / 10) << 4) | (v % 10);
4573 stb(mem_ref++, v);
4574 }
4575 while (mem_ref < mem_end) {
4576 stb(mem_ref++, 0);
4577 }
4578}
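
/* Illustration of the 10-byte packed BCD format handled by the two helpers above:
 * bytes 0..8 hold 18 BCD digits, two per byte, least significant pair first;
 * byte 9 carries the sign in bit 7.  E.g. -1234 is stored as
 * 34 12 00 00 00 00 00 00 00 80. */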
4579
4580void helper_f2xm1(void)
4581{
4582 ST0 = pow(2.0,ST0) - 1.0;
4583}
4584
4585void helper_fyl2x(void)
4586{
4587 CPU86_LDouble fptemp;
4588
4589 fptemp = ST0;
4590 if (fptemp>0.0){
4591 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4592 ST1 *= fptemp;
4593 fpop();
4594 } else {
4595 env->fpus &= (~0x4700);
4596 env->fpus |= 0x400;
4597 }
4598}
4599
4600void helper_fptan(void)
4601{
4602 CPU86_LDouble fptemp;
4603
4604 fptemp = ST0;
4605 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4606 env->fpus |= 0x400;
4607 } else {
4608 ST0 = tan(fptemp);
4609 fpush();
4610 ST0 = 1.0;
4611 env->fpus &= (~0x400); /* C2 <-- 0 */
4612 /* the above code is for |arg| < 2**52 only */
4613 }
4614}
4615
4616void helper_fpatan(void)
4617{
4618 CPU86_LDouble fptemp, fpsrcop;
4619
4620 fpsrcop = ST1;
4621 fptemp = ST0;
4622 ST1 = atan2(fpsrcop,fptemp);
4623 fpop();
4624}
4625
4626void helper_fxtract(void)
4627{
4628 CPU86_LDoubleU temp;
4629 unsigned int expdif;
4630
4631 temp.d = ST0;
4632 expdif = EXPD(temp) - EXPBIAS;
4633 /*DP exponent bias*/
4634 ST0 = expdif;
4635 fpush();
4636 BIASEXPONENT(temp);
4637 ST0 = temp.d;
4638}
4639
4640#ifdef VBOX
4641#ifdef _MSC_VER
4642/* MSC cannot divide by zero */
4643extern double _Nan;
4644#define NaN _Nan
4645#else
4646#define NaN (0.0 / 0.0)
4647#endif
4648#endif /* VBOX */
4649
4650void helper_fprem1(void)
4651{
4652 CPU86_LDouble dblq, fpsrcop, fptemp;
4653 CPU86_LDoubleU fpsrcop1, fptemp1;
4654 int expdif;
4655 signed long long int q;
4656
4657#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4658 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4659#else
4660 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4661#endif
4662        ST0 = NaN; /* use the NaN macro above; MSC cannot compile a literal 0.0 / 0.0 */
4663 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4664 return;
4665 }
4666
4667 fpsrcop = ST0;
4668 fptemp = ST1;
4669 fpsrcop1.d = fpsrcop;
4670 fptemp1.d = fptemp;
4671 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4672
4673 if (expdif < 0) {
4674 /* optimisation? taken from the AMD docs */
4675 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4676 /* ST0 is unchanged */
4677 return;
4678 }
4679
4680 if (expdif < 53) {
4681 dblq = fpsrcop / fptemp;
4682 /* round dblq towards nearest integer */
4683 dblq = rint(dblq);
4684 ST0 = fpsrcop - fptemp * dblq;
4685
4686 /* convert dblq to q by truncating towards zero */
4687 if (dblq < 0.0)
4688 q = (signed long long int)(-dblq);
4689 else
4690 q = (signed long long int)dblq;
4691
4692 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4693 /* (C0,C3,C1) <-- (q2,q1,q0) */
4694 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4695 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4696 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4697 } else {
4698 env->fpus |= 0x400; /* C2 <-- 1 */
4699 fptemp = pow(2.0, expdif - 50);
4700 fpsrcop = (ST0 / ST1) / fptemp;
4701 /* fpsrcop = integer obtained by chopping */
4702 fpsrcop = (fpsrcop < 0.0) ?
4703 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4704 ST0 -= (ST1 * fpsrcop * fptemp);
4705 }
4706}
4707
4708void helper_fprem(void)
4709{
4710 CPU86_LDouble dblq, fpsrcop, fptemp;
4711 CPU86_LDoubleU fpsrcop1, fptemp1;
4712 int expdif;
4713 signed long long int q;
4714
4715#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4716 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4717#else
4718 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4719#endif
4720        ST0 = NaN; /* use the NaN macro above; MSC cannot compile a literal 0.0 / 0.0 */
4721 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4722 return;
4723 }
4724
4725 fpsrcop = (CPU86_LDouble)ST0;
4726 fptemp = (CPU86_LDouble)ST1;
4727 fpsrcop1.d = fpsrcop;
4728 fptemp1.d = fptemp;
4729 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4730
4731 if (expdif < 0) {
4732 /* optimisation? taken from the AMD docs */
4733 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4734 /* ST0 is unchanged */
4735 return;
4736 }
4737
4738 if ( expdif < 53 ) {
4739 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4740 /* round dblq towards zero */
4741 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4742 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4743
4744 /* convert dblq to q by truncating towards zero */
4745 if (dblq < 0.0)
4746 q = (signed long long int)(-dblq);
4747 else
4748 q = (signed long long int)dblq;
4749
4750 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4751 /* (C0,C3,C1) <-- (q2,q1,q0) */
4752 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4753 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4754 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4755 } else {
4756 int N = 32 + (expdif % 32); /* as per AMD docs */
4757 env->fpus |= 0x400; /* C2 <-- 1 */
4758 fptemp = pow(2.0, (double)(expdif - N));
4759 fpsrcop = (ST0 / ST1) / fptemp;
4760 /* fpsrcop = integer obtained by chopping */
4761 fpsrcop = (fpsrcop < 0.0) ?
4762 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4763 ST0 -= (ST1 * fpsrcop * fptemp);
4764 }
4765}
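
/* Worked example for the quotient-bit reporting above: fprem of ST0 == 17.0 by
 * ST1 == 5.0 gives quotient q == 3 and remainder 2.0, so q0 == 1 -> C1,
 * q1 == 1 -> C3, q2 == 0 -> C0 clear, i.e. 0x4200 is OR-ed into the status word
 * and C2 stays clear (reduction complete). */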
4766
4767void helper_fyl2xp1(void)
4768{
4769 CPU86_LDouble fptemp;
4770
4771 fptemp = ST0;
4772 if ((fptemp+1.0)>0.0) {
4773 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4774 ST1 *= fptemp;
4775 fpop();
4776 } else {
4777 env->fpus &= (~0x4700);
4778 env->fpus |= 0x400;
4779 }
4780}
4781
4782void helper_fsqrt(void)
4783{
4784 CPU86_LDouble fptemp;
4785
4786 fptemp = ST0;
4787 if (fptemp<0.0) {
4788 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4789 env->fpus |= 0x400;
4790 }
4791 ST0 = sqrt(fptemp);
4792}
4793
4794void helper_fsincos(void)
4795{
4796 CPU86_LDouble fptemp;
4797
4798 fptemp = ST0;
4799 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4800 env->fpus |= 0x400;
4801 } else {
4802 ST0 = sin(fptemp);
4803 fpush();
4804 ST0 = cos(fptemp);
4805 env->fpus &= (~0x400); /* C2 <-- 0 */
4806 /* the above code is for |arg| < 2**63 only */
4807 }
4808}
4809
4810void helper_frndint(void)
4811{
4812 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4813}
4814
4815void helper_fscale(void)
4816{
4817 ST0 = ldexp (ST0, (int)(ST1));
4818}
4819
4820void helper_fsin(void)
4821{
4822 CPU86_LDouble fptemp;
4823
4824 fptemp = ST0;
4825 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4826 env->fpus |= 0x400;
4827 } else {
4828 ST0 = sin(fptemp);
4829 env->fpus &= (~0x400); /* C2 <-- 0 */
4830 /* the above code is for |arg| < 2**53 only */
4831 }
4832}
4833
4834void helper_fcos(void)
4835{
4836 CPU86_LDouble fptemp;
4837
4838 fptemp = ST0;
4839 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4840 env->fpus |= 0x400;
4841 } else {
4842 ST0 = cos(fptemp);
4843 env->fpus &= (~0x400); /* C2 <-- 0 */
4844        /* the above code is for |arg| < 2**63 only */
4845 }
4846}
4847
4848void helper_fxam_ST0(void)
4849{
4850 CPU86_LDoubleU temp;
4851 int expdif;
4852
4853 temp.d = ST0;
4854
4855 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4856 if (SIGND(temp))
4857 env->fpus |= 0x200; /* C1 <-- 1 */
4858
4859 /* XXX: test fptags too */
4860 expdif = EXPD(temp);
4861 if (expdif == MAXEXPD) {
4862#ifdef USE_X86LDOUBLE
4863 if (MANTD(temp) == 0x8000000000000000ULL)
4864#else
4865 if (MANTD(temp) == 0)
4866#endif
4867 env->fpus |= 0x500 /*Infinity*/;
4868 else
4869 env->fpus |= 0x100 /*NaN*/;
4870 } else if (expdif == 0) {
4871 if (MANTD(temp) == 0)
4872 env->fpus |= 0x4000 /*Zero*/;
4873 else
4874 env->fpus |= 0x4400 /*Denormal*/;
4875 } else {
4876 env->fpus |= 0x400;
4877 }
4878}
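
/* Classification summary for helper_fxam_ST0(): C3,C2,C0 encode the class
 * (C1 carries the sign): 0x0100 (C0) NaN, 0x0400 (C2) normal finite,
 * 0x0500 (C2|C0) infinity, 0x4000 (C3) zero, 0x4400 (C3|C2) denormal.
 * Empty registers are not detected here (see the XXX about fptags above). */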
4879
4880void helper_fstenv(target_ulong ptr, int data32)
4881{
4882 int fpus, fptag, exp, i;
4883 uint64_t mant;
4884 CPU86_LDoubleU tmp;
4885
4886 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4887 fptag = 0;
4888 for (i=7; i>=0; i--) {
4889 fptag <<= 2;
4890 if (env->fptags[i]) {
4891 fptag |= 3;
4892 } else {
4893 tmp.d = env->fpregs[i].d;
4894 exp = EXPD(tmp);
4895 mant = MANTD(tmp);
4896 if (exp == 0 && mant == 0) {
4897 /* zero */
4898 fptag |= 1;
4899 } else if (exp == 0 || exp == MAXEXPD
4900#ifdef USE_X86LDOUBLE
4901 || (mant & (1LL << 63)) == 0
4902#endif
4903 ) {
4904 /* NaNs, infinity, denormal */
4905 fptag |= 2;
4906 }
4907 }
4908 }
4909 if (data32) {
4910 /* 32 bit */
4911 stl(ptr, env->fpuc);
4912 stl(ptr + 4, fpus);
4913 stl(ptr + 8, fptag);
4914 stl(ptr + 12, 0); /* fpip */
4915 stl(ptr + 16, 0); /* fpcs */
4916 stl(ptr + 20, 0); /* fpoo */
4917 stl(ptr + 24, 0); /* fpos */
4918 } else {
4919 /* 16 bit */
4920 stw(ptr, env->fpuc);
4921 stw(ptr + 2, fpus);
4922 stw(ptr + 4, fptag);
4923 stw(ptr + 6, 0);
4924 stw(ptr + 8, 0);
4925 stw(ptr + 10, 0);
4926 stw(ptr + 12, 0);
4927 }
4928}
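
/* The full 16-bit tag word stored above uses two bits per physical register
 * (00 valid, 01 zero, 10 special - NaN/infinity/denormal, 11 empty), with
 * register 0 in bits 0-1.  E.g. right after helper_fninit() every register is
 * empty and the stored tag word is 0xffff. */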
4929
4930void helper_fldenv(target_ulong ptr, int data32)
4931{
4932 int i, fpus, fptag;
4933
4934 if (data32) {
4935 env->fpuc = lduw(ptr);
4936 fpus = lduw(ptr + 4);
4937 fptag = lduw(ptr + 8);
4938 }
4939 else {
4940 env->fpuc = lduw(ptr);
4941 fpus = lduw(ptr + 2);
4942 fptag = lduw(ptr + 4);
4943 }
4944 env->fpstt = (fpus >> 11) & 7;
4945 env->fpus = fpus & ~0x3800;
4946 for(i = 0;i < 8; i++) {
4947 env->fptags[i] = ((fptag & 3) == 3);
4948 fptag >>= 2;
4949 }
4950}
4951
4952void helper_fsave(target_ulong ptr, int data32)
4953{
4954 CPU86_LDouble tmp;
4955 int i;
4956
4957 helper_fstenv(ptr, data32);
4958
4959 ptr += (14 << data32);
4960 for(i = 0;i < 8; i++) {
4961 tmp = ST(i);
4962 helper_fstt(tmp, ptr);
4963 ptr += 10;
4964 }
4965
4966 /* fninit */
4967 env->fpus = 0;
4968 env->fpstt = 0;
4969 env->fpuc = 0x37f;
4970 env->fptags[0] = 1;
4971 env->fptags[1] = 1;
4972 env->fptags[2] = 1;
4973 env->fptags[3] = 1;
4974 env->fptags[4] = 1;
4975 env->fptags[5] = 1;
4976 env->fptags[6] = 1;
4977 env->fptags[7] = 1;
4978}
4979
4980void helper_frstor(target_ulong ptr, int data32)
4981{
4982 CPU86_LDouble tmp;
4983 int i;
4984
4985 helper_fldenv(ptr, data32);
4986 ptr += (14 << data32);
4987
4988 for(i = 0;i < 8; i++) {
4989 tmp = helper_fldt(ptr);
4990 ST(i) = tmp;
4991 ptr += 10;
4992 }
4993}
4994
4995void helper_fxsave(target_ulong ptr, int data64)
4996{
4997 int fpus, fptag, i, nb_xmm_regs;
4998 CPU86_LDouble tmp;
4999 target_ulong addr;
5000
5001 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5002 fptag = 0;
5003 for(i = 0; i < 8; i++) {
5004 fptag |= (env->fptags[i] << i);
5005 }
5006 stw(ptr, env->fpuc);
5007 stw(ptr + 2, fpus);
5008 stw(ptr + 4, fptag ^ 0xff);
5009#ifdef TARGET_X86_64
5010 if (data64) {
5011 stq(ptr + 0x08, 0); /* rip */
5012 stq(ptr + 0x10, 0); /* rdp */
5013 } else
5014#endif
5015 {
5016 stl(ptr + 0x08, 0); /* eip */
5017 stl(ptr + 0x0c, 0); /* sel */
5018 stl(ptr + 0x10, 0); /* dp */
5019 stl(ptr + 0x14, 0); /* sel */
5020 }
5021
5022 addr = ptr + 0x20;
5023 for(i = 0;i < 8; i++) {
5024 tmp = ST(i);
5025 helper_fstt(tmp, addr);
5026 addr += 16;
5027 }
5028
5029 if (env->cr[4] & CR4_OSFXSR_MASK) {
5030 /* XXX: finish it */
5031 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5032 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5033 if (env->hflags & HF_CS64_MASK)
5034 nb_xmm_regs = 16;
5035 else
5036 nb_xmm_regs = 8;
5037 addr = ptr + 0xa0;
5038 for(i = 0; i < nb_xmm_regs; i++) {
5039 stq(addr, env->xmm_regs[i].XMM_Q(0));
5040 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5041 addr += 16;
5042 }
5043 }
5044}
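
/* Layout of the FXSAVE image written above (offsets within ptr):
 *   0x00 FCW, 0x02 FSW, 0x04 abridged tag word (one bit per register,
 *   1 = non-empty, hence the ^ 0xff), 0x08/0x10 instruction and data pointers
 *   (always stored as 0 here), 0x18 MXCSR, 0x1c MXCSR_MASK,
 *   0x20 ST0..ST7 in 16-byte slots, 0xa0 XMM0..XMM7 (XMM0..XMM15 when CS.L is
 *   set).  The MXCSR pair and the XMM area are only written when CR4.OSFXSR
 *   is set. */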
5045
5046void helper_fxrstor(target_ulong ptr, int data64)
5047{
5048 int i, fpus, fptag, nb_xmm_regs;
5049 CPU86_LDouble tmp;
5050 target_ulong addr;
5051
5052 env->fpuc = lduw(ptr);
5053 fpus = lduw(ptr + 2);
5054 fptag = lduw(ptr + 4);
5055 env->fpstt = (fpus >> 11) & 7;
5056 env->fpus = fpus & ~0x3800;
5057 fptag ^= 0xff;
5058 for(i = 0;i < 8; i++) {
5059 env->fptags[i] = ((fptag >> i) & 1);
5060 }
5061
5062 addr = ptr + 0x20;
5063 for(i = 0;i < 8; i++) {
5064 tmp = helper_fldt(addr);
5065 ST(i) = tmp;
5066 addr += 16;
5067 }
5068
5069 if (env->cr[4] & CR4_OSFXSR_MASK) {
5070 /* XXX: finish it */
5071 env->mxcsr = ldl(ptr + 0x18);
5072 //ldl(ptr + 0x1c);
5073 if (env->hflags & HF_CS64_MASK)
5074 nb_xmm_regs = 16;
5075 else
5076 nb_xmm_regs = 8;
5077 addr = ptr + 0xa0;
5078 for(i = 0; i < nb_xmm_regs; i++) {
5079#if !defined(VBOX) || __GNUC__ < 4
5080 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5081 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5082#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5083# if 1
5084 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5085 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5086 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5087 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5088# else
5089 /* this works fine on Mac OS X, gcc 4.0.1 */
5090 uint64_t u64 = ldq(addr);
5091            env->xmm_regs[i].XMM_Q(0) = u64;
5092            u64 = ldq(addr + 8);
5093 env->xmm_regs[i].XMM_Q(1) = u64;
5094# endif
5095#endif
5096 addr += 16;
5097 }
5098 }
5099}
5100
5101#ifndef USE_X86LDOUBLE
5102
5103void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5104{
5105 CPU86_LDoubleU temp;
5106 int e;
5107
5108 temp.d = f;
5109 /* mantissa */
5110 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5111 /* exponent + sign */
5112 e = EXPD(temp) - EXPBIAS + 16383;
5113 e |= SIGND(temp) >> 16;
5114 *pexp = e;
5115}
5116
5117CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5118{
5119 CPU86_LDoubleU temp;
5120 int e;
5121 uint64_t ll;
5122
5123 /* XXX: handle overflow ? */
5124 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5125 e |= (upper >> 4) & 0x800; /* sign */
5126 ll = (mant >> 11) & ((1LL << 52) - 1);
5127#ifdef __arm__
5128 temp.l.upper = (e << 20) | (ll >> 32);
5129 temp.l.lower = ll;
5130#else
5131 temp.ll = ll | ((uint64_t)e << 52);
5132#endif
5133 return temp.d;
5134}
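
/* The two conversions above re-bias the exponent (1023 for the double format,
 * 16383 for the 80-bit format) and shift the 52-bit double mantissa up by 11 to
 * line up with the explicit integer bit of the extended format.  E.g. 1.0 is
 * mantissa 0x8000000000000000 / exponent 0x3fff as an 80-bit value and
 * mantissa 0 / exponent 0x3ff as a double. */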
5135
5136#else
5137
5138void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5139{
5140 CPU86_LDoubleU temp;
5141
5142 temp.d = f;
5143 *pmant = temp.l.lower;
5144 *pexp = temp.l.upper;
5145}
5146
5147CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5148{
5149 CPU86_LDoubleU temp;
5150
5151 temp.l.upper = upper;
5152 temp.l.lower = mant;
5153 return temp.d;
5154}
5155#endif
5156
5157#ifdef TARGET_X86_64
5158
5159//#define DEBUG_MULDIV
5160
5161static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5162{
5163 *plow += a;
5164 /* carry test */
5165 if (*plow < a)
5166 (*phigh)++;
5167 *phigh += b;
5168}
5169
5170static void neg128(uint64_t *plow, uint64_t *phigh)
5171{
5172 *plow = ~ *plow;
5173 *phigh = ~ *phigh;
5174 add128(plow, phigh, 1, 0);
5175}
5176
5177/* return TRUE if overflow */
5178static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5179{
5180 uint64_t q, r, a1, a0;
5181 int i, qb, ab;
5182
5183 a0 = *plow;
5184 a1 = *phigh;
5185 if (a1 == 0) {
5186 q = a0 / b;
5187 r = a0 % b;
5188 *plow = q;
5189 *phigh = r;
5190 } else {
5191 if (a1 >= b)
5192 return 1;
5193 /* XXX: use a better algorithm */
5194 for(i = 0; i < 64; i++) {
5195 ab = a1 >> 63;
5196 a1 = (a1 << 1) | (a0 >> 63);
5197 if (ab || a1 >= b) {
5198 a1 -= b;
5199 qb = 1;
5200 } else {
5201 qb = 0;
5202 }
5203 a0 = (a0 << 1) | qb;
5204 }
5205#if defined(DEBUG_MULDIV)
5206 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5207 *phigh, *plow, b, a0, a1);
5208#endif
5209 *plow = a0;
5210 *phigh = a1;
5211 }
5212 return 0;
5213}
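
/* Usage sketch (illustrative only): div64() divides the 128-bit value in
 * *phigh:*plow by b, leaving the quotient in *plow and the remainder in *phigh;
 * a non-zero return means the quotient does not fit in 64 bits. */
#if 0
static void example_div64(void)
{
    uint64_t lo = 0, hi = 1;            /* the 128-bit value 2^64 */
    if (div64(&lo, &hi, 16) == 0) {
        /* lo == 1ULL << 60 (the quotient), hi == 0 (the remainder) */
    }
}
#endif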
5214
5215/* return TRUE if overflow */
5216static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5217{
5218 int sa, sb;
5219 sa = ((int64_t)*phigh < 0);
5220 if (sa)
5221 neg128(plow, phigh);
5222 sb = (b < 0);
5223 if (sb)
5224 b = -b;
5225 if (div64(plow, phigh, b) != 0)
5226 return 1;
5227 if (sa ^ sb) {
5228 if (*plow > (1ULL << 63))
5229 return 1;
5230 *plow = - *plow;
5231 } else {
5232 if (*plow >= (1ULL << 63))
5233 return 1;
5234 }
5235 if (sa)
5236 *phigh = - *phigh;
5237 return 0;
5238}
5239
5240void helper_mulq_EAX_T0(target_ulong t0)
5241{
5242 uint64_t r0, r1;
5243
5244 mulu64(&r0, &r1, EAX, t0);
5245 EAX = r0;
5246 EDX = r1;
5247 CC_DST = r0;
5248 CC_SRC = r1;
5249}
5250
5251void helper_imulq_EAX_T0(target_ulong t0)
5252{
5253 uint64_t r0, r1;
5254
5255 muls64(&r0, &r1, EAX, t0);
5256 EAX = r0;
5257 EDX = r1;
5258 CC_DST = r0;
5259 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5260}
5261
5262target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5263{
5264 uint64_t r0, r1;
5265
5266 muls64(&r0, &r1, t0, t1);
5267 CC_DST = r0;
5268 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5269 return r0;
5270}
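
/* The CC_SRC value computed above feeds the overflow/carry logic: the signed
 * 128-bit product overflows 64 bits exactly when the high half is not the sign
 * extension of the low half.  E.g. 0x100000000 * 0x100000000 gives r0 == 0 and
 * r1 == 1, so CC_SRC becomes non-zero and OF/CF are reported as set. */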
5271
5272void helper_divq_EAX(target_ulong t0)
5273{
5274 uint64_t r0, r1;
5275 if (t0 == 0) {
5276 raise_exception(EXCP00_DIVZ);
5277 }
5278 r0 = EAX;
5279 r1 = EDX;
5280 if (div64(&r0, &r1, t0))
5281 raise_exception(EXCP00_DIVZ);
5282 EAX = r0;
5283 EDX = r1;
5284}
5285
5286void helper_idivq_EAX(target_ulong t0)
5287{
5288 uint64_t r0, r1;
5289 if (t0 == 0) {
5290 raise_exception(EXCP00_DIVZ);
5291 }
5292 r0 = EAX;
5293 r1 = EDX;
5294 if (idiv64(&r0, &r1, t0))
5295 raise_exception(EXCP00_DIVZ);
5296 EAX = r0;
5297 EDX = r1;
5298}
5299#endif
5300
5301static void do_hlt(void)
5302{
5303 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5304 env->halted = 1;
5305 env->exception_index = EXCP_HLT;
5306 cpu_loop_exit();
5307}
5308
5309void helper_hlt(int next_eip_addend)
5310{
5311 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5312 EIP += next_eip_addend;
5313
5314 do_hlt();
5315}
5316
5317void helper_monitor(target_ulong ptr)
5318{
5319 if ((uint32_t)ECX != 0)
5320 raise_exception(EXCP0D_GPF);
5321 /* XXX: store address ? */
5322 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5323}
5324
5325void helper_mwait(int next_eip_addend)
5326{
5327 if ((uint32_t)ECX != 0)
5328 raise_exception(EXCP0D_GPF);
5329#ifdef VBOX
5330 helper_hlt(next_eip_addend);
5331#else
5332 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5333 EIP += next_eip_addend;
5334
5335 /* XXX: not complete but not completely erroneous */
5336 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5337 /* more than one CPU: do not sleep because another CPU may
5338 wake this one */
5339 } else {
5340 do_hlt();
5341 }
5342#endif
5343}
5344
5345void helper_debug(void)
5346{
5347 env->exception_index = EXCP_DEBUG;
5348 cpu_loop_exit();
5349}
5350
5351void helper_raise_interrupt(int intno, int next_eip_addend)
5352{
5353 raise_interrupt(intno, 1, 0, next_eip_addend);
5354}
5355
5356void helper_raise_exception(int exception_index)
5357{
5358 raise_exception(exception_index);
5359}
5360
5361void helper_cli(void)
5362{
5363 env->eflags &= ~IF_MASK;
5364}
5365
5366void helper_sti(void)
5367{
5368 env->eflags |= IF_MASK;
5369}
5370
5371#ifdef VBOX
5372void helper_cli_vme(void)
5373{
5374 env->eflags &= ~VIF_MASK;
5375}
5376
5377void helper_sti_vme(void)
5378{
5379 /* First check, then change eflags according to the AMD manual */
5380 if (env->eflags & VIP_MASK) {
5381 raise_exception(EXCP0D_GPF);
5382 }
5383 env->eflags |= VIF_MASK;
5384}
5385#endif
5386
5387#if 0
5388/* vm86plus instructions */
5389void helper_cli_vm(void)
5390{
5391 env->eflags &= ~VIF_MASK;
5392}
5393
5394void helper_sti_vm(void)
5395{
5396 env->eflags |= VIF_MASK;
5397 if (env->eflags & VIP_MASK) {
5398 raise_exception(EXCP0D_GPF);
5399 }
5400}
5401#endif
5402
5403void helper_set_inhibit_irq(void)
5404{
5405 env->hflags |= HF_INHIBIT_IRQ_MASK;
5406}
5407
5408void helper_reset_inhibit_irq(void)
5409{
5410 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5411}
5412
5413void helper_boundw(target_ulong a0, int v)
5414{
5415 int low, high;
5416 low = ldsw(a0);
5417 high = ldsw(a0 + 2);
5418 v = (int16_t)v;
5419 if (v < low || v > high) {
5420 raise_exception(EXCP05_BOUND);
5421 }
5422 FORCE_RET();
5423}
5424
5425void helper_boundl(target_ulong a0, int v)
5426{
5427 int low, high;
5428 low = ldl(a0);
5429 high = ldl(a0 + 4);
5430 if (v < low || v > high) {
5431 raise_exception(EXCP05_BOUND);
5432 }
5433 FORCE_RET();
5434}
5435
5436static float approx_rsqrt(float a)
5437{
5438 return 1.0 / sqrt(a);
5439}
5440
5441static float approx_rcp(float a)
5442{
5443 return 1.0 / a;
5444}
5445
5446#if !defined(CONFIG_USER_ONLY)
5447
5448#define MMUSUFFIX _mmu
5449
5450#define SHIFT 0
5451#include "softmmu_template.h"
5452
5453#define SHIFT 1
5454#include "softmmu_template.h"
5455
5456#define SHIFT 2
5457#include "softmmu_template.h"
5458
5459#define SHIFT 3
5460#include "softmmu_template.h"
5461
5462#endif
5463
5464#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5465/* This code assumes that real physical addresses always fit into a host CPU
5466   register, which is wrong in general but true for our current use cases. */
5467RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5468{
5469 return remR3PhysReadS8(addr);
5470}
5471RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5472{
5473 return remR3PhysReadU8(addr);
5474}
5475void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5476{
5477 remR3PhysWriteU8(addr, val);
5478}
5479RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5480{
5481 return remR3PhysReadS16(addr);
5482}
5483RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5484{
5485 return remR3PhysReadU16(addr);
5486}
5487void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5488{
5489 remR3PhysWriteU16(addr, val);
5490}
5491RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5492{
5493 return remR3PhysReadS32(addr);
5494}
5495RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5496{
5497 return remR3PhysReadU32(addr);
5498}
5499void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5500{
5501 remR3PhysWriteU32(addr, val);
5502}
5503uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5504{
5505 return remR3PhysReadU64(addr);
5506}
5507void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5508{
5509 remR3PhysWriteU64(addr, val);
5510}
5511#endif
5512
5513/* try to fill the TLB and raise an exception on error. If retaddr is
5514   NULL, it means that the function was called from C code (i.e. not
5515   from generated code or from helper.c) */
5516/* XXX: fix it to restore all registers */
5517void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5518{
5519 TranslationBlock *tb;
5520 int ret;
5521 unsigned long pc;
5522 CPUX86State *saved_env;
5523
5524 /* XXX: hack to restore env in all cases, even if not called from
5525 generated code */
5526 saved_env = env;
5527 env = cpu_single_env;
5528
5529 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5530 if (ret) {
5531 if (retaddr) {
5532 /* now we have a real cpu fault */
5533 pc = (unsigned long)retaddr;
5534 tb = tb_find_pc(pc);
5535 if (tb) {
5536 /* the PC is inside the translated code. It means that we have
5537 a virtual CPU fault */
5538 cpu_restore_state(tb, env, pc, NULL);
5539 }
5540 }
5541 raise_exception_err(env->exception_index, env->error_code);
5542 }
5543 env = saved_env;
5544}
5545
5546#ifdef VBOX
5547
5548/**
5549 * Correctly computes the eflags.
5550 * @returns eflags.
5551 * @param env1 CPU environment.
5552 */
5553uint32_t raw_compute_eflags(CPUX86State *env1)
5554{
5555 CPUX86State *savedenv = env;
5556 uint32_t efl;
5557 env = env1;
5558 efl = compute_eflags();
5559 env = savedenv;
5560 return efl;
5561}
5562
5563/**
5564 * Reads a byte from a virtual address in the guest memory area.
5565 * XXX: does it work for all addresses? swapped out pages?
5566 * @returns The data byte read.
5567 * @param env1 CPU environment.
5568 * @param pvAddr GC Virtual address.
5569 */
5570uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5571{
5572 CPUX86State *savedenv = env;
5573 uint8_t u8;
5574 env = env1;
5575 u8 = ldub_kernel(addr);
5576 env = savedenv;
5577 return u8;
5578}
5579
5580/**
5581 * Reads a word (16 bits) from a virtual address in the guest memory area.
5582 * XXX: does it work for all addresses? swapped out pages?
5583 * @returns The data word read.
5584 * @param env1 CPU environment.
5585 * @param pvAddr GC Virtual address.
5586 */
5587uint16_t read_word(CPUX86State *env1, target_ulong addr)
5588{
5589 CPUX86State *savedenv = env;
5590 uint16_t u16;
5591 env = env1;
5592 u16 = lduw_kernel(addr);
5593 env = savedenv;
5594 return u16;
5595}
5596
5597/**
5598 * Reads a dword (32 bits) from a virtual address in the guest memory area.
5599 * XXX: does it work for all addresses? swapped out pages?
5600 * @returns The data dword read.
5601 * @param env1 CPU environment.
5602 * @param pvAddr GC Virtual address.
5603 */
5604uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5605{
5606 CPUX86State *savedenv = env;
5607 uint32_t u32;
5608 env = env1;
5609 u32 = ldl_kernel(addr);
5610 env = savedenv;
5611 return u32;
5612}
5613
5614/**
5615 * Writes a byte to a virtual address in the guest memory area.
5616 * XXX: does it work for all addresses? swapped out pages?
5618 * @param env1 CPU environment.
5619 * @param pvAddr GC Virtual address.
5620 * @param val byte value
5621 */
5622void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5623{
5624 CPUX86State *savedenv = env;
5625 env = env1;
5626 stb(addr, val);
5627 env = savedenv;
5628}
5629
5630void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5631{
5632 CPUX86State *savedenv = env;
5633 env = env1;
5634 stw(addr, val);
5635 env = savedenv;
5636}
5637
5638void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5639{
5640 CPUX86State *savedenv = env;
5641 env = env1;
5642 stl(addr, val);
5643 env = savedenv;
5644}
5645
5646/**
5647 * Correctly loads selector into segment register with updating internal
5648 * qemu data/caches.
5649 * @param env1 CPU environment.
5650 * @param seg_reg Segment register.
5651 * @param selector Selector to load.
5652 */
5653void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5654{
5655 CPUX86State *savedenv = env;
5656 jmp_buf old_buf;
5657
5658 env = env1;
5659
5660 if ( env->eflags & X86_EFL_VM
5661 || !(env->cr[0] & X86_CR0_PE))
5662 {
5663 load_seg_vm(seg_reg, selector);
5664
5665 env = savedenv;
5666
5667 /* Successful sync. */
5668 env1->segs[seg_reg].newselector = 0;
5669 }
5670 else
5671 {
5672        /* For some reason it works even without saving/restoring the jump buffer, and
5673           since this code is time critical, let's not do that. */
5674#if 0
5675 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5676#endif
5677 if (setjmp(env1->jmp_env) == 0)
5678 {
5679 if (seg_reg == R_CS)
5680 {
5681 uint32_t e1, e2;
5682 e1 = e2 = 0;
5683 load_segment(&e1, &e2, selector);
5684 cpu_x86_load_seg_cache(env, R_CS, selector,
5685 get_seg_base(e1, e2),
5686 get_seg_limit(e1, e2),
5687 e2);
5688 }
5689 else
5690 tss_load_seg(seg_reg, selector);
5691 env = savedenv;
5692
5693 /* Successful sync. */
5694 env1->segs[seg_reg].newselector = 0;
5695 }
5696 else
5697 {
5698 env = savedenv;
5699
5700 /* Postpone sync until the guest uses the selector. */
5701 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5702 env1->segs[seg_reg].newselector = selector;
5703 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5704 env1->exception_index = -1;
5705 env1->error_code = 0;
5706 env1->old_exception = -1;
5707 }
5708#if 0
5709 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5710#endif
5711 }
5712
5713}
5714
5715DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5716{
5717 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5718}
5719
5720
5721int emulate_single_instr(CPUX86State *env1)
5722{
5723 TranslationBlock *tb;
5724 TranslationBlock *current;
5725 int flags;
5726 uint8_t *tc_ptr;
5727 target_ulong old_eip;
5728
5729 /* ensures env is loaded! */
5730 CPUX86State *savedenv = env;
5731 env = env1;
5732
5733 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5734
5735 current = env->current_tb;
5736 env->current_tb = NULL;
5737 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5738
5739 /*
5740 * Translate only one instruction.
5741 */
5742 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5743 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5744 env->segs[R_CS].base, flags, 0);
5745
5746 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5747
5748
5749 /* tb_link_phys: */
5750 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5751 tb->jmp_next[0] = NULL;
5752 tb->jmp_next[1] = NULL;
5753 Assert(tb->jmp_next[0] == NULL);
5754 Assert(tb->jmp_next[1] == NULL);
5755 if (tb->tb_next_offset[0] != 0xffff)
5756 tb_reset_jump(tb, 0);
5757 if (tb->tb_next_offset[1] != 0xffff)
5758 tb_reset_jump(tb, 1);
5759
5760 /*
5761 * Execute it using emulation
5762 */
5763 old_eip = env->eip;
5764 env->current_tb = tb;
5765
5766 /*
5767     * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
5768     * Perhaps not a very safe hack.
5769 */
5770 while(old_eip == env->eip)
5771 {
5772 tc_ptr = tb->tc_ptr;
5773
5774#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5775 int fake_ret;
5776 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5777#else
5778 tcg_qemu_tb_exec(tc_ptr);
5779#endif
5780 /*
5781 * Exit once we detect an external interrupt and interrupts are enabled
5782 */
5783 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5784 ( (env->eflags & IF_MASK) &&
5785 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5786 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5787 {
5788 break;
5789 }
5790 }
5791 env->current_tb = current;
5792
5793 tb_phys_invalidate(tb, -1);
5794 tb_free(tb);
5795/*
5796 Assert(tb->tb_next_offset[0] == 0xffff);
5797 Assert(tb->tb_next_offset[1] == 0xffff);
5798 Assert(tb->tb_next[0] == 0xffff);
5799 Assert(tb->tb_next[1] == 0xffff);
5800 Assert(tb->jmp_next[0] == NULL);
5801 Assert(tb->jmp_next[1] == NULL);
5802 Assert(tb->jmp_first == NULL); */
5803
5804 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5805
5806 /*
5807 * Execute the next instruction when we encounter instruction fusing.
5808 */
5809 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5810 {
5811 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5812 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5813 emulate_single_instr(env);
5814 }
5815
5816 env = savedenv;
5817 return 0;
5818}
5819
5820/**
5821 * Correctly loads a new ldtr selector.
5822 *
5823 * @param env1 CPU environment.
5824 * @param selector Selector to load.
5825 */
5826void sync_ldtr(CPUX86State *env1, int selector)
5827{
5828 CPUX86State *saved_env = env;
5829 if (setjmp(env1->jmp_env) == 0)
5830 {
5831 env = env1;
5832 helper_lldt(selector);
5833 env = saved_env;
5834 }
5835 else
5836 {
5837 env = saved_env;
5838#ifdef VBOX_STRICT
5839 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5840#endif
5841 }
5842}
5843
5844/**
5845 * Correctly loads a new tr selector.
5846 *
5847 * @param env1 CPU environment.
5848 * @param selector Selector to load.
5849 */
5850int sync_tr(CPUX86State *env1, int selector)
5851{
5852 /* ARG! this was going to call helper_ltr_T0 but that won't work because of busy flag. */
5853 SegmentCache *dt;
5854 uint32_t e1, e2;
5855 int index, type, entry_limit;
5856 target_ulong ptr;
5857 CPUX86State *saved_env = env;
5858 env = env1;
5859
5860 selector &= 0xffff;
5861 if ((selector & 0xfffc) == 0) {
5862 /* NULL selector case: invalid TR */
5863 env->tr.base = 0;
5864 env->tr.limit = 0;
5865 env->tr.flags = 0;
5866 } else {
5867 if (selector & 0x4)
5868 goto l_failure;
5869 dt = &env->gdt;
5870 index = selector & ~7;
5871#ifdef TARGET_X86_64
5872 if (env->hflags & HF_LMA_MASK)
5873 entry_limit = 15;
5874 else
5875#endif
5876 entry_limit = 7;
5877 if ((index + entry_limit) > dt->limit)
5878 goto l_failure;
5879 ptr = dt->base + index;
5880 e1 = ldl_kernel(ptr);
5881 e2 = ldl_kernel(ptr + 4);
5882 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
5883 if ((e2 & DESC_S_MASK) /*||
5884 (type != 1 && type != 9)*/)
5885 goto l_failure;
5886 if (!(e2 & DESC_P_MASK))
5887 goto l_failure;
5888#ifdef TARGET_X86_64
5889 if (env->hflags & HF_LMA_MASK) {
5890 uint32_t e3;
5891 e3 = ldl_kernel(ptr + 8);
5892 load_seg_cache_raw_dt(&env->tr, e1, e2);
5893 env->tr.base |= (target_ulong)e3 << 32;
5894 } else
5895#endif
5896 {
5897 load_seg_cache_raw_dt(&env->tr, e1, e2);
5898 }
5899 e2 |= DESC_TSS_BUSY_MASK;
5900 stl_kernel(ptr + 4, e2);
5901 }
5902 env->tr.selector = selector;
5903
5904 env = saved_env;
5905 return 0;
5906l_failure:
5907    AssertMsgFailed(("selector=%d\n", selector));
    env = saved_env; /* restore the global env on the failure path as well */
5908    return -1;
5909}
5910
5911
5912int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5913 uint32_t *esp_ptr, int dpl)
5914{
5915 int type, index, shift;
5916
5917 CPUX86State *savedenv = env;
5918 env = env1;
5919
5920 if (!(env->tr.flags & DESC_P_MASK))
5921 cpu_abort(env, "invalid tss");
5922 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5923 if ((type & 7) != 1)
5924 cpu_abort(env, "invalid tss type %d", type);
5925 shift = type >> 3;
5926 index = (dpl * 4 + 2) << shift;
5927 if (index + (4 << shift) - 1 > env->tr.limit)
5928 {
5929 env = savedenv;
5930 return 0;
5931 }
5932 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5933
5934 if (shift == 0) {
5935 *esp_ptr = lduw_kernel(env->tr.base + index);
5936 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5937 } else {
5938 *esp_ptr = ldl_kernel(env->tr.base + index);
5939 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5940 }
5941
5942 env = savedenv;
5943 return 1;
5944}
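
/* For a 32-bit TSS (type 9/11, shift == 1) the ring-N stack pointer pair lives
 * at offset (dpl * 8) + 4: ESP0 at 4, SS0 at 8, ESP1 at 12, ...; for a 16-bit
 * TSS (shift == 0) it is at (dpl * 4) + 2 with 16-bit fields.  E.g. dpl == 0
 * with a 32-bit TSS reads ESP0 from tr.base + 4 and SS0 from tr.base + 8,
 * matching the index computation above. */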
5945
5946//*****************************************************************************
5947// Needs to be at the bottom of the file (overriding macros)
5948
5949#ifndef VBOX
5950static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5951#else /* VBOX */
5952DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5953#endif /* VBOX */
5954{
5955 return *(CPU86_LDouble *)ptr;
5956}
5957
5958#ifndef VBOX
5959static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5960#else /* VBOX */
5961DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5962#endif /* VBOX */
5963{
5964 *(CPU86_LDouble *)ptr = f;
5965}
5966
5967#undef stw
5968#undef stl
5969#undef stq
5970#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5971#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5972#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5973#define data64 0
5974
5975//*****************************************************************************
5976void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5977{
5978 int fpus, fptag, i, nb_xmm_regs;
5979 CPU86_LDouble tmp;
5980 uint8_t *addr;
5981
5982 if (env->cpuid_features & CPUID_FXSR)
5983 {
5984 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5985 fptag = 0;
5986 for(i = 0; i < 8; i++) {
5987 fptag |= (env->fptags[i] << i);
5988 }
5989 stw(ptr, env->fpuc);
5990 stw(ptr + 2, fpus);
5991 stw(ptr + 4, fptag ^ 0xff);
5992
5993 addr = ptr + 0x20;
5994 for(i = 0;i < 8; i++) {
5995 tmp = ST(i);
5996 helper_fstt_raw(tmp, addr);
5997 addr += 16;
5998 }
5999
6000 if (env->cr[4] & CR4_OSFXSR_MASK) {
6001 /* XXX: finish it */
6002 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6003 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6004 nb_xmm_regs = 8 << data64;
6005 addr = ptr + 0xa0;
6006 for(i = 0; i < nb_xmm_regs; i++) {
6007#if __GNUC__ < 4
6008 stq(addr, env->xmm_regs[i].XMM_Q(0));
6009 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6010#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6011 stl(addr, env->xmm_regs[i].XMM_L(0));
6012 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6013 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6014 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6015#endif
6016 addr += 16;
6017 }
6018 }
6019 }
6020 else
6021 {
6022 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6023 int fptag;
6024
6025 fp->FCW = env->fpuc;
6026 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6027 fptag = 0;
6028 for (i=7; i>=0; i--) {
6029 fptag <<= 2;
6030 if (env->fptags[i]) {
6031 fptag |= 3;
6032 } else {
6033 /* the FPU automatically computes it */
6034 }
6035 }
6036 fp->FTW = fptag;
6037
6038 for(i = 0;i < 8; i++) {
6039 tmp = ST(i);
6040 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6041 }
6042 }
6043}
6044
6045//*****************************************************************************
6046#undef lduw
6047#undef ldl
6048#undef ldq
6049#define lduw(a) *(uint16_t *)(a)
6050#define ldl(a) *(uint32_t *)(a)
6051#define ldq(a) *(uint64_t *)(a)
6052//*****************************************************************************
6053void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6054{
6055 int i, fpus, fptag, nb_xmm_regs;
6056 CPU86_LDouble tmp;
6057 uint8_t *addr;
6058
6059 if (env->cpuid_features & CPUID_FXSR)
6060 {
6061 env->fpuc = lduw(ptr);
6062 fpus = lduw(ptr + 2);
6063 fptag = lduw(ptr + 4);
6064 env->fpstt = (fpus >> 11) & 7;
6065 env->fpus = fpus & ~0x3800;
6066 fptag ^= 0xff;
6067 for(i = 0;i < 8; i++) {
6068 env->fptags[i] = ((fptag >> i) & 1);
6069 }
6070
6071 addr = ptr + 0x20;
6072 for(i = 0;i < 8; i++) {
6073 tmp = helper_fldt_raw(addr);
6074 ST(i) = tmp;
6075 addr += 16;
6076 }
6077
6078 if (env->cr[4] & CR4_OSFXSR_MASK) {
6079 /* XXX: finish it, endianness */
6080 env->mxcsr = ldl(ptr + 0x18);
6081 //ldl(ptr + 0x1c);
6082 nb_xmm_regs = 8 << data64;
6083 addr = ptr + 0xa0;
6084 for(i = 0; i < nb_xmm_regs; i++) {
6085#if HC_ARCH_BITS == 32
6086 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6087 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6088 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6089 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6090 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6091#else
6092 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6093 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6094#endif
6095 addr += 16;
6096 }
6097 }
6098 }
6099 else
6100 {
6101 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6102 int fptag, j;
6103
6104 env->fpuc = fp->FCW;
6105 env->fpstt = (fp->FSW >> 11) & 7;
6106 env->fpus = fp->FSW & ~0x3800;
6107 fptag = fp->FTW;
6108 for(i = 0;i < 8; i++) {
6109 env->fptags[i] = ((fptag & 3) == 3);
6110 fptag >>= 2;
6111 }
6112 j = env->fpstt;
6113 for(i = 0;i < 8; i++) {
6114 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6115 ST(i) = tmp;
6116 }
6117 }
6118}
6119//*****************************************************************************
6120//*****************************************************************************
6121
6122#endif /* VBOX */
6123
6124/* Secure Virtual Machine helpers */
6125
6126#if defined(CONFIG_USER_ONLY)
6127
6128void helper_vmrun(int aflag, int next_eip_addend)
6129{
6130}
6131void helper_vmmcall(void)
6132{
6133}
6134void helper_vmload(int aflag)
6135{
6136}
6137void helper_vmsave(int aflag)
6138{
6139}
6140void helper_stgi(void)
6141{
6142}
6143void helper_clgi(void)
6144{
6145}
6146void helper_skinit(void)
6147{
6148}
6149void helper_invlpga(int aflag)
6150{
6151}
6152void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6153{
6154}
6155void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6156{
6157}
6158
6159void helper_svm_check_io(uint32_t port, uint32_t param,
6160 uint32_t next_eip_addend)
6161{
6162}
6163#else
6164
6165#ifndef VBOX
6166static inline void svm_save_seg(target_phys_addr_t addr,
6167#else /* VBOX */
6168DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6169#endif /* VBOX */
6170 const SegmentCache *sc)
6171{
6172 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6173 sc->selector);
6174 stq_phys(addr + offsetof(struct vmcb_seg, base),
6175 sc->base);
6176 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6177 sc->limit);
6178 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6179 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6180}
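
/* The VMCB keeps segment attributes in a compressed 12-bit form: bits 0-7 are
 * descriptor bits 40-47 (type, S, DPL, P) and bits 8-11 are descriptor bits
 * 52-55 (AVL, L, D/B, G), which is what the two shifts above extract from the
 * cached e2-style flags.  svm_load_seg() below performs the inverse unpacking. */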
6181
6182#ifndef VBOX
6183static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6184#else /* VBOX */
6185DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6186#endif /* VBOX */
6187{
6188 unsigned int flags;
6189
6190 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6191 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6192 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6193 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6194 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6195}
6196
6197#ifndef VBOX
6198static inline void svm_load_seg_cache(target_phys_addr_t addr,
6199#else /* VBOX */
6200DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6201#endif /* VBOX */
6202 CPUState *env, int seg_reg)
6203{
6204 SegmentCache sc1, *sc = &sc1;
6205 svm_load_seg(addr, sc);
6206 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6207 sc->base, sc->limit, sc->flags);
6208}
6209
6210void helper_vmrun(int aflag, int next_eip_addend)
6211{
6212 target_ulong addr;
6213 uint32_t event_inj;
6214 uint32_t int_ctl;
6215
6216 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6217
6218 if (aflag == 2)
6219 addr = EAX;
6220 else
6221 addr = (uint32_t)EAX;
6222
6223 if (loglevel & CPU_LOG_TB_IN_ASM)
6224 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6225
6226 env->vm_vmcb = addr;
6227
6228 /* save the current CPU state in the hsave page */
6229 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6230 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6231
6232 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6233 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6234
6235 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6236 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6237 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6238 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6239 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6240 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6241
6242 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6243 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6244
6245 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6246 &env->segs[R_ES]);
6247 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6248 &env->segs[R_CS]);
6249 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6250 &env->segs[R_SS]);
6251 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6252 &env->segs[R_DS]);
6253
6254 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6255 EIP + next_eip_addend);
6256 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6257 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6258
6259 /* load the interception bitmaps so we do not need to access the
6260 vmcb in svm mode */
6261 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6262 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6263 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6264 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6265 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6266 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6267
6268 /* enable intercepts */
6269 env->hflags |= HF_SVMI_MASK;
6270
6271 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6272
6273 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6274 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6275
6276 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6277 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6278
6279 /* clear exit_info_2 so we behave like the real hardware */
6280 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6281
6282 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6283 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6284 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6285 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6286 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6287 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6288 if (int_ctl & V_INTR_MASKING_MASK) {
6289 env->v_tpr = int_ctl & V_TPR_MASK;
6290 env->hflags2 |= HF2_VINTR_MASK;
6291 if (env->eflags & IF_MASK)
6292 env->hflags2 |= HF2_HIF_MASK;
6293 }
6294
6295 cpu_load_efer(env,
6296 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6297 env->eflags = 0;
6298 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6299 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6300 CC_OP = CC_OP_EFLAGS;
6301
6302 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6303 env, R_ES);
6304 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6305 env, R_CS);
6306 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6307 env, R_SS);
6308 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6309 env, R_DS);
6310
6311 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6312 env->eip = EIP;
6313 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6314 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6315 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6316 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6317 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6318
6319 /* FIXME: guest state consistency checks */
6320
6321 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6322 case TLB_CONTROL_DO_NOTHING:
6323 break;
6324 case TLB_CONTROL_FLUSH_ALL_ASID:
6325 /* FIXME: this is not 100% correct but should work for now */
6326 tlb_flush(env, 1);
6327 break;
6328 }
6329
6330 env->hflags2 |= HF2_GIF_MASK;
6331
6332 if (int_ctl & V_IRQ_MASK) {
6333 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6334 }
6335
6336 /* maybe we need to inject an event */
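/* EVENTINJ layout (per the AMD SVM spec): bits 7:0 hold the vector,
   bits 10:8 the event type (INTR, NMI, exception or software interrupt),
   bit 11 whether an error code is valid, bit 31 the valid flag; the error
   code itself is read from the separate event_inj_err field here. */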
6337 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6338 if (event_inj & SVM_EVTINJ_VALID) {
6339 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6340 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6341 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6342 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6343
6344 if (loglevel & CPU_LOG_TB_IN_ASM)
6345 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6346 /* FIXME: need to implement valid_err */
6347 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6348 case SVM_EVTINJ_TYPE_INTR:
6349 env->exception_index = vector;
6350 env->error_code = event_inj_err;
6351 env->exception_is_int = 0;
6352 env->exception_next_eip = -1;
6353 if (loglevel & CPU_LOG_TB_IN_ASM)
6354 fprintf(logfile, "INTR");
6355 /* XXX: is it always correct? */
6356 do_interrupt(vector, 0, 0, 0, 1);
6357 break;
6358 case SVM_EVTINJ_TYPE_NMI:
6359 env->exception_index = EXCP02_NMI;
6360 env->error_code = event_inj_err;
6361 env->exception_is_int = 0;
6362 env->exception_next_eip = EIP;
6363 if (loglevel & CPU_LOG_TB_IN_ASM)
6364 fprintf(logfile, "NMI");
6365 cpu_loop_exit();
6366 break;
6367 case SVM_EVTINJ_TYPE_EXEPT:
6368 env->exception_index = vector;
6369 env->error_code = event_inj_err;
6370 env->exception_is_int = 0;
6371 env->exception_next_eip = -1;
6372 if (loglevel & CPU_LOG_TB_IN_ASM)
6373 fprintf(logfile, "EXEPT");
6374 cpu_loop_exit();
6375 break;
6376 case SVM_EVTINJ_TYPE_SOFT:
6377 env->exception_index = vector;
6378 env->error_code = event_inj_err;
6379 env->exception_is_int = 1;
6380 env->exception_next_eip = EIP;
6381 if (loglevel & CPU_LOG_TB_IN_ASM)
6382 fprintf(logfile, "SOFT");
6383 cpu_loop_exit();
6384 break;
6385 }
6386 if (loglevel & CPU_LOG_TB_IN_ASM)
6387 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6388 }
6389}
6390
6391void helper_vmmcall(void)
6392{
6393 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6394 raise_exception(EXCP06_ILLOP);
6395}
6396
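/* VMLOAD: load the state that VMRUN does not switch (FS, GS, TR, LDTR,
   the KernelGSbase/STAR/LSTAR/CSTAR/SFMASK and SYSENTER MSRs) from the
   VMCB whose physical address is in rAX (EAX outside 64-bit mode). */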
6397void helper_vmload(int aflag)
6398{
6399 target_ulong addr;
6400 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6401
6402 if (aflag == 2)
6403 addr = EAX;
6404 else
6405 addr = (uint32_t)EAX;
6406
6407 if (loglevel & CPU_LOG_TB_IN_ASM)
6408 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6409 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6410 env->segs[R_FS].base);
6411
6412 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6413 env, R_FS);
6414 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6415 env, R_GS);
6416 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6417 &env->tr);
6418 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6419 &env->ldt);
6420
6421#ifdef TARGET_X86_64
6422 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6423 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6424 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6425 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6426#endif
6427 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6428 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6429 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6430 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6431}
6432
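/* VMSAVE: the inverse of VMLOAD above; store the same hidden segment and
   MSR state back into the VMCB addressed by rAX. */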
6433void helper_vmsave(int aflag)
6434{
6435 target_ulong addr;
6436 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6437
6438 if (aflag == 2)
6439 addr = EAX;
6440 else
6441 addr = (uint32_t)EAX;
6442
6443 if (loglevel & CPU_LOG_TB_IN_ASM)
6444 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6445 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6446 env->segs[R_FS].base);
6447
6448 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6449 &env->segs[R_FS]);
6450 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6451 &env->segs[R_GS]);
6452 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6453 &env->tr);
6454 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6455 &env->ldt);
6456
6457#ifdef TARGET_X86_64
6458 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6459 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6460 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6461 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6462#endif
6463 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6464 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6465 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6466 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6467}
6468
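/* STGI/CLGI set and clear the global interrupt flag (GIF), tracked here in
   HF2_GIF_MASK; while GIF is clear the CPU holds interrupts pending. */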
6469void helper_stgi(void)
6470{
6471 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6472 env->hflags2 |= HF2_GIF_MASK;
6473}
6474
6475void helper_clgi(void)
6476{
6477 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6478 env->hflags2 &= ~HF2_GIF_MASK;
6479}
6480
6481void helper_skinit(void)
6482{
6483 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6484 /* XXX: not implemented */
6485 raise_exception(EXCP06_ILLOP);
6486}
6487
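/* INVLPGA invalidates the TLB entry for the address in rAX; the ASID in
   ECX is ignored here and the page is simply flushed for the current
   context (see the XXX note below). */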
6488void helper_invlpga(int aflag)
6489{
6490 target_ulong addr;
6491 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6492
6493 if (aflag == 2)
6494 addr = EAX;
6495 else
6496 addr = (uint32_t)EAX;
6497
6498 /* XXX: could use the ASID to see whether the flush is actually
6499 needed */
6500 tlb_flush_page(env, addr);
6501}
6502
6503void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6504{
6505 if (likely(!(env->hflags & HF_SVMI_MASK)))
6506 return;
6507#ifndef VBOX
6508 switch(type) {
6509#ifndef VBOX
6510 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6511#else
6512 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6513 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6514 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6515#endif
6516 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6517 helper_vmexit(type, param);
6518 }
6519 break;
6520#ifndef VBOX
6521 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6522#else
6523 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6524 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6525 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6526#endif
6527 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6528 helper_vmexit(type, param);
6529 }
6530 break;
6531 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6532 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6533 helper_vmexit(type, param);
6534 }
6535 break;
6536 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6537 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6538 helper_vmexit(type, param);
6539 }
6540 break;
6541 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6542 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6543 helper_vmexit(type, param);
6544 }
6545 break;
6546 case SVM_EXIT_MSR:
6547 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6548 /* FIXME: this should be read in at vmrun (faster this way?) */
6549 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6550 uint32_t t0, t1;
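/* The MSR permission map uses 2 bits per MSR (read and write intercept)
   in three 2K regions covering MSRs 0..0x1fff, 0xc0000000..0xc0001fff and
   0xc0010000..0xc0011fff; t1 is the byte offset into the map and t0 the
   bit offset within that byte. For example, EFER (0xc0000080) lands at
   bit (8192 + 0x80) * 2 = 16640, i.e. byte 2080, bit 0. */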
6551 switch((uint32_t)ECX) {
6552 case 0 ... 0x1fff:
6553 t0 = (ECX * 2) % 8;
6554 t1 = (ECX * 2) / 8;
6555 break;
6556 case 0xc0000000 ... 0xc0001fff:
6557 t0 = (8192 + ECX - 0xc0000000) * 2;
6558 t1 = (t0 / 8);
6559 t0 %= 8;
6560 break;
6561 case 0xc0010000 ... 0xc0011fff:
6562 t0 = (16384 + ECX - 0xc0010000) * 2;
6563 t1 = (t0 / 8);
6564 t0 %= 8;
6565 break;
6566 default:
6567 helper_vmexit(type, param);
6568 t0 = 0;
6569 t1 = 0;
6570 break;
6571 }
6572 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6573 helper_vmexit(type, param);
6574 }
6575 break;
6576 default:
6577 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6578 helper_vmexit(type, param);
6579 }
6580 break;
6581 }
6582#else
6583 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6584#endif
6585}
6586
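/* The I/O permission map holds one bit per port; bits 6:4 of param carry
   the access size in bytes, so a multi-byte access checks that many
   consecutive port bits at once via the mask computed below. */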
6587void helper_svm_check_io(uint32_t port, uint32_t param,
6588 uint32_t next_eip_addend)
6589{
6590 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6591 /* FIXME: this should be read in at vmrun (faster this way?) */
6592 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6593 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6594 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6595 /* next EIP */
6596 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6597 env->eip + next_eip_addend);
6598 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6599 }
6600 }
6601}
6602
6603/* Note: currently only 32 bits of exit_code are used */
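/* #VMEXIT: write the current guest state and the exit code/info back into
   the VMCB, then restore the host state that vmrun saved in vm_hsave and
   return to the host with GIF cleared. */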
6604void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6605{
6606 uint32_t int_ctl;
6607
6608 if (loglevel & CPU_LOG_TB_IN_ASM)
6609 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6610 exit_code, exit_info_1,
6611 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6612 EIP);
6613
6614 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6615 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6616 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6617 } else {
6618 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6619 }
6620
6621 /* Save the VM state in the vmcb */
6622 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6623 &env->segs[R_ES]);
6624 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6625 &env->segs[R_CS]);
6626 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6627 &env->segs[R_SS]);
6628 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6629 &env->segs[R_DS]);
6630
6631 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6632 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6633
6634 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6635 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6636
6637 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6638 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6639 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6640 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6641 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6642
6643 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6644 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6645 int_ctl |= env->v_tpr & V_TPR_MASK;
6646 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6647 int_ctl |= V_IRQ_MASK;
6648 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6649
6650 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6651 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6652 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6653 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6654 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6655 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6656 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6657
6658 /* Reload the host state from vm_hsave */
6659 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6660 env->hflags &= ~HF_SVMI_MASK;
6661 env->intercept = 0;
6662 env->intercept_exceptions = 0;
6663 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6664 env->tsc_offset = 0;
6665
6666 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6667 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6668
6669 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6670 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6671
6672 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6673 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6674 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6675 /* we need to set the efer after the crs so the hidden flags get
6676 set properly */
6677 cpu_load_efer(env,
6678 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6679 env->eflags = 0;
6680 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6681 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6682 CC_OP = CC_OP_EFLAGS;
6683
6684 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6685 env, R_ES);
6686 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6687 env, R_CS);
6688 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6689 env, R_SS);
6690 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6691 env, R_DS);
6692
6693 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6694 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6695 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6696
6697 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6698 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6699
6700 /* other setups */
6701 cpu_x86_set_cpl(env, 0);
6702 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6703 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6704
6705 env->hflags2 &= ~HF2_GIF_MASK;
6706 /* FIXME: Resets the current ASID register to zero (host ASID). */
6707
6708 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6709
6710 /* Clears the TSC_OFFSET inside the processor. */
6711
6712 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6713 from the page table indicated by the host's CR3. If the PDPEs contain
6714 illegal state, the processor causes a shutdown. */
6715
6716 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6717 env->cr[0] |= CR0_PE_MASK;
6718 env->eflags &= ~VM_MASK;
6719
6720 /* Disables all breakpoints in the host DR7 register. */
6721
6722 /* Checks the reloaded host state for consistency. */
6723
6724 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6725 host's code segment or non-canonical (in the case of long mode), a
6726 #GP fault is delivered inside the host. */
6727
6728 /* remove any pending exception */
6729 env->exception_index = -1;
6730 env->error_code = 0;
6731 env->old_exception = -1;
6732
6733 cpu_loop_exit();
6734}
6735
6736#endif
6737
6738/* MMX/SSE */
6739/* XXX: optimize by storing fptt and fptags in the static cpu state */
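/* Entering MMX mode resets the FPU stack top and marks every tag word
   entry as valid (0); EMMS below marks them all empty (1) instead. */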
6740void helper_enter_mmx(void)
6741{
6742 env->fpstt = 0;
6743 *(uint32_t *)(env->fptags) = 0;
6744 *(uint32_t *)(env->fptags + 4) = 0;
6745}
6746
6747void helper_emms(void)
6748{
6749 /* set to empty state */
6750 *(uint32_t *)(env->fptags) = 0x01010101;
6751 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6752}
6753
6754/* XXX: suppress */
6755void helper_movq(uint64_t *d, uint64_t *s)
6756{
6757 *d = *s;
6758}
6759
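/* The SSE and generic helper templates are instantiated once per operand
   size: SHIFT selects MMX (64-bit) vs. SSE (128-bit) operands for
   ops_sse.h, and 8/16/32/64-bit operations for helper_template.h. */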
6760#define SHIFT 0
6761#include "ops_sse.h"
6762
6763#define SHIFT 1
6764#include "ops_sse.h"
6765
6766#define SHIFT 0
6767#include "helper_template.h"
6768#undef SHIFT
6769
6770#define SHIFT 1
6771#include "helper_template.h"
6772#undef SHIFT
6773
6774#define SHIFT 2
6775#include "helper_template.h"
6776#undef SHIFT
6777
6778#ifdef TARGET_X86_64
6779
6780#define SHIFT 3
6781#include "helper_template.h"
6782#undef SHIFT
6783
6784#endif
6785
6786/* bit operations */
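/* Note: the zero-input case is expected to be handled by the translator,
   which only emits calls to these helpers for a non-zero source operand,
   so the scan loops below terminate. */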
6787target_ulong helper_bsf(target_ulong t0)
6788{
6789 int count;
6790 target_ulong res;
6791
6792 res = t0;
6793 count = 0;
6794 while ((res & 1) == 0) {
6795 count++;
6796 res >>= 1;
6797 }
6798 return count;
6799}
6800
6801target_ulong helper_bsr(target_ulong t0)
6802{
6803 int count;
6804 target_ulong res, mask;
6805
6806 res = t0;
6807 count = TARGET_LONG_BITS - 1;
6808 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6809 while ((res & mask) == 0) {
6810 count--;
6811 res <<= 1;
6812 }
6813 return count;
6814}
6815
6816
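/* With CC_OP_EFLAGS the flags have already been materialized into CC_SRC,
   so computing "all" flags or just the carry is a plain read. */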
6817static int compute_all_eflags(void)
6818{
6819 return CC_SRC;
6820}
6821
6822static int compute_c_eflags(void)
6823{
6824 return CC_SRC & CC_C;
6825}
6826
6827#ifndef VBOX
6828CCTable cc_table[CC_OP_NB] = {
6829 [CC_OP_DYNAMIC] = { /* should never happen */ },
6830
6831 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6832
6833 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6834 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6835 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6836
6837 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6838 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6839 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6840
6841 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6842 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6843 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6844
6845 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6846 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6847 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6848
6849 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6850 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6851 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6852
6853 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6854 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6855 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6856
6857 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6858 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6859 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6860
6861 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6862 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6863 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6864
6865 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6866 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6867 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6868
6869 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6870 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6871 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6872
6873#ifdef TARGET_X86_64
6874 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6875
6876 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6877
6878 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6879
6880 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6881
6882 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6883
6884 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6885
6886 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6887
6888 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6889
6890 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6891
6892 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6893#endif
6894};
6895#else /* VBOX */
6896/* Sync carefully with cpu.h */
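/* Unlike the designated-initializer table above, entries here rely purely
   on their position, so the order (including the { 0, 0 } placeholders)
   must match the CC_OP_* enumeration in cpu.h exactly; presumably written
   this way for compilers without C99 designated initializers. */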
6897CCTable cc_table[CC_OP_NB] = {
6898 /* CC_OP_DYNAMIC */ { 0, 0 },
6899
6900 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6901
6902 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6903 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6904 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6905#ifdef TARGET_X86_64
6906 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6907#else
6908 /* CC_OP_MULQ */ { 0, 0 },
6909#endif
6910
6911 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6912 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6913 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6914#ifdef TARGET_X86_64
6915 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6916#else
6917 /* CC_OP_ADDQ */ { 0, 0 },
6918#endif
6919
6920 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6921 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6922 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6923#ifdef TARGET_X86_64
6924 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6925#else
6926 /* CC_OP_ADCQ */ { 0, 0 },
6927#endif
6928
6929 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6930 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6931 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6932#ifdef TARGET_X86_64
6933 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6934#else
6935 /* CC_OP_SUBQ */ { 0, 0 },
6936#endif
6937
6938 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6939 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6940 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6941#ifdef TARGET_X86_64
6942 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6943#else
6944 /* CC_OP_SBBQ */ { 0, 0 },
6945#endif
6946
6947 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6948 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6949 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6950#ifdef TARGET_X86_64
6951 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6952#else
6953 /* CC_OP_LOGICQ */ { 0, 0 },
6954#endif
6955
6956 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6957 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6958 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6959#ifdef TARGET_X86_64
6960 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6961#else
6962 /* CC_OP_INCQ */ { 0, 0 },
6963#endif
6964
6965 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6966 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6967 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6968#ifdef TARGET_X86_64
6969 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6970#else
6971 /* CC_OP_DECQ */ { 0, 0 },
6972#endif
6973
6974 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6975 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6976 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6977#ifdef TARGET_X86_64
6978 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6979#else
6980 /* CC_OP_SHLQ */ { 0, 0 },
6981#endif
6982
6983 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6984 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6985 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6986#ifdef TARGET_X86_64
6987 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6988#else
6989 /* CC_OP_SARQ */ { 0, 0 },
6990#endif
6991};
6992#endif /* VBOX */
6993