VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c @ 42482

Last change on this file since 42482 was 42482, checked in by vboxsync, 13 years ago

helper_rdmsr: Don't try to check if the value is the same for
MSR_IA32_TSC by re-reading it, as it *will* be different. Fixes bogus
and annoying assertion for debug builds.
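For context, here is a minimal illustrative sketch of the kind of check this change removes (hypothetical helper names, not the actual r42482 diff): a re-read assertion is sound for MSRs whose value is stable, but MSR_IA32_TSC is the time-stamp counter and advances between the two reads, so such an assertion always fires on debug builds.

    uint64_t val = read_msr(env, idx);      /* hypothetical MSR read helper */
#ifdef DEBUG
    if (idx != MSR_IA32_TSC)                /* skip volatile MSRs such as the TSC */
        Assert(read_msr(env, idx) == val);  /* re-read check; always fails for the TSC */
#endif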

  • Property svn:eol-style set to native
File size: 195.5 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
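/* PF lookup table: entry i is CC_P when byte value i has an even number of set bits (even parity), 0 otherwise. */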
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
95
96/* modulo 17 table: 16-bit rotate-through-carry counts are reduced modulo 17 (16 data bits + CF) */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table: 8-bit rotate-through-carry counts are reduced modulo 9 (8 data bits + CF) */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to the AMD manual, this should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205#endif /* VBOX */
206
207/* return non-zero on error */
208static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
209 int selector)
210{
211 SegmentCache *dt;
212 int index;
213 target_ulong ptr;
214
215#ifdef VBOX
216 /* Trying to load a selector with CPL=1? */
217 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
218 {
219 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
220 selector = selector & 0xfffc;
221 }
222#endif /* VBOX */
223
224 if (selector & 0x4)
225 dt = &env->ldt;
226 else
227 dt = &env->gdt;
228 index = selector & ~7;
229 if ((index + 7) > dt->limit)
230 return -1;
231 ptr = dt->base + index;
232 *e1_ptr = ldl_kernel(ptr);
233 *e2_ptr = ldl_kernel(ptr + 4);
234 return 0;
235}
236
237static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
238{
239 unsigned int limit;
240 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
241 if (e2 & DESC_G_MASK)
242 limit = (limit << 12) | 0xfff;
243 return limit;
244}
245
246static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
247{
248 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
249}
250
251static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
252{
253 sc->base = get_seg_base(e1, e2);
254 sc->limit = get_seg_limit(e1, e2);
255 sc->flags = e2;
256#ifdef VBOX
257 sc->newselector = 0;
258 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
259#endif
260}
261
262/* init the segment cache in vm86 mode. */
263static inline void load_seg_vm(int seg, int selector)
264{
265 selector &= 0xffff;
266#ifdef VBOX
267 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
268 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
269 flags |= (3 << DESC_DPL_SHIFT);
270
271 cpu_x86_load_seg_cache(env, seg, selector,
272 (selector << 4), 0xffff, flags);
273#else /* VBOX */
274 cpu_x86_load_seg_cache(env, seg, selector,
275 (selector << 4), 0xffff, 0);
276#endif /* VBOX */
277}
278
279static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
280 uint32_t *esp_ptr, int dpl)
281{
282#ifndef VBOX
283 int type, index, shift;
284#else
285 unsigned int type, index, shift;
286#endif
287
288#if 0
289 {
290 int i;
291 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
292 for(i=0;i<env->tr.limit;i++) {
293 printf("%02x ", env->tr.base[i]);
294 if ((i & 7) == 7) printf("\n");
295 }
296 printf("\n");
297 }
298#endif
299
300 if (!(env->tr.flags & DESC_P_MASK))
301 cpu_abort(env, "invalid tss");
302 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
303 if ((type & 7) != 1)
304 cpu_abort(env, "invalid tss type");
305 shift = type >> 3;
306 index = (dpl * 4 + 2) << shift;
307 if (index + (4 << shift) - 1 > env->tr.limit)
308 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
309 if (shift == 0) {
310 *esp_ptr = lduw_kernel(env->tr.base + index);
311 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
312 } else {
313 *esp_ptr = ldl_kernel(env->tr.base + index);
314 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
315 }
316}
317
318/* XXX: merge with load_seg() */
319static void tss_load_seg(int seg_reg, int selector)
320{
321 uint32_t e1, e2;
322 int rpl, dpl, cpl;
323
324#ifdef VBOX
325 e1 = e2 = 0; /* gcc warning? */
326 cpl = env->hflags & HF_CPL_MASK;
327 /* Trying to load a selector with CPL=1? */
328 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
329 {
330 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
331 selector = selector & 0xfffc;
332 }
333#endif /* VBOX */
334
335 if ((selector & 0xfffc) != 0) {
336 if (load_segment(&e1, &e2, selector) != 0)
337 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
338 if (!(e2 & DESC_S_MASK))
339 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
340 rpl = selector & 3;
341 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
342 cpl = env->hflags & HF_CPL_MASK;
343 if (seg_reg == R_CS) {
344 if (!(e2 & DESC_CS_MASK))
345 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
346 /* XXX: is it correct ? */
347 if (dpl != rpl)
348 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
349 if ((e2 & DESC_C_MASK) && dpl > rpl)
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 } else if (seg_reg == R_SS) {
352 /* SS must be writable data */
353 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 if (dpl != cpl || dpl != rpl)
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 } else {
358 /* not readable code */
359 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 /* if data or non-conforming code, check the rights */
362 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
363 if (dpl < cpl || dpl < rpl)
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 }
366 }
367 if (!(e2 & DESC_P_MASK))
368 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
369 cpu_x86_load_seg_cache(env, seg_reg, selector,
370 get_seg_base(e1, e2),
371 get_seg_limit(e1, e2),
372 e2);
373 } else {
374 if (seg_reg == R_SS || seg_reg == R_CS)
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376#ifdef VBOX
377# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
378 cpu_x86_load_seg_cache(env, seg_reg, selector,
379 0, 0, 0);
380# endif
381#endif /* VBOX */
382 }
383}
384
385#define SWITCH_TSS_JMP 0
386#define SWITCH_TSS_IRET 1
387#define SWITCH_TSS_CALL 2
388
389/* XXX: restore CPU state in registers (PowerPC case) */
390static void switch_tss(int tss_selector,
391 uint32_t e1, uint32_t e2, int source,
392 uint32_t next_eip)
393{
394 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
395 target_ulong tss_base;
396 uint32_t new_regs[8], new_segs[6];
397 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
398 uint32_t old_eflags, eflags_mask;
399 SegmentCache *dt;
400#ifndef VBOX
401 int index;
402#else
403 unsigned int index;
404#endif
405 target_ulong ptr;
406
407 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
408 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
409
410 /* if task gate, we read the TSS segment and we load it */
411 if (type == 5) {
412 if (!(e2 & DESC_P_MASK))
413 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
414 tss_selector = e1 >> 16;
415 if (tss_selector & 4)
416 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
417 if (load_segment(&e1, &e2, tss_selector) != 0)
418 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
419 if (e2 & DESC_S_MASK)
420 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
421 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
422 if ((type & 7) != 1)
423 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
424 }
425
426 if (!(e2 & DESC_P_MASK))
427 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
428
429 if (type & 8)
430 tss_limit_max = 103;
431 else
432 tss_limit_max = 43;
433 tss_limit = get_seg_limit(e1, e2);
434 tss_base = get_seg_base(e1, e2);
435 if ((tss_selector & 4) != 0 ||
436 tss_limit < tss_limit_max)
437 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
438 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
439 if (old_type & 8)
440 old_tss_limit_max = 103;
441 else
442 old_tss_limit_max = 43;
443
444 /* read all the registers from the new TSS */
445 if (type & 8) {
446 /* 32 bit */
447 new_cr3 = ldl_kernel(tss_base + 0x1c);
448 new_eip = ldl_kernel(tss_base + 0x20);
449 new_eflags = ldl_kernel(tss_base + 0x24);
450 for(i = 0; i < 8; i++)
451 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
452 for(i = 0; i < 6; i++)
453 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
454 new_ldt = lduw_kernel(tss_base + 0x60);
455 new_trap = ldl_kernel(tss_base + 0x64);
456 } else {
457 /* 16 bit */
458 new_cr3 = 0;
459 new_eip = lduw_kernel(tss_base + 0x0e);
460 new_eflags = lduw_kernel(tss_base + 0x10);
461 for(i = 0; i < 8; i++)
462 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
463 for(i = 0; i < 4; i++)
464 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
465 new_ldt = lduw_kernel(tss_base + 0x2a);
466 new_segs[R_FS] = 0;
467 new_segs[R_GS] = 0;
468 new_trap = 0;
469 }
470
471 /* NOTE: we must avoid memory exceptions during the task switch,
472 so we make dummy accesses beforehand */
473 /* XXX: it can still fail in some cases, so a bigger hack is
474 necessary to validate the TLB after having done the accesses */
475
476 v1 = ldub_kernel(env->tr.base);
477 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
478 stb_kernel(env->tr.base, v1);
479 stb_kernel(env->tr.base + old_tss_limit_max, v2);
480
481 /* clear busy bit (it is restartable) */
482 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
483 target_ulong ptr;
484 uint32_t e2;
485 ptr = env->gdt.base + (env->tr.selector & ~7);
486 e2 = ldl_kernel(ptr + 4);
487 e2 &= ~DESC_TSS_BUSY_MASK;
488 stl_kernel(ptr + 4, e2);
489 }
490 old_eflags = compute_eflags();
491 if (source == SWITCH_TSS_IRET)
492 old_eflags &= ~NT_MASK;
493
494 /* save the current state in the old TSS */
495 if (type & 8) {
496 /* 32 bit */
497 stl_kernel(env->tr.base + 0x20, next_eip);
498 stl_kernel(env->tr.base + 0x24, old_eflags);
499 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
500 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
501 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
502 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
503 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
504 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
505 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
506 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
507 for(i = 0; i < 6; i++)
508 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
509#ifdef VBOX
510 /* Must store the ldt as it gets reloaded and might have been changed. */
511 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
512#endif
513#if defined(VBOX) && defined(DEBUG)
514 printf("TSS 32 bits switch\n");
515 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
516#endif
517 } else {
518 /* 16 bit */
519 stw_kernel(env->tr.base + 0x0e, next_eip);
520 stw_kernel(env->tr.base + 0x10, old_eflags);
521 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
522 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
523 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
524 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
525 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
526 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
527 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
528 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
529 for(i = 0; i < 4; i++)
530 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
531#ifdef VBOX
532 /* Must store the ldt as it gets reloaded and might have been changed. */
533 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
534#endif
535 }
536
537 /* from now on, if an exception occurs, it will occur in the next task
538 context */
539
540 if (source == SWITCH_TSS_CALL) {
541 stw_kernel(tss_base, env->tr.selector);
542 new_eflags |= NT_MASK;
543 }
544
545 /* set busy bit */
546 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
547 target_ulong ptr;
548 uint32_t e2;
549 ptr = env->gdt.base + (tss_selector & ~7);
550 e2 = ldl_kernel(ptr + 4);
551 e2 |= DESC_TSS_BUSY_MASK;
552 stl_kernel(ptr + 4, e2);
553 }
554
555 /* set the new CPU state */
556 /* from this point, any exception which occurs can give problems */
557 env->cr[0] |= CR0_TS_MASK;
558 env->hflags |= HF_TS_MASK;
559 env->tr.selector = tss_selector;
560 env->tr.base = tss_base;
561 env->tr.limit = tss_limit;
562 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
563#ifdef VBOX
564 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
565 env->tr.newselector = 0;
566#endif
567
568 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
569 cpu_x86_update_cr3(env, new_cr3);
570 }
571
572 /* load all registers that cannot raise an exception first, then reload
573 the ones that may */
574 env->eip = new_eip;
575 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
576 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
577 if (!(type & 8))
578 eflags_mask &= 0xffff;
579 load_eflags(new_eflags, eflags_mask);
580 /* XXX: what to do in 16 bit case ? */
581 EAX = new_regs[0];
582 ECX = new_regs[1];
583 EDX = new_regs[2];
584 EBX = new_regs[3];
585 ESP = new_regs[4];
586 EBP = new_regs[5];
587 ESI = new_regs[6];
588 EDI = new_regs[7];
589 if (new_eflags & VM_MASK) {
590 for(i = 0; i < 6; i++)
591 load_seg_vm(i, new_segs[i]);
592 /* in vm86, CPL is always 3 */
593 cpu_x86_set_cpl(env, 3);
594 } else {
595 /* CPL is set to the RPL of CS */
596 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
597 /* load just the selectors first, as the rest may trigger exceptions */
598 for(i = 0; i < 6; i++)
599 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
600 }
601
602 env->ldt.selector = new_ldt & ~4;
603 env->ldt.base = 0;
604 env->ldt.limit = 0;
605 env->ldt.flags = 0;
606#ifdef VBOX
607 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
608 env->ldt.newselector = 0;
609#endif
610
611 /* load the LDT */
612 if (new_ldt & 4)
613 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
614
615 if ((new_ldt & 0xfffc) != 0) {
616 dt = &env->gdt;
617 index = new_ldt & ~7;
618 if ((index + 7) > dt->limit)
619 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
620 ptr = dt->base + index;
621 e1 = ldl_kernel(ptr);
622 e2 = ldl_kernel(ptr + 4);
623 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
624 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
625 if (!(e2 & DESC_P_MASK))
626 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
627 load_seg_cache_raw_dt(&env->ldt, e1, e2);
628 }
629
630 /* load the segments */
631 if (!(new_eflags & VM_MASK)) {
632 tss_load_seg(R_CS, new_segs[R_CS]);
633 tss_load_seg(R_SS, new_segs[R_SS]);
634 tss_load_seg(R_ES, new_segs[R_ES]);
635 tss_load_seg(R_DS, new_segs[R_DS]);
636 tss_load_seg(R_FS, new_segs[R_FS]);
637 tss_load_seg(R_GS, new_segs[R_GS]);
638 }
639
640 /* check that EIP is in the CS segment limits */
641 if (new_eip > env->segs[R_CS].limit) {
642 /* XXX: different exception if CALL ? */
643 raise_exception_err(EXCP0D_GPF, 0);
644 }
645
646#ifndef CONFIG_USER_ONLY
647 /* reset local breakpoints */
648 if (env->dr[7] & 0x55) {
649 for (i = 0; i < 4; i++) {
650 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
651 hw_breakpoint_remove(env, i);
652 }
653 env->dr[7] &= ~0x55;
654 }
655#endif
656}
657
658/* check if Port I/O is allowed in TSS */
659static inline void check_io(int addr, int size)
660{
661#ifndef VBOX
662 int io_offset, val, mask;
663#else
664 int val, mask;
665 unsigned int io_offset;
666#endif /* VBOX */
667
668 /* TSS must be a valid 32 bit one */
669 if (!(env->tr.flags & DESC_P_MASK) ||
670 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
671 env->tr.limit < 103)
672 goto fail;
673 io_offset = lduw_kernel(env->tr.base + 0x66);
674 io_offset += (addr >> 3);
675 /* Note: the check needs two bytes */
676 if ((io_offset + 1) > env->tr.limit)
677 goto fail;
678 val = lduw_kernel(env->tr.base + io_offset);
679 val >>= (addr & 7);
680 mask = (1 << size) - 1;
681 /* all bits must be zero to allow the I/O */
682 if ((val & mask) != 0) {
683 fail:
684 raise_exception_err(EXCP0D_GPF, 0);
685 }
686}
687
688#ifdef VBOX
689
690/* Keep in sync with gen_check_external_event() */
691void helper_check_external_event()
692{
693 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
694 | CPU_INTERRUPT_EXTERNAL_EXIT
695 | CPU_INTERRUPT_EXTERNAL_TIMER
696 | CPU_INTERRUPT_EXTERNAL_DMA))
697 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
698 && (env->eflags & IF_MASK)
699 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
700 {
701 helper_external_event();
702 }
703
704}
705
706void helper_sync_seg(uint32_t reg)
707{
708 if (env->segs[reg].newselector)
709 sync_seg(env, reg, env->segs[reg].newselector);
710}
711
712#endif /* VBOX */
713
714void helper_check_iob(uint32_t t0)
715{
716 check_io(t0, 1);
717}
718
719void helper_check_iow(uint32_t t0)
720{
721 check_io(t0, 2);
722}
723
724void helper_check_iol(uint32_t t0)
725{
726 check_io(t0, 4);
727}
728
729void helper_outb(uint32_t port, uint32_t data)
730{
731#ifndef VBOX
732 cpu_outb(port, data & 0xff);
733#else
734 cpu_outb(env, port, data & 0xff);
735#endif
736}
737
738target_ulong helper_inb(uint32_t port)
739{
740#ifndef VBOX
741 return cpu_inb(port);
742#else
743 return cpu_inb(env, port);
744#endif
745}
746
747void helper_outw(uint32_t port, uint32_t data)
748{
749#ifndef VBOX
750 cpu_outw(port, data & 0xffff);
751#else
752 cpu_outw(env, port, data & 0xffff);
753#endif
754}
755
756target_ulong helper_inw(uint32_t port)
757{
758#ifndef VBOX
759 return cpu_inw(port);
760#else
761 return cpu_inw(env, port);
762#endif
763}
764
765void helper_outl(uint32_t port, uint32_t data)
766{
767#ifndef VBOX
768 cpu_outl(port, data);
769#else
770 cpu_outl(env, port, data);
771#endif
772}
773
774target_ulong helper_inl(uint32_t port)
775{
776#ifndef VBOX
777 return cpu_inl(port);
778#else
779 return cpu_inl(env, port);
780#endif
781}
782
783static inline unsigned int get_sp_mask(unsigned int e2)
784{
785 if (e2 & DESC_B_MASK)
786 return 0xffffffff;
787 else
788 return 0xffff;
789}
790
791static int exeption_has_error_code(int intno)
792{
793 switch(intno) {
794 case 8:
795 case 10:
796 case 11:
797 case 12:
798 case 13:
799 case 14:
800 case 17:
801 return 1;
802 }
803 return 0;
804}
805
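/* SET_ESP updates the stack pointer according to the width implied by sp_mask: a 16-bit stack only touches SP, wider stacks update ESP/RSP accordingly. */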
806#ifdef TARGET_X86_64
807#define SET_ESP(val, sp_mask)\
808do {\
809 if ((sp_mask) == 0xffff)\
810 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
811 else if ((sp_mask) == 0xffffffffLL)\
812 ESP = (uint32_t)(val);\
813 else\
814 ESP = (val);\
815} while (0)
816#else
817#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
818#endif
819
820/* on 64-bit machines this can overflow, so this segment addition macro
821 * can be used to trim the value to 32 bits whenever needed */
822#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
823
824/* XXX: add an is_user flag to have proper security support */
825#define PUSHW(ssp, sp, sp_mask, val)\
826{\
827 sp -= 2;\
828 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
829}
830
831#define PUSHL(ssp, sp, sp_mask, val)\
832{\
833 sp -= 4;\
834 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
835}
836
837#define POPW(ssp, sp, sp_mask, val)\
838{\
839 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
840 sp += 2;\
841}
842
843#define POPL(ssp, sp, sp_mask, val)\
844{\
845 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
846 sp += 4;\
847}
848
849/* protected mode interrupt */
850static void do_interrupt_protected(int intno, int is_int, int error_code,
851 unsigned int next_eip, int is_hw)
852{
853 SegmentCache *dt;
854 target_ulong ptr, ssp;
855 int type, dpl, selector, ss_dpl, cpl;
856 int has_error_code, new_stack, shift;
857 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
858 uint32_t old_eip, sp_mask;
859
860#ifdef VBOX
861 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
862 cpu_loop_exit();
863#endif
864
865 has_error_code = 0;
866 if (!is_int && !is_hw)
867 has_error_code = exeption_has_error_code(intno);
868 if (is_int)
869 old_eip = next_eip;
870 else
871 old_eip = env->eip;
872
873 dt = &env->idt;
874#ifndef VBOX
875 if (intno * 8 + 7 > dt->limit)
876#else
877 if ((unsigned)intno * 8 + 7 > dt->limit)
878#endif
879 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
880 ptr = dt->base + intno * 8;
881 e1 = ldl_kernel(ptr);
882 e2 = ldl_kernel(ptr + 4);
883 /* check gate type */
884 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
885 switch(type) {
886 case 5: /* task gate */
887 /* must do that check here to return the correct error code */
888 if (!(e2 & DESC_P_MASK))
889 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
890 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
891 if (has_error_code) {
892 int type;
893 uint32_t mask;
894 /* push the error code */
895 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
896 shift = type >> 3;
897 if (env->segs[R_SS].flags & DESC_B_MASK)
898 mask = 0xffffffff;
899 else
900 mask = 0xffff;
901 esp = (ESP - (2 << shift)) & mask;
902 ssp = env->segs[R_SS].base + esp;
903 if (shift)
904 stl_kernel(ssp, error_code);
905 else
906 stw_kernel(ssp, error_code);
907 SET_ESP(esp, mask);
908 }
909 return;
910 case 6: /* 286 interrupt gate */
911 case 7: /* 286 trap gate */
912 case 14: /* 386 interrupt gate */
913 case 15: /* 386 trap gate */
914 break;
915 default:
916 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
917 break;
918 }
919 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
920 cpl = env->hflags & HF_CPL_MASK;
921 /* check privilege if software int */
922 if (is_int && dpl < cpl)
923 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
924 /* check valid bit */
925 if (!(e2 & DESC_P_MASK))
926 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
927 selector = e1 >> 16;
928 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
929 if ((selector & 0xfffc) == 0)
930 raise_exception_err(EXCP0D_GPF, 0);
931
932 if (load_segment(&e1, &e2, selector) != 0)
933 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
934 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
935 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
936 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
937 if (dpl > cpl)
938 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
939 if (!(e2 & DESC_P_MASK))
940 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
941 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
942 /* to inner privilege */
943 get_ss_esp_from_tss(&ss, &esp, dpl);
944 if ((ss & 0xfffc) == 0)
945 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
946 if ((ss & 3) != dpl)
947 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
948 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
949 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
950 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
951 if (ss_dpl != dpl)
952 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
953 if (!(ss_e2 & DESC_S_MASK) ||
954 (ss_e2 & DESC_CS_MASK) ||
955 !(ss_e2 & DESC_W_MASK))
956 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
957 if (!(ss_e2 & DESC_P_MASK))
958#ifdef VBOX /* See page 3-477 of 253666.pdf */
959 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
960#else
961 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
962#endif
963 new_stack = 1;
964 sp_mask = get_sp_mask(ss_e2);
965 ssp = get_seg_base(ss_e1, ss_e2);
966#if defined(VBOX) && defined(DEBUG)
967 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
968#endif
969 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
970 /* to same privilege */
971 if (env->eflags & VM_MASK)
972 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
973 new_stack = 0;
974 sp_mask = get_sp_mask(env->segs[R_SS].flags);
975 ssp = env->segs[R_SS].base;
976 esp = ESP;
977 dpl = cpl;
978 } else {
979 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
980 new_stack = 0; /* avoid warning */
981 sp_mask = 0; /* avoid warning */
982 ssp = 0; /* avoid warning */
983 esp = 0; /* avoid warning */
984 }
985
986 shift = type >> 3;
987
988#if 0
989 /* XXX: check that enough room is available */
990 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
991 if (env->eflags & VM_MASK)
992 push_size += 8;
993 push_size <<= shift;
994#endif
995 if (shift == 1) {
996 if (new_stack) {
997 if (env->eflags & VM_MASK) {
998 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
999 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1000 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1001 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1002 }
1003 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1004 PUSHL(ssp, esp, sp_mask, ESP);
1005 }
1006 PUSHL(ssp, esp, sp_mask, compute_eflags());
1007 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1008 PUSHL(ssp, esp, sp_mask, old_eip);
1009 if (has_error_code) {
1010 PUSHL(ssp, esp, sp_mask, error_code);
1011 }
1012 } else {
1013 if (new_stack) {
1014 if (env->eflags & VM_MASK) {
1015 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1016 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1017 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1018 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1019 }
1020 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1021 PUSHW(ssp, esp, sp_mask, ESP);
1022 }
1023 PUSHW(ssp, esp, sp_mask, compute_eflags());
1024 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1025 PUSHW(ssp, esp, sp_mask, old_eip);
1026 if (has_error_code) {
1027 PUSHW(ssp, esp, sp_mask, error_code);
1028 }
1029 }
1030
1031 if (new_stack) {
1032 if (env->eflags & VM_MASK) {
1033 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1034 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1035 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1036 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1037 }
1038 ss = (ss & ~3) | dpl;
1039 cpu_x86_load_seg_cache(env, R_SS, ss,
1040 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1041 }
1042 SET_ESP(esp, sp_mask);
1043
1044 selector = (selector & ~3) | dpl;
1045 cpu_x86_load_seg_cache(env, R_CS, selector,
1046 get_seg_base(e1, e2),
1047 get_seg_limit(e1, e2),
1048 e2);
1049 cpu_x86_set_cpl(env, dpl);
1050 env->eip = offset;
1051
1052 /* an interrupt gate clears the IF mask */
1053 if ((type & 1) == 0) {
1054 env->eflags &= ~IF_MASK;
1055 }
1056#ifndef VBOX
1057 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1058#else
1059 /*
1060 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1061 * gets confused by seemingly changed EFLAGS. See #3491 and
1062 * public bug #2341.
1063 */
1064 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1065#endif
1066}
1067
1068#ifdef VBOX
1069
1070/* check if VME interrupt redirection is enabled in TSS */
1071DECLINLINE(bool) is_vme_irq_redirected(int intno)
1072{
1073 unsigned int io_offset, intredir_offset;
1074 unsigned char val, mask;
1075
1076 /* TSS must be a valid 32 bit one */
1077 if (!(env->tr.flags & DESC_P_MASK) ||
1078 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1079 env->tr.limit < 103)
1080 goto fail;
1081 io_offset = lduw_kernel(env->tr.base + 0x66);
1082 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1083 if (io_offset < 0x68 + 0x20)
1084 io_offset = 0x68 + 0x20;
1085 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1086 intredir_offset = io_offset - 0x20;
1087
1088 intredir_offset += (intno >> 3);
1089 if ((intredir_offset) > env->tr.limit)
1090 goto fail;
1091
1092 val = ldub_kernel(env->tr.base + intredir_offset);
1093 mask = 1 << (unsigned char)(intno & 7);
1094
1095 /* bit set means no redirection. */
1096 if ((val & mask) != 0) {
1097 return false;
1098 }
1099 return true;
1100
1101fail:
1102 raise_exception_err(EXCP0D_GPF, 0);
1103 return true;
1104}
1105
1106/* V86 mode software interrupt with CR4.VME=1 */
1107static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1108{
1109 target_ulong ptr, ssp;
1110 int selector;
1111 uint32_t offset, esp;
1112 uint32_t old_cs, old_eflags;
1113 uint32_t iopl;
1114
1115 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1116
1117 if (!is_vme_irq_redirected(intno))
1118 {
1119 if (iopl == 3)
1120 {
1121 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1122 return;
1123 }
1124 else
1125 raise_exception_err(EXCP0D_GPF, 0);
1126 }
1127
1128 /* virtual mode idt is at linear address 0 */
1129 ptr = 0 + intno * 4;
1130 offset = lduw_kernel(ptr);
1131 selector = lduw_kernel(ptr + 2);
1132 esp = ESP;
1133 ssp = env->segs[R_SS].base;
1134 old_cs = env->segs[R_CS].selector;
1135
1136 old_eflags = compute_eflags();
1137 if (iopl < 3)
1138 {
1139 /* copy VIF into IF and set IOPL to 3 */
1140 if (env->eflags & VIF_MASK)
1141 old_eflags |= IF_MASK;
1142 else
1143 old_eflags &= ~IF_MASK;
1144
1145 old_eflags |= (3 << IOPL_SHIFT);
1146 }
1147
1148 /* XXX: use SS segment size ? */
1149 PUSHW(ssp, esp, 0xffff, old_eflags);
1150 PUSHW(ssp, esp, 0xffff, old_cs);
1151 PUSHW(ssp, esp, 0xffff, next_eip);
1152
1153 /* update processor state */
1154 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1155 env->eip = offset;
1156 env->segs[R_CS].selector = selector;
1157 env->segs[R_CS].base = (selector << 4);
1158 env->eflags &= ~(TF_MASK | RF_MASK);
1159
1160 if (iopl < 3)
1161 env->eflags &= ~VIF_MASK;
1162 else
1163 env->eflags &= ~IF_MASK;
1164}
1165
1166#endif /* VBOX */
1167
1168#ifdef TARGET_X86_64
1169
1170#define PUSHQ(sp, val)\
1171{\
1172 sp -= 8;\
1173 stq_kernel(sp, (val));\
1174}
1175
1176#define POPQ(sp, val)\
1177{\
1178 val = ldq_kernel(sp);\
1179 sp += 8;\
1180}
1181
1182static inline target_ulong get_rsp_from_tss(int level)
1183{
1184 int index;
1185
1186#if 0
1187 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1188 env->tr.base, env->tr.limit);
1189#endif
1190
1191 if (!(env->tr.flags & DESC_P_MASK))
1192 cpu_abort(env, "invalid tss");
1193 index = 8 * level + 4;
1194 if ((index + 7) > env->tr.limit)
1195 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1196 return ldq_kernel(env->tr.base + index);
1197}
1198
1199/* 64 bit interrupt */
1200static void do_interrupt64(int intno, int is_int, int error_code,
1201 target_ulong next_eip, int is_hw)
1202{
1203 SegmentCache *dt;
1204 target_ulong ptr;
1205 int type, dpl, selector, cpl, ist;
1206 int has_error_code, new_stack;
1207 uint32_t e1, e2, e3, ss;
1208 target_ulong old_eip, esp, offset;
1209
1210#ifdef VBOX
1211 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1212 cpu_loop_exit();
1213#endif
1214
1215 has_error_code = 0;
1216 if (!is_int && !is_hw)
1217 has_error_code = exeption_has_error_code(intno);
1218 if (is_int)
1219 old_eip = next_eip;
1220 else
1221 old_eip = env->eip;
1222
1223 dt = &env->idt;
1224 if (intno * 16 + 15 > dt->limit)
1225 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1226 ptr = dt->base + intno * 16;
1227 e1 = ldl_kernel(ptr);
1228 e2 = ldl_kernel(ptr + 4);
1229 e3 = ldl_kernel(ptr + 8);
1230 /* check gate type */
1231 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1232 switch(type) {
1233 case 14: /* 386 interrupt gate */
1234 case 15: /* 386 trap gate */
1235 break;
1236 default:
1237 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1238 break;
1239 }
1240 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1241 cpl = env->hflags & HF_CPL_MASK;
1242 /* check privilege if software int */
1243 if (is_int && dpl < cpl)
1244 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1245 /* check valid bit */
1246 if (!(e2 & DESC_P_MASK))
1247 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1248 selector = e1 >> 16;
1249 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1250 ist = e2 & 7;
1251 if ((selector & 0xfffc) == 0)
1252 raise_exception_err(EXCP0D_GPF, 0);
1253
1254 if (load_segment(&e1, &e2, selector) != 0)
1255 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1256 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1257 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1258 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1259 if (dpl > cpl)
1260 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1261 if (!(e2 & DESC_P_MASK))
1262 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1263 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1264 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1265 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1266 /* to inner privilege */
1267 if (ist != 0)
1268 esp = get_rsp_from_tss(ist + 3);
1269 else
1270 esp = get_rsp_from_tss(dpl);
1271 esp &= ~0xfLL; /* align stack */
1272 ss = 0;
1273 new_stack = 1;
1274 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1275 /* to same privilege */
1276 if (env->eflags & VM_MASK)
1277 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1278 new_stack = 0;
1279 if (ist != 0)
1280 esp = get_rsp_from_tss(ist + 3);
1281 else
1282 esp = ESP;
1283 esp &= ~0xfLL; /* align stack */
1284 dpl = cpl;
1285 } else {
1286 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1287 new_stack = 0; /* avoid warning */
1288 esp = 0; /* avoid warning */
1289 }
1290
1291 PUSHQ(esp, env->segs[R_SS].selector);
1292 PUSHQ(esp, ESP);
1293 PUSHQ(esp, compute_eflags());
1294 PUSHQ(esp, env->segs[R_CS].selector);
1295 PUSHQ(esp, old_eip);
1296 if (has_error_code) {
1297 PUSHQ(esp, error_code);
1298 }
1299
1300 if (new_stack) {
1301 ss = 0 | dpl;
1302 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1303 }
1304 ESP = esp;
1305
1306 selector = (selector & ~3) | dpl;
1307 cpu_x86_load_seg_cache(env, R_CS, selector,
1308 get_seg_base(e1, e2),
1309 get_seg_limit(e1, e2),
1310 e2);
1311 cpu_x86_set_cpl(env, dpl);
1312 env->eip = offset;
1313
1314 /* an interrupt gate clears the IF mask */
1315 if ((type & 1) == 0) {
1316 env->eflags &= ~IF_MASK;
1317 }
1318#ifndef VBOX
1319 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1320#else /* VBOX */
1321 /*
1322 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1323 * gets confused by seemingly changed EFLAGS. See #3491 and
1324 * public bug #2341.
1325 */
1326 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1327#endif /* VBOX */
1328}
1329#endif
1330
1331#ifdef TARGET_X86_64
1332#if defined(CONFIG_USER_ONLY)
1333void helper_syscall(int next_eip_addend)
1334{
1335 env->exception_index = EXCP_SYSCALL;
1336 env->exception_next_eip = env->eip + next_eip_addend;
1337 cpu_loop_exit();
1338}
1339#else
1340void helper_syscall(int next_eip_addend)
1341{
1342 int selector;
1343
1344 if (!(env->efer & MSR_EFER_SCE)) {
1345 raise_exception_err(EXCP06_ILLOP, 0);
1346 }
1347 selector = (env->star >> 32) & 0xffff;
1348 if (env->hflags & HF_LMA_MASK) {
1349 int code64;
1350
1351 ECX = env->eip + next_eip_addend;
1352 env->regs[11] = compute_eflags();
1353
1354 code64 = env->hflags & HF_CS64_MASK;
1355
1356 cpu_x86_set_cpl(env, 0);
1357 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1358 0, 0xffffffff,
1359 DESC_G_MASK | DESC_P_MASK |
1360 DESC_S_MASK |
1361 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1362 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1363 0, 0xffffffff,
1364 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1365 DESC_S_MASK |
1366 DESC_W_MASK | DESC_A_MASK);
1367 env->eflags &= ~env->fmask;
1368 load_eflags(env->eflags, 0);
1369 if (code64)
1370 env->eip = env->lstar;
1371 else
1372 env->eip = env->cstar;
1373 } else {
1374 ECX = (uint32_t)(env->eip + next_eip_addend);
1375
1376 cpu_x86_set_cpl(env, 0);
1377 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1378 0, 0xffffffff,
1379 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1380 DESC_S_MASK |
1381 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1382 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1383 0, 0xffffffff,
1384 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1385 DESC_S_MASK |
1386 DESC_W_MASK | DESC_A_MASK);
1387 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1388 env->eip = (uint32_t)env->star;
1389 }
1390}
1391#endif
1392#endif
1393
1394#ifdef TARGET_X86_64
1395void helper_sysret(int dflag)
1396{
1397 int cpl, selector;
1398
1399 if (!(env->efer & MSR_EFER_SCE)) {
1400 raise_exception_err(EXCP06_ILLOP, 0);
1401 }
1402 cpl = env->hflags & HF_CPL_MASK;
1403 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1404 raise_exception_err(EXCP0D_GPF, 0);
1405 }
1406 selector = (env->star >> 48) & 0xffff;
1407 if (env->hflags & HF_LMA_MASK) {
1408 if (dflag == 2) {
1409 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1410 0, 0xffffffff,
1411 DESC_G_MASK | DESC_P_MASK |
1412 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1413 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1414 DESC_L_MASK);
1415 env->eip = ECX;
1416 } else {
1417 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1418 0, 0xffffffff,
1419 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1420 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1421 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1422 env->eip = (uint32_t)ECX;
1423 }
1424 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1425 0, 0xffffffff,
1426 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1427 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1428 DESC_W_MASK | DESC_A_MASK);
1429 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1430 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1431 cpu_x86_set_cpl(env, 3);
1432 } else {
1433 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1434 0, 0xffffffff,
1435 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1436 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1437 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1438 env->eip = (uint32_t)ECX;
1439 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1440 0, 0xffffffff,
1441 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1442 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1443 DESC_W_MASK | DESC_A_MASK);
1444 env->eflags |= IF_MASK;
1445 cpu_x86_set_cpl(env, 3);
1446 }
1447}
1448#endif
1449
1450#ifdef VBOX
1451
1452/**
1453 * Checks and processes external VMM events.
1454 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1455 */
1456void helper_external_event(void)
1457{
1458# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1459 uintptr_t uSP;
1460# ifdef RT_ARCH_AMD64
1461 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1462# else
1463 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1464# endif
1465 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1466# endif
1467 /* Keep in sync with flags checked by gen_check_external_event() */
1468 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1469 {
1470 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1471 ~CPU_INTERRUPT_EXTERNAL_HARD);
1472 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1473 }
1474 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1475 {
1476 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1477 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1478 cpu_exit(env);
1479 }
1480 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1481 {
1482 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1483 ~CPU_INTERRUPT_EXTERNAL_DMA);
1484 remR3DmaRun(env);
1485 }
1486 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1487 {
1488 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1489 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1490 remR3TimersRun(env);
1491 }
1492 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1493 {
1494 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1495 ~CPU_INTERRUPT_EXTERNAL_HARD);
1496 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1497 }
1498}
1499
1500/* helper for recording call instruction addresses for later scanning */
1501void helper_record_call()
1502{
1503 if ( !(env->state & CPU_RAW_RING0)
1504 && (env->cr[0] & CR0_PG_MASK)
1505 && !(env->eflags & X86_EFL_IF))
1506 remR3RecordCall(env);
1507}
1508
1509#endif /* VBOX */
1510
1511/* real mode interrupt */
1512static void do_interrupt_real(int intno, int is_int, int error_code,
1513 unsigned int next_eip)
1514{
1515 SegmentCache *dt;
1516 target_ulong ptr, ssp;
1517 int selector;
1518 uint32_t offset, esp;
1519 uint32_t old_cs, old_eip;
1520
1521 /* real mode (simpler !) */
1522 dt = &env->idt;
1523#ifndef VBOX
1524 if (intno * 4 + 3 > dt->limit)
1525#else
1526 if ((unsigned)intno * 4 + 3 > dt->limit)
1527#endif
1528 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1529 ptr = dt->base + intno * 4;
1530 offset = lduw_kernel(ptr);
1531 selector = lduw_kernel(ptr + 2);
1532 esp = ESP;
1533 ssp = env->segs[R_SS].base;
1534 if (is_int)
1535 old_eip = next_eip;
1536 else
1537 old_eip = env->eip;
1538 old_cs = env->segs[R_CS].selector;
1539 /* XXX: use SS segment size ? */
1540 PUSHW(ssp, esp, 0xffff, compute_eflags());
1541 PUSHW(ssp, esp, 0xffff, old_cs);
1542 PUSHW(ssp, esp, 0xffff, old_eip);
1543
1544 /* update processor state */
1545 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1546 env->eip = offset;
1547 env->segs[R_CS].selector = selector;
1548 env->segs[R_CS].base = (selector << 4);
1549 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1550}
1551
1552/* fake user mode interrupt */
1553void do_interrupt_user(int intno, int is_int, int error_code,
1554 target_ulong next_eip)
1555{
1556 SegmentCache *dt;
1557 target_ulong ptr;
1558 int dpl, cpl, shift;
1559 uint32_t e2;
1560
1561 dt = &env->idt;
1562 if (env->hflags & HF_LMA_MASK) {
1563 shift = 4;
1564 } else {
1565 shift = 3;
1566 }
1567 ptr = dt->base + (intno << shift);
1568 e2 = ldl_kernel(ptr + 4);
1569
1570 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1571 cpl = env->hflags & HF_CPL_MASK;
1572 /* check privilege if software int */
1573 if (is_int && dpl < cpl)
1574 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1575
1576 /* Since we emulate only user space, we cannot do more than
1577 exit the emulation with the suitable exception and error
1578 code */
1579 if (is_int)
1580 EIP = next_eip;
1581}
1582
1583#if !defined(CONFIG_USER_ONLY)
1584static void handle_even_inj(int intno, int is_int, int error_code,
1585 int is_hw, int rm)
1586{
1587 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1588 if (!(event_inj & SVM_EVTINJ_VALID)) {
1589 int type;
1590 if (is_int)
1591 type = SVM_EVTINJ_TYPE_SOFT;
1592 else
1593 type = SVM_EVTINJ_TYPE_EXEPT;
1594 event_inj = intno | type | SVM_EVTINJ_VALID;
1595 if (!rm && exeption_has_error_code(intno)) {
1596 event_inj |= SVM_EVTINJ_VALID_ERR;
1597 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1598 }
1599 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1600 }
1601}
1602#endif
1603
1604/*
1605 * Begin execution of an interrupt. is_int is TRUE if coming from
1606 * the int instruction. next_eip is the EIP value AFTER the interrupt
1607 * instruction. It is only relevant if is_int is TRUE.
1608 */
1609void do_interrupt(int intno, int is_int, int error_code,
1610 target_ulong next_eip, int is_hw)
1611{
1612 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1613 if ((env->cr[0] & CR0_PE_MASK)) {
1614 static int count;
1615 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1616 count, intno, error_code, is_int,
1617 env->hflags & HF_CPL_MASK,
1618 env->segs[R_CS].selector, EIP,
1619 (int)env->segs[R_CS].base + EIP,
1620 env->segs[R_SS].selector, ESP);
1621 if (intno == 0x0e) {
1622 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1623 } else {
1624 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1625 }
1626 qemu_log("\n");
1627 log_cpu_state(env, X86_DUMP_CCOP);
1628#if 0
1629 {
1630 int i;
1631 uint8_t *ptr;
1632 qemu_log(" code=");
1633 ptr = env->segs[R_CS].base + env->eip;
1634 for(i = 0; i < 16; i++) {
1635 qemu_log(" %02x", ldub(ptr + i));
1636 }
1637 qemu_log("\n");
1638 }
1639#endif
1640 count++;
1641 }
1642 }
1643#ifdef VBOX
1644 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1645 if (is_int) {
1646 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1647 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1648 } else {
1649 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1650 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1651 }
1652 }
1653#endif
1654 if (env->cr[0] & CR0_PE_MASK) {
1655#if !defined(CONFIG_USER_ONLY)
1656 if (env->hflags & HF_SVMI_MASK)
1657 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1658#endif
1659#ifdef TARGET_X86_64
1660 if (env->hflags & HF_LMA_MASK) {
1661 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1662 } else
1663#endif
1664 {
1665#ifdef VBOX
1666 /* int xx *, v86 code and VME enabled? */
1667 if ( (env->eflags & VM_MASK)
1668 && (env->cr[4] & CR4_VME_MASK)
1669 && is_int
1670 && !is_hw
1671 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1672 )
1673 do_soft_interrupt_vme(intno, error_code, next_eip);
1674 else
1675#endif /* VBOX */
1676 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1677 }
1678 } else {
1679#if !defined(CONFIG_USER_ONLY)
1680 if (env->hflags & HF_SVMI_MASK)
1681 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1682#endif
1683 do_interrupt_real(intno, is_int, error_code, next_eip);
1684 }
1685
1686#if !defined(CONFIG_USER_ONLY)
1687 if (env->hflags & HF_SVMI_MASK) {
1688 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1689 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1690 }
1691#endif
1692}
1693
1694/* This should come from sysemu.h - if we could include it here... */
1695void qemu_system_reset_request(void);
1696
1697/*
1698 * Check nested exceptions and change to double or triple fault if
1699 * needed. It should only be called if this is not an interrupt.
1700 * Returns the new exception number.
1701 */
1702static int check_exception(int intno, int *error_code)
1703{
1704 int first_contributory = env->old_exception == 0 ||
1705 (env->old_exception >= 10 &&
1706 env->old_exception <= 13);
1707 int second_contributory = intno == 0 ||
1708 (intno >= 10 && intno <= 13);
1709
1710 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1711 env->old_exception, intno);
1712
1713#if !defined(CONFIG_USER_ONLY)
1714 if (env->old_exception == EXCP08_DBLE) {
1715 if (env->hflags & HF_SVMI_MASK)
1716 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1717
1718 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1719
1720# ifndef VBOX
1721 qemu_system_reset_request();
1722# else
1723 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1724# endif
1725 return EXCP_HLT;
1726 }
1727#endif
1728
1729 if ((first_contributory && second_contributory)
1730 || (env->old_exception == EXCP0E_PAGE &&
1731 (second_contributory || (intno == EXCP0E_PAGE)))) {
1732 intno = EXCP08_DBLE;
1733 *error_code = 0;
1734 }
1735
1736 if (second_contributory || (intno == EXCP0E_PAGE) ||
1737 (intno == EXCP08_DBLE))
1738 env->old_exception = intno;
1739
1740 return intno;
1741}
1742
1743/*
1744 * Signal an interruption. It is executed in the main CPU loop.
1745 * is_int is TRUE if coming from the int instruction. next_eip is the
1746 * EIP value AFTER the interrupt instruction. It is only relevant if
1747 * is_int is TRUE.
1748 */
1749static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1750 int next_eip_addend)
1751{
1752#if defined(VBOX) && defined(DEBUG)
1753 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1754#endif
1755 if (!is_int) {
1756 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1757 intno = check_exception(intno, &error_code);
1758 } else {
1759 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1760 }
1761
1762 env->exception_index = intno;
1763 env->error_code = error_code;
1764 env->exception_is_int = is_int;
1765 env->exception_next_eip = env->eip + next_eip_addend;
1766 cpu_loop_exit();
1767}
1768
1769/* shortcuts to generate exceptions */
1770
1771void raise_exception_err(int exception_index, int error_code)
1772{
1773 raise_interrupt(exception_index, 0, error_code, 0);
1774}
1775
1776void raise_exception(int exception_index)
1777{
1778 raise_interrupt(exception_index, 0, 0, 0);
1779}
1780
1781void raise_exception_env(int exception_index, CPUState *nenv)
1782{
1783 env = nenv;
1784 raise_exception(exception_index);
1785}
1786/* SMM support */
1787
1788#if defined(CONFIG_USER_ONLY)
1789
1790void do_smm_enter(void)
1791{
1792}
1793
1794void helper_rsm(void)
1795{
1796}
1797
1798#else
1799
1800#ifdef TARGET_X86_64
1801#define SMM_REVISION_ID 0x00020064
1802#else
1803#define SMM_REVISION_ID 0x00020000
1804#endif
1805
1806void do_smm_enter(void)
1807{
1808 target_ulong sm_state;
1809 SegmentCache *dt;
1810 int i, offset;
1811
1812 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1813 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1814
1815 env->hflags |= HF_SMM_MASK;
1816 cpu_smm_update(env);
1817
1818 sm_state = env->smbase + 0x8000;
1819
1820#ifdef TARGET_X86_64
1821 for(i = 0; i < 6; i++) {
1822 dt = &env->segs[i];
1823 offset = 0x7e00 + i * 16;
1824 stw_phys(sm_state + offset, dt->selector);
1825 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1826 stl_phys(sm_state + offset + 4, dt->limit);
1827 stq_phys(sm_state + offset + 8, dt->base);
1828 }
1829
1830 stq_phys(sm_state + 0x7e68, env->gdt.base);
1831 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1832
1833 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1834 stq_phys(sm_state + 0x7e78, env->ldt.base);
1835 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1836 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1837
1838 stq_phys(sm_state + 0x7e88, env->idt.base);
1839 stl_phys(sm_state + 0x7e84, env->idt.limit);
1840
1841 stw_phys(sm_state + 0x7e90, env->tr.selector);
1842 stq_phys(sm_state + 0x7e98, env->tr.base);
1843 stl_phys(sm_state + 0x7e94, env->tr.limit);
1844 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1845
1846 stq_phys(sm_state + 0x7ed0, env->efer);
1847
1848 stq_phys(sm_state + 0x7ff8, EAX);
1849 stq_phys(sm_state + 0x7ff0, ECX);
1850 stq_phys(sm_state + 0x7fe8, EDX);
1851 stq_phys(sm_state + 0x7fe0, EBX);
1852 stq_phys(sm_state + 0x7fd8, ESP);
1853 stq_phys(sm_state + 0x7fd0, EBP);
1854 stq_phys(sm_state + 0x7fc8, ESI);
1855 stq_phys(sm_state + 0x7fc0, EDI);
1856 for(i = 8; i < 16; i++)
1857 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1858 stq_phys(sm_state + 0x7f78, env->eip);
1859 stl_phys(sm_state + 0x7f70, compute_eflags());
1860 stl_phys(sm_state + 0x7f68, env->dr[6]);
1861 stl_phys(sm_state + 0x7f60, env->dr[7]);
1862
1863 stl_phys(sm_state + 0x7f48, env->cr[4]);
1864 stl_phys(sm_state + 0x7f50, env->cr[3]);
1865 stl_phys(sm_state + 0x7f58, env->cr[0]);
1866
1867 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1868 stl_phys(sm_state + 0x7f00, env->smbase);
1869#else
1870 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1871 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1872 stl_phys(sm_state + 0x7ff4, compute_eflags());
1873 stl_phys(sm_state + 0x7ff0, env->eip);
1874 stl_phys(sm_state + 0x7fec, EDI);
1875 stl_phys(sm_state + 0x7fe8, ESI);
1876 stl_phys(sm_state + 0x7fe4, EBP);
1877 stl_phys(sm_state + 0x7fe0, ESP);
1878 stl_phys(sm_state + 0x7fdc, EBX);
1879 stl_phys(sm_state + 0x7fd8, EDX);
1880 stl_phys(sm_state + 0x7fd4, ECX);
1881 stl_phys(sm_state + 0x7fd0, EAX);
1882 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1883 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1884
1885 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1886 stl_phys(sm_state + 0x7f64, env->tr.base);
1887 stl_phys(sm_state + 0x7f60, env->tr.limit);
1888 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1889
1890 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1891 stl_phys(sm_state + 0x7f80, env->ldt.base);
1892 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1893 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1894
1895 stl_phys(sm_state + 0x7f74, env->gdt.base);
1896 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1897
1898 stl_phys(sm_state + 0x7f58, env->idt.base);
1899 stl_phys(sm_state + 0x7f54, env->idt.limit);
1900
1901 for(i = 0; i < 6; i++) {
1902 dt = &env->segs[i];
1903 if (i < 3)
1904 offset = 0x7f84 + i * 12;
1905 else
1906 offset = 0x7f2c + (i - 3) * 12;
1907 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1908 stl_phys(sm_state + offset + 8, dt->base);
1909 stl_phys(sm_state + offset + 4, dt->limit);
1910 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1911 }
1912 stl_phys(sm_state + 0x7f14, env->cr[4]);
1913
1914 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1915 stl_phys(sm_state + 0x7ef8, env->smbase);
1916#endif
1917 /* init SMM cpu state */
1918
1919#ifdef TARGET_X86_64
1920 cpu_load_efer(env, 0);
1921#endif
1922 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1923 env->eip = 0x00008000;
1924 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1925 0xffffffff, 0);
1926 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1927 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1928 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1929 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1930 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1931
1932 cpu_x86_update_cr0(env,
1933 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1934 cpu_x86_update_cr4(env, 0);
1935 env->dr[7] = 0x00000400;
1936 CC_OP = CC_OP_EFLAGS;
1937}
1938
1939void helper_rsm(void)
1940{
1941#ifdef VBOX
1942 cpu_abort(env, "helper_rsm");
1943#else /* !VBOX */
1944 target_ulong sm_state;
1945 int i, offset;
1946 uint32_t val;
1947
1948 sm_state = env->smbase + 0x8000;
1949#ifdef TARGET_X86_64
1950 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1951
1952 for(i = 0; i < 6; i++) {
1953 offset = 0x7e00 + i * 16;
1954 cpu_x86_load_seg_cache(env, i,
1955 lduw_phys(sm_state + offset),
1956 ldq_phys(sm_state + offset + 8),
1957 ldl_phys(sm_state + offset + 4),
1958 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1959 }
1960
1961 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1962 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1963
1964 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1965 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1966 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1967 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1968#ifdef VBOX
1969 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
1970 env->ldt.newselector = 0;
1971#endif
1972
1973 env->idt.base = ldq_phys(sm_state + 0x7e88);
1974 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1975
1976 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1977 env->tr.base = ldq_phys(sm_state + 0x7e98);
1978 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1979 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1980#ifdef VBOX
1981 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
1982 env->tr.newselector = 0;
1983#endif
1984
1985 EAX = ldq_phys(sm_state + 0x7ff8);
1986 ECX = ldq_phys(sm_state + 0x7ff0);
1987 EDX = ldq_phys(sm_state + 0x7fe8);
1988 EBX = ldq_phys(sm_state + 0x7fe0);
1989 ESP = ldq_phys(sm_state + 0x7fd8);
1990 EBP = ldq_phys(sm_state + 0x7fd0);
1991 ESI = ldq_phys(sm_state + 0x7fc8);
1992 EDI = ldq_phys(sm_state + 0x7fc0);
1993 for(i = 8; i < 16; i++)
1994 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1995 env->eip = ldq_phys(sm_state + 0x7f78);
1996 load_eflags(ldl_phys(sm_state + 0x7f70),
1997 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1998 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1999 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2000
2001 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2002 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2003 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2004
2005 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2006 if (val & 0x20000) {
2007 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2008 }
2009#else
2010 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2011 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2012 load_eflags(ldl_phys(sm_state + 0x7ff4),
2013 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2014 env->eip = ldl_phys(sm_state + 0x7ff0);
2015 EDI = ldl_phys(sm_state + 0x7fec);
2016 ESI = ldl_phys(sm_state + 0x7fe8);
2017 EBP = ldl_phys(sm_state + 0x7fe4);
2018 ESP = ldl_phys(sm_state + 0x7fe0);
2019 EBX = ldl_phys(sm_state + 0x7fdc);
2020 EDX = ldl_phys(sm_state + 0x7fd8);
2021 ECX = ldl_phys(sm_state + 0x7fd4);
2022 EAX = ldl_phys(sm_state + 0x7fd0);
2023 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2024 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2025
2026 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2027 env->tr.base = ldl_phys(sm_state + 0x7f64);
2028 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2029 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2030#ifdef VBOX
2031 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2032 env->tr.newselector = 0;
2033#endif
2034
2035 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2036 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2037 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2038 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2039#ifdef VBOX
2040 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2041 env->ldt.newselector = 0;
2042#endif
2043
2044 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2045 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2046
2047 env->idt.base = ldl_phys(sm_state + 0x7f58);
2048 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2049
2050 for(i = 0; i < 6; i++) {
2051 if (i < 3)
2052 offset = 0x7f84 + i * 12;
2053 else
2054 offset = 0x7f2c + (i - 3) * 12;
2055 cpu_x86_load_seg_cache(env, i,
2056 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2057 ldl_phys(sm_state + offset + 8),
2058 ldl_phys(sm_state + offset + 4),
2059 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2060 }
2061 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2062
2063 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2064 if (val & 0x20000) {
2065 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2066 }
2067#endif
2068 CC_OP = CC_OP_EFLAGS;
2069 env->hflags &= ~HF_SMM_MASK;
2070 cpu_smm_update(env);
2071
2072 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2073 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2074#endif /* !VBOX */
2075}
2076
2077#endif /* !CONFIG_USER_ONLY */
2078
2079
2080/* division, flags are undefined */
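/* Note: both a zero divisor and a quotient that does not fit the destination
 raise #DE (EXCP00_DIVZ), matching real hardware. */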
2081
2082void helper_divb_AL(target_ulong t0)
2083{
2084 unsigned int num, den, q, r;
2085
2086 num = (EAX & 0xffff);
2087 den = (t0 & 0xff);
2088 if (den == 0) {
2089 raise_exception(EXCP00_DIVZ);
2090 }
2091 q = (num / den);
2092 if (q > 0xff)
2093 raise_exception(EXCP00_DIVZ);
2094 q &= 0xff;
2095 r = (num % den) & 0xff;
2096 EAX = (EAX & ~0xffff) | (r << 8) | q;
2097}
2098
2099void helper_idivb_AL(target_ulong t0)
2100{
2101 int num, den, q, r;
2102
2103 num = (int16_t)EAX;
2104 den = (int8_t)t0;
2105 if (den == 0) {
2106 raise_exception(EXCP00_DIVZ);
2107 }
2108 q = (num / den);
2109 if (q != (int8_t)q)
2110 raise_exception(EXCP00_DIVZ);
2111 q &= 0xff;
2112 r = (num % den) & 0xff;
2113 EAX = (EAX & ~0xffff) | (r << 8) | q;
2114}
2115
2116void helper_divw_AX(target_ulong t0)
2117{
2118 unsigned int num, den, q, r;
2119
2120 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2121 den = (t0 & 0xffff);
2122 if (den == 0) {
2123 raise_exception(EXCP00_DIVZ);
2124 }
2125 q = (num / den);
2126 if (q > 0xffff)
2127 raise_exception(EXCP00_DIVZ);
2128 q &= 0xffff;
2129 r = (num % den) & 0xffff;
2130 EAX = (EAX & ~0xffff) | q;
2131 EDX = (EDX & ~0xffff) | r;
2132}
2133
2134void helper_idivw_AX(target_ulong t0)
2135{
2136 int num, den, q, r;
2137
2138 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2139 den = (int16_t)t0;
2140 if (den == 0) {
2141 raise_exception(EXCP00_DIVZ);
2142 }
2143 q = (num / den);
2144 if (q != (int16_t)q)
2145 raise_exception(EXCP00_DIVZ);
2146 q &= 0xffff;
2147 r = (num % den) & 0xffff;
2148 EAX = (EAX & ~0xffff) | q;
2149 EDX = (EDX & ~0xffff) | r;
2150}
2151
2152void helper_divl_EAX(target_ulong t0)
2153{
2154 unsigned int den, r;
2155 uint64_t num, q;
2156
2157 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2158 den = t0;
2159 if (den == 0) {
2160 raise_exception(EXCP00_DIVZ);
2161 }
2162 q = (num / den);
2163 r = (num % den);
2164 if (q > 0xffffffff)
2165 raise_exception(EXCP00_DIVZ);
2166 EAX = (uint32_t)q;
2167 EDX = (uint32_t)r;
2168}
2169
2170void helper_idivl_EAX(target_ulong t0)
2171{
2172 int den, r;
2173 int64_t num, q;
2174
2175 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2176 den = t0;
2177 if (den == 0) {
2178 raise_exception(EXCP00_DIVZ);
2179 }
2180 q = (num / den);
2181 r = (num % den);
2182 if (q != (int32_t)q)
2183 raise_exception(EXCP00_DIVZ);
2184 EAX = (uint32_t)q;
2185 EDX = (uint32_t)r;
2186}
2187
2188/* bcd */
2189
2190/* XXX: AAM with an immediate operand of 0 raises #DE (divide error); that case is not handled here. */
2191void helper_aam(int base)
2192{
2193 int al, ah;
2194 al = EAX & 0xff;
2195 ah = al / base;
2196 al = al % base;
2197 EAX = (EAX & ~0xffff) | al | (ah << 8);
2198 CC_DST = al;
2199}
2200
2201void helper_aad(int base)
2202{
2203 int al, ah;
2204 al = EAX & 0xff;
2205 ah = (EAX >> 8) & 0xff;
2206 al = ((ah * base) + al) & 0xff;
2207 EAX = (EAX & ~0xffff) | al;
2208 CC_DST = al;
2209}
2210
2211void helper_aaa(void)
2212{
2213 int icarry;
2214 int al, ah, af;
2215 int eflags;
2216
2217 eflags = helper_cc_compute_all(CC_OP);
2218 af = eflags & CC_A;
2219 al = EAX & 0xff;
2220 ah = (EAX >> 8) & 0xff;
2221
2222 icarry = (al > 0xf9);
2223 if (((al & 0x0f) > 9 ) || af) {
2224 al = (al + 6) & 0x0f;
2225 ah = (ah + 1 + icarry) & 0xff;
2226 eflags |= CC_C | CC_A;
2227 } else {
2228 eflags &= ~(CC_C | CC_A);
2229 al &= 0x0f;
2230 }
2231 EAX = (EAX & ~0xffff) | al | (ah << 8);
2232 CC_SRC = eflags;
2233}
2234
2235void helper_aas(void)
2236{
2237 int icarry;
2238 int al, ah, af;
2239 int eflags;
2240
2241 eflags = helper_cc_compute_all(CC_OP);
2242 af = eflags & CC_A;
2243 al = EAX & 0xff;
2244 ah = (EAX >> 8) & 0xff;
2245
2246 icarry = (al < 6);
2247 if (((al & 0x0f) > 9 ) || af) {
2248 al = (al - 6) & 0x0f;
2249 ah = (ah - 1 - icarry) & 0xff;
2250 eflags |= CC_C | CC_A;
2251 } else {
2252 eflags &= ~(CC_C | CC_A);
2253 al &= 0x0f;
2254 }
2255 EAX = (EAX & ~0xffff) | al | (ah << 8);
2256 CC_SRC = eflags;
2257}
2258
2259void helper_daa(void)
2260{
2261 int al, af, cf;
2262 int eflags;
2263
2264 eflags = helper_cc_compute_all(CC_OP);
2265 cf = eflags & CC_C;
2266 af = eflags & CC_A;
2267 al = EAX & 0xff;
2268
2269 eflags = 0;
2270 if (((al & 0x0f) > 9 ) || af) {
2271 al = (al + 6) & 0xff;
2272 eflags |= CC_A;
2273 }
2274 if ((al > 0x9f) || cf) {
2275 al = (al + 0x60) & 0xff;
2276 eflags |= CC_C;
2277 }
2278 EAX = (EAX & ~0xff) | al;
2279 /* well, speed is not an issue here, so we compute the flags by hand */
2280 eflags |= (al == 0) << 6; /* zf */
2281 eflags |= parity_table[al]; /* pf */
2282 eflags |= (al & 0x80); /* sf */
2283 CC_SRC = eflags;
2284}
2285
2286void helper_das(void)
2287{
2288 int al, al1, af, cf;
2289 int eflags;
2290
2291 eflags = helper_cc_compute_all(CC_OP);
2292 cf = eflags & CC_C;
2293 af = eflags & CC_A;
2294 al = EAX & 0xff;
2295
2296 eflags = 0;
2297 al1 = al;
2298 if (((al & 0x0f) > 9 ) || af) {
2299 eflags |= CC_A;
2300 if (al < 6 || cf)
2301 eflags |= CC_C;
2302 al = (al - 6) & 0xff;
2303 }
2304 if ((al1 > 0x99) || cf) {
2305 al = (al - 0x60) & 0xff;
2306 eflags |= CC_C;
2307 }
2308 EAX = (EAX & ~0xff) | al;
2309 /* well, speed is not an issue here, so we compute the flags by hand */
2310 eflags |= (al == 0) << 6; /* zf */
2311 eflags |= parity_table[al]; /* pf */
2312 eflags |= (al & 0x80); /* sf */
2313 CC_SRC = eflags;
2314}
2315
2316void helper_into(int next_eip_addend)
2317{
2318 int eflags;
2319 eflags = helper_cc_compute_all(CC_OP);
2320 if (eflags & CC_O) {
2321 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2322 }
2323}
2324
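/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at a0; on a match store
 ECX:EBX and set ZF, otherwise load the operand into EDX:EAX and clear ZF.
 The store is done in either case, presumably so the write access and fault
 behaviour match the locked cycle of real hardware. */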
2325void helper_cmpxchg8b(target_ulong a0)
2326{
2327 uint64_t d;
2328 int eflags;
2329
2330 eflags = helper_cc_compute_all(CC_OP);
2331 d = ldq(a0);
2332 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2333 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2334 eflags |= CC_Z;
2335 } else {
2336 /* always do the store */
2337 stq(a0, d);
2338 EDX = (uint32_t)(d >> 32);
2339 EAX = (uint32_t)d;
2340 eflags &= ~CC_Z;
2341 }
2342 CC_SRC = eflags;
2343}
2344
2345#ifdef TARGET_X86_64
2346void helper_cmpxchg16b(target_ulong a0)
2347{
2348 uint64_t d0, d1;
2349 int eflags;
2350
2351 if ((a0 & 0xf) != 0)
2352 raise_exception(EXCP0D_GPF);
2353 eflags = helper_cc_compute_all(CC_OP);
2354 d0 = ldq(a0);
2355 d1 = ldq(a0 + 8);
2356 if (d0 == EAX && d1 == EDX) {
2357 stq(a0, EBX);
2358 stq(a0 + 8, ECX);
2359 eflags |= CC_Z;
2360 } else {
2361 /* always do the store */
2362 stq(a0, d0);
2363 stq(a0 + 8, d1);
2364 EDX = d1;
2365 EAX = d0;
2366 eflags &= ~CC_Z;
2367 }
2368 CC_SRC = eflags;
2369}
2370#endif
2371
2372void helper_single_step(void)
2373{
2374#ifndef CONFIG_USER_ONLY
2375 check_hw_breakpoints(env, 1);
2376 env->dr[6] |= DR6_BS;
2377#endif
2378 raise_exception(EXCP01_DB);
2379}
2380
2381void helper_cpuid(void)
2382{
2383 uint32_t eax, ebx, ecx, edx;
2384
2385 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2386
2387 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2388 EAX = eax;
2389 EBX = ebx;
2390 ECX = ecx;
2391 EDX = edx;
2392}
2393
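/* ENTER with a non-zero nesting level: copy the enclosing frame pointers from
 the old frame onto the new stack, then push the new frame pointer (t1). */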
2394void helper_enter_level(int level, int data32, target_ulong t1)
2395{
2396 target_ulong ssp;
2397 uint32_t esp_mask, esp, ebp;
2398
2399 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2400 ssp = env->segs[R_SS].base;
2401 ebp = EBP;
2402 esp = ESP;
2403 if (data32) {
2404 /* 32 bit */
2405 esp -= 4;
2406 while (--level) {
2407 esp -= 4;
2408 ebp -= 4;
2409 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2410 }
2411 esp -= 4;
2412 stl(ssp + (esp & esp_mask), t1);
2413 } else {
2414 /* 16 bit */
2415 esp -= 2;
2416 while (--level) {
2417 esp -= 2;
2418 ebp -= 2;
2419 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2420 }
2421 esp -= 2;
2422 stw(ssp + (esp & esp_mask), t1);
2423 }
2424}
2425
2426#ifdef TARGET_X86_64
2427void helper_enter64_level(int level, int data64, target_ulong t1)
2428{
2429 target_ulong esp, ebp;
2430 ebp = EBP;
2431 esp = ESP;
2432
2433 if (data64) {
2434 /* 64 bit */
2435 esp -= 8;
2436 while (--level) {
2437 esp -= 8;
2438 ebp -= 8;
2439 stq(esp, ldq(ebp));
2440 }
2441 esp -= 8;
2442 stq(esp, t1);
2443 } else {
2444 /* 16 bit */
2445 esp -= 2;
2446 while (--level) {
2447 esp -= 2;
2448 ebp -= 2;
2449 stw(esp, lduw(ebp));
2450 }
2451 esp -= 2;
2452 stw(esp, t1);
2453 }
2454}
2455#endif
2456
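/* LLDT: a null selector just invalidates the LDT; otherwise the selector must
 reference an LDT descriptor (system type 2) in the GDT. In long mode the
 descriptor is 16 bytes, hence the larger entry_limit. */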
2457void helper_lldt(int selector)
2458{
2459 SegmentCache *dt;
2460 uint32_t e1, e2;
2461#ifndef VBOX
2462 int index, entry_limit;
2463#else
2464 unsigned int index, entry_limit;
2465#endif
2466 target_ulong ptr;
2467
2468#ifdef VBOX
2469 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2470 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2471#endif
2472
2473 selector &= 0xffff;
2474 if ((selector & 0xfffc) == 0) {
2475 /* XXX: NULL selector case: invalid LDT */
2476 env->ldt.base = 0;
2477 env->ldt.limit = 0;
2478#ifdef VBOX
2479 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2480 env->ldt.newselector = 0;
2481#endif
2482 } else {
2483 if (selector & 0x4)
2484 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2485 dt = &env->gdt;
2486 index = selector & ~7;
2487#ifdef TARGET_X86_64
2488 if (env->hflags & HF_LMA_MASK)
2489 entry_limit = 15;
2490 else
2491#endif
2492 entry_limit = 7;
2493 if ((index + entry_limit) > dt->limit)
2494 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2495 ptr = dt->base + index;
2496 e1 = ldl_kernel(ptr);
2497 e2 = ldl_kernel(ptr + 4);
2498 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2499 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2500 if (!(e2 & DESC_P_MASK))
2501 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2502#ifdef TARGET_X86_64
2503 if (env->hflags & HF_LMA_MASK) {
2504 uint32_t e3;
2505 e3 = ldl_kernel(ptr + 8);
2506 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2507 env->ldt.base |= (target_ulong)e3 << 32;
2508 } else
2509#endif
2510 {
2511 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2512 }
2513 }
2514 env->ldt.selector = selector;
2515#ifdef VBOX
2516 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2517 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2518#endif
2519}
2520
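/* LTR: like LLDT, but the descriptor must be an available TSS (type 1 or 9);
 on success it is marked busy in the GDT. */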
2521void helper_ltr(int selector)
2522{
2523 SegmentCache *dt;
2524 uint32_t e1, e2;
2525#ifndef VBOX
2526 int index, type, entry_limit;
2527#else
2528 unsigned int index;
2529 int type, entry_limit;
2530#endif
2531 target_ulong ptr;
2532
2533#ifdef VBOX
2534 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2535 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2536 env->tr.flags, (RTSEL)(selector & 0xffff)));
2537#endif
2538 selector &= 0xffff;
2539 if ((selector & 0xfffc) == 0) {
2540 /* NULL selector case: invalid TR */
2541 env->tr.base = 0;
2542 env->tr.limit = 0;
2543 env->tr.flags = 0;
2544#ifdef VBOX
2545 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2546 env->tr.newselector = 0;
2547#endif
2548 } else {
2549 if (selector & 0x4)
2550 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2551 dt = &env->gdt;
2552 index = selector & ~7;
2553#ifdef TARGET_X86_64
2554 if (env->hflags & HF_LMA_MASK)
2555 entry_limit = 15;
2556 else
2557#endif
2558 entry_limit = 7;
2559 if ((index + entry_limit) > dt->limit)
2560 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2561 ptr = dt->base + index;
2562 e1 = ldl_kernel(ptr);
2563 e2 = ldl_kernel(ptr + 4);
2564 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2565 if ((e2 & DESC_S_MASK) ||
2566 (type != 1 && type != 9))
2567 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2568 if (!(e2 & DESC_P_MASK))
2569 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2570#ifdef TARGET_X86_64
2571 if (env->hflags & HF_LMA_MASK) {
2572 uint32_t e3, e4;
2573 e3 = ldl_kernel(ptr + 8);
2574 e4 = ldl_kernel(ptr + 12);
2575 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2576 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2577 load_seg_cache_raw_dt(&env->tr, e1, e2);
2578 env->tr.base |= (target_ulong)e3 << 32;
2579 } else
2580#endif
2581 {
2582 load_seg_cache_raw_dt(&env->tr, e1, e2);
2583 }
2584 e2 |= DESC_TSS_BUSY_MASK;
2585 stl_kernel(ptr + 4, e2);
2586 }
2587 env->tr.selector = selector;
2588#ifdef VBOX
2589 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2590 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2591 env->tr.flags, (RTSEL)(selector & 0xffff)));
2592#endif
2593}
2594
2595/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2596void helper_load_seg(int seg_reg, int selector)
2597{
2598 uint32_t e1, e2;
2599 int cpl, dpl, rpl;
2600 SegmentCache *dt;
2601#ifndef VBOX
2602 int index;
2603#else
2604 unsigned int index;
2605#endif
2606 target_ulong ptr;
2607
2608 selector &= 0xffff;
2609 cpl = env->hflags & HF_CPL_MASK;
2610#ifdef VBOX
2611
2612 /* Trying to load a selector with RPL=1 while in raw ring-0 mode? */
2613 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2614 {
2615 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2616 selector = selector & 0xfffc;
2617 }
2618#endif /* VBOX */
2619 if ((selector & 0xfffc) == 0) {
2620 /* null selector case */
2621 if (seg_reg == R_SS
2622#ifdef TARGET_X86_64
2623 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2624#endif
2625 )
2626 raise_exception_err(EXCP0D_GPF, 0);
2627 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2628 } else {
2629
2630 if (selector & 0x4)
2631 dt = &env->ldt;
2632 else
2633 dt = &env->gdt;
2634 index = selector & ~7;
2635 if ((index + 7) > dt->limit)
2636 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2637 ptr = dt->base + index;
2638 e1 = ldl_kernel(ptr);
2639 e2 = ldl_kernel(ptr + 4);
2640
2641 if (!(e2 & DESC_S_MASK))
2642 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2643 rpl = selector & 3;
2644 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2645 if (seg_reg == R_SS) {
2646 /* must be writable segment */
2647 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2648 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2649 if (rpl != cpl || dpl != cpl)
2650 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2651 } else {
2652 /* must be readable segment */
2653 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2654 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2655
2656 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2657 /* if not conforming code, test rights */
2658 if (dpl < cpl || dpl < rpl)
2659 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2660 }
2661 }
2662
2663 if (!(e2 & DESC_P_MASK)) {
2664 if (seg_reg == R_SS)
2665 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2666 else
2667 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2668 }
2669
2670 /* set the access bit if not already set */
2671 if (!(e2 & DESC_A_MASK)) {
2672 e2 |= DESC_A_MASK;
2673 stl_kernel(ptr + 4, e2);
2674 }
2675
2676 cpu_x86_load_seg_cache(env, seg_reg, selector,
2677 get_seg_base(e1, e2),
2678 get_seg_limit(e1, e2),
2679 e2);
2680#if 0
2681 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2682 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2683#endif
2684 }
2685}
2686
2687/* protected mode jump */
2688void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2689 int next_eip_addend)
2690{
2691 int gate_cs, type;
2692 uint32_t e1, e2, cpl, dpl, rpl, limit;
2693 target_ulong next_eip;
2694
2695#ifdef VBOX /** @todo Why do we do this? */
2696 e1 = e2 = 0;
2697#endif
2698 if ((new_cs & 0xfffc) == 0)
2699 raise_exception_err(EXCP0D_GPF, 0);
2700 if (load_segment(&e1, &e2, new_cs) != 0)
2701 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2702 cpl = env->hflags & HF_CPL_MASK;
2703 if (e2 & DESC_S_MASK) {
2704 if (!(e2 & DESC_CS_MASK))
2705 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2706 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2707 if (e2 & DESC_C_MASK) {
2708 /* conforming code segment */
2709 if (dpl > cpl)
2710 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2711 } else {
2712 /* non conforming code segment */
2713 rpl = new_cs & 3;
2714 if (rpl > cpl)
2715 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2716 if (dpl != cpl)
2717 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2718 }
2719 if (!(e2 & DESC_P_MASK))
2720 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2721 limit = get_seg_limit(e1, e2);
2722 if (new_eip > limit &&
2723 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2724 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2725 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2726 get_seg_base(e1, e2), limit, e2);
2727 EIP = new_eip;
2728 } else {
2729 /* jump to call or task gate */
2730 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2731 rpl = new_cs & 3;
2732 cpl = env->hflags & HF_CPL_MASK;
2733 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2734 switch(type) {
2735 case 1: /* 286 TSS */
2736 case 9: /* 386 TSS */
2737 case 5: /* task gate */
2738 if (dpl < cpl || dpl < rpl)
2739 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2740 next_eip = env->eip + next_eip_addend;
2741 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2742 CC_OP = CC_OP_EFLAGS;
2743 break;
2744 case 4: /* 286 call gate */
2745 case 12: /* 386 call gate */
2746 if ((dpl < cpl) || (dpl < rpl))
2747 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2748 if (!(e2 & DESC_P_MASK))
2749 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2750 gate_cs = e1 >> 16;
2751 new_eip = (e1 & 0xffff);
2752 if (type == 12)
2753 new_eip |= (e2 & 0xffff0000);
2754 if (load_segment(&e1, &e2, gate_cs) != 0)
2755 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2756 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2757 /* must be code segment */
2758 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2759 (DESC_S_MASK | DESC_CS_MASK)))
2760 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2761 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2762 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2763 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2764 if (!(e2 & DESC_P_MASK))
2765#ifdef VBOX /* See page 3-514 of 253666.pdf */
2766 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2767#else
2768 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2769#endif
2770 limit = get_seg_limit(e1, e2);
2771 if (new_eip > limit)
2772 raise_exception_err(EXCP0D_GPF, 0);
2773 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2774 get_seg_base(e1, e2), limit, e2);
2775 EIP = new_eip;
2776 break;
2777 default:
2778 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2779 break;
2780 }
2781 }
2782}
2783
2784/* real mode call */
2785void helper_lcall_real(int new_cs, target_ulong new_eip1,
2786 int shift, int next_eip)
2787{
2788 int new_eip;
2789 uint32_t esp, esp_mask;
2790 target_ulong ssp;
2791
2792 new_eip = new_eip1;
2793 esp = ESP;
2794 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2795 ssp = env->segs[R_SS].base;
2796 if (shift) {
2797 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2798 PUSHL(ssp, esp, esp_mask, next_eip);
2799 } else {
2800 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2801 PUSHW(ssp, esp, esp_mask, next_eip);
2802 }
2803
2804 SET_ESP(esp, esp_mask);
2805 env->eip = new_eip;
2806 env->segs[R_CS].selector = new_cs;
2807 env->segs[R_CS].base = (new_cs << 4);
2808}
2809
2810/* protected mode call */
2811void helper_lcall_protected(int new_cs, target_ulong new_eip,
2812 int shift, int next_eip_addend)
2813{
2814 int new_stack, i;
2815 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2816 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2817 uint32_t val, limit, old_sp_mask;
2818 target_ulong ssp, old_ssp, next_eip;
2819
2820#ifdef VBOX /** @todo Why do we do this? */
2821 e1 = e2 = 0;
2822#endif
2823 next_eip = env->eip + next_eip_addend;
2824 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2825 LOG_PCALL_STATE(env);
2826 if ((new_cs & 0xfffc) == 0)
2827 raise_exception_err(EXCP0D_GPF, 0);
2828 if (load_segment(&e1, &e2, new_cs) != 0)
2829 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2830 cpl = env->hflags & HF_CPL_MASK;
2831 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2832 if (e2 & DESC_S_MASK) {
2833 if (!(e2 & DESC_CS_MASK))
2834 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2835 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2836 if (e2 & DESC_C_MASK) {
2837 /* conforming code segment */
2838 if (dpl > cpl)
2839 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2840 } else {
2841 /* non conforming code segment */
2842 rpl = new_cs & 3;
2843 if (rpl > cpl)
2844 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2845 if (dpl != cpl)
2846 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2847 }
2848 if (!(e2 & DESC_P_MASK))
2849 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2850
2851#ifdef TARGET_X86_64
2852 /* XXX: check 16/32 bit cases in long mode */
2853 if (shift == 2) {
2854 target_ulong rsp;
2855 /* 64 bit case */
2856 rsp = ESP;
2857 PUSHQ(rsp, env->segs[R_CS].selector);
2858 PUSHQ(rsp, next_eip);
2859 /* from this point, not restartable */
2860 ESP = rsp;
2861 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2862 get_seg_base(e1, e2),
2863 get_seg_limit(e1, e2), e2);
2864 EIP = new_eip;
2865 } else
2866#endif
2867 {
2868 sp = ESP;
2869 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2870 ssp = env->segs[R_SS].base;
2871 if (shift) {
2872 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2873 PUSHL(ssp, sp, sp_mask, next_eip);
2874 } else {
2875 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2876 PUSHW(ssp, sp, sp_mask, next_eip);
2877 }
2878
2879 limit = get_seg_limit(e1, e2);
2880 if (new_eip > limit)
2881 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2882 /* from this point, not restartable */
2883 SET_ESP(sp, sp_mask);
2884 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2885 get_seg_base(e1, e2), limit, e2);
2886 EIP = new_eip;
2887 }
2888 } else {
2889 /* check gate type */
2890 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2891 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2892 rpl = new_cs & 3;
2893 switch(type) {
2894 case 1: /* available 286 TSS */
2895 case 9: /* available 386 TSS */
2896 case 5: /* task gate */
2897 if (dpl < cpl || dpl < rpl)
2898 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2899 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2900 CC_OP = CC_OP_EFLAGS;
2901 return;
2902 case 4: /* 286 call gate */
2903 case 12: /* 386 call gate */
2904 break;
2905 default:
2906 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2907 break;
2908 }
2909 shift = type >> 3;
2910
2911 if (dpl < cpl || dpl < rpl)
2912 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2913 /* check valid bit */
2914 if (!(e2 & DESC_P_MASK))
2915 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2916 selector = e1 >> 16;
2917 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2918 param_count = e2 & 0x1f;
2919 if ((selector & 0xfffc) == 0)
2920 raise_exception_err(EXCP0D_GPF, 0);
2921
2922 if (load_segment(&e1, &e2, selector) != 0)
2923 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2924 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2925 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2926 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2927 if (dpl > cpl)
2928 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2929 if (!(e2 & DESC_P_MASK))
2930 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2931
2932 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2933 /* to inner privilege */
2934 get_ss_esp_from_tss(&ss, &sp, dpl);
2935 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2936 ss, sp, param_count, ESP);
2937 if ((ss & 0xfffc) == 0)
2938 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2939 if ((ss & 3) != dpl)
2940 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2941 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2942 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2943 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2944 if (ss_dpl != dpl)
2945 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2946 if (!(ss_e2 & DESC_S_MASK) ||
2947 (ss_e2 & DESC_CS_MASK) ||
2948 !(ss_e2 & DESC_W_MASK))
2949 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2950 if (!(ss_e2 & DESC_P_MASK))
2951#ifdef VBOX /* See page 3-99 of 253666.pdf */
2952 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2953#else
2954 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2955#endif
2956
2957 // push_size = ((param_count * 2) + 8) << shift;
2958
2959 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2960 old_ssp = env->segs[R_SS].base;
2961
2962 sp_mask = get_sp_mask(ss_e2);
2963 ssp = get_seg_base(ss_e1, ss_e2);
2964 if (shift) {
2965 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2966 PUSHL(ssp, sp, sp_mask, ESP);
2967 for(i = param_count - 1; i >= 0; i--) {
2968 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2969 PUSHL(ssp, sp, sp_mask, val);
2970 }
2971 } else {
2972 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2973 PUSHW(ssp, sp, sp_mask, ESP);
2974 for(i = param_count - 1; i >= 0; i--) {
2975 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2976 PUSHW(ssp, sp, sp_mask, val);
2977 }
2978 }
2979 new_stack = 1;
2980 } else {
2981 /* to same privilege */
2982 sp = ESP;
2983 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2984 ssp = env->segs[R_SS].base;
2985 // push_size = (4 << shift);
2986 new_stack = 0;
2987 }
2988
2989 if (shift) {
2990 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2991 PUSHL(ssp, sp, sp_mask, next_eip);
2992 } else {
2993 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2994 PUSHW(ssp, sp, sp_mask, next_eip);
2995 }
2996
2997 /* from this point, not restartable */
2998
2999 if (new_stack) {
3000 ss = (ss & ~3) | dpl;
3001 cpu_x86_load_seg_cache(env, R_SS, ss,
3002 ssp,
3003 get_seg_limit(ss_e1, ss_e2),
3004 ss_e2);
3005 }
3006
3007 selector = (selector & ~3) | dpl;
3008 cpu_x86_load_seg_cache(env, R_CS, selector,
3009 get_seg_base(e1, e2),
3010 get_seg_limit(e1, e2),
3011 e2);
3012 cpu_x86_set_cpl(env, dpl);
3013 SET_ESP(sp, sp_mask);
3014 EIP = offset;
3015 }
3016}
3017
3018/* real and vm86 mode iret */
3019void helper_iret_real(int shift)
3020{
3021 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3022 target_ulong ssp;
3023 int eflags_mask;
3024#ifdef VBOX
3025 bool fVME = false;
3026
3027 remR3TrapClear(env->pVM);
3028#endif /* VBOX */
3029
3030 sp_mask = 0xffff; /* XXX: should this use the SS segment size? */
3031 sp = ESP;
3032 ssp = env->segs[R_SS].base;
3033 if (shift == 1) {
3034 /* 32 bits */
3035 POPL(ssp, sp, sp_mask, new_eip);
3036 POPL(ssp, sp, sp_mask, new_cs);
3037 new_cs &= 0xffff;
3038 POPL(ssp, sp, sp_mask, new_eflags);
3039 } else {
3040 /* 16 bits */
3041 POPW(ssp, sp, sp_mask, new_eip);
3042 POPW(ssp, sp, sp_mask, new_cs);
3043 POPW(ssp, sp, sp_mask, new_eflags);
3044 }
3045#ifdef VBOX
3046 if ( (env->eflags & VM_MASK)
3047 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3048 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3049 {
3050 fVME = true;
3051 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3052 /* if TF will be set -> #GP */
3053 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3054 || (new_eflags & TF_MASK))
3055 raise_exception(EXCP0D_GPF);
3056 }
3057#endif /* VBOX */
3058 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3059 env->segs[R_CS].selector = new_cs;
3060 env->segs[R_CS].base = (new_cs << 4);
3061 env->eip = new_eip;
3062#ifdef VBOX
3063 if (fVME)
3064 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3065 else
3066#endif
3067 if (env->eflags & VM_MASK)
3068 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3069 else
3070 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3071 if (shift == 0)
3072 eflags_mask &= 0xffff;
3073 load_eflags(new_eflags, eflags_mask);
3074 env->hflags2 &= ~HF2_NMI_MASK;
3075#ifdef VBOX
3076 if (fVME)
3077 {
3078 if (new_eflags & IF_MASK)
3079 env->eflags |= VIF_MASK;
3080 else
3081 env->eflags &= ~VIF_MASK;
3082 }
3083#endif /* VBOX */
3084}
3085
3086static inline void validate_seg(int seg_reg, int cpl)
3087{
3088 int dpl;
3089 uint32_t e2;
3090
3091 /* XXX: on x86_64, we do not want to nullify FS and GS because
3092 they may still contain a valid base. I would be interested to
3093 know how a real x86_64 CPU behaves */
3094 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3095 (env->segs[seg_reg].selector & 0xfffc) == 0)
3096 return;
3097
3098 e2 = env->segs[seg_reg].flags;
3099 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3100 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3101 /* data or non conforming code segment */
3102 if (dpl < cpl) {
3103 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3104 }
3105 }
3106}
3107
3108/* protected mode return: used for both far returns (lret) and iret */
3109static inline void helper_ret_protected(int shift, int is_iret, int addend)
3110{
3111 uint32_t new_cs, new_eflags, new_ss;
3112 uint32_t new_es, new_ds, new_fs, new_gs;
3113 uint32_t e1, e2, ss_e1, ss_e2;
3114 int cpl, dpl, rpl, eflags_mask, iopl;
3115 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3116
3117#ifdef VBOX /** @todo Why do we do this? */
3118 ss_e1 = ss_e2 = e1 = e2 = 0;
3119#endif
3120
3121#ifdef TARGET_X86_64
3122 if (shift == 2)
3123 sp_mask = -1;
3124 else
3125#endif
3126 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3127 sp = ESP;
3128 ssp = env->segs[R_SS].base;
3129 new_eflags = 0; /* avoid warning */
3130#ifdef TARGET_X86_64
3131 if (shift == 2) {
3132 POPQ(sp, new_eip);
3133 POPQ(sp, new_cs);
3134 new_cs &= 0xffff;
3135 if (is_iret) {
3136 POPQ(sp, new_eflags);
3137 }
3138 } else
3139#endif
3140 if (shift == 1) {
3141 /* 32 bits */
3142 POPL(ssp, sp, sp_mask, new_eip);
3143 POPL(ssp, sp, sp_mask, new_cs);
3144 new_cs &= 0xffff;
3145 if (is_iret) {
3146 POPL(ssp, sp, sp_mask, new_eflags);
3147#if defined(VBOX) && defined(DEBUG)
3148 printf("iret: new CS %04X\n", new_cs);
3149 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3150 printf("iret: new EFLAGS %08X\n", new_eflags);
3151 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3152#endif
3153 if (new_eflags & VM_MASK)
3154 goto return_to_vm86;
3155 }
3156#ifdef VBOX
3157 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3158 {
3159# ifdef DEBUG
3160 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3161# endif
3162 new_cs = new_cs & 0xfffc;
3163 }
3164#endif
3165 } else {
3166 /* 16 bits */
3167 POPW(ssp, sp, sp_mask, new_eip);
3168 POPW(ssp, sp, sp_mask, new_cs);
3169 if (is_iret)
3170 POPW(ssp, sp, sp_mask, new_eflags);
3171 }
3172 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3173 new_cs, new_eip, shift, addend);
3174 LOG_PCALL_STATE(env);
3175 if ((new_cs & 0xfffc) == 0)
3176 {
3177#if defined(VBOX) && defined(DEBUG)
3178 printf("(new_cs & 0xfffc) == 0\n");
3179#endif
3180 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3181 }
3182 if (load_segment(&e1, &e2, new_cs) != 0)
3183 {
3184#if defined(VBOX) && defined(DEBUG)
3185 printf("load_segment failed\n");
3186#endif
3187 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3188 }
3189 if (!(e2 & DESC_S_MASK) ||
3190 !(e2 & DESC_CS_MASK))
3191 {
3192#if defined(VBOX) && defined(DEBUG)
3193 printf("e2 mask %08x\n", e2);
3194#endif
3195 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3196 }
3197 cpl = env->hflags & HF_CPL_MASK;
3198 rpl = new_cs & 3;
3199 if (rpl < cpl)
3200 {
3201#if defined(VBOX) && defined(DEBUG)
3202 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3203#endif
3204 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3205 }
3206 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3207 if (e2 & DESC_C_MASK) {
3208 if (dpl > rpl)
3209 {
3210#if defined(VBOX) && defined(DEBUG)
3211 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3212#endif
3213 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3214 }
3215 } else {
3216 if (dpl != rpl)
3217 {
3218#if defined(VBOX) && defined(DEBUG)
3219 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3220#endif
3221 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3222 }
3223 }
3224 if (!(e2 & DESC_P_MASK))
3225 {
3226#if defined(VBOX) && defined(DEBUG)
3227 printf("DESC_P_MASK e2=%08x\n", e2);
3228#endif
3229 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3230 }
3231
3232 sp += addend;
3233 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3234 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3235 /* return to same privilege level */
3236 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3237 get_seg_base(e1, e2),
3238 get_seg_limit(e1, e2),
3239 e2);
3240 } else {
3241 /* return to different privilege level */
3242#ifdef TARGET_X86_64
3243 if (shift == 2) {
3244 POPQ(sp, new_esp);
3245 POPQ(sp, new_ss);
3246 new_ss &= 0xffff;
3247 } else
3248#endif
3249 if (shift == 1) {
3250 /* 32 bits */
3251 POPL(ssp, sp, sp_mask, new_esp);
3252 POPL(ssp, sp, sp_mask, new_ss);
3253 new_ss &= 0xffff;
3254 } else {
3255 /* 16 bits */
3256 POPW(ssp, sp, sp_mask, new_esp);
3257 POPW(ssp, sp, sp_mask, new_ss);
3258 }
3259 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3260 new_ss, new_esp);
3261 if ((new_ss & 0xfffc) == 0) {
3262#ifdef TARGET_X86_64
3263 /* NULL ss is allowed in long mode if cpl != 3 */
3264 /* XXX: test CS64 ? */
3265 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3266 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3267 0, 0xffffffff,
3268 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3269 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3270 DESC_W_MASK | DESC_A_MASK);
3271 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3272 } else
3273#endif
3274 {
3275 raise_exception_err(EXCP0D_GPF, 0);
3276 }
3277 } else {
3278 if ((new_ss & 3) != rpl)
3279 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3280 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3281 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3282 if (!(ss_e2 & DESC_S_MASK) ||
3283 (ss_e2 & DESC_CS_MASK) ||
3284 !(ss_e2 & DESC_W_MASK))
3285 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3286 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3287 if (dpl != rpl)
3288 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3289 if (!(ss_e2 & DESC_P_MASK))
3290 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3291 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3292 get_seg_base(ss_e1, ss_e2),
3293 get_seg_limit(ss_e1, ss_e2),
3294 ss_e2);
3295 }
3296
3297 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3298 get_seg_base(e1, e2),
3299 get_seg_limit(e1, e2),
3300 e2);
3301 cpu_x86_set_cpl(env, rpl);
3302 sp = new_esp;
3303#ifdef TARGET_X86_64
3304 if (env->hflags & HF_CS64_MASK)
3305 sp_mask = -1;
3306 else
3307#endif
3308 sp_mask = get_sp_mask(ss_e2);
3309
3310 /* validate data segments */
3311 validate_seg(R_ES, rpl);
3312 validate_seg(R_DS, rpl);
3313 validate_seg(R_FS, rpl);
3314 validate_seg(R_GS, rpl);
3315
3316 sp += addend;
3317 }
3318 SET_ESP(sp, sp_mask);
3319 env->eip = new_eip;
3320 if (is_iret) {
3321 /* NOTE: 'cpl' is the _old_ CPL */
3322 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3323 if (cpl == 0)
3324#ifdef VBOX
3325 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3326#else
3327 eflags_mask |= IOPL_MASK;
3328#endif
3329 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3330 if (cpl <= iopl)
3331 eflags_mask |= IF_MASK;
3332 if (shift == 0)
3333 eflags_mask &= 0xffff;
3334 load_eflags(new_eflags, eflags_mask);
3335 }
3336 return;
3337
3338 return_to_vm86:
3339 POPL(ssp, sp, sp_mask, new_esp);
3340 POPL(ssp, sp, sp_mask, new_ss);
3341 POPL(ssp, sp, sp_mask, new_es);
3342 POPL(ssp, sp, sp_mask, new_ds);
3343 POPL(ssp, sp, sp_mask, new_fs);
3344 POPL(ssp, sp, sp_mask, new_gs);
3345
3346 /* modify processor state */
3347 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3348 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3349 load_seg_vm(R_CS, new_cs & 0xffff);
3350 cpu_x86_set_cpl(env, 3);
3351 load_seg_vm(R_SS, new_ss & 0xffff);
3352 load_seg_vm(R_ES, new_es & 0xffff);
3353 load_seg_vm(R_DS, new_ds & 0xffff);
3354 load_seg_vm(R_FS, new_fs & 0xffff);
3355 load_seg_vm(R_GS, new_gs & 0xffff);
3356
3357 env->eip = new_eip & 0xffff;
3358 ESP = new_esp;
3359}
3360
3361void helper_iret_protected(int shift, int next_eip)
3362{
3363 int tss_selector, type;
3364 uint32_t e1, e2;
3365
3366#ifdef VBOX
3367 e1 = e2 = 0; /** @todo Why do we do this? */
3368 remR3TrapClear(env->pVM);
3369#endif
3370
3371 /* NT set: nested task, return via the back link selector stored at the start of the current TSS */
3372 if (env->eflags & NT_MASK) {
3373#ifdef TARGET_X86_64
3374 if (env->hflags & HF_LMA_MASK)
3375 raise_exception_err(EXCP0D_GPF, 0);
3376#endif
3377 tss_selector = lduw_kernel(env->tr.base + 0);
3378 if (tss_selector & 4)
3379 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3380 if (load_segment(&e1, &e2, tss_selector) != 0)
3381 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3382 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3383 /* NOTE: the 0x17 mask keeps the S bit but drops the TSS size bit, so both 16-bit and 32-bit busy TSS descriptors compare equal to 3 while non-busy TSSes and ordinary segments do not */
3384 if (type != 3)
3385 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3386 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3387 } else {
3388 helper_ret_protected(shift, 1, 0);
3389 }
3390 env->hflags2 &= ~HF2_NMI_MASK;
3391}
3392
3393void helper_lret_protected(int shift, int addend)
3394{
3395 helper_ret_protected(shift, 0, addend);
3396}
3397
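/* SYSENTER: #GP(0) if SYSENTER_CS is zero; otherwise switch to CPL 0 with flat
 code and stack segments derived from SYSENTER_CS and continue at
 SYSENTER_EIP with SYSENTER_ESP. */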
3398void helper_sysenter(void)
3399{
3400 if (env->sysenter_cs == 0) {
3401 raise_exception_err(EXCP0D_GPF, 0);
3402 }
3403 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3404 cpu_x86_set_cpl(env, 0);
3405
3406#ifdef TARGET_X86_64
3407 if (env->hflags & HF_LMA_MASK) {
3408 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3409 0, 0xffffffff,
3410 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3411 DESC_S_MASK |
3412 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3413 } else
3414#endif
3415 {
3416 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3417 0, 0xffffffff,
3418 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3419 DESC_S_MASK |
3420 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3421 }
3422 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3423 0, 0xffffffff,
3424 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3425 DESC_S_MASK |
3426 DESC_W_MASK | DESC_A_MASK);
3427 ESP = env->sysenter_esp;
3428 EIP = env->sysenter_eip;
3429}
3430
3431void helper_sysexit(int dflag)
3432{
3433 int cpl;
3434
3435 cpl = env->hflags & HF_CPL_MASK;
3436 if (env->sysenter_cs == 0 || cpl != 0) {
3437 raise_exception_err(EXCP0D_GPF, 0);
3438 }
3439 cpu_x86_set_cpl(env, 3);
3440#ifdef TARGET_X86_64
3441 if (dflag == 2) {
3442 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3443 0, 0xffffffff,
3444 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3445 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3446 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3447 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3448 0, 0xffffffff,
3449 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3450 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3451 DESC_W_MASK | DESC_A_MASK);
3452 } else
3453#endif
3454 {
3455 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3456 0, 0xffffffff,
3457 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3458 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3459 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3460 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3461 0, 0xffffffff,
3462 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3463 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3464 DESC_W_MASK | DESC_A_MASK);
3465 }
3466 ESP = ECX;
3467 EIP = EDX;
3468}
3469
3470#if defined(CONFIG_USER_ONLY)
3471target_ulong helper_read_crN(int reg)
3472{
3473 return 0;
3474}
3475
3476void helper_write_crN(int reg, target_ulong t0)
3477{
3478}
3479
3480void helper_movl_drN_T0(int reg, target_ulong t0)
3481{
3482}
3483#else
3484target_ulong helper_read_crN(int reg)
3485{
3486 target_ulong val;
3487
3488 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3489 switch(reg) {
3490 default:
3491 val = env->cr[reg];
3492 break;
3493 case 8:
3494 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3495#ifndef VBOX
3496 val = cpu_get_apic_tpr(env->apic_state);
3497#else /* VBOX */
3498 val = cpu_get_apic_tpr(env);
3499#endif /* VBOX */
3500 } else {
3501 val = env->v_tpr;
3502 }
3503 break;
3504 }
3505 return val;
3506}
3507
3508void helper_write_crN(int reg, target_ulong t0)
3509{
3510 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3511 switch(reg) {
3512 case 0:
3513 cpu_x86_update_cr0(env, t0);
3514 break;
3515 case 3:
3516 cpu_x86_update_cr3(env, t0);
3517 break;
3518 case 4:
3519 cpu_x86_update_cr4(env, t0);
3520 break;
3521 case 8:
3522 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3523#ifndef VBOX
3524 cpu_set_apic_tpr(env->apic_state, t0);
3525#else /* VBOX */
3526 cpu_set_apic_tpr(env, t0);
3527#endif /* VBOX */
3528 }
3529 env->v_tpr = t0 & 0x0f;
3530 break;
3531 default:
3532 env->cr[reg] = t0;
3533 break;
3534 }
3535}
3536
3537void helper_movl_drN_T0(int reg, target_ulong t0)
3538{
3539 int i;
3540
3541 if (reg < 4) {
3542 hw_breakpoint_remove(env, reg);
3543 env->dr[reg] = t0;
3544 hw_breakpoint_insert(env, reg);
3545 } else if (reg == 7) {
3546 for (i = 0; i < 4; i++)
3547 hw_breakpoint_remove(env, i);
3548 env->dr[7] = t0;
3549 for (i = 0; i < 4; i++)
3550 hw_breakpoint_insert(env, i);
3551 } else
3552 env->dr[reg] = t0;
3553}
3554#endif
3555
3556void helper_lmsw(target_ulong t0)
3557{
3558 /* LMSW modifies only the 4 lower bits of CR0. PE cannot be cleared
3559 once it has been set. */
3560 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3561 helper_write_crN(0, t0);
3562}
3563
3564void helper_clts(void)
3565{
3566 env->cr[0] &= ~CR0_TS_MASK;
3567 env->hflags &= ~HF_TS_MASK;
3568}
3569
3570void helper_invlpg(target_ulong addr)
3571{
3572 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3573 tlb_flush_page(env, addr);
3574}
3575
3576void helper_rdtsc(void)
3577{
3578 uint64_t val;
3579
3580 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3581 raise_exception(EXCP0D_GPF);
3582 }
3583 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3584
3585 val = cpu_get_tsc(env) + env->tsc_offset;
3586 EAX = (uint32_t)(val);
3587 EDX = (uint32_t)(val >> 32);
3588}
3589
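/* RDTSCP: RDTSC plus the TSC_AUX value in ECX; the VirtualBox build reads
 TSC_AUX through cpu_rdmsr(MSR_K8_TSC_AUX) rather than a cached field. */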
3590void helper_rdtscp(void)
3591{
3592 helper_rdtsc();
3593#ifndef VBOX
3594 ECX = (uint32_t)(env->tsc_aux);
3595#else /* VBOX */
3596 uint64_t val;
3597 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3598 ECX = (uint32_t)(val);
3599 else
3600 ECX = 0;
3601#endif /* VBOX */
3602}
3603
3604void helper_rdpmc(void)
3605{
3606#ifdef VBOX
3607 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3608 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3609 raise_exception(EXCP0D_GPF);
3610 }
3611 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3612 EAX = 0;
3613 EDX = 0;
3614#else /* !VBOX */
3615 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3616 raise_exception(EXCP0D_GPF);
3617 }
3618 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3619
3620 /* currently unimplemented */
3621 raise_exception_err(EXCP06_ILLOP, 0);
3622#endif /* !VBOX */
3623}
3624
3625#if defined(CONFIG_USER_ONLY)
3626void helper_wrmsr(void)
3627{
3628}
3629
3630void helper_rdmsr(void)
3631{
3632}
3633#else
3634void helper_wrmsr(void)
3635{
3636 uint64_t val;
3637
3638 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3639
3640 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3641
3642 switch((uint32_t)ECX) {
3643 case MSR_IA32_SYSENTER_CS:
3644 env->sysenter_cs = val & 0xffff;
3645 break;
3646 case MSR_IA32_SYSENTER_ESP:
3647 env->sysenter_esp = val;
3648 break;
3649 case MSR_IA32_SYSENTER_EIP:
3650 env->sysenter_eip = val;
3651 break;
3652 case MSR_IA32_APICBASE:
3653# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3654 cpu_set_apic_base(env->apic_state, val);
3655# endif
3656 break;
3657 case MSR_EFER:
3658 {
3659 uint64_t update_mask;
3660 update_mask = 0;
3661 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3662 update_mask |= MSR_EFER_SCE;
3663 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3664 update_mask |= MSR_EFER_LME;
3665 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3666 update_mask |= MSR_EFER_FFXSR;
3667 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3668 update_mask |= MSR_EFER_NXE;
3669 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3670 update_mask |= MSR_EFER_SVME;
3673 cpu_load_efer(env, (env->efer & ~update_mask) |
3674 (val & update_mask));
3675 }
3676 break;
3677 case MSR_STAR:
3678 env->star = val;
3679 break;
3680 case MSR_PAT:
3681 env->pat = val;
3682 break;
3683 case MSR_VM_HSAVE_PA:
3684 env->vm_hsave = val;
3685 break;
3686#ifdef TARGET_X86_64
3687 case MSR_LSTAR:
3688 env->lstar = val;
3689 break;
3690 case MSR_CSTAR:
3691 env->cstar = val;
3692 break;
3693 case MSR_FMASK:
3694 env->fmask = val;
3695 break;
3696 case MSR_FSBASE:
3697 env->segs[R_FS].base = val;
3698 break;
3699 case MSR_GSBASE:
3700 env->segs[R_GS].base = val;
3701 break;
3702 case MSR_KERNELGSBASE:
3703 env->kernelgsbase = val;
3704 break;
3705#endif
3706# ifndef VBOX
3707 case MSR_MTRRphysBase(0):
3708 case MSR_MTRRphysBase(1):
3709 case MSR_MTRRphysBase(2):
3710 case MSR_MTRRphysBase(3):
3711 case MSR_MTRRphysBase(4):
3712 case MSR_MTRRphysBase(5):
3713 case MSR_MTRRphysBase(6):
3714 case MSR_MTRRphysBase(7):
3715 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3716 break;
3717 case MSR_MTRRphysMask(0):
3718 case MSR_MTRRphysMask(1):
3719 case MSR_MTRRphysMask(2):
3720 case MSR_MTRRphysMask(3):
3721 case MSR_MTRRphysMask(4):
3722 case MSR_MTRRphysMask(5):
3723 case MSR_MTRRphysMask(6):
3724 case MSR_MTRRphysMask(7):
3725 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3726 break;
3727 case MSR_MTRRfix64K_00000:
3728 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3729 break;
3730 case MSR_MTRRfix16K_80000:
3731 case MSR_MTRRfix16K_A0000:
3732 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3733 break;
3734 case MSR_MTRRfix4K_C0000:
3735 case MSR_MTRRfix4K_C8000:
3736 case MSR_MTRRfix4K_D0000:
3737 case MSR_MTRRfix4K_D8000:
3738 case MSR_MTRRfix4K_E0000:
3739 case MSR_MTRRfix4K_E8000:
3740 case MSR_MTRRfix4K_F0000:
3741 case MSR_MTRRfix4K_F8000:
3742 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3743 break;
3744 case MSR_MTRRdefType:
3745 env->mtrr_deftype = val;
3746 break;
3747 case MSR_MCG_STATUS:
3748 env->mcg_status = val;
3749 break;
3750 case MSR_MCG_CTL:
3751 if ((env->mcg_cap & MCG_CTL_P)
3752 && (val == 0 || val == ~(uint64_t)0))
3753 env->mcg_ctl = val;
3754 break;
3755 case MSR_TSC_AUX:
3756 env->tsc_aux = val;
3757 break;
3758# endif /* !VBOX */
3759 default:
3760# ifndef VBOX
3761 if ((uint32_t)ECX >= MSR_MC0_CTL
3762 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
3763 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3764 if ((offset & 0x3) != 0
3765 || (val == 0 || val == ~(uint64_t)0))
3766 env->mce_banks[offset] = val;
3767 break;
3768 }
3769 /* XXX: exception ? */
3770# endif
3771 break;
3772 }
3773
3774# ifdef VBOX
3775 /* call CPUM. */
3776 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3777 {
3778 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3779 }
3780# endif
3781}
3782
3783void helper_rdmsr(void)
3784{
3785 uint64_t val;
3786
3787 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3788
3789 switch((uint32_t)ECX) {
3790 case MSR_IA32_SYSENTER_CS:
3791 val = env->sysenter_cs;
3792 break;
3793 case MSR_IA32_SYSENTER_ESP:
3794 val = env->sysenter_esp;
3795 break;
3796 case MSR_IA32_SYSENTER_EIP:
3797 val = env->sysenter_eip;
3798 break;
3799 case MSR_IA32_APICBASE:
3800#ifndef VBOX
3801 val = cpu_get_apic_base(env->apic_state);
3802#else /* VBOX */
3803 val = cpu_get_apic_base(env);
3804#endif /* VBOX */
3805 break;
3806 case MSR_EFER:
3807 val = env->efer;
3808 break;
3809 case MSR_STAR:
3810 val = env->star;
3811 break;
3812 case MSR_PAT:
3813 val = env->pat;
3814 break;
3815 case MSR_VM_HSAVE_PA:
3816 val = env->vm_hsave;
3817 break;
3818# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3819 case MSR_IA32_PERF_STATUS:
3820 /* tsc_increment_by_tick */
3821 val = 1000ULL;
3822 /* CPU multiplier */
3823 val |= (((uint64_t)4ULL) << 40);
3824 break;
3825# endif /* !VBOX */
3826#ifdef TARGET_X86_64
3827 case MSR_LSTAR:
3828 val = env->lstar;
3829 break;
3830 case MSR_CSTAR:
3831 val = env->cstar;
3832 break;
3833 case MSR_FMASK:
3834 val = env->fmask;
3835 break;
3836 case MSR_FSBASE:
3837 val = env->segs[R_FS].base;
3838 break;
3839 case MSR_GSBASE:
3840 val = env->segs[R_GS].base;
3841 break;
3842 case MSR_KERNELGSBASE:
3843 val = env->kernelgsbase;
3844 break;
3845# ifndef VBOX
3846 case MSR_TSC_AUX:
3847 val = env->tsc_aux;
3848 break;
3849# endif /*!VBOX*/
3850#endif
3851# ifndef VBOX
3852 case MSR_MTRRphysBase(0):
3853 case MSR_MTRRphysBase(1):
3854 case MSR_MTRRphysBase(2):
3855 case MSR_MTRRphysBase(3):
3856 case MSR_MTRRphysBase(4):
3857 case MSR_MTRRphysBase(5):
3858 case MSR_MTRRphysBase(6):
3859 case MSR_MTRRphysBase(7):
3860 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3861 break;
3862 case MSR_MTRRphysMask(0):
3863 case MSR_MTRRphysMask(1):
3864 case MSR_MTRRphysMask(2):
3865 case MSR_MTRRphysMask(3):
3866 case MSR_MTRRphysMask(4):
3867 case MSR_MTRRphysMask(5):
3868 case MSR_MTRRphysMask(6):
3869 case MSR_MTRRphysMask(7):
3870 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3871 break;
3872 case MSR_MTRRfix64K_00000:
3873 val = env->mtrr_fixed[0];
3874 break;
3875 case MSR_MTRRfix16K_80000:
3876 case MSR_MTRRfix16K_A0000:
3877 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3878 break;
3879 case MSR_MTRRfix4K_C0000:
3880 case MSR_MTRRfix4K_C8000:
3881 case MSR_MTRRfix4K_D0000:
3882 case MSR_MTRRfix4K_D8000:
3883 case MSR_MTRRfix4K_E0000:
3884 case MSR_MTRRfix4K_E8000:
3885 case MSR_MTRRfix4K_F0000:
3886 case MSR_MTRRfix4K_F8000:
3887 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3888 break;
3889 case MSR_MTRRdefType:
3890 val = env->mtrr_deftype;
3891 break;
3892 case MSR_MTRRcap:
3893 if (env->cpuid_features & CPUID_MTRR)
3894 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3895 else
3896 /* XXX: exception ? */
3897 val = 0;
3898 break;
3899 case MSR_MCG_CAP:
3900 val = env->mcg_cap;
3901 break;
3902 case MSR_MCG_CTL:
3903 if (env->mcg_cap & MCG_CTL_P)
3904 val = env->mcg_ctl;
3905 else
3906 val = 0;
3907 break;
3908 case MSR_MCG_STATUS:
3909 val = env->mcg_status;
3910 break;
3911# endif /* !VBOX */
3912 default:
3913# ifndef VBOX
3914 if ((uint32_t)ECX >= MSR_MC0_CTL
3915 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3916 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3917 val = env->mce_banks[offset];
3918 break;
3919 }
3920 /* XXX: exception ? */
3921 val = 0;
3922# else /* VBOX */
3923 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3924 {
3925 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3926 val = 0;
3927 }
3928# endif /* VBOX */
3929 break;
3930 }
3931 EAX = (uint32_t)(val);
3932 EDX = (uint32_t)(val >> 32);
3933
3934# ifdef VBOX_STRICT
3935 if ((uint32_t)ECX != MSR_IA32_TSC) {
3936 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3937 val = 0;
3938 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3939 }
3940# endif
3941}
3942#endif
3943
3944target_ulong helper_lsl(target_ulong selector1)
3945{
3946 unsigned int limit;
3947 uint32_t e1, e2, eflags, selector;
3948 int rpl, dpl, cpl, type;
3949
3950 selector = selector1 & 0xffff;
3951 eflags = helper_cc_compute_all(CC_OP);
3952 if ((selector & 0xfffc) == 0)
3953 goto fail;
3954 if (load_segment(&e1, &e2, selector) != 0)
3955 goto fail;
3956 rpl = selector & 3;
3957 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3958 cpl = env->hflags & HF_CPL_MASK;
3959 if (e2 & DESC_S_MASK) {
3960 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3961 /* conforming */
3962 } else {
3963 if (dpl < cpl || dpl < rpl)
3964 goto fail;
3965 }
3966 } else {
3967 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
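        /* System descriptor types that report a limit for LSL:
           1/3 = 16-bit TSS (available/busy), 2 = LDT, 9/11 = 32-bit TSS (available/busy). */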
3968 switch(type) {
3969 case 1:
3970 case 2:
3971 case 3:
3972 case 9:
3973 case 11:
3974 break;
3975 default:
3976 goto fail;
3977 }
3978 if (dpl < cpl || dpl < rpl) {
3979 fail:
3980 CC_SRC = eflags & ~CC_Z;
3981 return 0;
3982 }
3983 }
3984 limit = get_seg_limit(e1, e2);
3985 CC_SRC = eflags | CC_Z;
3986 return limit;
3987}
3988
3989target_ulong helper_lar(target_ulong selector1)
3990{
3991 uint32_t e1, e2, eflags, selector;
3992 int rpl, dpl, cpl, type;
3993
3994 selector = selector1 & 0xffff;
3995 eflags = helper_cc_compute_all(CC_OP);
3996 if ((selector & 0xfffc) == 0)
3997 goto fail;
3998 if (load_segment(&e1, &e2, selector) != 0)
3999 goto fail;
4000 rpl = selector & 3;
4001 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4002 cpl = env->hflags & HF_CPL_MASK;
4003 if (e2 & DESC_S_MASK) {
4004 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4005 /* conforming */
4006 } else {
4007 if (dpl < cpl || dpl < rpl)
4008 goto fail;
4009 }
4010 } else {
4011 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
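        /* System descriptor types LAR accepts: 1/3 = 16-bit TSS, 2 = LDT, 4 = 16-bit call gate,
           5 = task gate, 9/11 = 32-bit TSS, 12 = 32-bit call gate. */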
4012 switch(type) {
4013 case 1:
4014 case 2:
4015 case 3:
4016 case 4:
4017 case 5:
4018 case 9:
4019 case 11:
4020 case 12:
4021 break;
4022 default:
4023 goto fail;
4024 }
4025 if (dpl < cpl || dpl < rpl) {
4026 fail:
4027 CC_SRC = eflags & ~CC_Z;
4028 return 0;
4029 }
4030 }
4031 CC_SRC = eflags | CC_Z;
4032 return e2 & 0x00f0ff00;
4033}
4034
4035void helper_verr(target_ulong selector1)
4036{
4037 uint32_t e1, e2, eflags, selector;
4038 int rpl, dpl, cpl;
4039
4040 selector = selector1 & 0xffff;
4041 eflags = helper_cc_compute_all(CC_OP);
4042 if ((selector & 0xfffc) == 0)
4043 goto fail;
4044 if (load_segment(&e1, &e2, selector) != 0)
4045 goto fail;
4046 if (!(e2 & DESC_S_MASK))
4047 goto fail;
4048 rpl = selector & 3;
4049 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4050 cpl = env->hflags & HF_CPL_MASK;
4051 if (e2 & DESC_CS_MASK) {
4052 if (!(e2 & DESC_R_MASK))
4053 goto fail;
4054 if (!(e2 & DESC_C_MASK)) {
4055 if (dpl < cpl || dpl < rpl)
4056 goto fail;
4057 }
4058 } else {
4059 if (dpl < cpl || dpl < rpl) {
4060 fail:
4061 CC_SRC = eflags & ~CC_Z;
4062 return;
4063 }
4064 }
4065 CC_SRC = eflags | CC_Z;
4066}
4067
4068void helper_verw(target_ulong selector1)
4069{
4070 uint32_t e1, e2, eflags, selector;
4071 int rpl, dpl, cpl;
4072
4073 selector = selector1 & 0xffff;
4074 eflags = helper_cc_compute_all(CC_OP);
4075 if ((selector & 0xfffc) == 0)
4076 goto fail;
4077 if (load_segment(&e1, &e2, selector) != 0)
4078 goto fail;
4079 if (!(e2 & DESC_S_MASK))
4080 goto fail;
4081 rpl = selector & 3;
4082 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4083 cpl = env->hflags & HF_CPL_MASK;
4084 if (e2 & DESC_CS_MASK) {
4085 goto fail;
4086 } else {
4087 if (dpl < cpl || dpl < rpl)
4088 goto fail;
4089 if (!(e2 & DESC_W_MASK)) {
4090 fail:
4091 CC_SRC = eflags & ~CC_Z;
4092 return;
4093 }
4094 }
4095 CC_SRC = eflags | CC_Z;
4096}
4097
4098/* x87 FPU helpers */
4099
4100static void fpu_set_exception(int mask)
4101{
4102 env->fpus |= mask;
4103 if (env->fpus & (~env->fpuc & FPUC_EM))
4104 env->fpus |= FPUS_SE | FPUS_B;
4105}
4106
4107static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4108{
4109 if (b == 0.0)
4110 fpu_set_exception(FPUS_ZE);
4111 return a / b;
4112}
4113
4114static void fpu_raise_exception(void)
4115{
4116 if (env->cr[0] & CR0_NE_MASK) {
4117 raise_exception(EXCP10_COPR);
4118 }
4119#if !defined(CONFIG_USER_ONLY)
4120 else {
4121 cpu_set_ferr(env);
4122 }
4123#endif
4124}
4125
4126void helper_flds_FT0(uint32_t val)
4127{
4128 union {
4129 float32 f;
4130 uint32_t i;
4131 } u;
4132 u.i = val;
4133 FT0 = float32_to_floatx(u.f, &env->fp_status);
4134}
4135
4136void helper_fldl_FT0(uint64_t val)
4137{
4138 union {
4139 float64 f;
4140 uint64_t i;
4141 } u;
4142 u.i = val;
4143 FT0 = float64_to_floatx(u.f, &env->fp_status);
4144}
4145
4146void helper_fildl_FT0(int32_t val)
4147{
4148 FT0 = int32_to_floatx(val, &env->fp_status);
4149}
4150
4151void helper_flds_ST0(uint32_t val)
4152{
4153 int new_fpstt;
4154 union {
4155 float32 f;
4156 uint32_t i;
4157 } u;
4158 new_fpstt = (env->fpstt - 1) & 7;
4159 u.i = val;
4160 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4161 env->fpstt = new_fpstt;
4162 env->fptags[new_fpstt] = 0; /* validate stack entry */
4163}
4164
4165void helper_fldl_ST0(uint64_t val)
4166{
4167 int new_fpstt;
4168 union {
4169 float64 f;
4170 uint64_t i;
4171 } u;
4172 new_fpstt = (env->fpstt - 1) & 7;
4173 u.i = val;
4174 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4175 env->fpstt = new_fpstt;
4176 env->fptags[new_fpstt] = 0; /* validate stack entry */
4177}
4178
4179void helper_fildl_ST0(int32_t val)
4180{
4181 int new_fpstt;
4182 new_fpstt = (env->fpstt - 1) & 7;
4183 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4184 env->fpstt = new_fpstt;
4185 env->fptags[new_fpstt] = 0; /* validate stack entry */
4186}
4187
4188void helper_fildll_ST0(int64_t val)
4189{
4190 int new_fpstt;
4191 new_fpstt = (env->fpstt - 1) & 7;
4192 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4193 env->fpstt = new_fpstt;
4194 env->fptags[new_fpstt] = 0; /* validate stack entry */
4195}
4196
4197#ifndef VBOX
4198uint32_t helper_fsts_ST0(void)
4199#else
4200RTCCUINTREG helper_fsts_ST0(void)
4201#endif
4202{
4203 union {
4204 float32 f;
4205 uint32_t i;
4206 } u;
4207 u.f = floatx_to_float32(ST0, &env->fp_status);
4208 return u.i;
4209}
4210
4211uint64_t helper_fstl_ST0(void)
4212{
4213 union {
4214 float64 f;
4215 uint64_t i;
4216 } u;
4217 u.f = floatx_to_float64(ST0, &env->fp_status);
4218 return u.i;
4219}
4220
4221#ifndef VBOX
4222int32_t helper_fist_ST0(void)
4223#else
4224RTCCINTREG helper_fist_ST0(void)
4225#endif
4226{
4227 int32_t val;
4228 val = floatx_to_int32(ST0, &env->fp_status);
4229 if (val != (int16_t)val)
4230 val = -32768;
4231 return val;
4232}
4233
4234#ifndef VBOX
4235int32_t helper_fistl_ST0(void)
4236#else
4237RTCCINTREG helper_fistl_ST0(void)
4238#endif
4239{
4240 int32_t val;
4241 val = floatx_to_int32(ST0, &env->fp_status);
4242 return val;
4243}
4244
4245int64_t helper_fistll_ST0(void)
4246{
4247 int64_t val;
4248 val = floatx_to_int64(ST0, &env->fp_status);
4249 return val;
4250}
4251
4252#ifndef VBOX
4253int32_t helper_fistt_ST0(void)
4254#else
4255RTCCINTREG helper_fistt_ST0(void)
4256#endif
4257{
4258 int32_t val;
4259 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4260 if (val != (int16_t)val)
4261 val = -32768;
4262 return val;
4263}
4264
4265#ifndef VBOX
4266int32_t helper_fisttl_ST0(void)
4267#else
4268RTCCINTREG helper_fisttl_ST0(void)
4269#endif
4270{
4271 int32_t val;
4272 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4273 return val;
4274}
4275
4276int64_t helper_fisttll_ST0(void)
4277{
4278 int64_t val;
4279 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4280 return val;
4281}
4282
4283void helper_fldt_ST0(target_ulong ptr)
4284{
4285 int new_fpstt;
4286 new_fpstt = (env->fpstt - 1) & 7;
4287 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4288 env->fpstt = new_fpstt;
4289 env->fptags[new_fpstt] = 0; /* validate stack entry */
4290}
4291
4292void helper_fstt_ST0(target_ulong ptr)
4293{
4294 helper_fstt(ST0, ptr);
4295}
4296
4297void helper_fpush(void)
4298{
4299 fpush();
4300}
4301
4302void helper_fpop(void)
4303{
4304 fpop();
4305}
4306
4307void helper_fdecstp(void)
4308{
4309 env->fpstt = (env->fpstt - 1) & 7;
4310 env->fpus &= (~0x4700);
4311}
4312
4313void helper_fincstp(void)
4314{
4315 env->fpstt = (env->fpstt + 1) & 7;
4316 env->fpus &= (~0x4700);
4317}
4318
4319/* FPU move */
4320
4321void helper_ffree_STN(int st_index)
4322{
4323 env->fptags[(env->fpstt + st_index) & 7] = 1;
4324}
4325
4326void helper_fmov_ST0_FT0(void)
4327{
4328 ST0 = FT0;
4329}
4330
4331void helper_fmov_FT0_STN(int st_index)
4332{
4333 FT0 = ST(st_index);
4334}
4335
4336void helper_fmov_ST0_STN(int st_index)
4337{
4338 ST0 = ST(st_index);
4339}
4340
4341void helper_fmov_STN_ST0(int st_index)
4342{
4343 ST(st_index) = ST0;
4344}
4345
4346void helper_fxchg_ST0_STN(int st_index)
4347{
4348 CPU86_LDouble tmp;
4349 tmp = ST(st_index);
4350 ST(st_index) = ST0;
4351 ST0 = tmp;
4352}
4353
4354/* FPU operations */
4355
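/* Maps floatx_compare() results (-1 less, 0 equal, 1 greater, 2 unordered), indexed by
   ret + 1, to the x87 condition bits: C0 (0x0100), C3 (0x4000), none, C3|C2|C0 (0x4500). */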
4356static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4357
4358void helper_fcom_ST0_FT0(void)
4359{
4360 int ret;
4361
4362 ret = floatx_compare(ST0, FT0, &env->fp_status);
4363 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4364}
4365
4366void helper_fucom_ST0_FT0(void)
4367{
4368 int ret;
4369
4370 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4371    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4372}
4373
4374static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4375
4376void helper_fcomi_ST0_FT0(void)
4377{
4378 int eflags;
4379 int ret;
4380
4381 ret = floatx_compare(ST0, FT0, &env->fp_status);
4382 eflags = helper_cc_compute_all(CC_OP);
4383 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4384 CC_SRC = eflags;
4385}
4386
4387void helper_fucomi_ST0_FT0(void)
4388{
4389 int eflags;
4390 int ret;
4391
4392 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4393 eflags = helper_cc_compute_all(CC_OP);
4394 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4395 CC_SRC = eflags;
4396}
4397
4398void helper_fadd_ST0_FT0(void)
4399{
4400 ST0 += FT0;
4401}
4402
4403void helper_fmul_ST0_FT0(void)
4404{
4405 ST0 *= FT0;
4406}
4407
4408void helper_fsub_ST0_FT0(void)
4409{
4410 ST0 -= FT0;
4411}
4412
4413void helper_fsubr_ST0_FT0(void)
4414{
4415 ST0 = FT0 - ST0;
4416}
4417
4418void helper_fdiv_ST0_FT0(void)
4419{
4420 ST0 = helper_fdiv(ST0, FT0);
4421}
4422
4423void helper_fdivr_ST0_FT0(void)
4424{
4425 ST0 = helper_fdiv(FT0, ST0);
4426}
4427
4428/* fp operations between STN and ST0 */
4429
4430void helper_fadd_STN_ST0(int st_index)
4431{
4432 ST(st_index) += ST0;
4433}
4434
4435void helper_fmul_STN_ST0(int st_index)
4436{
4437 ST(st_index) *= ST0;
4438}
4439
4440void helper_fsub_STN_ST0(int st_index)
4441{
4442 ST(st_index) -= ST0;
4443}
4444
4445void helper_fsubr_STN_ST0(int st_index)
4446{
4447 CPU86_LDouble *p;
4448 p = &ST(st_index);
4449 *p = ST0 - *p;
4450}
4451
4452void helper_fdiv_STN_ST0(int st_index)
4453{
4454 CPU86_LDouble *p;
4455 p = &ST(st_index);
4456 *p = helper_fdiv(*p, ST0);
4457}
4458
4459void helper_fdivr_STN_ST0(int st_index)
4460{
4461 CPU86_LDouble *p;
4462 p = &ST(st_index);
4463 *p = helper_fdiv(ST0, *p);
4464}
4465
4466/* misc FPU operations */
4467void helper_fchs_ST0(void)
4468{
4469 ST0 = floatx_chs(ST0);
4470}
4471
4472void helper_fabs_ST0(void)
4473{
4474 ST0 = floatx_abs(ST0);
4475}
4476
4477void helper_fld1_ST0(void)
4478{
4479 ST0 = f15rk[1];
4480}
4481
4482void helper_fldl2t_ST0(void)
4483{
4484 ST0 = f15rk[6];
4485}
4486
4487void helper_fldl2e_ST0(void)
4488{
4489 ST0 = f15rk[5];
4490}
4491
4492void helper_fldpi_ST0(void)
4493{
4494 ST0 = f15rk[2];
4495}
4496
4497void helper_fldlg2_ST0(void)
4498{
4499 ST0 = f15rk[3];
4500}
4501
4502void helper_fldln2_ST0(void)
4503{
4504 ST0 = f15rk[4];
4505}
4506
4507void helper_fldz_ST0(void)
4508{
4509 ST0 = f15rk[0];
4510}
4511
4512void helper_fldz_FT0(void)
4513{
4514 FT0 = f15rk[0];
4515}
4516
4517#ifndef VBOX
4518uint32_t helper_fnstsw(void)
4519#else
4520RTCCUINTREG helper_fnstsw(void)
4521#endif
4522{
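    /* Status word with the current top-of-stack index inserted into the TOP field (bits 11-13). */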
4523 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4524}
4525
4526#ifndef VBOX
4527uint32_t helper_fnstcw(void)
4528#else
4529RTCCUINTREG helper_fnstcw(void)
4530#endif
4531{
4532 return env->fpuc;
4533}
4534
4535static void update_fp_status(void)
4536{
4537 int rnd_type;
4538
4539 /* set rounding mode */
4540 switch(env->fpuc & RC_MASK) {
4541 default:
4542 case RC_NEAR:
4543 rnd_type = float_round_nearest_even;
4544 break;
4545 case RC_DOWN:
4546 rnd_type = float_round_down;
4547 break;
4548 case RC_UP:
4549 rnd_type = float_round_up;
4550 break;
4551 case RC_CHOP:
4552 rnd_type = float_round_to_zero;
4553 break;
4554 }
4555 set_float_rounding_mode(rnd_type, &env->fp_status);
4556#ifdef FLOATX80
4557 switch((env->fpuc >> 8) & 3) {
4558 case 0:
4559 rnd_type = 32;
4560 break;
4561 case 2:
4562 rnd_type = 64;
4563 break;
4564 case 3:
4565 default:
4566 rnd_type = 80;
4567 break;
4568 }
4569 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4570#endif
4571}
4572
4573void helper_fldcw(uint32_t val)
4574{
4575 env->fpuc = val;
4576 update_fp_status();
4577}
4578
4579void helper_fclex(void)
4580{
4581 env->fpus &= 0x7f00;
4582}
4583
4584void helper_fwait(void)
4585{
4586 if (env->fpus & FPUS_SE)
4587 fpu_raise_exception();
4588}
4589
4590void helper_fninit(void)
4591{
4592 env->fpus = 0;
4593 env->fpstt = 0;
4594 env->fpuc = 0x37f;
4595 env->fptags[0] = 1;
4596 env->fptags[1] = 1;
4597 env->fptags[2] = 1;
4598 env->fptags[3] = 1;
4599 env->fptags[4] = 1;
4600 env->fptags[5] = 1;
4601 env->fptags[6] = 1;
4602 env->fptags[7] = 1;
4603}
4604
4605/* BCD ops */
4606
4607void helper_fbld_ST0(target_ulong ptr)
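/* FBLD/FBSTP use an 80-bit packed BCD operand: bytes 0-8 hold 18 BCD digits (two per byte,
   least significant byte first), byte 9 carries the sign in its top bit. */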
4608{
4609 CPU86_LDouble tmp;
4610 uint64_t val;
4611 unsigned int v;
4612 int i;
4613
4614 val = 0;
4615 for(i = 8; i >= 0; i--) {
4616 v = ldub(ptr + i);
4617 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4618 }
4619 tmp = val;
4620 if (ldub(ptr + 9) & 0x80)
4621 tmp = -tmp;
4622 fpush();
4623 ST0 = tmp;
4624}
4625
4626void helper_fbst_ST0(target_ulong ptr)
4627{
4628 int v;
4629 target_ulong mem_ref, mem_end;
4630 int64_t val;
4631
4632 val = floatx_to_int64(ST0, &env->fp_status);
4633 mem_ref = ptr;
4634 mem_end = mem_ref + 9;
4635 if (val < 0) {
4636 stb(mem_end, 0x80);
4637 val = -val;
4638 } else {
4639 stb(mem_end, 0x00);
4640 }
4641 while (mem_ref < mem_end) {
4642 if (val == 0)
4643 break;
4644 v = val % 100;
4645 val = val / 100;
4646 v = ((v / 10) << 4) | (v % 10);
4647 stb(mem_ref++, v);
4648 }
4649 while (mem_ref < mem_end) {
4650 stb(mem_ref++, 0);
4651 }
4652}
4653
4654void helper_f2xm1(void)
4655{
4656 ST0 = pow(2.0,ST0) - 1.0;
4657}
4658
4659void helper_fyl2x(void)
4660{
4661 CPU86_LDouble fptemp;
4662
4663 fptemp = ST0;
4664 if (fptemp>0.0){
4665 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4666 ST1 *= fptemp;
4667 fpop();
4668 } else {
4669 env->fpus &= (~0x4700);
4670 env->fpus |= 0x400;
4671 }
4672}
4673
4674void helper_fptan(void)
4675{
4676 CPU86_LDouble fptemp;
4677
4678 fptemp = ST0;
4679 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4680 env->fpus |= 0x400;
4681 } else {
4682 ST0 = tan(fptemp);
4683 fpush();
4684 ST0 = 1.0;
4685 env->fpus &= (~0x400); /* C2 <-- 0 */
4686 /* the above code is for |arg| < 2**52 only */
4687 }
4688}
4689
4690void helper_fpatan(void)
4691{
4692 CPU86_LDouble fptemp, fpsrcop;
4693
4694 fpsrcop = ST1;
4695 fptemp = ST0;
4696 ST1 = atan2(fpsrcop,fptemp);
4697 fpop();
4698}
4699
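/* FXTRACT: ST1 receives the unbiased exponent of the original ST0 and ST0 its significand
   (exponent forced back to the bias, i.e. scaled into [1,2)). */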
4700void helper_fxtract(void)
4701{
4702 CPU86_LDoubleU temp;
4703 unsigned int expdif;
4704
4705 temp.d = ST0;
4706 expdif = EXPD(temp) - EXPBIAS;
4707 /*DP exponent bias*/
4708 ST0 = expdif;
4709 fpush();
4710 BIASEXPONENT(temp);
4711 ST0 = temp.d;
4712}
4713
4714void helper_fprem1(void)
4715{
4716 CPU86_LDouble dblq, fpsrcop, fptemp;
4717 CPU86_LDoubleU fpsrcop1, fptemp1;
4718 int expdif;
4719 signed long long int q;
4720
4721#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4722 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4723#else
4724 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4725#endif
4726 ST0 = 0.0 / 0.0; /* NaN */
4727 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4728 return;
4729 }
4730
4731 fpsrcop = ST0;
4732 fptemp = ST1;
4733 fpsrcop1.d = fpsrcop;
4734 fptemp1.d = fptemp;
4735 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4736
4737 if (expdif < 0) {
4738 /* optimisation? taken from the AMD docs */
4739 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4740 /* ST0 is unchanged */
4741 return;
4742 }
4743
4744 if (expdif < 53) {
4745 dblq = fpsrcop / fptemp;
4746 /* round dblq towards nearest integer */
4747 dblq = rint(dblq);
4748 ST0 = fpsrcop - fptemp * dblq;
4749
4750 /* convert dblq to q by truncating towards zero */
4751 if (dblq < 0.0)
4752 q = (signed long long int)(-dblq);
4753 else
4754 q = (signed long long int)dblq;
4755
4756 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4757 /* (C0,C3,C1) <-- (q2,q1,q0) */
4758 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4759 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4760 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4761 } else {
4762 env->fpus |= 0x400; /* C2 <-- 1 */
4763 fptemp = pow(2.0, expdif - 50);
4764 fpsrcop = (ST0 / ST1) / fptemp;
4765 /* fpsrcop = integer obtained by chopping */
4766 fpsrcop = (fpsrcop < 0.0) ?
4767 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4768 ST0 -= (ST1 * fpsrcop * fptemp);
4769 }
4770}
4771
4772void helper_fprem(void)
4773{
4774 CPU86_LDouble dblq, fpsrcop, fptemp;
4775 CPU86_LDoubleU fpsrcop1, fptemp1;
4776 int expdif;
4777 signed long long int q;
4778
4779#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4780 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4781#else
4782 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4783#endif
4784 ST0 = 0.0 / 0.0; /* NaN */
4785 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4786 return;
4787 }
4788
4789 fpsrcop = (CPU86_LDouble)ST0;
4790 fptemp = (CPU86_LDouble)ST1;
4791 fpsrcop1.d = fpsrcop;
4792 fptemp1.d = fptemp;
4793 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4794
4795 if (expdif < 0) {
4796 /* optimisation? taken from the AMD docs */
4797 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4798 /* ST0 is unchanged */
4799 return;
4800 }
4801
4802 if ( expdif < 53 ) {
4803 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4804 /* round dblq towards zero */
4805 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4806 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4807
4808 /* convert dblq to q by truncating towards zero */
4809 if (dblq < 0.0)
4810 q = (signed long long int)(-dblq);
4811 else
4812 q = (signed long long int)dblq;
4813
4814 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4815 /* (C0,C3,C1) <-- (q2,q1,q0) */
4816 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4817 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4818 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4819 } else {
4820 int N = 32 + (expdif % 32); /* as per AMD docs */
4821 env->fpus |= 0x400; /* C2 <-- 1 */
4822 fptemp = pow(2.0, (double)(expdif - N));
4823 fpsrcop = (ST0 / ST1) / fptemp;
4824 /* fpsrcop = integer obtained by chopping */
4825 fpsrcop = (fpsrcop < 0.0) ?
4826 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4827 ST0 -= (ST1 * fpsrcop * fptemp);
4828 }
4829}
4830
4831void helper_fyl2xp1(void)
4832{
4833 CPU86_LDouble fptemp;
4834
4835 fptemp = ST0;
4836 if ((fptemp+1.0)>0.0) {
4837 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4838 ST1 *= fptemp;
4839 fpop();
4840 } else {
4841 env->fpus &= (~0x4700);
4842 env->fpus |= 0x400;
4843 }
4844}
4845
4846void helper_fsqrt(void)
4847{
4848 CPU86_LDouble fptemp;
4849
4850 fptemp = ST0;
4851 if (fptemp<0.0) {
4852 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4853 env->fpus |= 0x400;
4854 }
4855 ST0 = sqrt(fptemp);
4856}
4857
4858void helper_fsincos(void)
4859{
4860 CPU86_LDouble fptemp;
4861
4862 fptemp = ST0;
4863 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4864 env->fpus |= 0x400;
4865 } else {
4866 ST0 = sin(fptemp);
4867 fpush();
4868 ST0 = cos(fptemp);
4869 env->fpus &= (~0x400); /* C2 <-- 0 */
4870 /* the above code is for |arg| < 2**63 only */
4871 }
4872}
4873
4874void helper_frndint(void)
4875{
4876 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4877}
4878
4879void helper_fscale(void)
4880{
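    /* FSCALE: ST0 = ST0 * 2^trunc(ST1); the (int) cast truncates ST1 toward zero as the instruction requires. */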
4881 ST0 = ldexp (ST0, (int)(ST1));
4882}
4883
4884void helper_fsin(void)
4885{
4886 CPU86_LDouble fptemp;
4887
4888 fptemp = ST0;
4889 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4890 env->fpus |= 0x400;
4891 } else {
4892 ST0 = sin(fptemp);
4893 env->fpus &= (~0x400); /* C2 <-- 0 */
4894 /* the above code is for |arg| < 2**53 only */
4895 }
4896}
4897
4898void helper_fcos(void)
4899{
4900 CPU86_LDouble fptemp;
4901
4902 fptemp = ST0;
4903 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4904 env->fpus |= 0x400;
4905 } else {
4906 ST0 = cos(fptemp);
4907 env->fpus &= (~0x400); /* C2 <-- 0 */
4908        /* the above code is for |arg| < 2**63 only */
4909 }
4910}
4911
4912void helper_fxam_ST0(void)
4913{
4914 CPU86_LDoubleU temp;
4915 int expdif;
4916
4917 temp.d = ST0;
4918
4919 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4920 if (SIGND(temp))
4921 env->fpus |= 0x200; /* C1 <-- 1 */
4922
4923 /* XXX: test fptags too */
4924 expdif = EXPD(temp);
4925 if (expdif == MAXEXPD) {
4926#ifdef USE_X86LDOUBLE
4927 if (MANTD(temp) == 0x8000000000000000ULL)
4928#else
4929 if (MANTD(temp) == 0)
4930#endif
4931 env->fpus |= 0x500 /*Infinity*/;
4932 else
4933 env->fpus |= 0x100 /*NaN*/;
4934 } else if (expdif == 0) {
4935 if (MANTD(temp) == 0)
4936 env->fpus |= 0x4000 /*Zero*/;
4937 else
4938 env->fpus |= 0x4400 /*Denormal*/;
4939 } else {
4940 env->fpus |= 0x400;
4941 }
4942}
4943
4944void helper_fstenv(target_ulong ptr, int data32)
4945{
4946 int fpus, fptag, exp, i;
4947 uint64_t mant;
4948 CPU86_LDoubleU tmp;
4949
4950 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4951 fptag = 0;
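    /* Rebuild the full x87 tag word: two bits per register, 00 = valid, 01 = zero,
       10 = special (NaN, infinity, denormal), 11 = empty. */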
4952 for (i=7; i>=0; i--) {
4953 fptag <<= 2;
4954 if (env->fptags[i]) {
4955 fptag |= 3;
4956 } else {
4957 tmp.d = env->fpregs[i].d;
4958 exp = EXPD(tmp);
4959 mant = MANTD(tmp);
4960 if (exp == 0 && mant == 0) {
4961 /* zero */
4962 fptag |= 1;
4963 } else if (exp == 0 || exp == MAXEXPD
4964#ifdef USE_X86LDOUBLE
4965 || (mant & (1LL << 63)) == 0
4966#endif
4967 ) {
4968 /* NaNs, infinity, denormal */
4969 fptag |= 2;
4970 }
4971 }
4972 }
4973 if (data32) {
4974 /* 32 bit */
4975 stl(ptr, env->fpuc);
4976 stl(ptr + 4, fpus);
4977 stl(ptr + 8, fptag);
4978 stl(ptr + 12, 0); /* fpip */
4979 stl(ptr + 16, 0); /* fpcs */
4980 stl(ptr + 20, 0); /* fpoo */
4981 stl(ptr + 24, 0); /* fpos */
4982 } else {
4983 /* 16 bit */
4984 stw(ptr, env->fpuc);
4985 stw(ptr + 2, fpus);
4986 stw(ptr + 4, fptag);
4987 stw(ptr + 6, 0);
4988 stw(ptr + 8, 0);
4989 stw(ptr + 10, 0);
4990 stw(ptr + 12, 0);
4991 }
4992}
4993
4994void helper_fldenv(target_ulong ptr, int data32)
4995{
4996 int i, fpus, fptag;
4997
4998 if (data32) {
4999 env->fpuc = lduw(ptr);
5000 fpus = lduw(ptr + 4);
5001 fptag = lduw(ptr + 8);
5002 }
5003 else {
5004 env->fpuc = lduw(ptr);
5005 fpus = lduw(ptr + 2);
5006 fptag = lduw(ptr + 4);
5007 }
5008 env->fpstt = (fpus >> 11) & 7;
5009 env->fpus = fpus & ~0x3800;
5010 for(i = 0;i < 8; i++) {
5011 env->fptags[i] = ((fptag & 3) == 3);
5012 fptag >>= 2;
5013 }
5014}
5015
5016void helper_fsave(target_ulong ptr, int data32)
5017{
5018 CPU86_LDouble tmp;
5019 int i;
5020
5021 helper_fstenv(ptr, data32);
5022
5023 ptr += (14 << data32);
5024 for(i = 0;i < 8; i++) {
5025 tmp = ST(i);
5026 helper_fstt(tmp, ptr);
5027 ptr += 10;
5028 }
5029
5030 /* fninit */
5031 env->fpus = 0;
5032 env->fpstt = 0;
5033 env->fpuc = 0x37f;
5034 env->fptags[0] = 1;
5035 env->fptags[1] = 1;
5036 env->fptags[2] = 1;
5037 env->fptags[3] = 1;
5038 env->fptags[4] = 1;
5039 env->fptags[5] = 1;
5040 env->fptags[6] = 1;
5041 env->fptags[7] = 1;
5042}
5043
5044void helper_frstor(target_ulong ptr, int data32)
5045{
5046 CPU86_LDouble tmp;
5047 int i;
5048
5049 helper_fldenv(ptr, data32);
5050 ptr += (14 << data32);
5051
5052 for(i = 0;i < 8; i++) {
5053 tmp = helper_fldt(ptr);
5054 ST(i) = tmp;
5055 ptr += 10;
5056 }
5057}
5058
5059void helper_fxsave(target_ulong ptr, int data64)
5060{
5061 int fpus, fptag, i, nb_xmm_regs;
5062 CPU86_LDouble tmp;
5063 target_ulong addr;
5064
5065 /* The operand must be 16 byte aligned */
5066 if (ptr & 0xf) {
5067 raise_exception(EXCP0D_GPF);
5068 }
5069
5070 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5071 fptag = 0;
5072 for(i = 0; i < 8; i++) {
5073 fptag |= (env->fptags[i] << i);
5074 }
5075 stw(ptr, env->fpuc);
5076 stw(ptr + 2, fpus);
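    /* FXSAVE stores an abridged tag word: one bit per register, 1 = in use; env->fptags[] uses 1 = empty, hence the XOR. */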
5077 stw(ptr + 4, fptag ^ 0xff);
5078#ifdef TARGET_X86_64
5079 if (data64) {
5080 stq(ptr + 0x08, 0); /* rip */
5081 stq(ptr + 0x10, 0); /* rdp */
5082 } else
5083#endif
5084 {
5085 stl(ptr + 0x08, 0); /* eip */
5086 stl(ptr + 0x0c, 0); /* sel */
5087 stl(ptr + 0x10, 0); /* dp */
5088 stl(ptr + 0x14, 0); /* sel */
5089 }
5090
5091 addr = ptr + 0x20;
5092 for(i = 0;i < 8; i++) {
5093 tmp = ST(i);
5094 helper_fstt(tmp, addr);
5095 addr += 16;
5096 }
5097
5098 if (env->cr[4] & CR4_OSFXSR_MASK) {
5099 /* XXX: finish it */
5100 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5101 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5102 if (env->hflags & HF_CS64_MASK)
5103 nb_xmm_regs = 16;
5104 else
5105 nb_xmm_regs = 8;
5106 addr = ptr + 0xa0;
5107 /* Fast FXSAVE leaves out the XMM registers */
5108 if (!(env->efer & MSR_EFER_FFXSR)
5109 || (env->hflags & HF_CPL_MASK)
5110 || !(env->hflags & HF_LMA_MASK)) {
5111 for(i = 0; i < nb_xmm_regs; i++) {
5112 stq(addr, env->xmm_regs[i].XMM_Q(0));
5113 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5114 addr += 16;
5115 }
5116 }
5117 }
5118}
5119
5120void helper_fxrstor(target_ulong ptr, int data64)
5121{
5122 int i, fpus, fptag, nb_xmm_regs;
5123 CPU86_LDouble tmp;
5124 target_ulong addr;
5125
5126 /* The operand must be 16 byte aligned */
5127 if (ptr & 0xf) {
5128 raise_exception(EXCP0D_GPF);
5129 }
5130
5131 env->fpuc = lduw(ptr);
5132 fpus = lduw(ptr + 2);
5133 fptag = lduw(ptr + 4);
5134 env->fpstt = (fpus >> 11) & 7;
5135 env->fpus = fpus & ~0x3800;
5136 fptag ^= 0xff;
5137 for(i = 0;i < 8; i++) {
5138 env->fptags[i] = ((fptag >> i) & 1);
5139 }
5140
5141 addr = ptr + 0x20;
5142 for(i = 0;i < 8; i++) {
5143 tmp = helper_fldt(addr);
5144 ST(i) = tmp;
5145 addr += 16;
5146 }
5147
5148 if (env->cr[4] & CR4_OSFXSR_MASK) {
5149 /* XXX: finish it */
5150 env->mxcsr = ldl(ptr + 0x18);
5151 //ldl(ptr + 0x1c);
5152 if (env->hflags & HF_CS64_MASK)
5153 nb_xmm_regs = 16;
5154 else
5155 nb_xmm_regs = 8;
5156 addr = ptr + 0xa0;
5157        /* Fast FXRSTOR leaves out the XMM registers */
5158 if (!(env->efer & MSR_EFER_FFXSR)
5159 || (env->hflags & HF_CPL_MASK)
5160 || !(env->hflags & HF_LMA_MASK)) {
5161 for(i = 0; i < nb_xmm_regs; i++) {
5162#if !defined(VBOX) || __GNUC__ < 4
5163 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5164 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5165#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5166# if 1
5167 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5168 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5169 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5170 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5171# else
5172 /* this works fine on Mac OS X, gcc 4.0.1 */
5173 uint64_t u64 = ldq(addr);
5174            env->xmm_regs[i].XMM_Q(0) = u64;
5175            u64 = ldq(addr + 8);
5176 env->xmm_regs[i].XMM_Q(1) = u64;
5177# endif
5178#endif
5179 addr += 16;
5180 }
5181 }
5182 }
5183}
5184
5185#ifndef USE_X86LDOUBLE
5186
5187void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5188{
5189 CPU86_LDoubleU temp;
5190 int e;
5191
5192 temp.d = f;
5193 /* mantissa */
5194 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5195 /* exponent + sign */
5196 e = EXPD(temp) - EXPBIAS + 16383;
5197 e |= SIGND(temp) >> 16;
5198 *pexp = e;
5199}
5200
5201CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5202{
5203 CPU86_LDoubleU temp;
5204 int e;
5205 uint64_t ll;
5206
5207 /* XXX: handle overflow ? */
5208 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5209 e |= (upper >> 4) & 0x800; /* sign */
5210 ll = (mant >> 11) & ((1LL << 52) - 1);
5211#ifdef __arm__
5212 temp.l.upper = (e << 20) | (ll >> 32);
5213 temp.l.lower = ll;
5214#else
5215 temp.ll = ll | ((uint64_t)e << 52);
5216#endif
5217 return temp.d;
5218}
5219
5220#else
5221
5222void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5223{
5224 CPU86_LDoubleU temp;
5225
5226 temp.d = f;
5227 *pmant = temp.l.lower;
5228 *pexp = temp.l.upper;
5229}
5230
5231CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5232{
5233 CPU86_LDoubleU temp;
5234
5235 temp.l.upper = upper;
5236 temp.l.lower = mant;
5237 return temp.d;
5238}
5239#endif
5240
5241#ifdef TARGET_X86_64
5242
5243//#define DEBUG_MULDIV
5244
5245static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5246{
5247 *plow += a;
5248 /* carry test */
5249 if (*plow < a)
5250 (*phigh)++;
5251 *phigh += b;
5252}
5253
5254static void neg128(uint64_t *plow, uint64_t *phigh)
5255{
5256 *plow = ~ *plow;
5257 *phigh = ~ *phigh;
5258 add128(plow, phigh, 1, 0);
5259}
5260
5261/* return TRUE if overflow */
5262static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5263{
5264 uint64_t q, r, a1, a0;
5265 int i, qb, ab;
5266
5267 a0 = *plow;
5268 a1 = *phigh;
5269 if (a1 == 0) {
5270 q = a0 / b;
5271 r = a0 % b;
5272 *plow = q;
5273 *phigh = r;
5274 } else {
5275 if (a1 >= b)
5276 return 1;
5277 /* XXX: use a better algorithm */
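        /* Restoring shift-and-subtract division: shift the 128-bit dividend left one bit at a
           time, subtracting b whenever the running high part reaches it; after 64 iterations
           a0 holds the quotient and a1 the remainder. */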
5278 for(i = 0; i < 64; i++) {
5279 ab = a1 >> 63;
5280 a1 = (a1 << 1) | (a0 >> 63);
5281 if (ab || a1 >= b) {
5282 a1 -= b;
5283 qb = 1;
5284 } else {
5285 qb = 0;
5286 }
5287 a0 = (a0 << 1) | qb;
5288 }
5289#if defined(DEBUG_MULDIV)
5290 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5291 *phigh, *plow, b, a0, a1);
5292#endif
5293 *plow = a0;
5294 *phigh = a1;
5295 }
5296 return 0;
5297}
5298
5299/* return TRUE if overflow */
5300static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5301{
5302 int sa, sb;
5303 sa = ((int64_t)*phigh < 0);
5304 if (sa)
5305 neg128(plow, phigh);
5306 sb = (b < 0);
5307 if (sb)
5308 b = -b;
5309 if (div64(plow, phigh, b) != 0)
5310 return 1;
5311 if (sa ^ sb) {
5312 if (*plow > (1ULL << 63))
5313 return 1;
5314 *plow = - *plow;
5315 } else {
5316 if (*plow >= (1ULL << 63))
5317 return 1;
5318 }
5319 if (sa)
5320 *phigh = - *phigh;
5321 return 0;
5322}
5323
5324void helper_mulq_EAX_T0(target_ulong t0)
5325{
5326 uint64_t r0, r1;
5327
5328 mulu64(&r0, &r1, EAX, t0);
5329 EAX = r0;
5330 EDX = r1;
5331 CC_DST = r0;
5332 CC_SRC = r1;
5333}
5334
5335void helper_imulq_EAX_T0(target_ulong t0)
5336{
5337 uint64_t r0, r1;
5338
5339 muls64(&r0, &r1, EAX, t0);
5340 EAX = r0;
5341 EDX = r1;
5342 CC_DST = r0;
5343 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5344}
5345
5346target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5347{
5348 uint64_t r0, r1;
5349
5350 muls64(&r0, &r1, t0, t1);
5351 CC_DST = r0;
5352 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5353 return r0;
5354}
5355
5356void helper_divq_EAX(target_ulong t0)
5357{
5358 uint64_t r0, r1;
5359 if (t0 == 0) {
5360 raise_exception(EXCP00_DIVZ);
5361 }
5362 r0 = EAX;
5363 r1 = EDX;
5364 if (div64(&r0, &r1, t0))
5365 raise_exception(EXCP00_DIVZ);
5366 EAX = r0;
5367 EDX = r1;
5368}
5369
5370void helper_idivq_EAX(target_ulong t0)
5371{
5372 uint64_t r0, r1;
5373 if (t0 == 0) {
5374 raise_exception(EXCP00_DIVZ);
5375 }
5376 r0 = EAX;
5377 r1 = EDX;
5378 if (idiv64(&r0, &r1, t0))
5379 raise_exception(EXCP00_DIVZ);
5380 EAX = r0;
5381 EDX = r1;
5382}
5383#endif
5384
5385static void do_hlt(void)
5386{
5387 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5388 env->halted = 1;
5389 env->exception_index = EXCP_HLT;
5390 cpu_loop_exit();
5391}
5392
5393void helper_hlt(int next_eip_addend)
5394{
5395 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5396 EIP += next_eip_addend;
5397
5398 do_hlt();
5399}
5400
5401void helper_monitor(target_ulong ptr)
5402{
5403#ifdef VBOX
5404 if ((uint32_t)ECX > 1)
5405 raise_exception(EXCP0D_GPF);
5406#else /* !VBOX */
5407 if ((uint32_t)ECX != 0)
5408 raise_exception(EXCP0D_GPF);
5409#endif /* !VBOX */
5410 /* XXX: store address ? */
5411 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5412}
5413
5414void helper_mwait(int next_eip_addend)
5415{
5416 if ((uint32_t)ECX != 0)
5417 raise_exception(EXCP0D_GPF);
5418#ifdef VBOX
5419 helper_hlt(next_eip_addend);
5420#else /* !VBOX */
5421 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5422 EIP += next_eip_addend;
5423
5424 /* XXX: not complete but not completely erroneous */
5425 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5426 /* more than one CPU: do not sleep because another CPU may
5427 wake this one */
5428 } else {
5429 do_hlt();
5430 }
5431#endif /* !VBOX */
5432}
5433
5434void helper_debug(void)
5435{
5436 env->exception_index = EXCP_DEBUG;
5437 cpu_loop_exit();
5438}
5439
5440void helper_reset_rf(void)
5441{
5442 env->eflags &= ~RF_MASK;
5443}
5444
5445void helper_raise_interrupt(int intno, int next_eip_addend)
5446{
5447 raise_interrupt(intno, 1, 0, next_eip_addend);
5448}
5449
5450void helper_raise_exception(int exception_index)
5451{
5452 raise_exception(exception_index);
5453}
5454
5455void helper_cli(void)
5456{
5457 env->eflags &= ~IF_MASK;
5458}
5459
5460void helper_sti(void)
5461{
5462 env->eflags |= IF_MASK;
5463}
5464
5465#ifdef VBOX
5466void helper_cli_vme(void)
5467{
5468 env->eflags &= ~VIF_MASK;
5469}
5470
5471void helper_sti_vme(void)
5472{
5473 /* First check, then change eflags according to the AMD manual */
5474 if (env->eflags & VIP_MASK) {
5475 raise_exception(EXCP0D_GPF);
5476 }
5477 env->eflags |= VIF_MASK;
5478}
5479#endif /* VBOX */
5480
5481#if 0
5482/* vm86plus instructions */
5483void helper_cli_vm(void)
5484{
5485 env->eflags &= ~VIF_MASK;
5486}
5487
5488void helper_sti_vm(void)
5489{
5490 env->eflags |= VIF_MASK;
5491 if (env->eflags & VIP_MASK) {
5492 raise_exception(EXCP0D_GPF);
5493 }
5494}
5495#endif
5496
5497void helper_set_inhibit_irq(void)
5498{
5499 env->hflags |= HF_INHIBIT_IRQ_MASK;
5500}
5501
5502void helper_reset_inhibit_irq(void)
5503{
5504 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5505}
5506
5507void helper_boundw(target_ulong a0, int v)
5508{
5509 int low, high;
5510 low = ldsw(a0);
5511 high = ldsw(a0 + 2);
5512 v = (int16_t)v;
5513 if (v < low || v > high) {
5514 raise_exception(EXCP05_BOUND);
5515 }
5516}
5517
5518void helper_boundl(target_ulong a0, int v)
5519{
5520 int low, high;
5521 low = ldl(a0);
5522 high = ldl(a0 + 4);
5523 if (v < low || v > high) {
5524 raise_exception(EXCP05_BOUND);
5525 }
5526}
5527
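/* Full-precision host stand-ins for the SSE reciprocal square root / reciprocal estimate helpers. */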
5528static float approx_rsqrt(float a)
5529{
5530 return 1.0 / sqrt(a);
5531}
5532
5533static float approx_rcp(float a)
5534{
5535 return 1.0 / a;
5536}
5537
5538#if !defined(CONFIG_USER_ONLY)
5539
5540#define MMUSUFFIX _mmu
5541
5542#define SHIFT 0
5543#include "softmmu_template.h"
5544
5545#define SHIFT 1
5546#include "softmmu_template.h"
5547
5548#define SHIFT 2
5549#include "softmmu_template.h"
5550
5551#define SHIFT 3
5552#include "softmmu_template.h"
5553
5554#endif
5555
5556#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5557 /* This code assumes that real physical addresses always fit into a host CPU register,
5558    which is wrong in general but true for our current use cases. */
5559RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5560{
5561 return remR3PhysReadS8(addr);
5562}
5563RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5564{
5565 return remR3PhysReadU8(addr);
5566}
5567void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5568{
5569 remR3PhysWriteU8(addr, val);
5570}
5571RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5572{
5573 return remR3PhysReadS16(addr);
5574}
5575RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5576{
5577 return remR3PhysReadU16(addr);
5578}
5579void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5580{
5581 remR3PhysWriteU16(addr, val);
5582}
5583RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5584{
5585 return remR3PhysReadS32(addr);
5586}
5587RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5588{
5589 return remR3PhysReadU32(addr);
5590}
5591void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5592{
5593 remR3PhysWriteU32(addr, val);
5594}
5595uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5596{
5597 return remR3PhysReadU64(addr);
5598}
5599void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5600{
5601 remR3PhysWriteU64(addr, val);
5602}
5603#endif /* VBOX */
5604
5605#if !defined(CONFIG_USER_ONLY)
5606/* try to fill the TLB and return an exception if error. If retaddr is
5607 NULL, it means that the function was called in C code (i.e. not
5608 from generated code or from helper.c) */
5609/* XXX: fix it to restore all registers */
5610void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5611{
5612 TranslationBlock *tb;
5613 int ret;
5614 unsigned long pc;
5615 CPUX86State *saved_env;
5616
5617 /* XXX: hack to restore env in all cases, even if not called from
5618 generated code */
5619 saved_env = env;
5620 env = cpu_single_env;
5621
5622 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5623 if (ret) {
5624 if (retaddr) {
5625 /* now we have a real cpu fault */
5626 pc = (unsigned long)retaddr;
5627 tb = tb_find_pc(pc);
5628 if (tb) {
5629 /* the PC is inside the translated code. It means that we have
5630 a virtual CPU fault */
5631 cpu_restore_state(tb, env, pc, NULL);
5632 }
5633 }
5634 raise_exception_err(env->exception_index, env->error_code);
5635 }
5636 env = saved_env;
5637}
5638#endif
5639
5640#ifdef VBOX
5641
5642/**
5643 * Correctly computes the eflags.
5644 * @returns eflags.
5645 * @param env1 CPU environment.
5646 */
5647uint32_t raw_compute_eflags(CPUX86State *env1)
5648{
5649 CPUX86State *savedenv = env;
5650 uint32_t efl;
5651 env = env1;
5652 efl = compute_eflags();
5653 env = savedenv;
5654 return efl;
5655}
5656
5657/**
5658  * Reads a byte from a virtual address in the guest memory area.
5659  * XXX: is it working for any addresses? swapped out pages?
5660  * @returns read data byte.
5661  * @param env1 CPU environment.
5662  * @param addr GC Virtual address.
5663 */
5664uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5665{
5666 CPUX86State *savedenv = env;
5667 uint8_t u8;
5668 env = env1;
5669 u8 = ldub_kernel(addr);
5670 env = savedenv;
5671 return u8;
5672}
5673
5674/**
5675  * Reads a 16-bit word from a virtual address in the guest memory area.
5676  * XXX: is it working for any addresses? swapped out pages?
5677  * @returns read data word.
5678  * @param env1 CPU environment.
5679  * @param addr GC Virtual address.
5680 */
5681uint16_t read_word(CPUX86State *env1, target_ulong addr)
5682{
5683 CPUX86State *savedenv = env;
5684 uint16_t u16;
5685 env = env1;
5686 u16 = lduw_kernel(addr);
5687 env = savedenv;
5688 return u16;
5689}
5690
5691/**
5692  * Reads a 32-bit dword from a virtual address in the guest memory area.
5693  * XXX: is it working for any addresses? swapped out pages?
5694  * @returns read data dword.
5695  * @param env1 CPU environment.
5696  * @param addr GC Virtual address.
5697 */
5698uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5699{
5700 CPUX86State *savedenv = env;
5701 uint32_t u32;
5702 env = env1;
5703 u32 = ldl_kernel(addr);
5704 env = savedenv;
5705 return u32;
5706}
5707
5708/**
5709  * Writes a byte to a virtual address in the guest memory area.
5710  * XXX: is it working for any addresses? swapped out pages?
5711  * @param env1 CPU environment.
5712  * @param addr GC Virtual address.
5713  * @param val byte value.
5715 */
5716void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5717{
5718 CPUX86State *savedenv = env;
5719 env = env1;
5720 stb(addr, val);
5721 env = savedenv;
5722}
5723
5724void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5725{
5726 CPUX86State *savedenv = env;
5727 env = env1;
5728 stw(addr, val);
5729 env = savedenv;
5730}
5731
5732void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5733{
5734 CPUX86State *savedenv = env;
5735 env = env1;
5736 stl(addr, val);
5737 env = savedenv;
5738}
5739
5740/**
5741 * Correctly loads selector into segment register with updating internal
5742 * qemu data/caches.
5743 * @param env1 CPU environment.
5744 * @param seg_reg Segment register.
5745 * @param selector Selector to load.
5746 */
5747void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5748{
5749 CPUX86State *savedenv = env;
5750#ifdef FORCE_SEGMENT_SYNC
5751 jmp_buf old_buf;
5752#endif
5753
5754 env = env1;
5755
5756 if ( env->eflags & X86_EFL_VM
5757 || !(env->cr[0] & X86_CR0_PE))
5758 {
5759 load_seg_vm(seg_reg, selector);
5760
5761 env = savedenv;
5762
5763 /* Successful sync. */
5764 Assert(env1->segs[seg_reg].newselector == 0);
5765 }
5766 else
5767 {
5768        /* For some reason this works even without saving/restoring the jump buffer, and since
5769           this code is time critical we skip it. */
5770#ifdef FORCE_SEGMENT_SYNC
5771 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5772#endif
5773 if (setjmp(env1->jmp_env) == 0)
5774 {
5775 if (seg_reg == R_CS)
5776 {
5777 uint32_t e1, e2;
5778 e1 = e2 = 0;
5779 load_segment(&e1, &e2, selector);
5780 cpu_x86_load_seg_cache(env, R_CS, selector,
5781 get_seg_base(e1, e2),
5782 get_seg_limit(e1, e2),
5783 e2);
5784 }
5785 else
5786 helper_load_seg(seg_reg, selector);
5787            /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5788               loading 0 selectors and in turn led to subtle problems like #3588. */
5789
5790 env = savedenv;
5791
5792 /* Successful sync. */
5793 Assert(env1->segs[seg_reg].newselector == 0);
5794 }
5795 else
5796 {
5797 env = savedenv;
5798
5799 /* Postpone sync until the guest uses the selector. */
5800 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5801 env1->segs[seg_reg].newselector = selector;
5802 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5803 env1->exception_index = -1;
5804 env1->error_code = 0;
5805 env1->old_exception = -1;
5806 }
5807#ifdef FORCE_SEGMENT_SYNC
5808 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5809#endif
5810 }
5811
5812}
5813
5814DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5815{
5816 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5817}
5818
5819
5820int emulate_single_instr(CPUX86State *env1)
5821{
5822 TranslationBlock *tb;
5823 TranslationBlock *current;
5824 int flags;
5825 uint8_t *tc_ptr;
5826 target_ulong old_eip;
5827
5828 /* ensures env is loaded! */
5829 CPUX86State *savedenv = env;
5830 env = env1;
5831
5832 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5833
5834 current = env->current_tb;
5835 env->current_tb = NULL;
5836 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5837
5838 /*
5839 * Translate only one instruction.
5840 */
5841 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5842 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5843 env->segs[R_CS].base, flags, 0);
5844
5845 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5846
5847
5848 /* tb_link_phys: */
5849 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5850 tb->jmp_next[0] = NULL;
5851 tb->jmp_next[1] = NULL;
5852 Assert(tb->jmp_next[0] == NULL);
5853 Assert(tb->jmp_next[1] == NULL);
5854 if (tb->tb_next_offset[0] != 0xffff)
5855 tb_reset_jump(tb, 0);
5856 if (tb->tb_next_offset[1] != 0xffff)
5857 tb_reset_jump(tb, 1);
5858
5859 /*
5860 * Execute it using emulation
5861 */
5862 old_eip = env->eip;
5863 env->current_tb = tb;
5864
5865 /*
5866     * eip remains the same for repeated instructions; it is unclear why qemu doesn't emit a jump inside
5867     * the generated code for them. Perhaps not a very safe hack.
5868 */
5869 while (old_eip == env->eip)
5870 {
5871 tc_ptr = tb->tc_ptr;
5872
5873#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5874 int fake_ret;
5875 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5876#else
5877 tcg_qemu_tb_exec(tc_ptr);
5878#endif
5879
5880 /*
5881 * Exit once we detect an external interrupt and interrupts are enabled
5882 */
5883 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
5884 || ( (env->eflags & IF_MASK)
5885 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
5886 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
5887 )
5888 {
5889 break;
5890 }
5891 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
5892 tlb_flush(env, true);
5893 }
5894 }
5895 env->current_tb = current;
5896
5897 tb_phys_invalidate(tb, -1);
5898 tb_free(tb);
5899/*
5900 Assert(tb->tb_next_offset[0] == 0xffff);
5901 Assert(tb->tb_next_offset[1] == 0xffff);
5902 Assert(tb->tb_next[0] == 0xffff);
5903 Assert(tb->tb_next[1] == 0xffff);
5904 Assert(tb->jmp_next[0] == NULL);
5905 Assert(tb->jmp_next[1] == NULL);
5906 Assert(tb->jmp_first == NULL); */
5907
5908 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5909
5910 /*
5911 * Execute the next instruction when we encounter instruction fusing.
5912 */
5913 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5914 {
5915 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5916 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5917 emulate_single_instr(env);
5918 }
5919
5920 env = savedenv;
5921 return 0;
5922}
5923
5924/**
5925 * Correctly loads a new ldtr selector.
5926 *
5927 * @param env1 CPU environment.
5928 * @param selector Selector to load.
5929 */
5930void sync_ldtr(CPUX86State *env1, int selector)
5931{
5932 CPUX86State *saved_env = env;
5933 if (setjmp(env1->jmp_env) == 0)
5934 {
5935 env = env1;
5936 helper_lldt(selector);
5937 env = saved_env;
5938 }
5939 else
5940 {
5941 env = saved_env;
5942#ifdef VBOX_STRICT
5943 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5944#endif
5945 }
5946}
5947
5948int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5949 uint32_t *esp_ptr, int dpl)
5950{
5951 int type, index, shift;
5952
5953 CPUX86State *savedenv = env;
5954 env = env1;
5955
5956 if (!(env->tr.flags & DESC_P_MASK))
5957 cpu_abort(env, "invalid tss");
5958 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5959 if ((type & 7) != 1)
5960 cpu_abort(env, "invalid tss type %d", type);
5961 shift = type >> 3;
5962 index = (dpl * 4 + 2) << shift;
5963 if (index + (4 << shift) - 1 > env->tr.limit)
5964 {
5965 env = savedenv;
5966 return 0;
5967 }
5968 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5969
5970 if (shift == 0) {
5971 *esp_ptr = lduw_kernel(env->tr.base + index);
5972 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5973 } else {
5974 *esp_ptr = ldl_kernel(env->tr.base + index);
5975 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5976 }
5977
5978 env = savedenv;
5979 return 1;
5980}
5981
5982//*****************************************************************************
5983// Needs to be at the bottom of the file (overriding macros)
5984
5985static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5986{
5987#ifdef USE_X86LDOUBLE
5988 CPU86_LDoubleU tmp;
5989 tmp.l.lower = *(uint64_t const *)ptr;
5990 tmp.l.upper = *(uint16_t const *)(ptr + 8);
5991 return tmp.d;
5992#else
5993# error "Busted FPU saving/restoring!"
5994 return *(CPU86_LDouble *)ptr;
5995#endif
5996}
5997
5998static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5999{
6000#ifdef USE_X86LDOUBLE
6001 CPU86_LDoubleU tmp;
6002 tmp.d = f;
6003 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6004 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6005 *(uint16_t *)(ptr + 10) = 0;
6006 *(uint32_t *)(ptr + 12) = 0;
6007 AssertCompile(sizeof(long double) > 8);
6008#else
6009# error "Busted FPU saving/restoring!"
6010 *(CPU86_LDouble *)ptr = f;
6011#endif
6012}
6013
6014#undef stw
6015#undef stl
6016#undef stq
6017#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6018#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6019#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6020
6021//*****************************************************************************
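/* Dumps the emulated FPU/SSE state into the memory block at ptr, using the FXSAVE image
   layout when FXSR is available and the legacy FSAVE layout otherwise. */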
6022void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6023{
6024 int fpus, fptag, i, nb_xmm_regs;
6025 CPU86_LDouble tmp;
6026 uint8_t *addr;
6027 int data64 = !!(env->hflags & HF_LMA_MASK);
6028
6029 if (env->cpuid_features & CPUID_FXSR)
6030 {
6031 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6032 fptag = 0;
6033 for(i = 0; i < 8; i++) {
6034 fptag |= (env->fptags[i] << i);
6035 }
6036 stw(ptr, env->fpuc);
6037 stw(ptr + 2, fpus);
6038 stw(ptr + 4, fptag ^ 0xff);
6039
6040 addr = ptr + 0x20;
6041 for(i = 0;i < 8; i++) {
6042 tmp = ST(i);
6043 helper_fstt_raw(tmp, addr);
6044 addr += 16;
6045 }
6046
6047 if (env->cr[4] & CR4_OSFXSR_MASK) {
6048 /* XXX: finish it */
6049 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6050 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6051 nb_xmm_regs = 8 << data64;
6052 addr = ptr + 0xa0;
6053 for(i = 0; i < nb_xmm_regs; i++) {
6054#if __GNUC__ < 4
6055 stq(addr, env->xmm_regs[i].XMM_Q(0));
6056 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6057#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6058 stl(addr, env->xmm_regs[i].XMM_L(0));
6059 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6060 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6061 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6062#endif
6063 addr += 16;
6064 }
6065 }
6066 }
6067 else
6068 {
6069 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6070 int fptag;
6071
6072 fp->FCW = env->fpuc;
6073 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6074 fptag = 0;
6075 for (i=7; i>=0; i--) {
6076 fptag <<= 2;
6077 if (env->fptags[i]) {
6078 fptag |= 3;
6079 } else {
6080 /* the FPU automatically computes it */
6081 }
6082 }
6083 fp->FTW = fptag;
6084
6085 for(i = 0;i < 8; i++) {
6086 tmp = ST(i);
6087 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6088 }
6089 }
6090}
6091
6092//*****************************************************************************
6093#undef lduw
6094#undef ldl
6095#undef ldq
6096#define lduw(a) *(uint16_t *)(a)
6097#define ldl(a) *(uint32_t *)(a)
6098#define ldq(a) *(uint64_t *)(a)
6099//*****************************************************************************
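/* Loads the emulated FPU/SSE state from the memory block at ptr, mirroring restore_raw_fp_state:
   FXSAVE image layout when FXSR is available, legacy FSAVE layout otherwise. */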
6100void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6101{
6102 int i, fpus, fptag, nb_xmm_regs;
6103 CPU86_LDouble tmp;
6104 uint8_t *addr;
6105 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6106
6107 if (env->cpuid_features & CPUID_FXSR)
6108 {
6109 env->fpuc = lduw(ptr);
6110 fpus = lduw(ptr + 2);
6111 fptag = lduw(ptr + 4);
6112 env->fpstt = (fpus >> 11) & 7;
6113 env->fpus = fpus & ~0x3800;
6114 fptag ^= 0xff;
6115 for(i = 0;i < 8; i++) {
6116 env->fptags[i] = ((fptag >> i) & 1);
6117 }
6118
6119 addr = ptr + 0x20;
6120 for(i = 0;i < 8; i++) {
6121 tmp = helper_fldt_raw(addr);
6122 ST(i) = tmp;
6123 addr += 16;
6124 }
6125
6126 if (env->cr[4] & CR4_OSFXSR_MASK) {
6127 /* XXX: finish it, endianness */
6128 env->mxcsr = ldl(ptr + 0x18);
6129 //ldl(ptr + 0x1c);
6130 nb_xmm_regs = 8 << data64;
6131 addr = ptr + 0xa0;
6132 for(i = 0; i < nb_xmm_regs; i++) {
6133#if HC_ARCH_BITS == 32
6134 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6135 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6136 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6137 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6138 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6139#else
6140 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6141 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6142#endif
6143 addr += 16;
6144 }
6145 }
6146 }
6147 else
6148 {
6149 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6150 int fptag, j;
6151
6152 env->fpuc = fp->FCW;
6153 env->fpstt = (fp->FSW >> 11) & 7;
6154 env->fpus = fp->FSW & ~0x3800;
6155 fptag = fp->FTW;
6156 for(i = 0;i < 8; i++) {
6157 env->fptags[i] = ((fptag & 3) == 3);
6158 fptag >>= 2;
6159 }
6160 j = env->fpstt;
6161 for(i = 0;i < 8; i++) {
6162 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6163 ST(i) = tmp;
6164 }
6165 }
6166}
6167//*****************************************************************************
6168//*****************************************************************************
6169
6170#endif /* VBOX */
6171
6172/* Secure Virtual Machine helpers */
6173
6174#if defined(CONFIG_USER_ONLY)
6175
6176void helper_vmrun(int aflag, int next_eip_addend)
6177{
6178}
6179void helper_vmmcall(void)
6180{
6181}
6182void helper_vmload(int aflag)
6183{
6184}
6185void helper_vmsave(int aflag)
6186{
6187}
6188void helper_stgi(void)
6189{
6190}
6191void helper_clgi(void)
6192{
6193}
6194void helper_skinit(void)
6195{
6196}
6197void helper_invlpga(int aflag)
6198{
6199}
6200void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6201{
6202}
6203void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6204{
6205}
6206
6207void helper_svm_check_io(uint32_t port, uint32_t param,
6208 uint32_t next_eip_addend)
6209{
6210}
6211#else
6212
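/* svm_save_seg/svm_load_seg convert between the cached segment descriptor flags and the
   VMCB's packed 12-bit attribute encoding. */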
6213static inline void svm_save_seg(target_phys_addr_t addr,
6214 const SegmentCache *sc)
6215{
6216 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6217 sc->selector);
6218 stq_phys(addr + offsetof(struct vmcb_seg, base),
6219 sc->base);
6220 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6221 sc->limit);
6222 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6223 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6224}
6225
6226static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6227{
6228 unsigned int flags;
6229
6230 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6231 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6232 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6233 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6234 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6235}
6236
6237static inline void svm_load_seg_cache(target_phys_addr_t addr,
6238 CPUState *env, int seg_reg)
6239{
6240 SegmentCache sc1, *sc = &sc1;
6241 svm_load_seg(addr, sc);
6242 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6243 sc->base, sc->limit, sc->flags);
6244}
6245
6246void helper_vmrun(int aflag, int next_eip_addend)
6247{
6248 target_ulong addr;
6249 uint32_t event_inj;
6250 uint32_t int_ctl;
6251
6252 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6253
6254 if (aflag == 2)
6255 addr = EAX;
6256 else
6257 addr = (uint32_t)EAX;
6258
6259 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6260
6261 env->vm_vmcb = addr;
6262
6263 /* save the current CPU state in the hsave page */
6264 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6265 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6266
6267 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6268 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6269
6270 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6271 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6272 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6273 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6274 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6275 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6276
6277 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6278 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6279
6280 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6281 &env->segs[R_ES]);
6282 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6283 &env->segs[R_CS]);
6284 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6285 &env->segs[R_SS]);
6286 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6287 &env->segs[R_DS]);
6288
6289 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6290 EIP + next_eip_addend);
6291 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6292 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6293
6294 /* load the interception bitmaps so we do not need to access the
6295 vmcb in svm mode */
6296 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6297 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6298 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6299 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6300 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6301 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6302
6303 /* enable intercepts */
6304 env->hflags |= HF_SVMI_MASK;
6305
6306 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6307
6308 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6309 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6310
6311 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6312 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6313
6314 /* clear exit_info_2 so we behave like the real hardware */
6315 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6316
6317 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6318 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6319 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6320 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6321 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6322 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6323 if (int_ctl & V_INTR_MASKING_MASK) {
6324 env->v_tpr = int_ctl & V_TPR_MASK;
6325 env->hflags2 |= HF2_VINTR_MASK;
6326 if (env->eflags & IF_MASK)
6327 env->hflags2 |= HF2_HIF_MASK;
6328 }
6329
6330 cpu_load_efer(env,
6331 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6332 env->eflags = 0;
6333 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6334 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6335 CC_OP = CC_OP_EFLAGS;
6336
6337 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6338 env, R_ES);
6339 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6340 env, R_CS);
6341 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6342 env, R_SS);
6343 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6344 env, R_DS);
6345
6346 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6347 env->eip = EIP;
6348 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6349 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6350 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6351 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6352 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6353
6354 /* FIXME: guest state consistency checks */
6355
6356 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6357 case TLB_CONTROL_DO_NOTHING:
6358 break;
6359 case TLB_CONTROL_FLUSH_ALL_ASID:
6360 /* FIXME: this is not 100% correct but should work for now */
6361 tlb_flush(env, 1);
6362 break;
6363 }
6364
6365 env->hflags2 |= HF2_GIF_MASK;
6366
6367 if (int_ctl & V_IRQ_MASK) {
6368 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6369 }
6370
6371 /* maybe we need to inject an event */
6372 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6373 if (event_inj & SVM_EVTINJ_VALID) {
6374 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6375 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6376 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6377
6378 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6379 /* FIXME: need to implement valid_err */
6380 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6381 case SVM_EVTINJ_TYPE_INTR:
6382 env->exception_index = vector;
6383 env->error_code = event_inj_err;
6384 env->exception_is_int = 0;
6385 env->exception_next_eip = -1;
6386 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
 6387 /* XXX: is this always correct? */
6388 do_interrupt(vector, 0, 0, 0, 1);
6389 break;
6390 case SVM_EVTINJ_TYPE_NMI:
6391 env->exception_index = EXCP02_NMI;
6392 env->error_code = event_inj_err;
6393 env->exception_is_int = 0;
6394 env->exception_next_eip = EIP;
6395 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6396 cpu_loop_exit();
6397 break;
6398 case SVM_EVTINJ_TYPE_EXEPT:
6399 env->exception_index = vector;
6400 env->error_code = event_inj_err;
6401 env->exception_is_int = 0;
6402 env->exception_next_eip = -1;
6403 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6404 cpu_loop_exit();
6405 break;
6406 case SVM_EVTINJ_TYPE_SOFT:
6407 env->exception_index = vector;
6408 env->error_code = event_inj_err;
6409 env->exception_is_int = 1;
6410 env->exception_next_eip = EIP;
6411 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6412 cpu_loop_exit();
6413 break;
6414 }
6415 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6416 }
6417}
6418
6419void helper_vmmcall(void)
6420{
6421 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6422 raise_exception(EXCP06_ILLOP);
6423}
6424
6425void helper_vmload(int aflag)
6426{
6427 target_ulong addr;
6428 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6429
6430 if (aflag == 2)
6431 addr = EAX;
6432 else
6433 addr = (uint32_t)EAX;
6434
6435 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6436 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6437 env->segs[R_FS].base);
6438
6439 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6440 env, R_FS);
6441 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6442 env, R_GS);
6443 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6444 &env->tr);
6445 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6446 &env->ldt);
6447
6448#ifdef TARGET_X86_64
6449 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6450 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6451 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6452 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6453#endif
6454 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6455 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6456 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6457 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6458}
6459
6460void helper_vmsave(int aflag)
6461{
6462 target_ulong addr;
6463 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6464
6465 if (aflag == 2)
6466 addr = EAX;
6467 else
6468 addr = (uint32_t)EAX;
6469
6470 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6471 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6472 env->segs[R_FS].base);
6473
6474 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6475 &env->segs[R_FS]);
6476 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6477 &env->segs[R_GS]);
6478 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6479 &env->tr);
6480 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6481 &env->ldt);
6482
6483#ifdef TARGET_X86_64
6484 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6485 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6486 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6487 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6488#endif
6489 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6490 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6491 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6492 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6493}
6494
6495void helper_stgi(void)
6496{
6497 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6498 env->hflags2 |= HF2_GIF_MASK;
6499}
6500
6501void helper_clgi(void)
6502{
6503 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6504 env->hflags2 &= ~HF2_GIF_MASK;
6505}
6506
6507void helper_skinit(void)
6508{
6509 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6510 /* XXX: not implemented */
6511 raise_exception(EXCP06_ILLOP);
6512}
6513
6514void helper_invlpga(int aflag)
6515{
6516 target_ulong addr;
6517 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6518
6519 if (aflag == 2)
6520 addr = EAX;
6521 else
6522 addr = (uint32_t)EAX;
6523
 6524 /* XXX: could use the ASID to see if the flush is
 6525 actually needed */
6526 tlb_flush_page(env, addr);
6527}
6528
6529void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6530{
6531 if (likely(!(env->hflags & HF_SVMI_MASK)))
6532 return;
6533#ifndef VBOX
6534 switch(type) {
6535 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6536 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6537 helper_vmexit(type, param);
6538 }
6539 break;
6540 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6541 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6542 helper_vmexit(type, param);
6543 }
6544 break;
6545 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6546 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6547 helper_vmexit(type, param);
6548 }
6549 break;
6550 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6551 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6552 helper_vmexit(type, param);
6553 }
6554 break;
6555 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6556 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6557 helper_vmexit(type, param);
6558 }
6559 break;
6560 case SVM_EXIT_MSR:
6561 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6562 /* FIXME: this should be read in at vmrun (faster this way?) */
6563 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6564 uint32_t t0, t1;
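 /* The MSR permission bitmap uses two bits per MSR (read bit, then write
 bit) and covers three MSR ranges laid out back to back: 0-0x1fff,
 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.  Below, t1 becomes the
 byte offset into the bitmap, t0 the bit offset within that byte, and
 'param' (0 for RDMSR, 1 for WRMSR) selects which of the two bits to test. */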
6565 switch((uint32_t)ECX) {
6566 case 0 ... 0x1fff:
 6567 t0 = (ECX * 2) % 8;
 6568 t1 = (ECX * 2) / 8;
6569 break;
6570 case 0xc0000000 ... 0xc0001fff:
6571 t0 = (8192 + ECX - 0xc0000000) * 2;
6572 t1 = (t0 / 8);
6573 t0 %= 8;
6574 break;
6575 case 0xc0010000 ... 0xc0011fff:
6576 t0 = (16384 + ECX - 0xc0010000) * 2;
6577 t1 = (t0 / 8);
6578 t0 %= 8;
6579 break;
6580 default:
6581 helper_vmexit(type, param);
6582 t0 = 0;
6583 t1 = 0;
6584 break;
6585 }
6586 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6587 helper_vmexit(type, param);
6588 }
6589 break;
6590 default:
6591 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6592 helper_vmexit(type, param);
6593 }
6594 break;
6595 }
6596#else /* VBOX */
 6597 AssertMsgFailed(("We shouldn't be here, HWACCM handles SVM differently!"));
6598#endif /* VBOX */
6599}
6600
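/*
 * The I/O permission bitmap has one bit per port.  Bits 4-6 of 'param' carry
 * the access size in bytes (as encoded by the translator), so the mask below
 * covers every byte of the access; a 16-bit load is used so that the run of
 * bits may straddle a byte boundary.
 */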
6601void helper_svm_check_io(uint32_t port, uint32_t param,
6602 uint32_t next_eip_addend)
6603{
6604 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6605 /* FIXME: this should be read in at vmrun (faster this way?) */
6606 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6607 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6608 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6609 /* next EIP */
6610 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6611 env->eip + next_eip_addend);
6612 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6613 }
6614 }
6615}
6616
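/*
 * #VMEXIT, in outline: write the guest state and the exit code/info back into
 * the VMCB, disable intercepts and GIF, reload the host state from the hsave
 * page, and re-enter the emulation loop at the host rIP.
 */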
6617/* Note: currently only 32 bits of exit_code are used */
6618void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6619{
6620 uint32_t int_ctl;
6621
6622 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6623 exit_code, exit_info_1,
6624 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6625 EIP);
6626
6627 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6628 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6629 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6630 } else {
6631 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6632 }
6633
6634 /* Save the VM state in the vmcb */
6635 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6636 &env->segs[R_ES]);
6637 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6638 &env->segs[R_CS]);
6639 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6640 &env->segs[R_SS]);
6641 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6642 &env->segs[R_DS]);
6643
6644 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6645 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6646
6647 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6648 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6649
6650 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6651 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6652 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6653 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6654 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6655
6656 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6657 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6658 int_ctl |= env->v_tpr & V_TPR_MASK;
6659 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6660 int_ctl |= V_IRQ_MASK;
6661 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6662
6663 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6664 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6665 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6666 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6667 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6668 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6669 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6670
6671 /* Reload the host state from vm_hsave */
6672 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6673 env->hflags &= ~HF_SVMI_MASK;
6674 env->intercept = 0;
6675 env->intercept_exceptions = 0;
6676 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6677 env->tsc_offset = 0;
6678
6679 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6680 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6681
6682 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6683 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6684
6685 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6686 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6687 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6688 /* we need to set the efer after the crs so the hidden flags get
6689 set properly */
6690 cpu_load_efer(env,
6691 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6692 env->eflags = 0;
6693 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6694 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6695 CC_OP = CC_OP_EFLAGS;
6696
6697 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6698 env, R_ES);
6699 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6700 env, R_CS);
6701 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6702 env, R_SS);
6703 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6704 env, R_DS);
6705
6706 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6707 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6708 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6709
6710 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6711 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6712
6713 /* other setups */
6714 cpu_x86_set_cpl(env, 0);
6715 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6716 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6717
6718 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6719 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6720 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6721 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6722 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6723
6724 env->hflags2 &= ~HF2_GIF_MASK;
6725 /* FIXME: Resets the current ASID register to zero (host ASID). */
6726
6727 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6728
6729 /* Clears the TSC_OFFSET inside the processor. */
6730
 6731 /* If the host is in PAE mode, the processor reloads the host's PDPEs
 6732 from the page table indicated by the host's CR3. If the PDPEs contain
 6733 illegal state, the processor causes a shutdown. */
6734
6735 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6736 env->cr[0] |= CR0_PE_MASK;
6737 env->eflags &= ~VM_MASK;
6738
6739 /* Disables all breakpoints in the host DR7 register. */
6740
6741 /* Checks the reloaded host state for consistency. */
6742
 6743 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
 6744 host's code segment or non-canonical (in the case of long mode), a
 6745 #GP fault is delivered inside the host. */
6746
6747 /* remove any pending exception */
6748 env->exception_index = -1;
6749 env->error_code = 0;
6750 env->old_exception = -1;
6751
6752 cpu_loop_exit();
6753}
6754
6755#endif
6756
6757/* MMX/SSE */
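/*
 * Note on the tag encoding used here: env->fptags[i] is 1 when ST(i) is empty
 * and 0 when it is valid.  helper_enter_mmx() therefore marks all eight
 * registers valid (and resets the stack top) before an MMX instruction, while
 * helper_emms() marks them all empty again; the 32-bit stores below simply
 * fill the eight byte-sized tags in two writes.
 */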
6758/* XXX: optimize by storing fptt and fptags in the static cpu state */
6759void helper_enter_mmx(void)
6760{
6761 env->fpstt = 0;
6762 *(uint32_t *)(env->fptags) = 0;
6763 *(uint32_t *)(env->fptags + 4) = 0;
6764}
6765
6766void helper_emms(void)
6767{
6768 /* set to empty state */
6769 *(uint32_t *)(env->fptags) = 0x01010101;
6770 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6771}
6772
6773/* XXX: suppress */
6774void helper_movq(void *d, void *s)
6775{
6776 *(uint64_t *)d = *(uint64_t *)s;
6777}
6778
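/*
 * The SSE and flag helpers are generated by textual instantiation: ops_sse.h
 * is included once with SHIFT 0 (64-bit MMX operands) and once with SHIFT 1
 * (128-bit XMM operands), and helper_template.h once per operand width
 * (byte/word/long, plus quad on 64-bit targets) to produce the compute_all_*
 * and compute_c_* flag routines used further down.
 */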
6779#define SHIFT 0
6780#include "ops_sse.h"
6781
6782#define SHIFT 1
6783#include "ops_sse.h"
6784
6785#define SHIFT 0
6786#include "helper_template.h"
6787#undef SHIFT
6788
6789#define SHIFT 1
6790#include "helper_template.h"
6791#undef SHIFT
6792
6793#define SHIFT 2
6794#include "helper_template.h"
6795#undef SHIFT
6796
6797#ifdef TARGET_X86_64
6798
6799#define SHIFT 3
6800#include "helper_template.h"
6801#undef SHIFT
6802
6803#endif
6804
6805/* bit operations */
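/*
 * Bit-scan helpers.  helper_bsf() and the wordsize == 0 (BSR) path of
 * helper_lzcnt() assume a non-zero source: the translator is expected to test
 * the operand and skip the call when it is zero, otherwise the scan loops
 * would not terminate.  With wordsize > 0, helper_lzcnt() returns the LZCNT
 * result for that operand width, e.g. lzcnt(0x8, 16) = 12 while bsr(0x8) = 3.
 */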
6806target_ulong helper_bsf(target_ulong t0)
6807{
6808 int count;
6809 target_ulong res;
6810
6811 res = t0;
6812 count = 0;
6813 while ((res & 1) == 0) {
6814 count++;
6815 res >>= 1;
6816 }
6817 return count;
6818}
6819
6820target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6821{
6822 int count;
6823 target_ulong res, mask;
6824
6825 if (wordsize > 0 && t0 == 0) {
6826 return wordsize;
6827 }
6828 res = t0;
6829 count = TARGET_LONG_BITS - 1;
6830 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6831 while ((res & mask) == 0) {
6832 count--;
6833 res <<= 1;
6834 }
6835 if (wordsize > 0) {
6836 return wordsize - 1 - count;
6837 }
6838 return count;
6839}
6840
6841target_ulong helper_bsr(target_ulong t0)
6842{
6843 return helper_lzcnt(t0, 0);
6844}
6845
6846static int compute_all_eflags(void)
6847{
6848 return CC_SRC;
6849}
6850
6851static int compute_c_eflags(void)
6852{
6853 return CC_SRC & CC_C;
6854}
6855
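/*
 * Lazy condition codes: instead of updating EFLAGS after every instruction,
 * the translator records which operation ran last in CC_OP and leaves its
 * result/operand in CC_DST/CC_SRC (see helper_template.h for the per-width
 * compute_* routines).  The two dispatchers below materialize either the full
 * O/S/Z/A/P/C set or just the carry flag on demand.
 *
 * Minimal usage sketch (illustrative only, not part of the build):
 */
#if 0
    uint32_t flags = helper_cc_compute_all(CC_OP);  /* full OSZAPC set */
    uint32_t cf    = helper_cc_compute_c(CC_OP);    /* carry flag only (cheaper) */
#endif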
6856uint32_t helper_cc_compute_all(int op)
6857{
6858 switch (op) {
6859 default: /* should never happen */ return 0;
6860
6861 case CC_OP_EFLAGS: return compute_all_eflags();
6862
6863 case CC_OP_MULB: return compute_all_mulb();
6864 case CC_OP_MULW: return compute_all_mulw();
6865 case CC_OP_MULL: return compute_all_mull();
6866
6867 case CC_OP_ADDB: return compute_all_addb();
6868 case CC_OP_ADDW: return compute_all_addw();
6869 case CC_OP_ADDL: return compute_all_addl();
6870
6871 case CC_OP_ADCB: return compute_all_adcb();
6872 case CC_OP_ADCW: return compute_all_adcw();
6873 case CC_OP_ADCL: return compute_all_adcl();
6874
6875 case CC_OP_SUBB: return compute_all_subb();
6876 case CC_OP_SUBW: return compute_all_subw();
6877 case CC_OP_SUBL: return compute_all_subl();
6878
6879 case CC_OP_SBBB: return compute_all_sbbb();
6880 case CC_OP_SBBW: return compute_all_sbbw();
6881 case CC_OP_SBBL: return compute_all_sbbl();
6882
6883 case CC_OP_LOGICB: return compute_all_logicb();
6884 case CC_OP_LOGICW: return compute_all_logicw();
6885 case CC_OP_LOGICL: return compute_all_logicl();
6886
6887 case CC_OP_INCB: return compute_all_incb();
6888 case CC_OP_INCW: return compute_all_incw();
6889 case CC_OP_INCL: return compute_all_incl();
6890
6891 case CC_OP_DECB: return compute_all_decb();
6892 case CC_OP_DECW: return compute_all_decw();
6893 case CC_OP_DECL: return compute_all_decl();
6894
6895 case CC_OP_SHLB: return compute_all_shlb();
6896 case CC_OP_SHLW: return compute_all_shlw();
6897 case CC_OP_SHLL: return compute_all_shll();
6898
6899 case CC_OP_SARB: return compute_all_sarb();
6900 case CC_OP_SARW: return compute_all_sarw();
6901 case CC_OP_SARL: return compute_all_sarl();
6902
6903#ifdef TARGET_X86_64
6904 case CC_OP_MULQ: return compute_all_mulq();
6905
6906 case CC_OP_ADDQ: return compute_all_addq();
6907
6908 case CC_OP_ADCQ: return compute_all_adcq();
6909
6910 case CC_OP_SUBQ: return compute_all_subq();
6911
6912 case CC_OP_SBBQ: return compute_all_sbbq();
6913
6914 case CC_OP_LOGICQ: return compute_all_logicq();
6915
6916 case CC_OP_INCQ: return compute_all_incq();
6917
6918 case CC_OP_DECQ: return compute_all_decq();
6919
6920 case CC_OP_SHLQ: return compute_all_shlq();
6921
6922 case CC_OP_SARQ: return compute_all_sarq();
6923#endif
6924 }
6925}
6926
6927uint32_t helper_cc_compute_c(int op)
6928{
6929 switch (op) {
6930 default: /* should never happen */ return 0;
6931
6932 case CC_OP_EFLAGS: return compute_c_eflags();
6933
6934 case CC_OP_MULB: return compute_c_mull();
6935 case CC_OP_MULW: return compute_c_mull();
6936 case CC_OP_MULL: return compute_c_mull();
6937
6938 case CC_OP_ADDB: return compute_c_addb();
6939 case CC_OP_ADDW: return compute_c_addw();
6940 case CC_OP_ADDL: return compute_c_addl();
6941
6942 case CC_OP_ADCB: return compute_c_adcb();
6943 case CC_OP_ADCW: return compute_c_adcw();
6944 case CC_OP_ADCL: return compute_c_adcl();
6945
6946 case CC_OP_SUBB: return compute_c_subb();
6947 case CC_OP_SUBW: return compute_c_subw();
6948 case CC_OP_SUBL: return compute_c_subl();
6949
6950 case CC_OP_SBBB: return compute_c_sbbb();
6951 case CC_OP_SBBW: return compute_c_sbbw();
6952 case CC_OP_SBBL: return compute_c_sbbl();
6953
6954 case CC_OP_LOGICB: return compute_c_logicb();
6955 case CC_OP_LOGICW: return compute_c_logicw();
6956 case CC_OP_LOGICL: return compute_c_logicl();
6957
6958 case CC_OP_INCB: return compute_c_incl();
6959 case CC_OP_INCW: return compute_c_incl();
6960 case CC_OP_INCL: return compute_c_incl();
6961
6962 case CC_OP_DECB: return compute_c_incl();
6963 case CC_OP_DECW: return compute_c_incl();
6964 case CC_OP_DECL: return compute_c_incl();
6965
6966 case CC_OP_SHLB: return compute_c_shlb();
6967 case CC_OP_SHLW: return compute_c_shlw();
6968 case CC_OP_SHLL: return compute_c_shll();
6969
6970 case CC_OP_SARB: return compute_c_sarl();
6971 case CC_OP_SARW: return compute_c_sarl();
6972 case CC_OP_SARL: return compute_c_sarl();
6973
6974#ifdef TARGET_X86_64
6975 case CC_OP_MULQ: return compute_c_mull();
6976
6977 case CC_OP_ADDQ: return compute_c_addq();
6978
6979 case CC_OP_ADCQ: return compute_c_adcq();
6980
6981 case CC_OP_SUBQ: return compute_c_subq();
6982
6983 case CC_OP_SBBQ: return compute_c_sbbq();
6984
6985 case CC_OP_LOGICQ: return compute_c_logicq();
6986
6987 case CC_OP_INCQ: return compute_c_incl();
6988
6989 case CC_OP_DECQ: return compute_c_incl();
6990
6991 case CC_OP_SHLQ: return compute_c_shlq();
6992
6993 case CC_OP_SARQ: return compute_c_sarl();
6994#endif
6995 }
6996}