VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 47757

Last change on this file since 47757 was 47757, checked in by vboxsync, 12 years ago

REM: Corrected task switch order (old task saved first, new task loaded next). Fixed 16-bit task switching bug (offsets into TSS for segment storage were wrong).

  • Property svn:eol-style set to native
File size: 201.4 KB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
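/* Illustrative sketch (hypothetical helper, not part of the build): PF is set
   when the low 8 bits of a result contain an even number of 1 bits, and the
   table above precomputes CC_P for every possible byte value. */
#if 0
static void build_parity_table(uint8_t tbl[256])
{
    int i, j, bits;
    for (i = 0; i < 256; i++) {
        for (bits = 0, j = 0; j < 8; j++)
            bits += (i >> j) & 1;
        tbl[i] = (bits & 1) ? 0 : CC_P; /* even population count -> PF set */
    }
}
#endif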
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
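/* Usage sketch (hypothetical; the real RCL helpers are generated elsewhere):
   RCL rotates through CF, so a 16-bit rotate effectively works on 17 bits and
   an 8-bit rotate on 9, and the masked shift count is reduced with the tables
   above. */
#if 0
static uint16_t rclw_sketch(uint16_t val, int count, int *carry)
{
    count = rclw_table[count & 0x1f];      /* count mod 17 */
    while (count--) {
        int new_carry = (val >> 15) & 1;   /* bit 15 rotates out into CF */
        val = (val << 1) | *carry;         /* CF rotates in at bit 0 */
        *carry = new_carry;
    }
    return val;
}
#endif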
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bits mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non zero if error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232#ifdef VBOX
233 /* Trying to load a selector with CPL=1? */
234 /** @todo this is a hack to correct the incorrect checking order for pending interrupts in the patm iret replacement code (corrected in the ring-1 version) */
235 /** @todo in theory the iret could fault and we'd still need this. */
236 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0) && !EMIsRawRing1Enabled(env->pVM))
237 {
238 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
239 selector = selector & 0xfffc;
240 }
241#endif /* VBOX */
242
243 if (selector & 0x4)
244 dt = &env->ldt;
245 else
246 dt = &env->gdt;
247 index = selector & ~7;
248 if ((index + 7) > dt->limit)
249 return -1;
250 ptr = dt->base + index;
251 *e1_ptr = ldl_kernel(ptr);
252 *e2_ptr = ldl_kernel(ptr + 4);
253 return 0;
254}
255
256static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
257{
258 unsigned int limit;
259 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
260 if (e2 & DESC_G_MASK)
261 limit = (limit << 12) | 0xfff;
262 return limit;
263}
264
265static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
266{
267 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
268}
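/* Worked example (for illustration): the classic flat 4 GiB code descriptor
   0x00cf9a000000ffff, i.e. e1 = 0x0000ffff and e2 = 0x00cf9a00, yields
   get_seg_base() == 0 and a raw limit of 0xfffff which the G bit scales to
   0xffffffff. The base is scattered over e1[31:16], e2[7:0] and e2[31:24];
   the 20-bit limit lives in e1[15:0] and e2[19:16]. */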
269
270static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
271{
272 sc->base = get_seg_base(e1, e2);
273 sc->limit = get_seg_limit(e1, e2);
274#ifndef VBOX
275 sc->flags = e2;
276#else
277 sc->flags = e2 & DESC_RAW_FLAG_BITS;
278 sc->newselector = 0;
279 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
280#endif
281}
282
283/* init the segment cache in vm86 mode. */
284static inline void load_seg_vm(int seg, int selector)
285{
286 selector &= 0xffff;
287#ifdef VBOX
288 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
289 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
290 flags |= (3 << DESC_DPL_SHIFT);
291
292 cpu_x86_load_seg_cache(env, seg, selector,
293 (selector << 4), 0xffff, flags);
294#else /* VBOX */
295 cpu_x86_load_seg_cache(env, seg, selector,
296 (selector << 4), 0xffff, 0);
297#endif /* VBOX */
298}
299
300static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
301 uint32_t *esp_ptr, int dpl)
302{
303#ifndef VBOX
304 int type, index, shift;
305#else
306 unsigned int type, index, shift;
307#endif
308
309#if 0
310 {
311 int i;
312 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
313 for(i=0;i<env->tr.limit;i++) {
314 printf("%02x ", env->tr.base[i]);
315 if ((i & 7) == 7) printf("\n");
316 }
317 printf("\n");
318 }
319#endif
320
321 if (!(env->tr.flags & DESC_P_MASK))
322 cpu_abort(env, "invalid tss");
323 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
324 if ((type & 7) != 1)
325 cpu_abort(env, "invalid tss type");
326 shift = type >> 3;
327 index = (dpl * 4 + 2) << shift;
328 if (index + (4 << shift) - 1 > env->tr.limit)
329 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
330 if (shift == 0) {
331 *esp_ptr = lduw_kernel(env->tr.base + index);
332 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
333 } else {
334 *esp_ptr = ldl_kernel(env->tr.base + index);
335 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
336 }
337}
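/* Layout note (informational): a 32-bit TSS keeps the ring-n stack pointer at
   ESPn = 8 * n + 4 with SSn at 8 * n + 8, while a 16-bit TSS packs them at
   SPn = 4 * n + 2 and SSn = 4 * n + 4; the expression (dpl * 4 + 2) << shift
   above produces exactly those offsets for shift == 1 and shift == 0. */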
338
339/* XXX: merge with load_seg() */
340static void tss_load_seg(int seg_reg, int selector)
341{
342 uint32_t e1, e2;
343 int rpl, dpl, cpl;
344
345#ifdef VBOX
346 e1 = e2 = 0; /* gcc warning? */
347 cpl = env->hflags & HF_CPL_MASK;
348 /* Trying to load a selector with CPL=1? */
349 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
350 {
351 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
352 selector = selector & 0xfffc;
353 }
354#endif /* VBOX */
355
356 if ((selector & 0xfffc) != 0) {
357 if (load_segment(&e1, &e2, selector) != 0)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if (!(e2 & DESC_S_MASK))
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 rpl = selector & 3;
362 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
363 cpl = env->hflags & HF_CPL_MASK;
364 if (seg_reg == R_CS) {
365 if (!(e2 & DESC_CS_MASK))
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 /* XXX: is it correct ? */
368 if (dpl != rpl)
369 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
370 if ((e2 & DESC_C_MASK) && dpl > rpl)
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372 } else if (seg_reg == R_SS) {
373 /* SS must be writable data */
374 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376 if (dpl != cpl || dpl != rpl)
377 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
378 } else {
379 /* not readable code */
380 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
381 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
382 /* if data or non-conforming code, check the rights */
383 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
384 if (dpl < cpl || dpl < rpl)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386 }
387 }
388 if (!(e2 & DESC_P_MASK))
389 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
390 cpu_x86_load_seg_cache(env, seg_reg, selector,
391 get_seg_base(e1, e2),
392 get_seg_limit(e1, e2),
393 e2);
394 } else {
395 if (seg_reg == R_SS || seg_reg == R_CS)
396 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
397#ifdef VBOX
398# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
399 cpu_x86_load_seg_cache(env, seg_reg, selector,
400 0, 0, 0);
401# endif
402#endif /* VBOX */
403 }
404}
405
406#define SWITCH_TSS_JMP 0
407#define SWITCH_TSS_IRET 1
408#define SWITCH_TSS_CALL 2
409
410/* XXX: restore CPU state in registers (PowerPC case) */
411static void switch_tss(int tss_selector,
412 uint32_t e1, uint32_t e2, int source,
413 uint32_t next_eip)
414{
415 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
416 target_ulong tss_base;
417 uint32_t new_regs[8], new_segs[6];
418 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
419 uint32_t old_eflags, eflags_mask;
420 SegmentCache *dt;
421#ifndef VBOX
422 int index;
423#else
424 unsigned int index;
425#endif
426 target_ulong ptr;
427
428 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
429 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
430
431 /* if task gate, we read the TSS segment and we load it */
432 if (type == 5) {
433 if (!(e2 & DESC_P_MASK))
434 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
435 tss_selector = e1 >> 16;
436 if (tss_selector & 4)
437 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
438 if (load_segment(&e1, &e2, tss_selector) != 0)
439 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
440 if (e2 & DESC_S_MASK)
441 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
442 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
443 if ((type & 7) != 1)
444 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
445 }
446
447 if (!(e2 & DESC_P_MASK))
448 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
449
450 if (type & 8)
451 tss_limit_max = 103;
452 else
453 tss_limit_max = 43;
454 tss_limit = get_seg_limit(e1, e2);
455 tss_base = get_seg_base(e1, e2);
456 if ((tss_selector & 4) != 0 ||
457 tss_limit < tss_limit_max)
458 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
459 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
460 if (old_type & 8)
461 old_tss_limit_max = 103;
462 else
463 old_tss_limit_max = 43;
464
465#ifndef VBOX /* The old TSS is written first... */
466 /* read all the registers from the new TSS */
467 if (type & 8) {
468 /* 32 bit */
469 new_cr3 = ldl_kernel(tss_base + 0x1c);
470 new_eip = ldl_kernel(tss_base + 0x20);
471 new_eflags = ldl_kernel(tss_base + 0x24);
472 for(i = 0; i < 8; i++)
473 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
474 for(i = 0; i < 6; i++)
475 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
476 new_ldt = lduw_kernel(tss_base + 0x60);
477 new_trap = ldl_kernel(tss_base + 0x64);
478 } else {
479 /* 16 bit */
480 new_cr3 = 0;
481 new_eip = lduw_kernel(tss_base + 0x0e);
482 new_eflags = lduw_kernel(tss_base + 0x10);
483 for(i = 0; i < 8; i++)
484 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
485 for(i = 0; i < 4; i++)
486 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
487 new_ldt = lduw_kernel(tss_base + 0x2a);
488 new_segs[R_FS] = 0;
489 new_segs[R_GS] = 0;
490 new_trap = 0;
491 }
492#endif
493
494 /* NOTE: we must avoid memory exceptions during the task switch,
495 so we make dummy accesses before */
496 /* XXX: it can still fail in some cases, so a bigger hack is
497 necessary to validate the TLB after having done the accesses */
498
499 v1 = ldub_kernel(env->tr.base);
500 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
501 stb_kernel(env->tr.base, v1);
502 stb_kernel(env->tr.base + old_tss_limit_max, v2);
503
504 /* clear busy bit (it is restartable) */
505 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
506 target_ulong ptr;
507 uint32_t e2;
508 ptr = env->gdt.base + (env->tr.selector & ~7);
509 e2 = ldl_kernel(ptr + 4);
510 e2 &= ~DESC_TSS_BUSY_MASK;
511 stl_kernel(ptr + 4, e2);
512 }
513 old_eflags = compute_eflags();
514 if (source == SWITCH_TSS_IRET)
515 old_eflags &= ~NT_MASK;
516
517 /* save the current state in the old TSS */
518 if (type & 8) {
519 /* 32 bit */
520 stl_kernel(env->tr.base + 0x20, next_eip);
521 stl_kernel(env->tr.base + 0x24, old_eflags);
522 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
523 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
524 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
525 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
526 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
527 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
528 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
529 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
530 for(i = 0; i < 6; i++)
531 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
532#if defined(VBOX) && defined(DEBUG)
533 printf("TSS 32 bits switch\n");
534 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
535#endif
536 } else {
537 /* 16 bit */
538 stw_kernel(env->tr.base + 0x0e, next_eip);
539 stw_kernel(env->tr.base + 0x10, old_eflags);
540 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
541 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
542 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
543 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
544 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
545 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
546 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
547 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
548 for(i = 0; i < 4; i++)
549 stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
550 }
551
552#ifdef VBOX
553 /* read all the registers from the new TSS - may be the same as the old one */
554 if (type & 8) {
555 /* 32 bit */
556 new_cr3 = ldl_kernel(tss_base + 0x1c);
557 new_eip = ldl_kernel(tss_base + 0x20);
558 new_eflags = ldl_kernel(tss_base + 0x24);
559 for(i = 0; i < 8; i++)
560 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
561 for(i = 0; i < 6; i++)
562 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
563 new_ldt = lduw_kernel(tss_base + 0x60);
564 new_trap = ldl_kernel(tss_base + 0x64);
565 } else {
566 /* 16 bit */
567 new_cr3 = 0;
568 new_eip = lduw_kernel(tss_base + 0x0e);
569 new_eflags = lduw_kernel(tss_base + 0x10);
570 for(i = 0; i < 8; i++)
571 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
572 for(i = 0; i < 4; i++)
573 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
574 new_ldt = lduw_kernel(tss_base + 0x2a);
575 new_segs[R_FS] = 0;
576 new_segs[R_GS] = 0;
577 new_trap = 0;
578 }
579#endif
580
581 /* now if an exception occurs, it will occur in the next task
582 context */
583
584 if (source == SWITCH_TSS_CALL) {
585 stw_kernel(tss_base, env->tr.selector);
586 new_eflags |= NT_MASK;
587 }
588
589 /* set busy bit */
590 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
591 target_ulong ptr;
592 uint32_t e2;
593 ptr = env->gdt.base + (tss_selector & ~7);
594 e2 = ldl_kernel(ptr + 4);
595 e2 |= DESC_TSS_BUSY_MASK;
596 stl_kernel(ptr + 4, e2);
597 }
598
599 /* set the new CPU state */
600 /* from this point, any exception which occurs can give problems */
601 env->cr[0] |= CR0_TS_MASK;
602 env->hflags |= HF_TS_MASK;
603 env->tr.selector = tss_selector;
604 env->tr.base = tss_base;
605 env->tr.limit = tss_limit;
606 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
607#ifdef VBOX
608 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
609 env->tr.newselector = 0;
610#endif
611
612 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
613 cpu_x86_update_cr3(env, new_cr3);
614 }
615
616 /* load all registers without an exception, then reload them with
617 possible exception */
618 env->eip = new_eip;
619 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
620 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
621 if (!(type & 8))
622 eflags_mask &= 0xffff;
623 load_eflags(new_eflags, eflags_mask);
624 /* XXX: what to do in 16 bit case ? */
625 EAX = new_regs[0];
626 ECX = new_regs[1];
627 EDX = new_regs[2];
628 EBX = new_regs[3];
629 ESP = new_regs[4];
630 EBP = new_regs[5];
631 ESI = new_regs[6];
632 EDI = new_regs[7];
633 if (new_eflags & VM_MASK) {
634 for(i = 0; i < 6; i++)
635 load_seg_vm(i, new_segs[i]);
636 /* in vm86, CPL is always 3 */
637 cpu_x86_set_cpl(env, 3);
638 } else {
639 /* CPL is set to the RPL of CS */
640 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
641 /* first just selectors as the rest may trigger exceptions */
642 for(i = 0; i < 6; i++)
643 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
644 }
645
646 env->ldt.selector = new_ldt & ~4;
647 env->ldt.base = 0;
648 env->ldt.limit = 0;
649 env->ldt.flags = 0;
650#ifdef VBOX
651 env->ldt.flags = DESC_INTEL_UNUSABLE;
652 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
653 env->ldt.newselector = 0;
654#endif
655
656 /* load the LDT */
657 if (new_ldt & 4)
658 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
659
660 if ((new_ldt & 0xfffc) != 0) {
661 dt = &env->gdt;
662 index = new_ldt & ~7;
663 if ((index + 7) > dt->limit)
664 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
665 ptr = dt->base + index;
666 e1 = ldl_kernel(ptr);
667 e2 = ldl_kernel(ptr + 4);
668 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
669 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
670 if (!(e2 & DESC_P_MASK))
671 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
672 load_seg_cache_raw_dt(&env->ldt, e1, e2);
673 }
674
675 /* load the segments */
676 if (!(new_eflags & VM_MASK)) {
677 tss_load_seg(R_CS, new_segs[R_CS]);
678 tss_load_seg(R_SS, new_segs[R_SS]);
679 tss_load_seg(R_ES, new_segs[R_ES]);
680 tss_load_seg(R_DS, new_segs[R_DS]);
681 tss_load_seg(R_FS, new_segs[R_FS]);
682 tss_load_seg(R_GS, new_segs[R_GS]);
683 }
684
685 /* check that EIP is in the CS segment limits */
686 if (new_eip > env->segs[R_CS].limit) {
687 /* XXX: different exception if CALL ? */
688 raise_exception_err(EXCP0D_GPF, 0);
689 }
690
691#ifndef CONFIG_USER_ONLY
692 /* reset local breakpoints */
693 if (env->dr[7] & 0x55) {
694 for (i = 0; i < 4; i++) {
695 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
696 hw_breakpoint_remove(env, i);
697 }
698 env->dr[7] &= ~0x55;
699 }
700#endif
701}
702
703/* check if Port I/O is allowed in TSS */
704static inline void check_io(int addr, int size)
705{
706#ifndef VBOX
707 int io_offset, val, mask;
708#else
709 int val, mask;
710 unsigned int io_offset;
711#endif /* VBOX */
712
713 /* TSS must be a valid 32 bit one */
714 if (!(env->tr.flags & DESC_P_MASK) ||
715 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
716 env->tr.limit < 103)
717 goto fail;
718 io_offset = lduw_kernel(env->tr.base + 0x66);
719 io_offset += (addr >> 3);
720 /* Note: the check needs two bytes */
721 if ((io_offset + 1) > env->tr.limit)
722 goto fail;
723 val = lduw_kernel(env->tr.base + io_offset);
724 val >>= (addr & 7);
725 mask = (1 << size) - 1;
726 /* all bits must be zero to allow the I/O */
727 if ((val & mask) != 0) {
728 fail:
729 raise_exception_err(EXCP0D_GPF, 0);
730 }
731}
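/* Worked example (for illustration): a one-byte access to port 0x3f8 reads
   the 16-bit word at tr.base + io_offset + (0x3f8 >> 3) = io_offset + 0x7f
   and tests bit 0x3f8 & 7 = 0; a 2- or 4-byte access must find that many
   consecutive clear bits, which is why a whole word is loaded and masked. */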
732
733#ifdef VBOX
734
735/* Keep in sync with gen_check_external_event() */
736void helper_check_external_event()
737{
738 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
739 | CPU_INTERRUPT_EXTERNAL_EXIT
740 | CPU_INTERRUPT_EXTERNAL_TIMER
741 | CPU_INTERRUPT_EXTERNAL_DMA))
742 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
743 && (env->eflags & IF_MASK)
744 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
745 {
746 helper_external_event();
747 }
748
749}
750
751void helper_sync_seg(uint32_t reg)
752{
753 if (env->segs[reg].newselector)
754 sync_seg(env, reg, env->segs[reg].newselector);
755}
756
757#endif /* VBOX */
758
759void helper_check_iob(uint32_t t0)
760{
761 check_io(t0, 1);
762}
763
764void helper_check_iow(uint32_t t0)
765{
766 check_io(t0, 2);
767}
768
769void helper_check_iol(uint32_t t0)
770{
771 check_io(t0, 4);
772}
773
774void helper_outb(uint32_t port, uint32_t data)
775{
776#ifndef VBOX
777 cpu_outb(port, data & 0xff);
778#else
779 cpu_outb(env, port, data & 0xff);
780#endif
781}
782
783target_ulong helper_inb(uint32_t port)
784{
785#ifndef VBOX
786 return cpu_inb(port);
787#else
788 return cpu_inb(env, port);
789#endif
790}
791
792void helper_outw(uint32_t port, uint32_t data)
793{
794#ifndef VBOX
795 cpu_outw(port, data & 0xffff);
796#else
797 cpu_outw(env, port, data & 0xffff);
798#endif
799}
800
801target_ulong helper_inw(uint32_t port)
802{
803#ifndef VBOX
804 return cpu_inw(port);
805#else
806 return cpu_inw(env, port);
807#endif
808}
809
810void helper_outl(uint32_t port, uint32_t data)
811{
812#ifndef VBOX
813 cpu_outl(port, data);
814#else
815 cpu_outl(env, port, data);
816#endif
817}
818
819target_ulong helper_inl(uint32_t port)
820{
821#ifndef VBOX
822 return cpu_inl(port);
823#else
824 return cpu_inl(env, port);
825#endif
826}
827
828static inline unsigned int get_sp_mask(unsigned int e2)
829{
830 if (e2 & DESC_B_MASK)
831 return 0xffffffff;
832 else
833 return 0xffff;
834}
835
836static int exeption_has_error_code(int intno)
837{
838 switch(intno) {
839 case 8:
840 case 10:
841 case 11:
842 case 12:
843 case 13:
844 case 14:
845 case 17:
846 return 1;
847 }
848 return 0;
849}
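/* For reference: these vectors are #DF(8), #TS(10), #NP(11), #SS(12),
   #GP(13), #PF(14) and #AC(17) - the x86 exceptions that push an error code. */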
850
851#ifdef TARGET_X86_64
852#define SET_ESP(val, sp_mask)\
853do {\
854 if ((sp_mask) == 0xffff)\
855 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
856 else if ((sp_mask) == 0xffffffffLL)\
857 ESP = (uint32_t)(val);\
858 else\
859 ESP = (val);\
860} while (0)
861#else
862#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
863#endif
864
865/* in 64-bit machines, this can overflow. So this segment addition macro
866 * can be used to trim the value to 32-bit whenever needed */
867#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
868
869/* XXX: add a is_user flag to have proper security support */
870#define PUSHW(ssp, sp, sp_mask, val)\
871{\
872 sp -= 2;\
873 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
874}
875
876#define PUSHL(ssp, sp, sp_mask, val)\
877{\
878 sp -= 4;\
879 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
880}
881
882#define POPW(ssp, sp, sp_mask, val)\
883{\
884 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
885 sp += 2;\
886}
887
888#define POPL(ssp, sp, sp_mask, val)\
889{\
890 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
891 sp += 4;\
892}
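/* Usage sketch (hypothetical caller): the macros above only update the local
   copy of the stack pointer, so callers push into a temporary and commit it
   with SET_ESP() once every access has succeeded, e.g.

       sp_mask = get_sp_mask(env->segs[R_SS].flags);
       ssp     = env->segs[R_SS].base;
       esp     = ESP;
       PUSHL(ssp, esp, sp_mask, error_code);
       SET_ESP(esp, sp_mask);

   so that a faulting store leaves the architectural ESP unchanged. */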
893
894/* protected mode interrupt */
895static void do_interrupt_protected(int intno, int is_int, int error_code,
896 unsigned int next_eip, int is_hw)
897{
898 SegmentCache *dt;
899 target_ulong ptr, ssp;
900 int type, dpl, selector, ss_dpl, cpl;
901 int has_error_code, new_stack, shift;
902 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
903 uint32_t old_eip, sp_mask;
904
905#ifdef VBOX
906 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
907 cpu_loop_exit();
908#endif
909
910 has_error_code = 0;
911 if (!is_int && !is_hw)
912 has_error_code = exeption_has_error_code(intno);
913 if (is_int)
914 old_eip = next_eip;
915 else
916 old_eip = env->eip;
917
918 dt = &env->idt;
919#ifndef VBOX
920 if (intno * 8 + 7 > dt->limit)
921#else
922 if ((unsigned)intno * 8 + 7 > dt->limit)
923#endif
924 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
925 ptr = dt->base + intno * 8;
926 e1 = ldl_kernel(ptr);
927 e2 = ldl_kernel(ptr + 4);
928 /* check gate type */
929 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
930 switch(type) {
931 case 5: /* task gate */
932#ifdef VBOX
933 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
934 cpl = env->hflags & HF_CPL_MASK;
935 /* check privilege if software int */
936 if (is_int && dpl < cpl)
937 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
938#endif
939 /* must do that check here to return the correct error code */
940 if (!(e2 & DESC_P_MASK))
941 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
942 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
943 if (has_error_code) {
944 int type;
945 uint32_t mask;
946 /* push the error code */
947 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
948 shift = type >> 3;
949 if (env->segs[R_SS].flags & DESC_B_MASK)
950 mask = 0xffffffff;
951 else
952 mask = 0xffff;
953 esp = (ESP - (2 << shift)) & mask;
954 ssp = env->segs[R_SS].base + esp;
955 if (shift)
956 stl_kernel(ssp, error_code);
957 else
958 stw_kernel(ssp, error_code);
959 SET_ESP(esp, mask);
960 }
961 return;
962 case 6: /* 286 interrupt gate */
963 case 7: /* 286 trap gate */
964 case 14: /* 386 interrupt gate */
965 case 15: /* 386 trap gate */
966 break;
967 default:
968 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
969 break;
970 }
971 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
972 cpl = env->hflags & HF_CPL_MASK;
973 /* check privilege if software int */
974 if (is_int && dpl < cpl)
975 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
976 /* check valid bit */
977 if (!(e2 & DESC_P_MASK))
978 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
979 selector = e1 >> 16;
980 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
981 if ((selector & 0xfffc) == 0)
982 raise_exception_err(EXCP0D_GPF, 0);
983
984 if (load_segment(&e1, &e2, selector) != 0)
985 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
986#ifdef VBOX /** @todo figure out when this is done one day... */
987 if (!(e2 & DESC_A_MASK))
988 e2 = set_segment_accessed(selector, e2);
989#endif
990 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
991 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
992 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
993 if (dpl > cpl)
994 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
995 if (!(e2 & DESC_P_MASK))
996 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
997 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
998 /* to inner privilege */
999 get_ss_esp_from_tss(&ss, &esp, dpl);
1000 if ((ss & 0xfffc) == 0)
1001 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1002 if ((ss & 3) != dpl)
1003 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1004 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
1005 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1006#ifdef VBOX /** @todo figure out when this is done one day... */
1007 if (!(ss_e2 & DESC_A_MASK))
1008 ss_e2 = set_segment_accessed(ss, ss_e2);
1009#endif
1010 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1011 if (ss_dpl != dpl)
1012 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1013 if (!(ss_e2 & DESC_S_MASK) ||
1014 (ss_e2 & DESC_CS_MASK) ||
1015 !(ss_e2 & DESC_W_MASK))
1016 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1017 if (!(ss_e2 & DESC_P_MASK))
1018#ifdef VBOX /* See page 3-477 of 253666.pdf */
1019 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
1020#else
1021 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1022#endif
1023 new_stack = 1;
1024 sp_mask = get_sp_mask(ss_e2);
1025 ssp = get_seg_base(ss_e1, ss_e2);
1026#if defined(VBOX) && defined(DEBUG)
1027 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
1028#endif
1029 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1030 /* to same privilege */
1031 if (env->eflags & VM_MASK)
1032 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1033 new_stack = 0;
1034 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1035 ssp = env->segs[R_SS].base;
1036 esp = ESP;
1037 dpl = cpl;
1038 } else {
1039 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1040 new_stack = 0; /* avoid warning */
1041 sp_mask = 0; /* avoid warning */
1042 ssp = 0; /* avoid warning */
1043 esp = 0; /* avoid warning */
1044 }
1045
1046 shift = type >> 3;
1047
1048#if 0
1049 /* XXX: check that enough room is available */
1050 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1051 if (env->eflags & VM_MASK)
1052 push_size += 8;
1053 push_size <<= shift;
1054#endif
1055 if (shift == 1) {
1056 if (new_stack) {
1057 if (env->eflags & VM_MASK) {
1058 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1059 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1060 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1061 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1062 }
1063 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1064 PUSHL(ssp, esp, sp_mask, ESP);
1065 }
1066 PUSHL(ssp, esp, sp_mask, compute_eflags());
1067 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1068 PUSHL(ssp, esp, sp_mask, old_eip);
1069 if (has_error_code) {
1070 PUSHL(ssp, esp, sp_mask, error_code);
1071 }
1072 } else {
1073 if (new_stack) {
1074 if (env->eflags & VM_MASK) {
1075 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1076 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1077 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1078 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1079 }
1080 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1081 PUSHW(ssp, esp, sp_mask, ESP);
1082 }
1083 PUSHW(ssp, esp, sp_mask, compute_eflags());
1084 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1085 PUSHW(ssp, esp, sp_mask, old_eip);
1086 if (has_error_code) {
1087 PUSHW(ssp, esp, sp_mask, error_code);
1088 }
1089 }
1090
1091 if (new_stack) {
1092 if (env->eflags & VM_MASK) {
1093 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1094 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1095 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1096 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1097 }
1098 ss = (ss & ~3) | dpl;
1099 cpu_x86_load_seg_cache(env, R_SS, ss,
1100 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1101 }
1102 SET_ESP(esp, sp_mask);
1103
1104 selector = (selector & ~3) | dpl;
1105 cpu_x86_load_seg_cache(env, R_CS, selector,
1106 get_seg_base(e1, e2),
1107 get_seg_limit(e1, e2),
1108 e2);
1109 cpu_x86_set_cpl(env, dpl);
1110 env->eip = offset;
1111
1112 /* interrupt gate clear IF mask */
1113 if ((type & 1) == 0) {
1114 env->eflags &= ~IF_MASK;
1115 }
1116#ifndef VBOX
1117 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1118#else
1119 /*
1120 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1121 * gets confused by seemingly changed EFLAGS. See #3491 and
1122 * public bug #2341.
1123 */
1124 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1125#endif
1126}
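/* Frame summary (informational): for a 32-bit gate that switches to an inner
   stack the code above pushes, in order (i.e. towards lower addresses):
       [GS, FS, DS, ES]   only when arriving from V86 mode
       SS, ESP            the old stack
       EFLAGS, CS, EIP    the return state
       [error code]       only for exceptions that define one
   A same-privilege gate pushes just EFLAGS/CS/EIP (plus any error code) on
   the current stack; a 16-bit gate pushes word-sized copies instead. */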
1127
1128#ifdef VBOX
1129
1130/* check if VME interrupt redirection is enabled in TSS */
1131DECLINLINE(bool) is_vme_irq_redirected(int intno)
1132{
1133 unsigned int io_offset, intredir_offset;
1134 unsigned char val, mask;
1135
1136 /* TSS must be a valid 32 bit one */
1137 if (!(env->tr.flags & DESC_P_MASK) ||
1138 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1139 env->tr.limit < 103)
1140 goto fail;
1141 io_offset = lduw_kernel(env->tr.base + 0x66);
1142 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1143 if (io_offset < 0x68 + 0x20)
1144 io_offset = 0x68 + 0x20;
1145 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1146 intredir_offset = io_offset - 0x20;
1147
1148 intredir_offset += (intno >> 3);
1149 if ((intredir_offset) > env->tr.limit)
1150 goto fail;
1151
1152 val = ldub_kernel(env->tr.base + intredir_offset);
1153 mask = 1 << (unsigned char)(intno & 7);
1154
1155 /* bit set means no redirection. */
1156 if ((val & mask) != 0) {
1157 return false;
1158 }
1159 return true;
1160
1161fail:
1162 raise_exception_err(EXCP0D_GPF, 0);
1163 return true;
1164}
1165
1166/* V86 mode software interrupt with CR4.VME=1 */
1167static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1168{
1169 target_ulong ptr, ssp;
1170 int selector;
1171 uint32_t offset, esp;
1172 uint32_t old_cs, old_eflags;
1173 uint32_t iopl;
1174
1175 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1176
1177 if (!is_vme_irq_redirected(intno))
1178 {
1179 if (iopl == 3)
1180 {
1181 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1182 return;
1183 }
1184 else
1185 raise_exception_err(EXCP0D_GPF, 0);
1186 }
1187
1188 /* virtual mode idt is at linear address 0 */
1189 ptr = 0 + intno * 4;
1190 offset = lduw_kernel(ptr);
1191 selector = lduw_kernel(ptr + 2);
1192 esp = ESP;
1193 ssp = env->segs[R_SS].base;
1194 old_cs = env->segs[R_CS].selector;
1195
1196 old_eflags = compute_eflags();
1197 if (iopl < 3)
1198 {
1199 /* copy VIF into IF and set IOPL to 3 */
1200 if (env->eflags & VIF_MASK)
1201 old_eflags |= IF_MASK;
1202 else
1203 old_eflags &= ~IF_MASK;
1204
1205 old_eflags |= (3 << IOPL_SHIFT);
1206 }
1207
1208 /* XXX: use SS segment size ? */
1209 PUSHW(ssp, esp, 0xffff, old_eflags);
1210 PUSHW(ssp, esp, 0xffff, old_cs);
1211 PUSHW(ssp, esp, 0xffff, next_eip);
1212
1213 /* update processor state */
1214 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1215 env->eip = offset;
1216 env->segs[R_CS].selector = selector;
1217 env->segs[R_CS].base = (selector << 4);
1218 env->eflags &= ~(TF_MASK | RF_MASK);
1219
1220 if (iopl < 3)
1221 env->eflags &= ~VIF_MASK;
1222 else
1223 env->eflags &= ~IF_MASK;
1224}
1225
1226#endif /* VBOX */
1227
1228#ifdef TARGET_X86_64
1229
1230#define PUSHQ(sp, val)\
1231{\
1232 sp -= 8;\
1233 stq_kernel(sp, (val));\
1234}
1235
1236#define POPQ(sp, val)\
1237{\
1238 val = ldq_kernel(sp);\
1239 sp += 8;\
1240}
1241
1242static inline target_ulong get_rsp_from_tss(int level)
1243{
1244 int index;
1245
1246#if 0
1247 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1248 env->tr.base, env->tr.limit);
1249#endif
1250
1251 if (!(env->tr.flags & DESC_P_MASK))
1252 cpu_abort(env, "invalid tss");
1253 index = 8 * level + 4;
1254 if ((index + 7) > env->tr.limit)
1255 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1256 return ldq_kernel(env->tr.base + index);
1257}
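/* Layout note (informational): in the 64-bit TSS, RSP0/1/2 sit at offsets 4,
   12 and 20 and IST1..IST7 at 36..84, so the expression 8 * level + 4 covers
   both - callers pass the privilege level directly for RSPn and (ist + 3)
   for ISTn. */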
1258
1259/* 64 bit interrupt */
1260static void do_interrupt64(int intno, int is_int, int error_code,
1261 target_ulong next_eip, int is_hw)
1262{
1263 SegmentCache *dt;
1264 target_ulong ptr;
1265 int type, dpl, selector, cpl, ist;
1266 int has_error_code, new_stack;
1267 uint32_t e1, e2, e3, ss;
1268 target_ulong old_eip, esp, offset;
1269
1270#ifdef VBOX
1271 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1272 cpu_loop_exit();
1273#endif
1274
1275 has_error_code = 0;
1276 if (!is_int && !is_hw)
1277 has_error_code = exeption_has_error_code(intno);
1278 if (is_int)
1279 old_eip = next_eip;
1280 else
1281 old_eip = env->eip;
1282
1283 dt = &env->idt;
1284 if (intno * 16 + 15 > dt->limit)
1285 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1286 ptr = dt->base + intno * 16;
1287 e1 = ldl_kernel(ptr);
1288 e2 = ldl_kernel(ptr + 4);
1289 e3 = ldl_kernel(ptr + 8);
1290 /* check gate type */
1291 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1292 switch(type) {
1293 case 14: /* 386 interrupt gate */
1294 case 15: /* 386 trap gate */
1295 break;
1296 default:
1297 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1298 break;
1299 }
1300 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1301 cpl = env->hflags & HF_CPL_MASK;
1302 /* check privilege if software int */
1303 if (is_int && dpl < cpl)
1304 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1305 /* check valid bit */
1306 if (!(e2 & DESC_P_MASK))
1307 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1308 selector = e1 >> 16;
1309 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1310 ist = e2 & 7;
1311 if ((selector & 0xfffc) == 0)
1312 raise_exception_err(EXCP0D_GPF, 0);
1313
1314 if (load_segment(&e1, &e2, selector) != 0)
1315 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1316 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1317 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1318 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1319 if (dpl > cpl)
1320 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1321 if (!(e2 & DESC_P_MASK))
1322 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1323 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1324 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1325 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1326 /* to inner privilege */
1327 if (ist != 0)
1328 esp = get_rsp_from_tss(ist + 3);
1329 else
1330 esp = get_rsp_from_tss(dpl);
1331 esp &= ~0xfLL; /* align stack */
1332 ss = 0;
1333 new_stack = 1;
1334 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1335 /* to same privilege */
1336 if (env->eflags & VM_MASK)
1337 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1338 new_stack = 0;
1339 if (ist != 0)
1340 esp = get_rsp_from_tss(ist + 3);
1341 else
1342 esp = ESP;
1343 esp &= ~0xfLL; /* align stack */
1344 dpl = cpl;
1345 } else {
1346 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1347 new_stack = 0; /* avoid warning */
1348 esp = 0; /* avoid warning */
1349 }
1350
1351 PUSHQ(esp, env->segs[R_SS].selector);
1352 PUSHQ(esp, ESP);
1353 PUSHQ(esp, compute_eflags());
1354 PUSHQ(esp, env->segs[R_CS].selector);
1355 PUSHQ(esp, old_eip);
1356 if (has_error_code) {
1357 PUSHQ(esp, error_code);
1358 }
1359
1360 if (new_stack) {
1361 ss = 0 | dpl;
1362#ifndef VBOX
1363 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1364#else
1365 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1366#endif
1367 }
1368 ESP = esp;
1369
1370 selector = (selector & ~3) | dpl;
1371 cpu_x86_load_seg_cache(env, R_CS, selector,
1372 get_seg_base(e1, e2),
1373 get_seg_limit(e1, e2),
1374 e2);
1375 cpu_x86_set_cpl(env, dpl);
1376 env->eip = offset;
1377
1378 /* interrupt gate clear IF mask */
1379 if ((type & 1) == 0) {
1380 env->eflags &= ~IF_MASK;
1381 }
1382#ifndef VBOX
1383 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1384#else /* VBOX */
1385 /*
1386 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1387 * gets confused by seemingly changed EFLAGS. See #3491 and
1388 * public bug #2341.
1389 */
1390 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1391#endif /* VBOX */
1392}
1393#endif
1394
1395#ifdef TARGET_X86_64
1396#if defined(CONFIG_USER_ONLY)
1397void helper_syscall(int next_eip_addend)
1398{
1399 env->exception_index = EXCP_SYSCALL;
1400 env->exception_next_eip = env->eip + next_eip_addend;
1401 cpu_loop_exit();
1402}
1403#else
1404void helper_syscall(int next_eip_addend)
1405{
1406 int selector;
1407
1408 if (!(env->efer & MSR_EFER_SCE)) {
1409 raise_exception_err(EXCP06_ILLOP, 0);
1410 }
1411 selector = (env->star >> 32) & 0xffff;
1412 if (env->hflags & HF_LMA_MASK) {
1413 int code64;
1414
1415 ECX = env->eip + next_eip_addend;
1416 env->regs[11] = compute_eflags();
1417
1418 code64 = env->hflags & HF_CS64_MASK;
1419
1420 cpu_x86_set_cpl(env, 0);
1421 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1422 0, 0xffffffff,
1423 DESC_G_MASK | DESC_P_MASK |
1424 DESC_S_MASK |
1425 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1426 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1427 0, 0xffffffff,
1428 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1429 DESC_S_MASK |
1430 DESC_W_MASK | DESC_A_MASK);
1431 env->eflags &= ~env->fmask;
1432 load_eflags(env->eflags, 0);
1433 if (code64)
1434 env->eip = env->lstar;
1435 else
1436 env->eip = env->cstar;
1437 } else {
1438 ECX = (uint32_t)(env->eip + next_eip_addend);
1439
1440 cpu_x86_set_cpl(env, 0);
1441 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1442 0, 0xffffffff,
1443 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1444 DESC_S_MASK |
1445 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1446 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1447 0, 0xffffffff,
1448 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1449 DESC_S_MASK |
1450 DESC_W_MASK | DESC_A_MASK);
1451 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1452 env->eip = (uint32_t)env->star;
1453 }
1454}
1455#endif
1456#endif
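/* MSR layout used above (informational): STAR[47:32] supplies the SYSCALL CS
   selector (SS is that value + 8), STAR[63:48] the SYSRET selector base, and
   the target RIP comes from LSTAR in 64-bit mode, CSTAR in compatibility mode,
   or STAR[31:0] when long mode is inactive; SFMASK (env->fmask) selects the
   RFLAGS bits cleared on entry. */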
1457
1458#ifdef TARGET_X86_64
1459void helper_sysret(int dflag)
1460{
1461 int cpl, selector;
1462
1463 if (!(env->efer & MSR_EFER_SCE)) {
1464 raise_exception_err(EXCP06_ILLOP, 0);
1465 }
1466 cpl = env->hflags & HF_CPL_MASK;
1467 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1468 raise_exception_err(EXCP0D_GPF, 0);
1469 }
1470 selector = (env->star >> 48) & 0xffff;
1471 if (env->hflags & HF_LMA_MASK) {
1472 if (dflag == 2) {
1473 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1474 0, 0xffffffff,
1475 DESC_G_MASK | DESC_P_MASK |
1476 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1477 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1478 DESC_L_MASK);
1479 env->eip = ECX;
1480 } else {
1481 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1482 0, 0xffffffff,
1483 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1484 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1485 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1486 env->eip = (uint32_t)ECX;
1487 }
1488 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1489 0, 0xffffffff,
1490 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1491 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1492 DESC_W_MASK | DESC_A_MASK);
1493 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1494 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1495 cpu_x86_set_cpl(env, 3);
1496 } else {
1497 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1498 0, 0xffffffff,
1499 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1500 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1501 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1502 env->eip = (uint32_t)ECX;
1503 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1504 0, 0xffffffff,
1505 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1506 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1507 DESC_W_MASK | DESC_A_MASK);
1508 env->eflags |= IF_MASK;
1509 cpu_x86_set_cpl(env, 3);
1510 }
1511}
1512#endif
1513
1514#ifdef VBOX
1515
1516/**
1517 * Checks and processes external VMM events.
1518 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1519 */
1520void helper_external_event(void)
1521{
1522# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1523 uintptr_t uSP;
1524# ifdef RT_ARCH_AMD64
1525 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1526# else
1527 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1528# endif
1529 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1530# endif
1531 /* Keep in sync with flags checked by gen_check_external_event() */
1532 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1533 {
1534 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1535 ~CPU_INTERRUPT_EXTERNAL_HARD);
1536 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1537 }
1538 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1539 {
1540 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1541 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1542 cpu_exit(env);
1543 }
1544 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1545 {
1546 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1547 ~CPU_INTERRUPT_EXTERNAL_DMA);
1548 remR3DmaRun(env);
1549 }
1550 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1551 {
1552 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1553 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1554 remR3TimersRun(env);
1555 }
1556 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1557 {
1558 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1559 ~CPU_INTERRUPT_EXTERNAL_FLUSH_TLB);
1560 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1561 }
1562}
1563
1564/* helper for recording call instruction addresses for later scanning */
1565void helper_record_call()
1566{
1567 if ( !(env->state & CPU_RAW_RING0)
1568 && (env->cr[0] & CR0_PG_MASK)
1569 && !(env->eflags & X86_EFL_IF))
1570 remR3RecordCall(env);
1571}
1572
1573#endif /* VBOX */
1574
1575/* real mode interrupt */
1576static void do_interrupt_real(int intno, int is_int, int error_code,
1577 unsigned int next_eip)
1578{
1579 SegmentCache *dt;
1580 target_ulong ptr, ssp;
1581 int selector;
1582 uint32_t offset, esp;
1583 uint32_t old_cs, old_eip;
1584
1585 /* real mode (simpler !) */
1586 dt = &env->idt;
1587#ifndef VBOX
1588 if (intno * 4 + 3 > dt->limit)
1589#else
1590 if ((unsigned)intno * 4 + 3 > dt->limit)
1591#endif
1592 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1593 ptr = dt->base + intno * 4;
1594 offset = lduw_kernel(ptr);
1595 selector = lduw_kernel(ptr + 2);
1596 esp = ESP;
1597 ssp = env->segs[R_SS].base;
1598 if (is_int)
1599 old_eip = next_eip;
1600 else
1601 old_eip = env->eip;
1602 old_cs = env->segs[R_CS].selector;
1603 /* XXX: use SS segment size ? */
1604 PUSHW(ssp, esp, 0xffff, compute_eflags());
1605 PUSHW(ssp, esp, 0xffff, old_cs);
1606 PUSHW(ssp, esp, 0xffff, old_eip);
1607
1608 /* update processor state */
1609 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1610 env->eip = offset;
1611 env->segs[R_CS].selector = selector;
1612 env->segs[R_CS].base = (selector << 4);
1613 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1614}
1615
1616/* fake user mode interrupt */
1617void do_interrupt_user(int intno, int is_int, int error_code,
1618 target_ulong next_eip)
1619{
1620 SegmentCache *dt;
1621 target_ulong ptr;
1622 int dpl, cpl, shift;
1623 uint32_t e2;
1624
1625 dt = &env->idt;
1626 if (env->hflags & HF_LMA_MASK) {
1627 shift = 4;
1628 } else {
1629 shift = 3;
1630 }
1631 ptr = dt->base + (intno << shift);
1632 e2 = ldl_kernel(ptr + 4);
1633
1634 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1635 cpl = env->hflags & HF_CPL_MASK;
1636 /* check privilege if software int */
1637 if (is_int && dpl < cpl)
1638 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1639
1640 /* Since we emulate only user space, we cannot do more than
1641 exiting the emulation with the suitable exception and error
1642 code */
1643 if (is_int)
1644 EIP = next_eip;
1645}
1646
1647#if !defined(CONFIG_USER_ONLY)
1648static void handle_even_inj(int intno, int is_int, int error_code,
1649 int is_hw, int rm)
1650{
1651 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1652 if (!(event_inj & SVM_EVTINJ_VALID)) {
1653 int type;
1654 if (is_int)
1655 type = SVM_EVTINJ_TYPE_SOFT;
1656 else
1657 type = SVM_EVTINJ_TYPE_EXEPT;
1658 event_inj = intno | type | SVM_EVTINJ_VALID;
1659 if (!rm && exeption_has_error_code(intno)) {
1660 event_inj |= SVM_EVTINJ_VALID_ERR;
1661 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1662 }
1663 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1664 }
1665}
1666#endif
1667
1668/*
1669 * Begin execution of an interruption. is_int is TRUE if coming from
1670 * the int instruction. next_eip is the EIP value AFTER the interrupt
1671 * instruction. It is only relevant if is_int is TRUE.
1672 */
1673void do_interrupt(int intno, int is_int, int error_code,
1674 target_ulong next_eip, int is_hw)
1675{
1676 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1677 if ((env->cr[0] & CR0_PE_MASK)) {
1678 static int count;
1679 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1680 count, intno, error_code, is_int,
1681 env->hflags & HF_CPL_MASK,
1682 env->segs[R_CS].selector, EIP,
1683 (int)env->segs[R_CS].base + EIP,
1684 env->segs[R_SS].selector, ESP);
1685 if (intno == 0x0e) {
1686 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1687 } else {
1688 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1689 }
1690 qemu_log("\n");
1691 log_cpu_state(env, X86_DUMP_CCOP);
1692#if 0
1693 {
1694 int i;
1695 uint8_t *ptr;
1696 qemu_log(" code=");
1697 ptr = env->segs[R_CS].base + env->eip;
1698 for(i = 0; i < 16; i++) {
1699 qemu_log(" %02x", ldub(ptr + i));
1700 }
1701 qemu_log("\n");
1702 }
1703#endif
1704 count++;
1705 }
1706 }
1707#ifdef VBOX
1708 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1709 if (is_int) {
1710 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1711 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1712 } else {
1713 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1714 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1715 }
1716 }
1717#endif
1718 if (env->cr[0] & CR0_PE_MASK) {
1719#if !defined(CONFIG_USER_ONLY)
1720 if (env->hflags & HF_SVMI_MASK)
1721 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1722#endif
1723#ifdef TARGET_X86_64
1724 if (env->hflags & HF_LMA_MASK) {
1725 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1726 } else
1727#endif
1728 {
1729#ifdef VBOX
1730 /* int xx *, v86 code and VME enabled? */
1731 if ( (env->eflags & VM_MASK)
1732 && (env->cr[4] & CR4_VME_MASK)
1733 && is_int
1734 && !is_hw
1735 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1736 )
1737 do_soft_interrupt_vme(intno, error_code, next_eip);
1738 else
1739#endif /* VBOX */
1740 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1741 }
1742 } else {
1743#if !defined(CONFIG_USER_ONLY)
1744 if (env->hflags & HF_SVMI_MASK)
1745 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1746#endif
1747 do_interrupt_real(intno, is_int, error_code, next_eip);
1748 }
1749
1750#if !defined(CONFIG_USER_ONLY)
1751 if (env->hflags & HF_SVMI_MASK) {
1752 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1753 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1754 }
1755#endif
1756}
1757
1758/* This should come from sysemu.h - if we could include it here... */
1759void qemu_system_reset_request(void);
1760
1761/*
1762 * Check nested exceptions and change to double or triple fault if
1763 * needed. It should only be called if this is not an interrupt.
1764 * Returns the new exception number.
1765 */
1766static int check_exception(int intno, int *error_code)
1767{
1768 int first_contributory = env->old_exception == 0 ||
1769 (env->old_exception >= 10 &&
1770 env->old_exception <= 13);
1771 int second_contributory = intno == 0 ||
1772 (intno >= 10 && intno <= 13);
1773
1774 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1775 env->old_exception, intno);
1776
1777#if !defined(CONFIG_USER_ONLY)
1778 if (env->old_exception == EXCP08_DBLE) {
1779 if (env->hflags & HF_SVMI_MASK)
1780 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1781
1782 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1783
1784# ifndef VBOX
1785 qemu_system_reset_request();
1786# else
1787 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1788# endif
1789 return EXCP_HLT;
1790 }
1791#endif
1792
1793 if ((first_contributory && second_contributory)
1794 || (env->old_exception == EXCP0E_PAGE &&
1795 (second_contributory || (intno == EXCP0E_PAGE)))) {
1796 intno = EXCP08_DBLE;
1797 *error_code = 0;
1798 }
1799
1800 if (second_contributory || (intno == EXCP0E_PAGE) ||
1801 (intno == EXCP08_DBLE))
1802 env->old_exception = intno;
1803
1804 return intno;
1805}
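/* Rule summary (informational): vectors 0 and 10-13 (#DE, #TS, #NP, #SS, #GP)
   are "contributory"; a contributory exception raised while delivering another
   contributory one, or a contributory exception / second #PF raised while
   delivering a #PF, escalates to #DF(8), and a further fault during #DF
   delivery is the triple fault handled above. */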
1806
1807/*
1808 * Signal an interruption. It is executed in the main CPU loop.
1809 * is_int is TRUE if coming from the int instruction. next_eip is the
1810 * EIP value AFTER the interrupt instruction. It is only relevant if
1811 * is_int is TRUE.
1812 */
1813static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1814 int next_eip_addend)
1815{
1816#if defined(VBOX) && defined(DEBUG)
1817 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1818#endif
1819 if (!is_int) {
1820 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1821 intno = check_exception(intno, &error_code);
1822 } else {
1823 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1824 }
1825
1826 env->exception_index = intno;
1827 env->error_code = error_code;
1828 env->exception_is_int = is_int;
1829 env->exception_next_eip = env->eip + next_eip_addend;
1830 cpu_loop_exit();
1831}
1832
1833/* shortcuts to generate exceptions */
1834
1835void raise_exception_err(int exception_index, int error_code)
1836{
1837 raise_interrupt(exception_index, 0, error_code, 0);
1838}
1839
1840void raise_exception(int exception_index)
1841{
1842 raise_interrupt(exception_index, 0, 0, 0);
1843}
1844
1845void raise_exception_env(int exception_index, CPUState *nenv)
1846{
1847 env = nenv;
1848 raise_exception(exception_index);
1849}
1850/* SMM support */
1851
1852#if defined(CONFIG_USER_ONLY)
1853
1854void do_smm_enter(void)
1855{
1856}
1857
1858void helper_rsm(void)
1859{
1860}
1861
1862#else
1863
1864#ifdef TARGET_X86_64
1865#define SMM_REVISION_ID 0x00020064
1866#else
1867#define SMM_REVISION_ID 0x00020000
1868#endif
1869
1870void do_smm_enter(void)
1871{
1872 target_ulong sm_state;
1873 SegmentCache *dt;
1874 int i, offset;
1875
1876 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1877 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1878
1879 env->hflags |= HF_SMM_MASK;
1880 cpu_smm_update(env);
1881
1882 sm_state = env->smbase + 0x8000;
1883
1884#ifdef TARGET_X86_64
1885 for(i = 0; i < 6; i++) {
1886 dt = &env->segs[i];
1887 offset = 0x7e00 + i * 16;
1888 stw_phys(sm_state + offset, dt->selector);
1889 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1890 stl_phys(sm_state + offset + 4, dt->limit);
1891 stq_phys(sm_state + offset + 8, dt->base);
1892 }
1893
1894 stq_phys(sm_state + 0x7e68, env->gdt.base);
1895 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1896
1897 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1898 stq_phys(sm_state + 0x7e78, env->ldt.base);
1899 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1900 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1901
1902 stq_phys(sm_state + 0x7e88, env->idt.base);
1903 stl_phys(sm_state + 0x7e84, env->idt.limit);
1904
1905 stw_phys(sm_state + 0x7e90, env->tr.selector);
1906 stq_phys(sm_state + 0x7e98, env->tr.base);
1907 stl_phys(sm_state + 0x7e94, env->tr.limit);
1908 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1909
1910 stq_phys(sm_state + 0x7ed0, env->efer);
1911
1912 stq_phys(sm_state + 0x7ff8, EAX);
1913 stq_phys(sm_state + 0x7ff0, ECX);
1914 stq_phys(sm_state + 0x7fe8, EDX);
1915 stq_phys(sm_state + 0x7fe0, EBX);
1916 stq_phys(sm_state + 0x7fd8, ESP);
1917 stq_phys(sm_state + 0x7fd0, EBP);
1918 stq_phys(sm_state + 0x7fc8, ESI);
1919 stq_phys(sm_state + 0x7fc0, EDI);
1920 for(i = 8; i < 16; i++)
1921 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1922 stq_phys(sm_state + 0x7f78, env->eip);
1923 stl_phys(sm_state + 0x7f70, compute_eflags());
1924 stl_phys(sm_state + 0x7f68, env->dr[6]);
1925 stl_phys(sm_state + 0x7f60, env->dr[7]);
1926
1927 stl_phys(sm_state + 0x7f48, env->cr[4]);
1928 stl_phys(sm_state + 0x7f50, env->cr[3]);
1929 stl_phys(sm_state + 0x7f58, env->cr[0]);
1930
1931 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1932 stl_phys(sm_state + 0x7f00, env->smbase);
1933#else
1934 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1935 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1936 stl_phys(sm_state + 0x7ff4, compute_eflags());
1937 stl_phys(sm_state + 0x7ff0, env->eip);
1938 stl_phys(sm_state + 0x7fec, EDI);
1939 stl_phys(sm_state + 0x7fe8, ESI);
1940 stl_phys(sm_state + 0x7fe4, EBP);
1941 stl_phys(sm_state + 0x7fe0, ESP);
1942 stl_phys(sm_state + 0x7fdc, EBX);
1943 stl_phys(sm_state + 0x7fd8, EDX);
1944 stl_phys(sm_state + 0x7fd4, ECX);
1945 stl_phys(sm_state + 0x7fd0, EAX);
1946 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1947 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1948
1949 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1950 stl_phys(sm_state + 0x7f64, env->tr.base);
1951 stl_phys(sm_state + 0x7f60, env->tr.limit);
1952 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1953
1954 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1955 stl_phys(sm_state + 0x7f80, env->ldt.base);
1956 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1957 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1958
1959 stl_phys(sm_state + 0x7f74, env->gdt.base);
1960 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1961
1962 stl_phys(sm_state + 0x7f58, env->idt.base);
1963 stl_phys(sm_state + 0x7f54, env->idt.limit);
1964
1965 for(i = 0; i < 6; i++) {
1966 dt = &env->segs[i];
1967 if (i < 3)
1968 offset = 0x7f84 + i * 12;
1969 else
1970 offset = 0x7f2c + (i - 3) * 12;
1971 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1972 stl_phys(sm_state + offset + 8, dt->base);
1973 stl_phys(sm_state + offset + 4, dt->limit);
1974 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1975 }
1976 stl_phys(sm_state + 0x7f14, env->cr[4]);
1977
1978 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1979 stl_phys(sm_state + 0x7ef8, env->smbase);
1980#endif
1981 /* init SMM cpu state */
1982
1983#ifdef TARGET_X86_64
1984 cpu_load_efer(env, 0);
1985#endif
1986 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1987 env->eip = 0x00008000;
1988 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1989 0xffffffff, 0);
1990 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1991 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1992 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1993 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1994 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1995
1996 cpu_x86_update_cr0(env,
1997 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1998 cpu_x86_update_cr4(env, 0);
1999 env->dr[7] = 0x00000400;
2000 CC_OP = CC_OP_EFLAGS;
2001}
2002
2003void helper_rsm(void)
2004{
2005#ifdef VBOX
2006 cpu_abort(env, "helper_rsm");
2007#else /* !VBOX */
2008 target_ulong sm_state;
2009 int i, offset;
2010 uint32_t val;
2011
2012 sm_state = env->smbase + 0x8000;
2013#ifdef TARGET_X86_64
2014 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
2015
2016 for(i = 0; i < 6; i++) {
2017 offset = 0x7e00 + i * 16;
2018 cpu_x86_load_seg_cache(env, i,
2019 lduw_phys(sm_state + offset),
2020 ldq_phys(sm_state + offset + 8),
2021 ldl_phys(sm_state + offset + 4),
2022 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
2023 }
2024
2025 env->gdt.base = ldq_phys(sm_state + 0x7e68);
2026 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
2027
2028 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
2029 env->ldt.base = ldq_phys(sm_state + 0x7e78);
2030 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
2031 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
2032#ifdef VBOX
2033 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2034 env->ldt.newselector = 0;
2035#endif
2036
2037 env->idt.base = ldq_phys(sm_state + 0x7e88);
2038 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2039
2040 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2041 env->tr.base = ldq_phys(sm_state + 0x7e98);
2042 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2043 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2044#ifdef VBOX
2045 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2046 env->tr.newselector = 0;
2047#endif
2048
2049 EAX = ldq_phys(sm_state + 0x7ff8);
2050 ECX = ldq_phys(sm_state + 0x7ff0);
2051 EDX = ldq_phys(sm_state + 0x7fe8);
2052 EBX = ldq_phys(sm_state + 0x7fe0);
2053 ESP = ldq_phys(sm_state + 0x7fd8);
2054 EBP = ldq_phys(sm_state + 0x7fd0);
2055 ESI = ldq_phys(sm_state + 0x7fc8);
2056 EDI = ldq_phys(sm_state + 0x7fc0);
2057 for(i = 8; i < 16; i++)
2058 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2059 env->eip = ldq_phys(sm_state + 0x7f78);
2060 load_eflags(ldl_phys(sm_state + 0x7f70),
2061 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2062 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2063 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2064
2065 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2066 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2067 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2068
2069 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2070 if (val & 0x20000) {
2071 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2072 }
2073#else
2074 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2075 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2076 load_eflags(ldl_phys(sm_state + 0x7ff4),
2077 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2078 env->eip = ldl_phys(sm_state + 0x7ff0);
2079 EDI = ldl_phys(sm_state + 0x7fec);
2080 ESI = ldl_phys(sm_state + 0x7fe8);
2081 EBP = ldl_phys(sm_state + 0x7fe4);
2082 ESP = ldl_phys(sm_state + 0x7fe0);
2083 EBX = ldl_phys(sm_state + 0x7fdc);
2084 EDX = ldl_phys(sm_state + 0x7fd8);
2085 ECX = ldl_phys(sm_state + 0x7fd4);
2086 EAX = ldl_phys(sm_state + 0x7fd0);
2087 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2088 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2089
2090 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2091 env->tr.base = ldl_phys(sm_state + 0x7f64);
2092 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2093 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2094#ifdef VBOX
2095 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2096 env->tr.newselector = 0;
2097#endif
2098
2099 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2100 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2101 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2102 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2103#ifdef VBOX
2104 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2105 env->ldt.newselector = 0;
2106#endif
2107
2108 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2109 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2110
2111 env->idt.base = ldl_phys(sm_state + 0x7f58);
2112 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2113
2114 for(i = 0; i < 6; i++) {
2115 if (i < 3)
2116 offset = 0x7f84 + i * 12;
2117 else
2118 offset = 0x7f2c + (i - 3) * 12;
2119 cpu_x86_load_seg_cache(env, i,
2120 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2121 ldl_phys(sm_state + offset + 8),
2122 ldl_phys(sm_state + offset + 4),
2123 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2124 }
2125 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2126
2127 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2128 if (val & 0x20000) {
2129 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2130 }
2131#endif
2132 CC_OP = CC_OP_EFLAGS;
2133 env->hflags &= ~HF_SMM_MASK;
2134 cpu_smm_update(env);
2135
2136 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2137 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2138#endif /* !VBOX */
2139}
2140
2141#endif /* !CONFIG_USER_ONLY */
2142
2143
2144/* division, flags are undefined */
2145
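/*
 * Note on DIV/IDIV: the dividend is AX, DX:AX or EDX:EAX and the quotient
 * must fit in AL, AX or EAX respectively.  A zero divisor and a quotient
 * overflow raise the same #DE fault, which is why the overflow checks below
 * also use EXCP00_DIVZ.
 */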
2146void helper_divb_AL(target_ulong t0)
2147{
2148 unsigned int num, den, q, r;
2149
2150 num = (EAX & 0xffff);
2151 den = (t0 & 0xff);
2152 if (den == 0) {
2153 raise_exception(EXCP00_DIVZ);
2154 }
2155 q = (num / den);
2156 if (q > 0xff)
2157 raise_exception(EXCP00_DIVZ);
2158 q &= 0xff;
2159 r = (num % den) & 0xff;
2160 EAX = (EAX & ~0xffff) | (r << 8) | q;
2161}
2162
2163void helper_idivb_AL(target_ulong t0)
2164{
2165 int num, den, q, r;
2166
2167 num = (int16_t)EAX;
2168 den = (int8_t)t0;
2169 if (den == 0) {
2170 raise_exception(EXCP00_DIVZ);
2171 }
2172 q = (num / den);
2173 if (q != (int8_t)q)
2174 raise_exception(EXCP00_DIVZ);
2175 q &= 0xff;
2176 r = (num % den) & 0xff;
2177 EAX = (EAX & ~0xffff) | (r << 8) | q;
2178}
2179
2180void helper_divw_AX(target_ulong t0)
2181{
2182 unsigned int num, den, q, r;
2183
2184 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2185 den = (t0 & 0xffff);
2186 if (den == 0) {
2187 raise_exception(EXCP00_DIVZ);
2188 }
2189 q = (num / den);
2190 if (q > 0xffff)
2191 raise_exception(EXCP00_DIVZ);
2192 q &= 0xffff;
2193 r = (num % den) & 0xffff;
2194 EAX = (EAX & ~0xffff) | q;
2195 EDX = (EDX & ~0xffff) | r;
2196}
2197
2198void helper_idivw_AX(target_ulong t0)
2199{
2200 int num, den, q, r;
2201
2202 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2203 den = (int16_t)t0;
2204 if (den == 0) {
2205 raise_exception(EXCP00_DIVZ);
2206 }
2207 q = (num / den);
2208 if (q != (int16_t)q)
2209 raise_exception(EXCP00_DIVZ);
2210 q &= 0xffff;
2211 r = (num % den) & 0xffff;
2212 EAX = (EAX & ~0xffff) | q;
2213 EDX = (EDX & ~0xffff) | r;
2214}
2215
2216void helper_divl_EAX(target_ulong t0)
2217{
2218 unsigned int den, r;
2219 uint64_t num, q;
2220
2221 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2222 den = t0;
2223 if (den == 0) {
2224 raise_exception(EXCP00_DIVZ);
2225 }
2226 q = (num / den);
2227 r = (num % den);
2228 if (q > 0xffffffff)
2229 raise_exception(EXCP00_DIVZ);
2230 EAX = (uint32_t)q;
2231 EDX = (uint32_t)r;
2232}
2233
2234void helper_idivl_EAX(target_ulong t0)
2235{
2236 int den, r;
2237 int64_t num, q;
2238
2239 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2240 den = t0;
2241 if (den == 0) {
2242 raise_exception(EXCP00_DIVZ);
2243 }
2244 q = (num / den);
2245 r = (num % den);
2246 if (q != (int32_t)q)
2247 raise_exception(EXCP00_DIVZ);
2248 EAX = (uint32_t)q;
2249 EDX = (uint32_t)r;
2250}
2251
2252/* bcd */
2253
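/*
 * Note on the ASCII adjust helpers: AAM divides AL by the immediate base
 * (normally 10), leaving the quotient in AH and the remainder in AL; AAD
 * folds AH back into AL as AL = AH * base + AL and clears AH.  AAA/AAS and
 * DAA/DAS adjust AL after unpacked respectively packed BCD additions and
 * subtractions.
 */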
2254/* XXX: AAM with an immediate of 0 should raise #DE; that case is not handled here. */
2255void helper_aam(int base)
2256{
2257 int al, ah;
2258 al = EAX & 0xff;
2259 ah = al / base;
2260 al = al % base;
2261 EAX = (EAX & ~0xffff) | al | (ah << 8);
2262 CC_DST = al;
2263}
2264
2265void helper_aad(int base)
2266{
2267 int al, ah;
2268 al = EAX & 0xff;
2269 ah = (EAX >> 8) & 0xff;
2270 al = ((ah * base) + al) & 0xff;
2271 EAX = (EAX & ~0xffff) | al;
2272 CC_DST = al;
2273}
2274
2275void helper_aaa(void)
2276{
2277 int icarry;
2278 int al, ah, af;
2279 int eflags;
2280
2281 eflags = helper_cc_compute_all(CC_OP);
2282 af = eflags & CC_A;
2283 al = EAX & 0xff;
2284 ah = (EAX >> 8) & 0xff;
2285
2286 icarry = (al > 0xf9);
2287 if (((al & 0x0f) > 9 ) || af) {
2288 al = (al + 6) & 0x0f;
2289 ah = (ah + 1 + icarry) & 0xff;
2290 eflags |= CC_C | CC_A;
2291 } else {
2292 eflags &= ~(CC_C | CC_A);
2293 al &= 0x0f;
2294 }
2295 EAX = (EAX & ~0xffff) | al | (ah << 8);
2296 CC_SRC = eflags;
2297}
2298
2299void helper_aas(void)
2300{
2301 int icarry;
2302 int al, ah, af;
2303 int eflags;
2304
2305 eflags = helper_cc_compute_all(CC_OP);
2306 af = eflags & CC_A;
2307 al = EAX & 0xff;
2308 ah = (EAX >> 8) & 0xff;
2309
2310 icarry = (al < 6);
2311 if (((al & 0x0f) > 9 ) || af) {
2312 al = (al - 6) & 0x0f;
2313 ah = (ah - 1 - icarry) & 0xff;
2314 eflags |= CC_C | CC_A;
2315 } else {
2316 eflags &= ~(CC_C | CC_A);
2317 al &= 0x0f;
2318 }
2319 EAX = (EAX & ~0xffff) | al | (ah << 8);
2320 CC_SRC = eflags;
2321}
2322
2323void helper_daa(void)
2324{
2325 int al, af, cf;
2326 int eflags;
2327
2328 eflags = helper_cc_compute_all(CC_OP);
2329 cf = eflags & CC_C;
2330 af = eflags & CC_A;
2331 al = EAX & 0xff;
2332
2333 eflags = 0;
2334 if (((al & 0x0f) > 9 ) || af) {
2335 al = (al + 6) & 0xff;
2336 eflags |= CC_A;
2337 }
2338 if ((al > 0x9f) || cf) {
2339 al = (al + 0x60) & 0xff;
2340 eflags |= CC_C;
2341 }
2342 EAX = (EAX & ~0xff) | al;
2343 /* well, speed is not an issue here, so we compute the flags by hand */
2344 eflags |= (al == 0) << 6; /* zf */
2345 eflags |= parity_table[al]; /* pf */
2346 eflags |= (al & 0x80); /* sf */
2347 CC_SRC = eflags;
2348}
2349
2350void helper_das(void)
2351{
2352 int al, al1, af, cf;
2353 int eflags;
2354
2355 eflags = helper_cc_compute_all(CC_OP);
2356 cf = eflags & CC_C;
2357 af = eflags & CC_A;
2358 al = EAX & 0xff;
2359
2360 eflags = 0;
2361 al1 = al;
2362 if (((al & 0x0f) > 9 ) || af) {
2363 eflags |= CC_A;
2364 if (al < 6 || cf)
2365 eflags |= CC_C;
2366 al = (al - 6) & 0xff;
2367 }
2368 if ((al1 > 0x99) || cf) {
2369 al = (al - 0x60) & 0xff;
2370 eflags |= CC_C;
2371 }
2372 EAX = (EAX & ~0xff) | al;
2373 /* well, speed is not an issue here, so we compute the flags by hand */
2374 eflags |= (al == 0) << 6; /* zf */
2375 eflags |= parity_table[al]; /* pf */
2376 eflags |= (al & 0x80); /* sf */
2377 CC_SRC = eflags;
2378}
2379
2380void helper_into(int next_eip_addend)
2381{
2382 int eflags;
2383 eflags = helper_cc_compute_all(CC_OP);
2384 if (eflags & CC_O) {
2385 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2386 }
2387}
2388
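/*
 * Note on CMPXCHG8B/CMPXCHG16B below: when the comparison fails, the old
 * value is written back to memory anyway ("always do the store"), mirroring
 * real hardware, which performs a write cycle even on a failed comparison.
 */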
2389void helper_cmpxchg8b(target_ulong a0)
2390{
2391 uint64_t d;
2392 int eflags;
2393
2394 eflags = helper_cc_compute_all(CC_OP);
2395 d = ldq(a0);
2396 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2397 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2398 eflags |= CC_Z;
2399 } else {
2400 /* always do the store */
2401 stq(a0, d);
2402 EDX = (uint32_t)(d >> 32);
2403 EAX = (uint32_t)d;
2404 eflags &= ~CC_Z;
2405 }
2406 CC_SRC = eflags;
2407}
2408
2409#ifdef TARGET_X86_64
2410void helper_cmpxchg16b(target_ulong a0)
2411{
2412 uint64_t d0, d1;
2413 int eflags;
2414
2415 if ((a0 & 0xf) != 0)
2416 raise_exception(EXCP0D_GPF);
2417 eflags = helper_cc_compute_all(CC_OP);
2418 d0 = ldq(a0);
2419 d1 = ldq(a0 + 8);
2420 if (d0 == EAX && d1 == EDX) {
2421 stq(a0, EBX);
2422 stq(a0 + 8, ECX);
2423 eflags |= CC_Z;
2424 } else {
2425 /* always do the store */
2426 stq(a0, d0);
2427 stq(a0 + 8, d1);
2428 EDX = d1;
2429 EAX = d0;
2430 eflags &= ~CC_Z;
2431 }
2432 CC_SRC = eflags;
2433}
2434#endif
2435
2436void helper_single_step(void)
2437{
2438#ifndef CONFIG_USER_ONLY
2439 check_hw_breakpoints(env, 1);
2440 env->dr[6] |= DR6_BS;
2441#endif
2442 raise_exception(EXCP01_DB);
2443}
2444
2445void helper_cpuid(void)
2446{
2447 uint32_t eax, ebx, ecx, edx;
2448
2449 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2450
2451 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2452 EAX = eax;
2453 EBX = ebx;
2454 ECX = ecx;
2455 EDX = edx;
2456}
2457
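/*
 * ENTER with a non-zero nesting level.  The generated code has already pushed
 * the old frame pointer; this helper copies the level-1 outer frame pointers
 * from the old frame and then pushes t1, which is presumably the new frame
 * pointer value.  The final (E)SP/(E)BP update is left to the generated code.
 */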
2458void helper_enter_level(int level, int data32, target_ulong t1)
2459{
2460 target_ulong ssp;
2461 uint32_t esp_mask, esp, ebp;
2462
2463 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2464 ssp = env->segs[R_SS].base;
2465 ebp = EBP;
2466 esp = ESP;
2467 if (data32) {
2468 /* 32 bit */
2469 esp -= 4;
2470 while (--level) {
2471 esp -= 4;
2472 ebp -= 4;
2473 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2474 }
2475 esp -= 4;
2476 stl(ssp + (esp & esp_mask), t1);
2477 } else {
2478 /* 16 bit */
2479 esp -= 2;
2480 while (--level) {
2481 esp -= 2;
2482 ebp -= 2;
2483 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2484 }
2485 esp -= 2;
2486 stw(ssp + (esp & esp_mask), t1);
2487 }
2488}
2489
2490#ifdef TARGET_X86_64
2491void helper_enter64_level(int level, int data64, target_ulong t1)
2492{
2493 target_ulong esp, ebp;
2494 ebp = EBP;
2495 esp = ESP;
2496
2497 if (data64) {
2498 /* 64 bit */
2499 esp -= 8;
2500 while (--level) {
2501 esp -= 8;
2502 ebp -= 8;
2503 stq(esp, ldq(ebp));
2504 }
2505 esp -= 8;
2506 stq(esp, t1);
2507 } else {
2508 /* 16 bit */
2509 esp -= 2;
2510 while (--level) {
2511 esp -= 2;
2512 ebp -= 2;
2513 stw(esp, lduw(ebp));
2514 }
2515 esp -= 2;
2516 stw(esp, t1);
2517 }
2518}
2519#endif
2520
2521void helper_lldt(int selector)
2522{
2523 SegmentCache *dt;
2524 uint32_t e1, e2;
2525#ifndef VBOX
2526 int index, entry_limit;
2527#else
2528 unsigned int index, entry_limit;
2529#endif
2530 target_ulong ptr;
2531
2532#ifdef VBOX
2533 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2534 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2535#endif
2536
2537 selector &= 0xffff;
2538 if ((selector & 0xfffc) == 0) {
2539 /* XXX: NULL selector case: invalid LDT */
2540 env->ldt.base = 0;
2541 env->ldt.limit = 0;
2542#ifdef VBOX
2543 env->ldt.flags = DESC_INTEL_UNUSABLE;
2544 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2545 env->ldt.newselector = 0;
2546#endif
2547 } else {
2548 if (selector & 0x4)
2549 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2550 dt = &env->gdt;
2551 index = selector & ~7;
2552#ifdef TARGET_X86_64
2553 if (env->hflags & HF_LMA_MASK)
2554 entry_limit = 15;
2555 else
2556#endif
2557 entry_limit = 7;
2558 if ((index + entry_limit) > dt->limit)
2559 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2560 ptr = dt->base + index;
2561 e1 = ldl_kernel(ptr);
2562 e2 = ldl_kernel(ptr + 4);
2563 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2564 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2565 if (!(e2 & DESC_P_MASK))
2566 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2567#ifdef TARGET_X86_64
2568 if (env->hflags & HF_LMA_MASK) {
2569 uint32_t e3;
2570 e3 = ldl_kernel(ptr + 8);
2571 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2572 env->ldt.base |= (target_ulong)e3 << 32;
2573 } else
2574#endif
2575 {
2576 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2577 }
2578 }
2579 env->ldt.selector = selector;
2580#ifdef VBOX
2581 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2582 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2583#endif
2584}
2585
2586void helper_ltr(int selector)
2587{
2588 SegmentCache *dt;
2589 uint32_t e1, e2;
2590#ifndef VBOX
2591 int index, type, entry_limit;
2592#else
2593 unsigned int index;
2594 int type, entry_limit;
2595#endif
2596 target_ulong ptr;
2597
2598#ifdef VBOX
2599 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2600 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2601 env->tr.flags, (RTSEL)(selector & 0xffff)));
2602#endif
2603 selector &= 0xffff;
2604 if ((selector & 0xfffc) == 0) {
2605 /* NULL selector case: invalid TR */
2606#ifdef VBOX
2607 raise_exception_err(EXCP0A_TSS, 0);
2608#else
2609 env->tr.base = 0;
2610 env->tr.limit = 0;
2611 env->tr.flags = 0;
2612#endif
2613 } else {
2614 if (selector & 0x4)
2615 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2616 dt = &env->gdt;
2617 index = selector & ~7;
2618#ifdef TARGET_X86_64
2619 if (env->hflags & HF_LMA_MASK)
2620 entry_limit = 15;
2621 else
2622#endif
2623 entry_limit = 7;
2624 if ((index + entry_limit) > dt->limit)
2625 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2626 ptr = dt->base + index;
2627 e1 = ldl_kernel(ptr);
2628 e2 = ldl_kernel(ptr + 4);
2629 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2630 if ((e2 & DESC_S_MASK) ||
2631 (type != 1 && type != 9))
2632 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2633 if (!(e2 & DESC_P_MASK))
2634 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2635#ifdef TARGET_X86_64
2636 if (env->hflags & HF_LMA_MASK) {
2637 uint32_t e3, e4;
2638 e3 = ldl_kernel(ptr + 8);
2639 e4 = ldl_kernel(ptr + 12);
2640 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2641 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2642 load_seg_cache_raw_dt(&env->tr, e1, e2);
2643 env->tr.base |= (target_ulong)e3 << 32;
2644 } else
2645#endif
2646 {
2647 load_seg_cache_raw_dt(&env->tr, e1, e2);
2648 }
2649 e2 |= DESC_TSS_BUSY_MASK;
2650 stl_kernel(ptr + 4, e2);
2651 }
2652 env->tr.selector = selector;
2653#ifdef VBOX
2654 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2655 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2656 env->tr.flags, (RTSEL)(selector & 0xffff)));
2657#endif
2658}
2659
2660/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2661void helper_load_seg(int seg_reg, int selector)
2662{
2663 uint32_t e1, e2;
2664 int cpl, dpl, rpl;
2665 SegmentCache *dt;
2666#ifndef VBOX
2667 int index;
2668#else
2669 unsigned int index;
2670#endif
2671 target_ulong ptr;
2672
2673 selector &= 0xffff;
2674 cpl = env->hflags & HF_CPL_MASK;
2675#ifdef VBOX
2676
2677 /* Raw ring 0: trying to load a selector with RPL=1? If so, mask off the RPL. */
2678 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2679 {
2680 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2681 selector = selector & 0xfffc;
2682 }
2683#endif /* VBOX */
2684 if ((selector & 0xfffc) == 0) {
2685 /* null selector case */
2686#ifndef VBOX
2687 if (seg_reg == R_SS
2688#ifdef TARGET_X86_64
2689 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2690#endif
2691 )
2692 raise_exception_err(EXCP0D_GPF, 0);
2693 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2694#else
2695 if (seg_reg == R_SS) {
2696 if (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2697 raise_exception_err(EXCP0D_GPF, 0);
2698 e2 = (cpl << DESC_DPL_SHIFT) | DESC_INTEL_UNUSABLE;
2699 } else {
2700 e2 = DESC_INTEL_UNUSABLE;
2701 }
2702 cpu_x86_load_seg_cache_with_clean_flags(env, seg_reg, selector, 0, 0, e2);
2703#endif
2704 } else {
2705
2706 if (selector & 0x4)
2707 dt = &env->ldt;
2708 else
2709 dt = &env->gdt;
2710 index = selector & ~7;
2711 if ((index + 7) > dt->limit)
2712 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2713 ptr = dt->base + index;
2714 e1 = ldl_kernel(ptr);
2715 e2 = ldl_kernel(ptr + 4);
2716
2717 if (!(e2 & DESC_S_MASK))
2718 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2719 rpl = selector & 3;
2720 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2721 if (seg_reg == R_SS) {
2722 /* must be writable segment */
2723 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2724 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2725 if (rpl != cpl || dpl != cpl)
2726 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2727 } else {
2728 /* must be readable segment */
2729 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2730 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2731
2732 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2733 /* if not conforming code, test rights */
2734 if (dpl < cpl || dpl < rpl)
2735 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2736 }
2737 }
2738
2739 if (!(e2 & DESC_P_MASK)) {
2740 if (seg_reg == R_SS)
2741 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2742 else
2743 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2744 }
2745
2746 /* set the access bit if not already set */
2747 if (!(e2 & DESC_A_MASK)) {
2748 e2 |= DESC_A_MASK;
2749 stl_kernel(ptr + 4, e2);
2750 }
2751
2752 cpu_x86_load_seg_cache(env, seg_reg, selector,
2753 get_seg_base(e1, e2),
2754 get_seg_limit(e1, e2),
2755 e2);
2756#if 0
2757 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2758 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2759#endif
2760 }
2761}
2762
2763/* protected mode jump */
2764void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2765 int next_eip_addend)
2766{
2767 int gate_cs, type;
2768 uint32_t e1, e2, cpl, dpl, rpl, limit;
2769 target_ulong next_eip;
2770
2771#ifdef VBOX /** @todo Why do we do this? */
2772 e1 = e2 = 0;
2773#endif
2774 if ((new_cs & 0xfffc) == 0)
2775 raise_exception_err(EXCP0D_GPF, 0);
2776 if (load_segment(&e1, &e2, new_cs) != 0)
2777 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2778 cpl = env->hflags & HF_CPL_MASK;
2779 if (e2 & DESC_S_MASK) {
2780 if (!(e2 & DESC_CS_MASK))
2781 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2782 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2783 if (e2 & DESC_C_MASK) {
2784 /* conforming code segment */
2785 if (dpl > cpl)
2786 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2787 } else {
2788 /* non conforming code segment */
2789 rpl = new_cs & 3;
2790 if (rpl > cpl)
2791 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2792 if (dpl != cpl)
2793 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2794 }
2795 if (!(e2 & DESC_P_MASK))
2796 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2797 limit = get_seg_limit(e1, e2);
2798 if (new_eip > limit &&
2799 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2800 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2801#ifdef VBOX
2802 if (!(e2 & DESC_A_MASK))
2803 e2 = set_segment_accessed(new_cs, e2);
2804#endif
2805 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2806 get_seg_base(e1, e2), limit, e2);
2807 EIP = new_eip;
2808 } else {
2809 /* jump to call or task gate */
2810 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2811 rpl = new_cs & 3;
2812 cpl = env->hflags & HF_CPL_MASK;
2813 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2814 switch(type) {
2815 case 1: /* 286 TSS */
2816 case 9: /* 386 TSS */
2817 case 5: /* task gate */
2818 if (dpl < cpl || dpl < rpl)
2819 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2820 next_eip = env->eip + next_eip_addend;
2821 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2822 CC_OP = CC_OP_EFLAGS;
2823 break;
2824 case 4: /* 286 call gate */
2825 case 12: /* 386 call gate */
2826 if ((dpl < cpl) || (dpl < rpl))
2827 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2828 if (!(e2 & DESC_P_MASK))
2829 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2830 gate_cs = e1 >> 16;
2831 new_eip = (e1 & 0xffff);
2832 if (type == 12)
2833 new_eip |= (e2 & 0xffff0000);
2834 if (load_segment(&e1, &e2, gate_cs) != 0)
2835 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2836 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2837 /* must be code segment */
2838 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2839 (DESC_S_MASK | DESC_CS_MASK)))
2840 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2841 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2842 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2843 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2844 if (!(e2 & DESC_P_MASK))
2845#ifdef VBOX /* See page 3-514 of 253666.pdf */
2846 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2847#else
2848 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2849#endif
2850 limit = get_seg_limit(e1, e2);
2851 if (new_eip > limit)
2852 raise_exception_err(EXCP0D_GPF, 0);
2853 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2854 get_seg_base(e1, e2), limit, e2);
2855 EIP = new_eip;
2856 break;
2857 default:
2858 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2859 break;
2860 }
2861 }
2862}
2863
2864/* real mode call */
2865void helper_lcall_real(int new_cs, target_ulong new_eip1,
2866 int shift, int next_eip)
2867{
2868 int new_eip;
2869 uint32_t esp, esp_mask;
2870 target_ulong ssp;
2871
2872 new_eip = new_eip1;
2873 esp = ESP;
2874 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2875 ssp = env->segs[R_SS].base;
2876 if (shift) {
2877 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2878 PUSHL(ssp, esp, esp_mask, next_eip);
2879 } else {
2880 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2881 PUSHW(ssp, esp, esp_mask, next_eip);
2882 }
2883
2884 SET_ESP(esp, esp_mask);
2885 env->eip = new_eip;
2886 env->segs[R_CS].selector = new_cs;
2887 env->segs[R_CS].base = (new_cs << 4);
2888}
2889
2890/* protected mode call */
2891void helper_lcall_protected(int new_cs, target_ulong new_eip,
2892 int shift, int next_eip_addend)
2893{
2894 int new_stack, i;
2895 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2896 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2897 uint32_t val, limit, old_sp_mask;
2898 target_ulong ssp, old_ssp, next_eip;
2899
2900#ifdef VBOX /** @todo Why do we do this? */
2901 e1 = e2 = 0;
2902#endif
2903 next_eip = env->eip + next_eip_addend;
2904 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2905 LOG_PCALL_STATE(env);
2906 if ((new_cs & 0xfffc) == 0)
2907 raise_exception_err(EXCP0D_GPF, 0);
2908 if (load_segment(&e1, &e2, new_cs) != 0)
2909 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2910 cpl = env->hflags & HF_CPL_MASK;
2911 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2912 if (e2 & DESC_S_MASK) {
2913 if (!(e2 & DESC_CS_MASK))
2914 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2915 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2916 if (e2 & DESC_C_MASK) {
2917 /* conforming code segment */
2918 if (dpl > cpl)
2919 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2920 } else {
2921 /* non conforming code segment */
2922 rpl = new_cs & 3;
2923 if (rpl > cpl)
2924 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2925 if (dpl != cpl)
2926 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2927 }
2928 if (!(e2 & DESC_P_MASK))
2929 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2930#ifdef VBOX
2931 if (!(e2 & DESC_A_MASK))
2932 e2 = set_segment_accessed(new_cs, e2);
2933#endif
2934
2935#ifdef TARGET_X86_64
2936 /* XXX: check 16/32 bit cases in long mode */
2937 if (shift == 2) {
2938 target_ulong rsp;
2939 /* 64 bit case */
2940 rsp = ESP;
2941 PUSHQ(rsp, env->segs[R_CS].selector);
2942 PUSHQ(rsp, next_eip);
2943 /* from this point, not restartable */
2944 ESP = rsp;
2945 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2946 get_seg_base(e1, e2),
2947 get_seg_limit(e1, e2), e2);
2948 EIP = new_eip;
2949 } else
2950#endif
2951 {
2952 sp = ESP;
2953 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2954 ssp = env->segs[R_SS].base;
2955 if (shift) {
2956 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2957 PUSHL(ssp, sp, sp_mask, next_eip);
2958 } else {
2959 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2960 PUSHW(ssp, sp, sp_mask, next_eip);
2961 }
2962
2963 limit = get_seg_limit(e1, e2);
2964 if (new_eip > limit)
2965 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2966 /* from this point, not restartable */
2967 SET_ESP(sp, sp_mask);
2968 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2969 get_seg_base(e1, e2), limit, e2);
2970 EIP = new_eip;
2971 }
2972 } else {
2973 /* check gate type */
2974 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2975 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2976 rpl = new_cs & 3;
2977 switch(type) {
2978 case 1: /* available 286 TSS */
2979 case 9: /* available 386 TSS */
2980 case 5: /* task gate */
2981 if (dpl < cpl || dpl < rpl)
2982 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2983 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2984 CC_OP = CC_OP_EFLAGS;
2985 return;
2986 case 4: /* 286 call gate */
2987 case 12: /* 386 call gate */
2988 break;
2989 default:
2990 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2991 break;
2992 }
2993 shift = type >> 3;
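        /* Gate type 4 (16-bit call gate) >> 3 yields 0 and type 12 (32-bit
           call gate) yields 1, so from here on 'shift' selects 16-bit vs
           32-bit stack pushes instead of the caller's operand size. */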
2994
2995 if (dpl < cpl || dpl < rpl)
2996 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2997 /* check valid bit */
2998 if (!(e2 & DESC_P_MASK))
2999 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3000 selector = e1 >> 16;
3001 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
3002 param_count = e2 & 0x1f;
3003 if ((selector & 0xfffc) == 0)
3004 raise_exception_err(EXCP0D_GPF, 0);
3005
3006 if (load_segment(&e1, &e2, selector) != 0)
3007 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3008 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3009 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3010 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3011 if (dpl > cpl)
3012 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3013 if (!(e2 & DESC_P_MASK))
3014 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3015
3016 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3017 /* to inner privilege */
3018 get_ss_esp_from_tss(&ss, &sp, dpl);
3019 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3020 ss, sp, param_count, ESP);
3021 if ((ss & 0xfffc) == 0)
3022 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3023 if ((ss & 3) != dpl)
3024 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3025 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3026 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3027 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3028 if (ss_dpl != dpl)
3029 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3030 if (!(ss_e2 & DESC_S_MASK) ||
3031 (ss_e2 & DESC_CS_MASK) ||
3032 !(ss_e2 & DESC_W_MASK))
3033 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3034 if (!(ss_e2 & DESC_P_MASK))
3035#ifdef VBOX /* See page 3-99 of 253666.pdf */
3036 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3037#else
3038 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3039#endif
3040
3041 // push_size = ((param_count * 2) + 8) << shift;
3042
3043 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3044 old_ssp = env->segs[R_SS].base;
3045
3046 sp_mask = get_sp_mask(ss_e2);
3047 ssp = get_seg_base(ss_e1, ss_e2);
3048 if (shift) {
3049 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3050 PUSHL(ssp, sp, sp_mask, ESP);
3051 for(i = param_count - 1; i >= 0; i--) {
3052 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3053 PUSHL(ssp, sp, sp_mask, val);
3054 }
3055 } else {
3056 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3057 PUSHW(ssp, sp, sp_mask, ESP);
3058 for(i = param_count - 1; i >= 0; i--) {
3059 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3060 PUSHW(ssp, sp, sp_mask, val);
3061 }
3062 }
3063 new_stack = 1;
3064 } else {
3065 /* to same privilege */
3066 sp = ESP;
3067 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3068 ssp = env->segs[R_SS].base;
3069 // push_size = (4 << shift);
3070 new_stack = 0;
3071 }
3072
3073 if (shift) {
3074 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3075 PUSHL(ssp, sp, sp_mask, next_eip);
3076 } else {
3077 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3078 PUSHW(ssp, sp, sp_mask, next_eip);
3079 }
3080
3081 /* from this point, not restartable */
3082
3083 if (new_stack) {
3084 ss = (ss & ~3) | dpl;
3085 cpu_x86_load_seg_cache(env, R_SS, ss,
3086 ssp,
3087 get_seg_limit(ss_e1, ss_e2),
3088 ss_e2);
3089 }
3090
3091 selector = (selector & ~3) | dpl;
3092 cpu_x86_load_seg_cache(env, R_CS, selector,
3093 get_seg_base(e1, e2),
3094 get_seg_limit(e1, e2),
3095 e2);
3096 cpu_x86_set_cpl(env, dpl);
3097 SET_ESP(sp, sp_mask);
3098 EIP = offset;
3099 }
3100}
3101
3102/* real and vm86 mode iret */
3103void helper_iret_real(int shift)
3104{
3105 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3106 target_ulong ssp;
3107 int eflags_mask;
3108#ifdef VBOX
3109 bool fVME = false;
3110
3111 remR3TrapClear(env->pVM);
3112#endif /* VBOX */
3113
3114 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3115 sp = ESP;
3116 ssp = env->segs[R_SS].base;
3117 if (shift == 1) {
3118 /* 32 bits */
3119 POPL(ssp, sp, sp_mask, new_eip);
3120 POPL(ssp, sp, sp_mask, new_cs);
3121 new_cs &= 0xffff;
3122 POPL(ssp, sp, sp_mask, new_eflags);
3123 } else {
3124 /* 16 bits */
3125 POPW(ssp, sp, sp_mask, new_eip);
3126 POPW(ssp, sp, sp_mask, new_cs);
3127 POPW(ssp, sp, sp_mask, new_eflags);
3128 }
3129#ifdef VBOX
3130 if ( (env->eflags & VM_MASK)
3131 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3132 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3133 {
3134 fVME = true;
3135 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3136 /* if TF will be set -> #GP */
3137 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3138 || (new_eflags & TF_MASK))
3139 raise_exception(EXCP0D_GPF);
3140 }
3141#endif /* VBOX */
3142 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3143 env->segs[R_CS].selector = new_cs;
3144 env->segs[R_CS].base = (new_cs << 4);
3145 env->eip = new_eip;
3146#ifdef VBOX
3147 if (fVME)
3148 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3149 else
3150#endif
3151 if (env->eflags & VM_MASK)
3152 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3153 else
3154 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3155 if (shift == 0)
3156 eflags_mask &= 0xffff;
3157 load_eflags(new_eflags, eflags_mask);
3158 env->hflags2 &= ~HF2_NMI_MASK;
3159#ifdef VBOX
3160 if (fVME)
3161 {
3162 if (new_eflags & IF_MASK)
3163 env->eflags |= VIF_MASK;
3164 else
3165 env->eflags &= ~VIF_MASK;
3166 }
3167#endif /* VBOX */
3168}
3169
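/*
 * On a return to an outer privilege level, data segment registers whose DPL
 * is below the new CPL may not stay loaded; they are forced to the NULL
 * selector here.  Conforming code segments are exempt from this check.
 */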
3170static inline void validate_seg(int seg_reg, int cpl)
3171{
3172 int dpl;
3173 uint32_t e2;
3174
3175 /* XXX: on x86_64, we do not want to nullify FS and GS because
3176 they may still contain a valid base. I would be interested to
3177 know how a real x86_64 CPU behaves */
3178 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3179 (env->segs[seg_reg].selector & 0xfffc) == 0)
3180 return;
3181
3182 e2 = env->segs[seg_reg].flags;
3183 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3184 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3185 /* data or non conforming code segment */
3186 if (dpl < cpl) {
3187 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3188 }
3189 }
3190}
3191
3192/* protected mode iret */
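/*
 * Common far RET / IRET worker: shift selects the operand size (0 = 16-bit,
 * 1 = 32-bit, 2 = 64-bit), is_iret distinguishes IRET from far RET, and
 * addend is the immediate operand of "RET imm16", used to release parameters
 * from the stack.
 */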
3193static inline void helper_ret_protected(int shift, int is_iret, int addend)
3194{
3195 uint32_t new_cs, new_eflags, new_ss;
3196 uint32_t new_es, new_ds, new_fs, new_gs;
3197 uint32_t e1, e2, ss_e1, ss_e2;
3198 int cpl, dpl, rpl, eflags_mask, iopl;
3199 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3200
3201#ifdef VBOX /** @todo Why do we do this? */
3202 ss_e1 = ss_e2 = e1 = e2 = 0;
3203#endif
3204
3205#ifdef TARGET_X86_64
3206 if (shift == 2)
3207 sp_mask = -1;
3208 else
3209#endif
3210 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3211 sp = ESP;
3212 ssp = env->segs[R_SS].base;
3213 new_eflags = 0; /* avoid warning */
3214#ifdef TARGET_X86_64
3215 if (shift == 2) {
3216 POPQ(sp, new_eip);
3217 POPQ(sp, new_cs);
3218 new_cs &= 0xffff;
3219 if (is_iret) {
3220 POPQ(sp, new_eflags);
3221 }
3222 } else
3223#endif
3224 if (shift == 1) {
3225 /* 32 bits */
3226 POPL(ssp, sp, sp_mask, new_eip);
3227 POPL(ssp, sp, sp_mask, new_cs);
3228 new_cs &= 0xffff;
3229 if (is_iret) {
3230 POPL(ssp, sp, sp_mask, new_eflags);
3231#define LOG_GROUP LOG_GROUP_REM
3232#if defined(VBOX) && defined(DEBUG)
3233 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3234 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3235 Log(("iret: new EFLAGS %08X\n", new_eflags));
3236 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3237#endif
3238 if (new_eflags & VM_MASK)
3239 goto return_to_vm86;
3240 }
3241#ifdef VBOX
3242 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3243 {
3244 if ( !EMIsRawRing1Enabled(env->pVM)
3245 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3246 {
3247 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3248 new_cs = new_cs & 0xfffc;
3249 }
3250 else
3251 {
3252 /* Ugly assumption: assume a genuine switch to ring-1. */
3253 Log(("Genuine switch to ring-1 (iret)\n"));
3254 }
3255 }
3256 else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3257 {
3258 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3259 new_cs = (new_cs & 0xfffc) | 1;
3260 }
3261#endif
3262 } else {
3263 /* 16 bits */
3264 POPW(ssp, sp, sp_mask, new_eip);
3265 POPW(ssp, sp, sp_mask, new_cs);
3266 if (is_iret)
3267 POPW(ssp, sp, sp_mask, new_eflags);
3268 }
3269 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3270 new_cs, new_eip, shift, addend);
3271 LOG_PCALL_STATE(env);
3272 if ((new_cs & 0xfffc) == 0)
3273 {
3274#if defined(VBOX) && defined(DEBUG)
3275 Log(("new_cs & 0xfffc) == 0\n"));
3276#endif
3277 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3278 }
3279 if (load_segment(&e1, &e2, new_cs) != 0)
3280 {
3281#if defined(VBOX) && defined(DEBUG)
3282 Log(("load_segment failed\n"));
3283#endif
3284 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3285 }
3286 if (!(e2 & DESC_S_MASK) ||
3287 !(e2 & DESC_CS_MASK))
3288 {
3289#if defined(VBOX) && defined(DEBUG)
3290 Log(("e2 mask %08x\n", e2));
3291#endif
3292 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3293 }
3294 cpl = env->hflags & HF_CPL_MASK;
3295 rpl = new_cs & 3;
3296 if (rpl < cpl)
3297 {
3298#if defined(VBOX) && defined(DEBUG)
3299 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3300#endif
3301 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3302 }
3303 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3304
3305 if (e2 & DESC_C_MASK) {
3306 if (dpl > rpl)
3307 {
3308#if defined(VBOX) && defined(DEBUG)
3309 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3310#endif
3311 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3312 }
3313 } else {
3314 if (dpl != rpl)
3315 {
3316#if defined(VBOX) && defined(DEBUG)
3317 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3318#endif
3319 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3320 }
3321 }
3322 if (!(e2 & DESC_P_MASK))
3323 {
3324#if defined(VBOX) && defined(DEBUG)
3325 Log(("DESC_P_MASK e2=%08x\n", e2));
3326#endif
3327 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3328 }
3329
3330 sp += addend;
3331 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3332 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3333 /* return to same privilege level */
3334#ifdef VBOX
3335 if (!(e2 & DESC_A_MASK))
3336 e2 = set_segment_accessed(new_cs, e2);
3337#endif
3338 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3339 get_seg_base(e1, e2),
3340 get_seg_limit(e1, e2),
3341 e2);
3342 } else {
3343 /* return to different privilege level */
3344#ifdef TARGET_X86_64
3345 if (shift == 2) {
3346 POPQ(sp, new_esp);
3347 POPQ(sp, new_ss);
3348 new_ss &= 0xffff;
3349 } else
3350#endif
3351 if (shift == 1) {
3352 /* 32 bits */
3353 POPL(ssp, sp, sp_mask, new_esp);
3354 POPL(ssp, sp, sp_mask, new_ss);
3355 new_ss &= 0xffff;
3356 } else {
3357 /* 16 bits */
3358 POPW(ssp, sp, sp_mask, new_esp);
3359 POPW(ssp, sp, sp_mask, new_ss);
3360 }
3361 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3362 new_ss, new_esp);
3363 if ((new_ss & 0xfffc) == 0) {
3364#ifdef TARGET_X86_64
3365 /* NULL ss is allowed in long mode if cpl != 3 */
3366# ifndef VBOX
3367 /* XXX: test CS64 ? */
3368 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3369 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3370 0, 0xffffffff,
3371 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3372 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3373 DESC_W_MASK | DESC_A_MASK);
3374 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3375 } else
3376# else /* VBOX */
3377 if ((env->hflags & HF_LMA_MASK) && rpl != 3 && (e2 & DESC_L_MASK)) {
3378 if (!(e2 & DESC_A_MASK))
3379 e2 = set_segment_accessed(new_cs, e2);
3380 cpu_x86_load_seg_cache_with_clean_flags(env, R_SS, new_ss,
3381 0, 0xffffffff,
3382 DESC_INTEL_UNUSABLE | (rpl << DESC_DPL_SHIFT) );
3383 ss_e2 = DESC_B_MASK; /* not really used */
3384 } else
3385# endif
3386#endif
3387 {
3388#if defined(VBOX) && defined(DEBUG)
3389 Log(("NULL ss, rpl=%d\n", rpl));
3390#endif
3391 raise_exception_err(EXCP0D_GPF, 0);
3392 }
3393 } else {
3394 if ((new_ss & 3) != rpl)
3395 {
3396#if defined(VBOX) && defined(DEBUG)
3397 Log(("new_ss=%x != rpl=%d\n", new_ss, rpl));
3398#endif
3399 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3400 }
3401 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3402 {
3403#if defined(VBOX) && defined(DEBUG)
3404 Log(("new_ss=%x load error\n", new_ss));
3405#endif
3406 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3407 }
3408 if (!(ss_e2 & DESC_S_MASK) ||
3409 (ss_e2 & DESC_CS_MASK) ||
3410 !(ss_e2 & DESC_W_MASK))
3411 {
3412#if defined(VBOX) && defined(DEBUG)
3413 Log(("new_ss=%x ss_e2=%#x bad type\n", new_ss, ss_e2));
3414#endif
3415 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3416 }
3417 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3418 if (dpl != rpl)
3419 {
3420#if defined(VBOX) && defined(DEBUG)
3421 Log(("SS.dpl=%u != rpl=%u\n", dpl, rpl));
3422#endif
3423 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3424 }
3425 if (!(ss_e2 & DESC_P_MASK))
3426 {
3427#if defined(VBOX) && defined(DEBUG)
3428 Log(("new_ss=%#x #NP\n", new_ss));
3429#endif
3430 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3431 }
3432#ifdef VBOX
3433 if (!(e2 & DESC_A_MASK))
3434 e2 = set_segment_accessed(new_cs, e2);
3435 if (!(ss_e2 & DESC_A_MASK))
3436 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3437#endif
3438 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3439 get_seg_base(ss_e1, ss_e2),
3440 get_seg_limit(ss_e1, ss_e2),
3441 ss_e2);
3442 }
3443
3444 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3445 get_seg_base(e1, e2),
3446 get_seg_limit(e1, e2),
3447 e2);
3448 cpu_x86_set_cpl(env, rpl);
3449 sp = new_esp;
3450#ifdef TARGET_X86_64
3451 if (env->hflags & HF_CS64_MASK)
3452 sp_mask = -1;
3453 else
3454#endif
3455 sp_mask = get_sp_mask(ss_e2);
3456
3457 /* validate data segments */
3458 validate_seg(R_ES, rpl);
3459 validate_seg(R_DS, rpl);
3460 validate_seg(R_FS, rpl);
3461 validate_seg(R_GS, rpl);
3462
3463 sp += addend;
3464 }
3465 SET_ESP(sp, sp_mask);
3466 env->eip = new_eip;
3467 if (is_iret) {
3468 /* NOTE: 'cpl' is the _old_ CPL */
3469 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3470 if (cpl == 0)
3471#ifdef VBOX
3472 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3473#else
3474 eflags_mask |= IOPL_MASK;
3475#endif
3476 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3477 if (cpl <= iopl)
3478 eflags_mask |= IF_MASK;
3479 if (shift == 0)
3480 eflags_mask &= 0xffff;
3481 load_eflags(new_eflags, eflags_mask);
3482 }
3483 return;
3484
3485 return_to_vm86:
3486 POPL(ssp, sp, sp_mask, new_esp);
3487 POPL(ssp, sp, sp_mask, new_ss);
3488 POPL(ssp, sp, sp_mask, new_es);
3489 POPL(ssp, sp, sp_mask, new_ds);
3490 POPL(ssp, sp, sp_mask, new_fs);
3491 POPL(ssp, sp, sp_mask, new_gs);
3492
3493 /* modify processor state */
3494 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3495 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3496 load_seg_vm(R_CS, new_cs & 0xffff);
3497 cpu_x86_set_cpl(env, 3);
3498 load_seg_vm(R_SS, new_ss & 0xffff);
3499 load_seg_vm(R_ES, new_es & 0xffff);
3500 load_seg_vm(R_DS, new_ds & 0xffff);
3501 load_seg_vm(R_FS, new_fs & 0xffff);
3502 load_seg_vm(R_GS, new_gs & 0xffff);
3503
3504 env->eip = new_eip & 0xffff;
3505 ESP = new_esp;
3506}
3507
3508void helper_iret_protected(int shift, int next_eip)
3509{
3510 int tss_selector, type;
3511 uint32_t e1, e2;
3512
3513#ifdef VBOX
3514 Log(("iret (shift=%d new_eip=%#x)\n", shift, next_eip));
3515 e1 = e2 = 0; /** @todo Why do we do this? */
3516 remR3TrapClear(env->pVM);
3517#endif
3518
3519 /* specific case for TSS */
3520 if (env->eflags & NT_MASK) {
3521#ifdef TARGET_X86_64
3522 if (env->hflags & HF_LMA_MASK)
3523 {
3524#if defined(VBOX) && defined(DEBUG)
3525 Log(("eflags.NT=1 on iret in long mode\n"));
3526#endif
3527 raise_exception_err(EXCP0D_GPF, 0);
3528 }
3529#endif
3530 tss_selector = lduw_kernel(env->tr.base + 0);
3531 if (tss_selector & 4)
3532 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3533 if (load_segment(&e1, &e2, tss_selector) != 0)
3534 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3535 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3536 /* NOTE: we check both segment and busy TSS */
3537 if (type != 3)
3538 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3539 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3540 } else {
3541 helper_ret_protected(shift, 1, 0);
3542 }
3543 env->hflags2 &= ~HF2_NMI_MASK;
3544}
3545
3546void helper_lret_protected(int shift, int addend)
3547{
3548 helper_ret_protected(shift, 0, addend);
3549}
3550
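/*
 * SYSENTER/SYSEXIT use flat segments derived from MSR_IA32_SYSENTER_CS:
 * SYSENTER loads CS from that MSR and SS from MSR+8, with ESP/EIP taken from
 * the SYSENTER_ESP/SYSENTER_EIP MSRs; SYSEXIT returns to CS = MSR+16 and
 * SS = MSR+24 (+32/+40 for the 64-bit variant), with ESP = ECX and EIP = EDX.
 */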
3551void helper_sysenter(void)
3552{
3553 if (env->sysenter_cs == 0) {
3554 raise_exception_err(EXCP0D_GPF, 0);
3555 }
3556 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3557 cpu_x86_set_cpl(env, 0);
3558
3559#ifdef TARGET_X86_64
3560 if (env->hflags & HF_LMA_MASK) {
3561 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3562 0, 0xffffffff,
3563 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3564 DESC_S_MASK |
3565 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3566 } else
3567#endif
3568 {
3569 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3570 0, 0xffffffff,
3571 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3572 DESC_S_MASK |
3573 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3574 }
3575 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3576 0, 0xffffffff,
3577 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3578 DESC_S_MASK |
3579 DESC_W_MASK | DESC_A_MASK);
3580 ESP = env->sysenter_esp;
3581 EIP = env->sysenter_eip;
3582}
3583
3584void helper_sysexit(int dflag)
3585{
3586 int cpl;
3587
3588 cpl = env->hflags & HF_CPL_MASK;
3589 if (env->sysenter_cs == 0 || cpl != 0) {
3590 raise_exception_err(EXCP0D_GPF, 0);
3591 }
3592 cpu_x86_set_cpl(env, 3);
3593#ifdef TARGET_X86_64
3594 if (dflag == 2) {
3595 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3596 0, 0xffffffff,
3597 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3598 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3599 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3600 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3601 0, 0xffffffff,
3602 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3603 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3604 DESC_W_MASK | DESC_A_MASK);
3605 } else
3606#endif
3607 {
3608 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3609 0, 0xffffffff,
3610 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3611 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3612 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3613 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3614 0, 0xffffffff,
3615 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3616 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3617 DESC_W_MASK | DESC_A_MASK);
3618 }
3619 ESP = ECX;
3620 EIP = EDX;
3621}
3622
3623#if defined(CONFIG_USER_ONLY)
3624target_ulong helper_read_crN(int reg)
3625{
3626 return 0;
3627}
3628
3629void helper_write_crN(int reg, target_ulong t0)
3630{
3631}
3632
3633void helper_movl_drN_T0(int reg, target_ulong t0)
3634{
3635}
3636#else
3637target_ulong helper_read_crN(int reg)
3638{
3639 target_ulong val;
3640
3641 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3642 switch(reg) {
3643 default:
3644 val = env->cr[reg];
3645 break;
3646 case 8:
3647 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3648#ifndef VBOX
3649 val = cpu_get_apic_tpr(env->apic_state);
3650#else /* VBOX */
3651 val = cpu_get_apic_tpr(env);
3652#endif /* VBOX */
3653 } else {
3654 val = env->v_tpr;
3655 }
3656 break;
3657 }
3658 return val;
3659}
3660
3661void helper_write_crN(int reg, target_ulong t0)
3662{
3663 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3664 switch(reg) {
3665 case 0:
3666 cpu_x86_update_cr0(env, t0);
3667 break;
3668 case 3:
3669 cpu_x86_update_cr3(env, t0);
3670 break;
3671 case 4:
3672 cpu_x86_update_cr4(env, t0);
3673 break;
3674 case 8:
3675 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3676#ifndef VBOX
3677 cpu_set_apic_tpr(env->apic_state, t0);
3678#else /* VBOX */
3679 cpu_set_apic_tpr(env, t0);
3680#endif /* VBOX */
3681 }
3682 env->v_tpr = t0 & 0x0f;
3683 break;
3684 default:
3685 env->cr[reg] = t0;
3686 break;
3687 }
3688}
3689
3690void helper_movl_drN_T0(int reg, target_ulong t0)
3691{
3692 int i;
3693
3694 if (reg < 4) {
3695 hw_breakpoint_remove(env, reg);
3696 env->dr[reg] = t0;
3697 hw_breakpoint_insert(env, reg);
3698# ifndef VBOX
3699 } else if (reg == 7) {
3700# else
3701 } else if (reg == 7 || reg == 5) { /* (DR5 is an alias for DR7.) */
3702 if (t0 & X86_DR7_MBZ_MASK)
3703 raise_exception_err(EXCP0D_GPF, 0);
3704 t0 |= X86_DR7_RA1_MASK;
3705 t0 &= ~X86_DR7_RAZ_MASK;
3706# endif
3707 for (i = 0; i < 4; i++)
3708 hw_breakpoint_remove(env, i);
3709 env->dr[7] = t0;
3710 for (i = 0; i < 4; i++)
3711 hw_breakpoint_insert(env, i);
3712 } else {
3713# ifndef VBOX
3714 env->dr[reg] = t0;
3715# else
3716 if (t0 & X86_DR6_MBZ_MASK)
3717 raise_exception_err(EXCP0D_GPF, 0);
3718 t0 |= X86_DR6_RA1_MASK;
3719 t0 &= ~X86_DR6_RAZ_MASK;
3720 env->dr[6] = t0; /* (DR4 is an alias for DR6.) */
3721# endif
3722 }
3723}
3724#endif
3725
3726void helper_lmsw(target_ulong t0)
3727{
3728 /* Only the 4 lower bits of CR0 are modified. LMSW cannot clear PE
3729 once it has been set. */
3730 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3731 helper_write_crN(0, t0);
3732}
3733
3734void helper_clts(void)
3735{
3736 env->cr[0] &= ~CR0_TS_MASK;
3737 env->hflags &= ~HF_TS_MASK;
3738}
3739
3740void helper_invlpg(target_ulong addr)
3741{
3742 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3743 tlb_flush_page(env, addr);
3744}
3745
3746void helper_rdtsc(void)
3747{
3748 uint64_t val;
3749
3750 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3751 raise_exception(EXCP0D_GPF);
3752 }
3753 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3754
3755 val = cpu_get_tsc(env) + env->tsc_offset;
3756 EAX = (uint32_t)(val);
3757 EDX = (uint32_t)(val >> 32);
3758}
3759
3760void helper_rdtscp(void)
3761{
3762 helper_rdtsc();
3763#ifndef VBOX
3764 ECX = (uint32_t)(env->tsc_aux);
3765#else /* VBOX */
3766 uint64_t val;
3767 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3768 ECX = (uint32_t)(val);
3769 else
3770 ECX = 0;
3771#endif /* VBOX */
3772}
3773
3774void helper_rdpmc(void)
3775{
3776#ifdef VBOX
3777 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3778 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3779 raise_exception(EXCP0D_GPF);
3780 }
3781 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3782 EAX = 0;
3783 EDX = 0;
3784#else /* !VBOX */
3785 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3786 raise_exception(EXCP0D_GPF);
3787 }
3788 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3789
3790 /* currently unimplemented */
3791 raise_exception_err(EXCP06_ILLOP, 0);
3792#endif /* !VBOX */
3793}
3794
3795#if defined(CONFIG_USER_ONLY)
3796void helper_wrmsr(void)
3797{
3798}
3799
3800void helper_rdmsr(void)
3801{
3802}
3803#else
3804void helper_wrmsr(void)
3805{
3806 uint64_t val;
3807
3808 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3809
3810 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3811
3812 switch((uint32_t)ECX) {
3813 case MSR_IA32_SYSENTER_CS:
3814 env->sysenter_cs = val & 0xffff;
3815 break;
3816 case MSR_IA32_SYSENTER_ESP:
3817 env->sysenter_esp = val;
3818 break;
3819 case MSR_IA32_SYSENTER_EIP:
3820 env->sysenter_eip = val;
3821 break;
3822 case MSR_IA32_APICBASE:
3823# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3824 cpu_set_apic_base(env->apic_state, val);
3825# endif
3826 break;
3827 case MSR_EFER:
3828 {
3829 uint64_t update_mask;
3830 update_mask = 0;
3831 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3832 update_mask |= MSR_EFER_SCE;
3833 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3834 update_mask |= MSR_EFER_LME;
3835 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3836 update_mask |= MSR_EFER_FFXSR;
3837 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3838 update_mask |= MSR_EFER_NXE;
3839 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3840 update_mask |= MSR_EFER_SVME;
3841 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3842 update_mask |= MSR_EFER_FFXSR;
3843 cpu_load_efer(env, (env->efer & ~update_mask) |
3844 (val & update_mask));
3845 }
3846 break;
3847 case MSR_STAR:
3848 env->star = val;
3849 break;
3850 case MSR_PAT:
3851 env->pat = val;
3852 break;
3853 case MSR_VM_HSAVE_PA:
3854 env->vm_hsave = val;
3855 break;
3856#ifdef TARGET_X86_64
3857 case MSR_LSTAR:
3858 env->lstar = val;
3859 break;
3860 case MSR_CSTAR:
3861 env->cstar = val;
3862 break;
3863 case MSR_FMASK:
3864 env->fmask = val;
3865 break;
3866 case MSR_FSBASE:
3867 env->segs[R_FS].base = val;
3868 break;
3869 case MSR_GSBASE:
3870 env->segs[R_GS].base = val;
3871 break;
3872 case MSR_KERNELGSBASE:
3873 env->kernelgsbase = val;
3874 break;
3875#endif
3876# ifndef VBOX
3877 case MSR_MTRRphysBase(0):
3878 case MSR_MTRRphysBase(1):
3879 case MSR_MTRRphysBase(2):
3880 case MSR_MTRRphysBase(3):
3881 case MSR_MTRRphysBase(4):
3882 case MSR_MTRRphysBase(5):
3883 case MSR_MTRRphysBase(6):
3884 case MSR_MTRRphysBase(7):
3885 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3886 break;
3887 case MSR_MTRRphysMask(0):
3888 case MSR_MTRRphysMask(1):
3889 case MSR_MTRRphysMask(2):
3890 case MSR_MTRRphysMask(3):
3891 case MSR_MTRRphysMask(4):
3892 case MSR_MTRRphysMask(5):
3893 case MSR_MTRRphysMask(6):
3894 case MSR_MTRRphysMask(7):
3895 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3896 break;
3897 case MSR_MTRRfix64K_00000:
3898 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3899 break;
3900 case MSR_MTRRfix16K_80000:
3901 case MSR_MTRRfix16K_A0000:
3902 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3903 break;
3904 case MSR_MTRRfix4K_C0000:
3905 case MSR_MTRRfix4K_C8000:
3906 case MSR_MTRRfix4K_D0000:
3907 case MSR_MTRRfix4K_D8000:
3908 case MSR_MTRRfix4K_E0000:
3909 case MSR_MTRRfix4K_E8000:
3910 case MSR_MTRRfix4K_F0000:
3911 case MSR_MTRRfix4K_F8000:
3912 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3913 break;
3914 case MSR_MTRRdefType:
3915 env->mtrr_deftype = val;
3916 break;
3917 case MSR_MCG_STATUS:
3918 env->mcg_status = val;
3919 break;
3920 case MSR_MCG_CTL:
3921 if ((env->mcg_cap & MCG_CTL_P)
3922 && (val == 0 || val == ~(uint64_t)0))
3923 env->mcg_ctl = val;
3924 break;
3925 case MSR_TSC_AUX:
3926 env->tsc_aux = val;
3927 break;
3928# endif /* !VBOX */
3929 default:
3930# ifndef VBOX
3931 if ((uint32_t)ECX >= MSR_MC0_CTL
3932            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3933 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3934 if ((offset & 0x3) != 0
3935 || (val == 0 || val == ~(uint64_t)0))
3936 env->mce_banks[offset] = val;
3937 break;
3938 }
3939 /* XXX: exception ? */
3940# endif
3941 break;
3942 }
3943
3944# ifdef VBOX
3945 /* call CPUM. */
3946 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3947 {
3948 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3949 }
3950# endif
3951}
3952
3953void helper_rdmsr(void)
3954{
3955 uint64_t val;
3956
3957 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3958
3959 switch((uint32_t)ECX) {
3960 case MSR_IA32_SYSENTER_CS:
3961 val = env->sysenter_cs;
3962 break;
3963 case MSR_IA32_SYSENTER_ESP:
3964 val = env->sysenter_esp;
3965 break;
3966 case MSR_IA32_SYSENTER_EIP:
3967 val = env->sysenter_eip;
3968 break;
3969 case MSR_IA32_APICBASE:
3970#ifndef VBOX
3971 val = cpu_get_apic_base(env->apic_state);
3972#else /* VBOX */
3973 val = cpu_get_apic_base(env);
3974#endif /* VBOX */
3975 break;
3976 case MSR_EFER:
3977 val = env->efer;
3978 break;
3979 case MSR_STAR:
3980 val = env->star;
3981 break;
3982 case MSR_PAT:
3983 val = env->pat;
3984 break;
3985 case MSR_VM_HSAVE_PA:
3986 val = env->vm_hsave;
3987 break;
3988# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3989 case MSR_IA32_PERF_STATUS:
3990 /* tsc_increment_by_tick */
3991 val = 1000ULL;
3992 /* CPU multiplier */
3993 val |= (((uint64_t)4ULL) << 40);
3994 break;
3995# endif /* !VBOX */
3996#ifdef TARGET_X86_64
3997 case MSR_LSTAR:
3998 val = env->lstar;
3999 break;
4000 case MSR_CSTAR:
4001 val = env->cstar;
4002 break;
4003 case MSR_FMASK:
4004 val = env->fmask;
4005 break;
4006 case MSR_FSBASE:
4007 val = env->segs[R_FS].base;
4008 break;
4009 case MSR_GSBASE:
4010 val = env->segs[R_GS].base;
4011 break;
4012 case MSR_KERNELGSBASE:
4013 val = env->kernelgsbase;
4014 break;
4015# ifndef VBOX
4016 case MSR_TSC_AUX:
4017 val = env->tsc_aux;
4018 break;
4019# endif /*!VBOX*/
4020#endif
4021# ifndef VBOX
4022 case MSR_MTRRphysBase(0):
4023 case MSR_MTRRphysBase(1):
4024 case MSR_MTRRphysBase(2):
4025 case MSR_MTRRphysBase(3):
4026 case MSR_MTRRphysBase(4):
4027 case MSR_MTRRphysBase(5):
4028 case MSR_MTRRphysBase(6):
4029 case MSR_MTRRphysBase(7):
4030 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
4031 break;
4032 case MSR_MTRRphysMask(0):
4033 case MSR_MTRRphysMask(1):
4034 case MSR_MTRRphysMask(2):
4035 case MSR_MTRRphysMask(3):
4036 case MSR_MTRRphysMask(4):
4037 case MSR_MTRRphysMask(5):
4038 case MSR_MTRRphysMask(6):
4039 case MSR_MTRRphysMask(7):
4040 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
4041 break;
4042 case MSR_MTRRfix64K_00000:
4043 val = env->mtrr_fixed[0];
4044 break;
4045 case MSR_MTRRfix16K_80000:
4046 case MSR_MTRRfix16K_A0000:
4047 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
4048 break;
4049 case MSR_MTRRfix4K_C0000:
4050 case MSR_MTRRfix4K_C8000:
4051 case MSR_MTRRfix4K_D0000:
4052 case MSR_MTRRfix4K_D8000:
4053 case MSR_MTRRfix4K_E0000:
4054 case MSR_MTRRfix4K_E8000:
4055 case MSR_MTRRfix4K_F0000:
4056 case MSR_MTRRfix4K_F8000:
4057 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
4058 break;
4059 case MSR_MTRRdefType:
4060 val = env->mtrr_deftype;
4061 break;
4062 case MSR_MTRRcap:
4063 if (env->cpuid_features & CPUID_MTRR)
4064 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
4065 else
4066 /* XXX: exception ? */
4067 val = 0;
4068 break;
4069 case MSR_MCG_CAP:
4070 val = env->mcg_cap;
4071 break;
4072 case MSR_MCG_CTL:
4073 if (env->mcg_cap & MCG_CTL_P)
4074 val = env->mcg_ctl;
4075 else
4076 val = 0;
4077 break;
4078 case MSR_MCG_STATUS:
4079 val = env->mcg_status;
4080 break;
4081# endif /* !VBOX */
4082 default:
4083# ifndef VBOX
4084 if ((uint32_t)ECX >= MSR_MC0_CTL
4085            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
4086 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
4087 val = env->mce_banks[offset];
4088 break;
4089 }
4090 /* XXX: exception ? */
4091 val = 0;
4092# else /* VBOX */
4093 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4094 {
4095 /** @todo be a brave man and raise a \#GP(0) here as we should... */
4096 val = 0;
4097 }
4098# endif /* VBOX */
4099 break;
4100 }
4101 EAX = (uint32_t)(val);
4102 EDX = (uint32_t)(val >> 32);
4103
4104# ifdef VBOX_STRICT
4105 if ((uint32_t)ECX != MSR_IA32_TSC) {
4106 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4107 val = 0;
4108 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
4109 }
4110# endif
4111}
4112#endif
4113
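/* Note: for system descriptors the type values checked below follow the IA-32
   encodings: 1/3 = 16-bit TSS (available/busy), 2 = LDT, 9/11 = 32-bit TSS
   (available/busy). LAR additionally accepts 4/5/12 (16-bit call gate, task
   gate, 32-bit call gate). */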
4114target_ulong helper_lsl(target_ulong selector1)
4115{
4116 unsigned int limit;
4117 uint32_t e1, e2, eflags, selector;
4118 int rpl, dpl, cpl, type;
4119
4120 selector = selector1 & 0xffff;
4121 eflags = helper_cc_compute_all(CC_OP);
4122 if ((selector & 0xfffc) == 0)
4123 goto fail;
4124 if (load_segment(&e1, &e2, selector) != 0)
4125 goto fail;
4126 rpl = selector & 3;
4127 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4128 cpl = env->hflags & HF_CPL_MASK;
4129 if (e2 & DESC_S_MASK) {
4130 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4131 /* conforming */
4132 } else {
4133 if (dpl < cpl || dpl < rpl)
4134 goto fail;
4135 }
4136 } else {
4137 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4138 switch(type) {
4139 case 1:
4140 case 2:
4141 case 3:
4142 case 9:
4143 case 11:
4144 break;
4145 default:
4146 goto fail;
4147 }
4148 if (dpl < cpl || dpl < rpl) {
4149 fail:
4150 CC_SRC = eflags & ~CC_Z;
4151 return 0;
4152 }
4153 }
4154 limit = get_seg_limit(e1, e2);
4155 CC_SRC = eflags | CC_Z;
4156 return limit;
4157}
4158
4159target_ulong helper_lar(target_ulong selector1)
4160{
4161 uint32_t e1, e2, eflags, selector;
4162 int rpl, dpl, cpl, type;
4163
4164 selector = selector1 & 0xffff;
4165 eflags = helper_cc_compute_all(CC_OP);
4166 if ((selector & 0xfffc) == 0)
4167 goto fail;
4168 if (load_segment(&e1, &e2, selector) != 0)
4169 goto fail;
4170 rpl = selector & 3;
4171 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4172 cpl = env->hflags & HF_CPL_MASK;
4173 if (e2 & DESC_S_MASK) {
4174 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4175 /* conforming */
4176 } else {
4177 if (dpl < cpl || dpl < rpl)
4178 goto fail;
4179 }
4180 } else {
4181 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4182 switch(type) {
4183 case 1:
4184 case 2:
4185 case 3:
4186 case 4:
4187 case 5:
4188 case 9:
4189 case 11:
4190 case 12:
4191 break;
4192 default:
4193 goto fail;
4194 }
4195 if (dpl < cpl || dpl < rpl) {
4196 fail:
4197 CC_SRC = eflags & ~CC_Z;
4198 return 0;
4199 }
4200 }
4201 CC_SRC = eflags | CC_Z;
4202#ifdef VBOX /* AMD says 0x00ffff00, while Intel says 0x00fxff00. Bochs and IEM do as AMD says (x=f). */
4203 return e2 & 0x00ffff00;
4204#else
4205 return e2 & 0x00f0ff00;
4206#endif
4207}
4208
4209void helper_verr(target_ulong selector1)
4210{
4211 uint32_t e1, e2, eflags, selector;
4212 int rpl, dpl, cpl;
4213
4214 selector = selector1 & 0xffff;
4215 eflags = helper_cc_compute_all(CC_OP);
4216 if ((selector & 0xfffc) == 0)
4217 goto fail;
4218 if (load_segment(&e1, &e2, selector) != 0)
4219 goto fail;
4220 if (!(e2 & DESC_S_MASK))
4221 goto fail;
4222 rpl = selector & 3;
4223 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4224 cpl = env->hflags & HF_CPL_MASK;
4225 if (e2 & DESC_CS_MASK) {
4226 if (!(e2 & DESC_R_MASK))
4227 goto fail;
4228 if (!(e2 & DESC_C_MASK)) {
4229 if (dpl < cpl || dpl < rpl)
4230 goto fail;
4231 }
4232 } else {
4233 if (dpl < cpl || dpl < rpl) {
4234 fail:
4235 CC_SRC = eflags & ~CC_Z;
4236 return;
4237 }
4238 }
4239 CC_SRC = eflags | CC_Z;
4240}
4241
4242void helper_verw(target_ulong selector1)
4243{
4244 uint32_t e1, e2, eflags, selector;
4245 int rpl, dpl, cpl;
4246
4247 selector = selector1 & 0xffff;
4248 eflags = helper_cc_compute_all(CC_OP);
4249 if ((selector & 0xfffc) == 0)
4250 goto fail;
4251 if (load_segment(&e1, &e2, selector) != 0)
4252 goto fail;
4253 if (!(e2 & DESC_S_MASK))
4254 goto fail;
4255 rpl = selector & 3;
4256 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4257 cpl = env->hflags & HF_CPL_MASK;
4258 if (e2 & DESC_CS_MASK) {
4259 goto fail;
4260 } else {
4261 if (dpl < cpl || dpl < rpl)
4262 goto fail;
4263 if (!(e2 & DESC_W_MASK)) {
4264 fail:
4265 CC_SRC = eflags & ~CC_Z;
4266 return;
4267 }
4268 }
4269 CC_SRC = eflags | CC_Z;
4270}
4271
4272/* x87 FPU helpers */
4273
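/* Note: fpu_set_exception() records an exception in the status word; if any
   pending exception is unmasked in the control word, the summary (ES) and
   busy (B) bits are set as well. */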
4274static void fpu_set_exception(int mask)
4275{
4276 env->fpus |= mask;
4277 if (env->fpus & (~env->fpuc & FPUC_EM))
4278 env->fpus |= FPUS_SE | FPUS_B;
4279}
4280
4281static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4282{
4283 if (b == 0.0)
4284 fpu_set_exception(FPUS_ZE);
4285 return a / b;
4286}
4287
4288static void fpu_raise_exception(void)
4289{
4290 if (env->cr[0] & CR0_NE_MASK) {
4291 raise_exception(EXCP10_COPR);
4292 }
4293#if !defined(CONFIG_USER_ONLY)
4294 else {
4295 cpu_set_ferr(env);
4296 }
4297#endif
4298}
4299
4300void helper_flds_FT0(uint32_t val)
4301{
4302 union {
4303 float32 f;
4304 uint32_t i;
4305 } u;
4306 u.i = val;
4307 FT0 = float32_to_floatx(u.f, &env->fp_status);
4308}
4309
4310void helper_fldl_FT0(uint64_t val)
4311{
4312 union {
4313 float64 f;
4314 uint64_t i;
4315 } u;
4316 u.i = val;
4317 FT0 = float64_to_floatx(u.f, &env->fp_status);
4318}
4319
4320void helper_fildl_FT0(int32_t val)
4321{
4322 FT0 = int32_to_floatx(val, &env->fp_status);
4323}
4324
4325void helper_flds_ST0(uint32_t val)
4326{
4327 int new_fpstt;
4328 union {
4329 float32 f;
4330 uint32_t i;
4331 } u;
4332 new_fpstt = (env->fpstt - 1) & 7;
4333 u.i = val;
4334 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4335 env->fpstt = new_fpstt;
4336 env->fptags[new_fpstt] = 0; /* validate stack entry */
4337}
4338
4339void helper_fldl_ST0(uint64_t val)
4340{
4341 int new_fpstt;
4342 union {
4343 float64 f;
4344 uint64_t i;
4345 } u;
4346 new_fpstt = (env->fpstt - 1) & 7;
4347 u.i = val;
4348 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4349 env->fpstt = new_fpstt;
4350 env->fptags[new_fpstt] = 0; /* validate stack entry */
4351}
4352
4353void helper_fildl_ST0(int32_t val)
4354{
4355 int new_fpstt;
4356 new_fpstt = (env->fpstt - 1) & 7;
4357 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4358 env->fpstt = new_fpstt;
4359 env->fptags[new_fpstt] = 0; /* validate stack entry */
4360}
4361
4362void helper_fildll_ST0(int64_t val)
4363{
4364 int new_fpstt;
4365 new_fpstt = (env->fpstt - 1) & 7;
4366 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4367 env->fpstt = new_fpstt;
4368 env->fptags[new_fpstt] = 0; /* validate stack entry */
4369}
4370
4371#ifndef VBOX
4372uint32_t helper_fsts_ST0(void)
4373#else
4374RTCCUINTREG helper_fsts_ST0(void)
4375#endif
4376{
4377 union {
4378 float32 f;
4379 uint32_t i;
4380 } u;
4381 u.f = floatx_to_float32(ST0, &env->fp_status);
4382 return u.i;
4383}
4384
4385uint64_t helper_fstl_ST0(void)
4386{
4387 union {
4388 float64 f;
4389 uint64_t i;
4390 } u;
4391 u.f = floatx_to_float64(ST0, &env->fp_status);
4392 return u.i;
4393}
4394
4395#ifndef VBOX
4396int32_t helper_fist_ST0(void)
4397#else
4398RTCCINTREG helper_fist_ST0(void)
4399#endif
4400{
4401 int32_t val;
4402 val = floatx_to_int32(ST0, &env->fp_status);
4403 if (val != (int16_t)val)
4404 val = -32768;
4405 return val;
4406}
4407
4408#ifndef VBOX
4409int32_t helper_fistl_ST0(void)
4410#else
4411RTCCINTREG helper_fistl_ST0(void)
4412#endif
4413{
4414 int32_t val;
4415 val = floatx_to_int32(ST0, &env->fp_status);
4416 return val;
4417}
4418
4419int64_t helper_fistll_ST0(void)
4420{
4421 int64_t val;
4422 val = floatx_to_int64(ST0, &env->fp_status);
4423 return val;
4424}
4425
4426#ifndef VBOX
4427int32_t helper_fistt_ST0(void)
4428#else
4429RTCCINTREG helper_fistt_ST0(void)
4430#endif
4431{
4432 int32_t val;
4433 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4434 if (val != (int16_t)val)
4435 val = -32768;
4436 return val;
4437}
4438
4439#ifndef VBOX
4440int32_t helper_fisttl_ST0(void)
4441#else
4442RTCCINTREG helper_fisttl_ST0(void)
4443#endif
4444{
4445 int32_t val;
4446 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4447 return val;
4448}
4449
4450int64_t helper_fisttll_ST0(void)
4451{
4452 int64_t val;
4453 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4454 return val;
4455}
4456
4457void helper_fldt_ST0(target_ulong ptr)
4458{
4459 int new_fpstt;
4460 new_fpstt = (env->fpstt - 1) & 7;
4461 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4462 env->fpstt = new_fpstt;
4463 env->fptags[new_fpstt] = 0; /* validate stack entry */
4464}
4465
4466void helper_fstt_ST0(target_ulong ptr)
4467{
4468 helper_fstt(ST0, ptr);
4469}
4470
4471void helper_fpush(void)
4472{
4473 fpush();
4474}
4475
4476void helper_fpop(void)
4477{
4478 fpop();
4479}
4480
4481void helper_fdecstp(void)
4482{
4483 env->fpstt = (env->fpstt - 1) & 7;
4484 env->fpus &= (~0x4700);
4485}
4486
4487void helper_fincstp(void)
4488{
4489 env->fpstt = (env->fpstt + 1) & 7;
4490 env->fpus &= (~0x4700);
4491}
4492
4493/* FPU move */
4494
4495void helper_ffree_STN(int st_index)
4496{
4497 env->fptags[(env->fpstt + st_index) & 7] = 1;
4498}
4499
4500void helper_fmov_ST0_FT0(void)
4501{
4502 ST0 = FT0;
4503}
4504
4505void helper_fmov_FT0_STN(int st_index)
4506{
4507 FT0 = ST(st_index);
4508}
4509
4510void helper_fmov_ST0_STN(int st_index)
4511{
4512 ST0 = ST(st_index);
4513}
4514
4515void helper_fmov_STN_ST0(int st_index)
4516{
4517 ST(st_index) = ST0;
4518}
4519
4520void helper_fxchg_ST0_STN(int st_index)
4521{
4522 CPU86_LDouble tmp;
4523 tmp = ST(st_index);
4524 ST(st_index) = ST0;
4525 ST0 = tmp;
4526}
4527
4528/* FPU operations */
4529
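/* Mapping from the softfloat comparison result (assuming the usual
   float_relation_* convention: -1 less, 0 equal, 1 greater, 2 unordered),
   indexed as ret + 1, onto the x87 condition codes:
   less -> C0, equal -> C3, greater -> none, unordered -> C3|C2|C0. */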
4530static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4531
4532void helper_fcom_ST0_FT0(void)
4533{
4534 int ret;
4535
4536 ret = floatx_compare(ST0, FT0, &env->fp_status);
4537 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4538}
4539
4540void helper_fucom_ST0_FT0(void)
4541{
4542 int ret;
4543
4544 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4545    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4546}
4547
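/* Same indexing for FCOMI/FUCOMI, but mapped onto EFLAGS:
   less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */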
4548static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4549
4550void helper_fcomi_ST0_FT0(void)
4551{
4552 int eflags;
4553 int ret;
4554
4555 ret = floatx_compare(ST0, FT0, &env->fp_status);
4556 eflags = helper_cc_compute_all(CC_OP);
4557 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4558 CC_SRC = eflags;
4559}
4560
4561void helper_fucomi_ST0_FT0(void)
4562{
4563 int eflags;
4564 int ret;
4565
4566 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4567 eflags = helper_cc_compute_all(CC_OP);
4568 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4569 CC_SRC = eflags;
4570}
4571
4572void helper_fadd_ST0_FT0(void)
4573{
4574 ST0 += FT0;
4575}
4576
4577void helper_fmul_ST0_FT0(void)
4578{
4579 ST0 *= FT0;
4580}
4581
4582void helper_fsub_ST0_FT0(void)
4583{
4584 ST0 -= FT0;
4585}
4586
4587void helper_fsubr_ST0_FT0(void)
4588{
4589 ST0 = FT0 - ST0;
4590}
4591
4592void helper_fdiv_ST0_FT0(void)
4593{
4594 ST0 = helper_fdiv(ST0, FT0);
4595}
4596
4597void helper_fdivr_ST0_FT0(void)
4598{
4599 ST0 = helper_fdiv(FT0, ST0);
4600}
4601
4602/* fp operations between STN and ST0 */
4603
4604void helper_fadd_STN_ST0(int st_index)
4605{
4606 ST(st_index) += ST0;
4607}
4608
4609void helper_fmul_STN_ST0(int st_index)
4610{
4611 ST(st_index) *= ST0;
4612}
4613
4614void helper_fsub_STN_ST0(int st_index)
4615{
4616 ST(st_index) -= ST0;
4617}
4618
4619void helper_fsubr_STN_ST0(int st_index)
4620{
4621 CPU86_LDouble *p;
4622 p = &ST(st_index);
4623 *p = ST0 - *p;
4624}
4625
4626void helper_fdiv_STN_ST0(int st_index)
4627{
4628 CPU86_LDouble *p;
4629 p = &ST(st_index);
4630 *p = helper_fdiv(*p, ST0);
4631}
4632
4633void helper_fdivr_STN_ST0(int st_index)
4634{
4635 CPU86_LDouble *p;
4636 p = &ST(st_index);
4637 *p = helper_fdiv(ST0, *p);
4638}
4639
4640/* misc FPU operations */
4641void helper_fchs_ST0(void)
4642{
4643 ST0 = floatx_chs(ST0);
4644}
4645
4646void helper_fabs_ST0(void)
4647{
4648 ST0 = floatx_abs(ST0);
4649}
4650
4651void helper_fld1_ST0(void)
4652{
4653 ST0 = f15rk[1];
4654}
4655
4656void helper_fldl2t_ST0(void)
4657{
4658 ST0 = f15rk[6];
4659}
4660
4661void helper_fldl2e_ST0(void)
4662{
4663 ST0 = f15rk[5];
4664}
4665
4666void helper_fldpi_ST0(void)
4667{
4668 ST0 = f15rk[2];
4669}
4670
4671void helper_fldlg2_ST0(void)
4672{
4673 ST0 = f15rk[3];
4674}
4675
4676void helper_fldln2_ST0(void)
4677{
4678 ST0 = f15rk[4];
4679}
4680
4681void helper_fldz_ST0(void)
4682{
4683 ST0 = f15rk[0];
4684}
4685
4686void helper_fldz_FT0(void)
4687{
4688 FT0 = f15rk[0];
4689}
4690
4691#ifndef VBOX
4692uint32_t helper_fnstsw(void)
4693#else
4694RTCCUINTREG helper_fnstsw(void)
4695#endif
4696{
4697 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4698}
4699
4700#ifndef VBOX
4701uint32_t helper_fnstcw(void)
4702#else
4703RTCCUINTREG helper_fnstcw(void)
4704#endif
4705{
4706 return env->fpuc;
4707}
4708
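/* Note: the FPU control word encodes the rounding control in bits 10-11
   (RC_MASK) and the precision control in bits 8-9; the latter is mapped
   below onto 32/64/80-bit rounding precision for the FLOATX80 case. */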
4709static void update_fp_status(void)
4710{
4711 int rnd_type;
4712
4713 /* set rounding mode */
4714 switch(env->fpuc & RC_MASK) {
4715 default:
4716 case RC_NEAR:
4717 rnd_type = float_round_nearest_even;
4718 break;
4719 case RC_DOWN:
4720 rnd_type = float_round_down;
4721 break;
4722 case RC_UP:
4723 rnd_type = float_round_up;
4724 break;
4725 case RC_CHOP:
4726 rnd_type = float_round_to_zero;
4727 break;
4728 }
4729 set_float_rounding_mode(rnd_type, &env->fp_status);
4730#ifdef FLOATX80
4731 switch((env->fpuc >> 8) & 3) {
4732 case 0:
4733 rnd_type = 32;
4734 break;
4735 case 2:
4736 rnd_type = 64;
4737 break;
4738 case 3:
4739 default:
4740 rnd_type = 80;
4741 break;
4742 }
4743 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4744#endif
4745}
4746
4747void helper_fldcw(uint32_t val)
4748{
4749 env->fpuc = val;
4750 update_fp_status();
4751}
4752
4753void helper_fclex(void)
4754{
4755 env->fpus &= 0x7f00;
4756}
4757
4758void helper_fwait(void)
4759{
4760 if (env->fpus & FPUS_SE)
4761 fpu_raise_exception();
4762}
4763
4764void helper_fninit(void)
4765{
4766 env->fpus = 0;
4767 env->fpstt = 0;
4768 env->fpuc = 0x37f;
4769 env->fptags[0] = 1;
4770 env->fptags[1] = 1;
4771 env->fptags[2] = 1;
4772 env->fptags[3] = 1;
4773 env->fptags[4] = 1;
4774 env->fptags[5] = 1;
4775 env->fptags[6] = 1;
4776 env->fptags[7] = 1;
4777}
4778
4779/* BCD ops */
4780
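/* Packed BCD (FBLD/FBST) operand layout: bytes 0-8 hold 18 BCD digits, two
   per byte with the least significant pair at the lowest address; bit 7 of
   byte 9 is the sign. E.g. decimal 1234 is stored as 0x34 0x12 0x00 ... with
   a zero sign byte. */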
4781void helper_fbld_ST0(target_ulong ptr)
4782{
4783 CPU86_LDouble tmp;
4784 uint64_t val;
4785 unsigned int v;
4786 int i;
4787
4788 val = 0;
4789 for(i = 8; i >= 0; i--) {
4790 v = ldub(ptr + i);
4791 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4792 }
4793 tmp = val;
4794 if (ldub(ptr + 9) & 0x80)
4795 tmp = -tmp;
4796 fpush();
4797 ST0 = tmp;
4798}
4799
4800void helper_fbst_ST0(target_ulong ptr)
4801{
4802 int v;
4803 target_ulong mem_ref, mem_end;
4804 int64_t val;
4805
4806 val = floatx_to_int64(ST0, &env->fp_status);
4807 mem_ref = ptr;
4808 mem_end = mem_ref + 9;
4809 if (val < 0) {
4810 stb(mem_end, 0x80);
4811 val = -val;
4812 } else {
4813 stb(mem_end, 0x00);
4814 }
4815 while (mem_ref < mem_end) {
4816 if (val == 0)
4817 break;
4818 v = val % 100;
4819 val = val / 100;
4820 v = ((v / 10) << 4) | (v % 10);
4821 stb(mem_ref++, v);
4822 }
4823 while (mem_ref < mem_end) {
4824 stb(mem_ref++, 0);
4825 }
4826}
4827
4828void helper_f2xm1(void)
4829{
4830 ST0 = pow(2.0,ST0) - 1.0;
4831}
4832
4833void helper_fyl2x(void)
4834{
4835 CPU86_LDouble fptemp;
4836
4837 fptemp = ST0;
4838 if (fptemp>0.0){
4839 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4840 ST1 *= fptemp;
4841 fpop();
4842 } else {
4843 env->fpus &= (~0x4700);
4844 env->fpus |= 0x400;
4845 }
4846}
4847
4848void helper_fptan(void)
4849{
4850 CPU86_LDouble fptemp;
4851
4852 fptemp = ST0;
4853 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4854 env->fpus |= 0x400;
4855 } else {
4856 ST0 = tan(fptemp);
4857 fpush();
4858 ST0 = 1.0;
4859 env->fpus &= (~0x400); /* C2 <-- 0 */
4860 /* the above code is for |arg| < 2**52 only */
4861 }
4862}
4863
4864void helper_fpatan(void)
4865{
4866 CPU86_LDouble fptemp, fpsrcop;
4867
4868 fpsrcop = ST1;
4869 fptemp = ST0;
4870 ST1 = atan2(fpsrcop,fptemp);
4871 fpop();
4872}
4873
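/* Note: FXTRACT splits ST0 into an unbiased exponent and a significand in
   [1,2); e.g. ST0 = 6.0 leaves ST1 = 2.0 (the exponent) and ST0 = 1.5. */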
4874void helper_fxtract(void)
4875{
4876 CPU86_LDoubleU temp;
4877 unsigned int expdif;
4878
4879 temp.d = ST0;
4880 expdif = EXPD(temp) - EXPBIAS;
4881 /*DP exponent bias*/
4882 ST0 = expdif;
4883 fpush();
4884 BIASEXPONENT(temp);
4885 ST0 = temp.d;
4886}
4887
4888void helper_fprem1(void)
4889{
4890 CPU86_LDouble dblq, fpsrcop, fptemp;
4891 CPU86_LDoubleU fpsrcop1, fptemp1;
4892 int expdif;
4893 signed long long int q;
4894
4895#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4896 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4897#else
4898 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4899#endif
4900 ST0 = 0.0 / 0.0; /* NaN */
4901 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4902 return;
4903 }
4904
4905 fpsrcop = ST0;
4906 fptemp = ST1;
4907 fpsrcop1.d = fpsrcop;
4908 fptemp1.d = fptemp;
4909 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4910
4911 if (expdif < 0) {
4912 /* optimisation? taken from the AMD docs */
4913 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4914 /* ST0 is unchanged */
4915 return;
4916 }
4917
4918 if (expdif < 53) {
4919 dblq = fpsrcop / fptemp;
4920 /* round dblq towards nearest integer */
4921 dblq = rint(dblq);
4922 ST0 = fpsrcop - fptemp * dblq;
4923
4924 /* convert dblq to q by truncating towards zero */
4925 if (dblq < 0.0)
4926 q = (signed long long int)(-dblq);
4927 else
4928 q = (signed long long int)dblq;
4929
4930 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4931 /* (C0,C3,C1) <-- (q2,q1,q0) */
4932 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4933 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4934 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4935 } else {
4936 env->fpus |= 0x400; /* C2 <-- 1 */
4937 fptemp = pow(2.0, expdif - 50);
4938 fpsrcop = (ST0 / ST1) / fptemp;
4939 /* fpsrcop = integer obtained by chopping */
4940 fpsrcop = (fpsrcop < 0.0) ?
4941 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4942 ST0 -= (ST1 * fpsrcop * fptemp);
4943 }
4944}
4945
4946void helper_fprem(void)
4947{
4948 CPU86_LDouble dblq, fpsrcop, fptemp;
4949 CPU86_LDoubleU fpsrcop1, fptemp1;
4950 int expdif;
4951 signed long long int q;
4952
4953#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4954 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4955#else
4956 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4957#endif
4958 ST0 = 0.0 / 0.0; /* NaN */
4959 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4960 return;
4961 }
4962
4963 fpsrcop = (CPU86_LDouble)ST0;
4964 fptemp = (CPU86_LDouble)ST1;
4965 fpsrcop1.d = fpsrcop;
4966 fptemp1.d = fptemp;
4967 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4968
4969 if (expdif < 0) {
4970 /* optimisation? taken from the AMD docs */
4971 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4972 /* ST0 is unchanged */
4973 return;
4974 }
4975
4976 if ( expdif < 53 ) {
4977 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4978 /* round dblq towards zero */
4979 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4980 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4981
4982 /* convert dblq to q by truncating towards zero */
4983 if (dblq < 0.0)
4984 q = (signed long long int)(-dblq);
4985 else
4986 q = (signed long long int)dblq;
4987
4988 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4989 /* (C0,C3,C1) <-- (q2,q1,q0) */
4990 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4991 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4992 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4993 } else {
4994 int N = 32 + (expdif % 32); /* as per AMD docs */
4995 env->fpus |= 0x400; /* C2 <-- 1 */
4996 fptemp = pow(2.0, (double)(expdif - N));
4997 fpsrcop = (ST0 / ST1) / fptemp;
4998 /* fpsrcop = integer obtained by chopping */
4999 fpsrcop = (fpsrcop < 0.0) ?
5000 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
5001 ST0 -= (ST1 * fpsrcop * fptemp);
5002 }
5003}
5004
5005void helper_fyl2xp1(void)
5006{
5007 CPU86_LDouble fptemp;
5008
5009 fptemp = ST0;
5010 if ((fptemp+1.0)>0.0) {
5011 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
5012 ST1 *= fptemp;
5013 fpop();
5014 } else {
5015 env->fpus &= (~0x4700);
5016 env->fpus |= 0x400;
5017 }
5018}
5019
5020void helper_fsqrt(void)
5021{
5022 CPU86_LDouble fptemp;
5023
5024 fptemp = ST0;
5025 if (fptemp<0.0) {
5026 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5027 env->fpus |= 0x400;
5028 }
5029 ST0 = sqrt(fptemp);
5030}
5031
5032void helper_fsincos(void)
5033{
5034 CPU86_LDouble fptemp;
5035
5036 fptemp = ST0;
5037 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5038 env->fpus |= 0x400;
5039 } else {
5040 ST0 = sin(fptemp);
5041 fpush();
5042 ST0 = cos(fptemp);
5043 env->fpus &= (~0x400); /* C2 <-- 0 */
5044 /* the above code is for |arg| < 2**63 only */
5045 }
5046}
5047
5048void helper_frndint(void)
5049{
5050 ST0 = floatx_round_to_int(ST0, &env->fp_status);
5051}
5052
5053void helper_fscale(void)
5054{
5055 ST0 = ldexp (ST0, (int)(ST1));
5056}
5057
5058void helper_fsin(void)
5059{
5060 CPU86_LDouble fptemp;
5061
5062 fptemp = ST0;
5063 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5064 env->fpus |= 0x400;
5065 } else {
5066 ST0 = sin(fptemp);
5067 env->fpus &= (~0x400); /* C2 <-- 0 */
5068 /* the above code is for |arg| < 2**53 only */
5069 }
5070}
5071
5072void helper_fcos(void)
5073{
5074 CPU86_LDouble fptemp;
5075
5076 fptemp = ST0;
5077 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5078 env->fpus |= 0x400;
5079 } else {
5080 ST0 = cos(fptemp);
5081 env->fpus &= (~0x400); /* C2 <-- 0 */
5082        /* the above code is for |arg| < 2**63 only */
5083 }
5084}
5085
5086void helper_fxam_ST0(void)
5087{
5088 CPU86_LDoubleU temp;
5089 int expdif;
5090
5091 temp.d = ST0;
5092
5093 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5094 if (SIGND(temp))
5095 env->fpus |= 0x200; /* C1 <-- 1 */
5096
5097 /* XXX: test fptags too */
5098 expdif = EXPD(temp);
5099 if (expdif == MAXEXPD) {
5100#ifdef USE_X86LDOUBLE
5101 if (MANTD(temp) == 0x8000000000000000ULL)
5102#else
5103 if (MANTD(temp) == 0)
5104#endif
5105 env->fpus |= 0x500 /*Infinity*/;
5106 else
5107 env->fpus |= 0x100 /*NaN*/;
5108 } else if (expdif == 0) {
5109 if (MANTD(temp) == 0)
5110 env->fpus |= 0x4000 /*Zero*/;
5111 else
5112 env->fpus |= 0x4400 /*Denormal*/;
5113 } else {
5114 env->fpus |= 0x400;
5115 }
5116}
5117
5118void helper_fstenv(target_ulong ptr, int data32)
5119{
5120 int fpus, fptag, exp, i;
5121 uint64_t mant;
5122 CPU86_LDoubleU tmp;
5123
5124 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5125 fptag = 0;
5126 for (i=7; i>=0; i--) {
5127 fptag <<= 2;
5128 if (env->fptags[i]) {
5129 fptag |= 3;
5130 } else {
5131 tmp.d = env->fpregs[i].d;
5132 exp = EXPD(tmp);
5133 mant = MANTD(tmp);
5134 if (exp == 0 && mant == 0) {
5135 /* zero */
5136 fptag |= 1;
5137 } else if (exp == 0 || exp == MAXEXPD
5138#ifdef USE_X86LDOUBLE
5139 || (mant & (1LL << 63)) == 0
5140#endif
5141 ) {
5142 /* NaNs, infinity, denormal */
5143 fptag |= 2;
5144 }
5145 }
5146 }
5147 if (data32) {
5148 /* 32 bit */
5149 stl(ptr, env->fpuc);
5150 stl(ptr + 4, fpus);
5151 stl(ptr + 8, fptag);
5152 stl(ptr + 12, 0); /* fpip */
5153 stl(ptr + 16, 0); /* fpcs */
5154 stl(ptr + 20, 0); /* fpoo */
5155 stl(ptr + 24, 0); /* fpos */
5156 } else {
5157 /* 16 bit */
5158 stw(ptr, env->fpuc);
5159 stw(ptr + 2, fpus);
5160 stw(ptr + 4, fptag);
5161 stw(ptr + 6, 0);
5162 stw(ptr + 8, 0);
5163 stw(ptr + 10, 0);
5164 stw(ptr + 12, 0);
5165 }
5166}
5167
5168void helper_fldenv(target_ulong ptr, int data32)
5169{
5170 int i, fpus, fptag;
5171
5172 if (data32) {
5173 env->fpuc = lduw(ptr);
5174 fpus = lduw(ptr + 4);
5175 fptag = lduw(ptr + 8);
5176 }
5177 else {
5178 env->fpuc = lduw(ptr);
5179 fpus = lduw(ptr + 2);
5180 fptag = lduw(ptr + 4);
5181 }
5182 env->fpstt = (fpus >> 11) & 7;
5183 env->fpus = fpus & ~0x3800;
5184 for(i = 0;i < 8; i++) {
5185 env->fptags[i] = ((fptag & 3) == 3);
5186 fptag >>= 2;
5187 }
5188}
5189
5190void helper_fsave(target_ulong ptr, int data32)
5191{
5192 CPU86_LDouble tmp;
5193 int i;
5194
5195 helper_fstenv(ptr, data32);
5196
5197 ptr += (14 << data32);
5198 for(i = 0;i < 8; i++) {
5199 tmp = ST(i);
5200 helper_fstt(tmp, ptr);
5201 ptr += 10;
5202 }
5203
5204 /* fninit */
5205 env->fpus = 0;
5206 env->fpstt = 0;
5207 env->fpuc = 0x37f;
5208 env->fptags[0] = 1;
5209 env->fptags[1] = 1;
5210 env->fptags[2] = 1;
5211 env->fptags[3] = 1;
5212 env->fptags[4] = 1;
5213 env->fptags[5] = 1;
5214 env->fptags[6] = 1;
5215 env->fptags[7] = 1;
5216}
5217
5218void helper_frstor(target_ulong ptr, int data32)
5219{
5220 CPU86_LDouble tmp;
5221 int i;
5222
5223 helper_fldenv(ptr, data32);
5224 ptr += (14 << data32);
5225
5226 for(i = 0;i < 8; i++) {
5227 tmp = helper_fldt(ptr);
5228 ST(i) = tmp;
5229 ptr += 10;
5230 }
5231}
5232
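/* Rough FXSAVE image layout as used below (see the Intel/AMD manuals):
   0x00 FCW, 0x02 FSW, 0x04 abridged tag word, 0x08 instruction pointer,
   0x18 MXCSR, 0x1c MXCSR_MASK, 0x20 ST0-ST7 in 16-byte slots,
   0xa0 XMM registers in 16-byte slots. */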
5233void helper_fxsave(target_ulong ptr, int data64)
5234{
5235 int fpus, fptag, i, nb_xmm_regs;
5236 CPU86_LDouble tmp;
5237 target_ulong addr;
5238
5239 /* The operand must be 16 byte aligned */
5240 if (ptr & 0xf) {
5241 raise_exception(EXCP0D_GPF);
5242 }
5243
5244 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5245 fptag = 0;
5246 for(i = 0; i < 8; i++) {
5247 fptag |= (env->fptags[i] << i);
5248 }
5249 stw(ptr, env->fpuc);
5250 stw(ptr + 2, fpus);
5251 stw(ptr + 4, fptag ^ 0xff);
5252#ifdef TARGET_X86_64
5253 if (data64) {
5254 stq(ptr + 0x08, 0); /* rip */
5255 stq(ptr + 0x10, 0); /* rdp */
5256 } else
5257#endif
5258 {
5259 stl(ptr + 0x08, 0); /* eip */
5260 stl(ptr + 0x0c, 0); /* sel */
5261 stl(ptr + 0x10, 0); /* dp */
5262 stl(ptr + 0x14, 0); /* sel */
5263 }
5264
5265 addr = ptr + 0x20;
5266 for(i = 0;i < 8; i++) {
5267 tmp = ST(i);
5268 helper_fstt(tmp, addr);
5269 addr += 16;
5270 }
5271
5272 if (env->cr[4] & CR4_OSFXSR_MASK) {
5273 /* XXX: finish it */
5274 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5275 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5276 if (env->hflags & HF_CS64_MASK)
5277 nb_xmm_regs = 16;
5278 else
5279 nb_xmm_regs = 8;
5280 addr = ptr + 0xa0;
5281 /* Fast FXSAVE leaves out the XMM registers */
5282 if (!(env->efer & MSR_EFER_FFXSR)
5283 || (env->hflags & HF_CPL_MASK)
5284 || !(env->hflags & HF_LMA_MASK)) {
5285 for(i = 0; i < nb_xmm_regs; i++) {
5286 stq(addr, env->xmm_regs[i].XMM_Q(0));
5287 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5288 addr += 16;
5289 }
5290 }
5291 }
5292}
5293
5294void helper_fxrstor(target_ulong ptr, int data64)
5295{
5296 int i, fpus, fptag, nb_xmm_regs;
5297 CPU86_LDouble tmp;
5298 target_ulong addr;
5299
5300 /* The operand must be 16 byte aligned */
5301 if (ptr & 0xf) {
5302 raise_exception(EXCP0D_GPF);
5303 }
5304
5305 env->fpuc = lduw(ptr);
5306 fpus = lduw(ptr + 2);
5307 fptag = lduw(ptr + 4);
5308 env->fpstt = (fpus >> 11) & 7;
5309 env->fpus = fpus & ~0x3800;
5310 fptag ^= 0xff;
5311 for(i = 0;i < 8; i++) {
5312 env->fptags[i] = ((fptag >> i) & 1);
5313 }
5314
5315 addr = ptr + 0x20;
5316 for(i = 0;i < 8; i++) {
5317 tmp = helper_fldt(addr);
5318 ST(i) = tmp;
5319 addr += 16;
5320 }
5321
5322 if (env->cr[4] & CR4_OSFXSR_MASK) {
5323 /* XXX: finish it */
5324 env->mxcsr = ldl(ptr + 0x18);
5325 //ldl(ptr + 0x1c);
5326 if (env->hflags & HF_CS64_MASK)
5327 nb_xmm_regs = 16;
5328 else
5329 nb_xmm_regs = 8;
5330 addr = ptr + 0xa0;
5331 /* Fast FXRESTORE leaves out the XMM registers */
5332 if (!(env->efer & MSR_EFER_FFXSR)
5333 || (env->hflags & HF_CPL_MASK)
5334 || !(env->hflags & HF_LMA_MASK)) {
5335 for(i = 0; i < nb_xmm_regs; i++) {
5336#if !defined(VBOX) || __GNUC__ < 4
5337 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5338 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5339#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5340# if 1
5341 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5342 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5343 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5344 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5345# else
5346 /* this works fine on Mac OS X, gcc 4.0.1 */
5347 uint64_t u64 = ldq(addr);
5348            env->xmm_regs[i].XMM_Q(0) = u64;
5349            u64 = ldq(addr + 8);
5350 env->xmm_regs[i].XMM_Q(1) = u64;
5351# endif
5352#endif
5353 addr += 16;
5354 }
5355 }
5356 }
5357}
5358
5359#ifndef USE_X86LDOUBLE
5360
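/* When the host has no native 80-bit long double, these helpers convert
   between the 64-bit double layout (52-bit fraction, exponent bias 1023,
   i.e. EXPBIAS) and the 80-bit x87 layout (explicit integer bit, 64-bit
   mantissa, exponent bias 16383), hence the << 11 shift and the rebiasing. */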
5361void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5362{
5363 CPU86_LDoubleU temp;
5364 int e;
5365
5366 temp.d = f;
5367 /* mantissa */
5368 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5369 /* exponent + sign */
5370 e = EXPD(temp) - EXPBIAS + 16383;
5371 e |= SIGND(temp) >> 16;
5372 *pexp = e;
5373}
5374
5375CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5376{
5377 CPU86_LDoubleU temp;
5378 int e;
5379 uint64_t ll;
5380
5381 /* XXX: handle overflow ? */
5382 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5383 e |= (upper >> 4) & 0x800; /* sign */
5384 ll = (mant >> 11) & ((1LL << 52) - 1);
5385#ifdef __arm__
5386 temp.l.upper = (e << 20) | (ll >> 32);
5387 temp.l.lower = ll;
5388#else
5389 temp.ll = ll | ((uint64_t)e << 52);
5390#endif
5391 return temp.d;
5392}
5393
5394#else
5395
5396void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5397{
5398 CPU86_LDoubleU temp;
5399
5400 temp.d = f;
5401 *pmant = temp.l.lower;
5402 *pexp = temp.l.upper;
5403}
5404
5405CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5406{
5407 CPU86_LDoubleU temp;
5408
5409 temp.l.upper = upper;
5410 temp.l.lower = mant;
5411 return temp.d;
5412}
5413#endif
5414
5415#ifdef TARGET_X86_64
5416
5417//#define DEBUG_MULDIV
5418
5419static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5420{
5421 *plow += a;
5422 /* carry test */
5423 if (*plow < a)
5424 (*phigh)++;
5425 *phigh += b;
5426}
5427
5428static void neg128(uint64_t *plow, uint64_t *phigh)
5429{
5430 *plow = ~ *plow;
5431 *phigh = ~ *phigh;
5432 add128(plow, phigh, 1, 0);
5433}
5434
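/* div64() performs a 128-by-64 bit unsigned division: if the high half is
   zero the native 64-bit division is used, otherwise 64 shift-subtract
   iterations accumulate the quotient in a0 and the remainder in a1.
   Overflow (quotient not representable in 64 bits) is reported when the
   high half is >= the divisor, matching the hardware #DE condition. */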
5435/* return TRUE if overflow */
5436static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5437{
5438 uint64_t q, r, a1, a0;
5439 int i, qb, ab;
5440
5441 a0 = *plow;
5442 a1 = *phigh;
5443 if (a1 == 0) {
5444 q = a0 / b;
5445 r = a0 % b;
5446 *plow = q;
5447 *phigh = r;
5448 } else {
5449 if (a1 >= b)
5450 return 1;
5451 /* XXX: use a better algorithm */
5452 for(i = 0; i < 64; i++) {
5453 ab = a1 >> 63;
5454 a1 = (a1 << 1) | (a0 >> 63);
5455 if (ab || a1 >= b) {
5456 a1 -= b;
5457 qb = 1;
5458 } else {
5459 qb = 0;
5460 }
5461 a0 = (a0 << 1) | qb;
5462 }
5463#if defined(DEBUG_MULDIV)
5464 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5465 *phigh, *plow, b, a0, a1);
5466#endif
5467 *plow = a0;
5468 *phigh = a1;
5469 }
5470 return 0;
5471}
5472
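/* idiv64() reduces the signed case to div64() on magnitudes; the quotient
   must fit in a signed 64-bit value, so magnitudes above 2^63 (or above
   2^63 - 1 when the result is positive) are reported as overflow. */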
5473/* return TRUE if overflow */
5474static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5475{
5476 int sa, sb;
5477 sa = ((int64_t)*phigh < 0);
5478 if (sa)
5479 neg128(plow, phigh);
5480 sb = (b < 0);
5481 if (sb)
5482 b = -b;
5483 if (div64(plow, phigh, b) != 0)
5484 return 1;
5485 if (sa ^ sb) {
5486 if (*plow > (1ULL << 63))
5487 return 1;
5488 *plow = - *plow;
5489 } else {
5490 if (*plow >= (1ULL << 63))
5491 return 1;
5492 }
5493 if (sa)
5494 *phigh = - *phigh;
5495 return 0;
5496}
5497
5498void helper_mulq_EAX_T0(target_ulong t0)
5499{
5500 uint64_t r0, r1;
5501
5502 mulu64(&r0, &r1, EAX, t0);
5503 EAX = r0;
5504 EDX = r1;
5505 CC_DST = r0;
5506 CC_SRC = r1;
5507}
5508
5509void helper_imulq_EAX_T0(target_ulong t0)
5510{
5511 uint64_t r0, r1;
5512
5513 muls64(&r0, &r1, EAX, t0);
5514 EAX = r0;
5515 EDX = r1;
5516 CC_DST = r0;
5517 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5518}
5519
5520target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5521{
5522 uint64_t r0, r1;
5523
5524 muls64(&r0, &r1, t0, t1);
5525 CC_DST = r0;
5526 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5527 return r0;
5528}
5529
5530void helper_divq_EAX(target_ulong t0)
5531{
5532 uint64_t r0, r1;
5533 if (t0 == 0) {
5534 raise_exception(EXCP00_DIVZ);
5535 }
5536 r0 = EAX;
5537 r1 = EDX;
5538 if (div64(&r0, &r1, t0))
5539 raise_exception(EXCP00_DIVZ);
5540 EAX = r0;
5541 EDX = r1;
5542}
5543
5544void helper_idivq_EAX(target_ulong t0)
5545{
5546 uint64_t r0, r1;
5547 if (t0 == 0) {
5548 raise_exception(EXCP00_DIVZ);
5549 }
5550 r0 = EAX;
5551 r1 = EDX;
5552 if (idiv64(&r0, &r1, t0))
5553 raise_exception(EXCP00_DIVZ);
5554 EAX = r0;
5555 EDX = r1;
5556}
5557#endif
5558
5559static void do_hlt(void)
5560{
5561 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5562 env->halted = 1;
5563 env->exception_index = EXCP_HLT;
5564 cpu_loop_exit();
5565}
5566
5567void helper_hlt(int next_eip_addend)
5568{
5569 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5570 EIP += next_eip_addend;
5571
5572 do_hlt();
5573}
5574
5575void helper_monitor(target_ulong ptr)
5576{
5577#ifdef VBOX
5578 if ((uint32_t)ECX > 1)
5579 raise_exception(EXCP0D_GPF);
5580#else /* !VBOX */
5581 if ((uint32_t)ECX != 0)
5582 raise_exception(EXCP0D_GPF);
5583#endif /* !VBOX */
5584 /* XXX: store address ? */
5585 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5586}
5587
5588void helper_mwait(int next_eip_addend)
5589{
5590 if ((uint32_t)ECX != 0)
5591 raise_exception(EXCP0D_GPF);
5592#ifdef VBOX
5593 helper_hlt(next_eip_addend);
5594#else /* !VBOX */
5595 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5596 EIP += next_eip_addend;
5597
5598 /* XXX: not complete but not completely erroneous */
5599 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5600 /* more than one CPU: do not sleep because another CPU may
5601 wake this one */
5602 } else {
5603 do_hlt();
5604 }
5605#endif /* !VBOX */
5606}
5607
5608void helper_debug(void)
5609{
5610 env->exception_index = EXCP_DEBUG;
5611 cpu_loop_exit();
5612}
5613
5614void helper_reset_rf(void)
5615{
5616 env->eflags &= ~RF_MASK;
5617}
5618
5619void helper_raise_interrupt(int intno, int next_eip_addend)
5620{
5621 raise_interrupt(intno, 1, 0, next_eip_addend);
5622}
5623
5624void helper_raise_exception(int exception_index)
5625{
5626 raise_exception(exception_index);
5627}
5628
5629void helper_cli(void)
5630{
5631 env->eflags &= ~IF_MASK;
5632}
5633
5634void helper_sti(void)
5635{
5636 env->eflags |= IF_MASK;
5637}
5638
5639#ifdef VBOX
5640void helper_cli_vme(void)
5641{
5642 env->eflags &= ~VIF_MASK;
5643}
5644
5645void helper_sti_vme(void)
5646{
5647 /* First check, then change eflags according to the AMD manual */
5648 if (env->eflags & VIP_MASK) {
5649 raise_exception(EXCP0D_GPF);
5650 }
5651 env->eflags |= VIF_MASK;
5652}
5653#endif /* VBOX */
5654
5655#if 0
5656/* vm86plus instructions */
5657void helper_cli_vm(void)
5658{
5659 env->eflags &= ~VIF_MASK;
5660}
5661
5662void helper_sti_vm(void)
5663{
5664 env->eflags |= VIF_MASK;
5665 if (env->eflags & VIP_MASK) {
5666 raise_exception(EXCP0D_GPF);
5667 }
5668}
5669#endif
5670
5671void helper_set_inhibit_irq(void)
5672{
5673 env->hflags |= HF_INHIBIT_IRQ_MASK;
5674}
5675
5676void helper_reset_inhibit_irq(void)
5677{
5678 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5679}
5680
5681void helper_boundw(target_ulong a0, int v)
5682{
5683 int low, high;
5684 low = ldsw(a0);
5685 high = ldsw(a0 + 2);
5686 v = (int16_t)v;
5687 if (v < low || v > high) {
5688 raise_exception(EXCP05_BOUND);
5689 }
5690}
5691
5692void helper_boundl(target_ulong a0, int v)
5693{
5694 int low, high;
5695 low = ldl(a0);
5696 high = ldl(a0 + 4);
5697 if (v < low || v > high) {
5698 raise_exception(EXCP05_BOUND);
5699 }
5700}
5701
5702static float approx_rsqrt(float a)
5703{
5704 return 1.0 / sqrt(a);
5705}
5706
5707static float approx_rcp(float a)
5708{
5709 return 1.0 / a;
5710}
5711
5712#if !defined(CONFIG_USER_ONLY)
5713
5714#define MMUSUFFIX _mmu
5715
5716#define SHIFT 0
5717#include "softmmu_template.h"
5718
5719#define SHIFT 1
5720#include "softmmu_template.h"
5721
5722#define SHIFT 2
5723#include "softmmu_template.h"
5724
5725#define SHIFT 3
5726#include "softmmu_template.h"
5727
5728#endif
5729
5730#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5731/* This code assumes that real physical addresses always fit into a host CPU
5732   register, which is wrong in general but true for our current use cases. */
5733RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5734{
5735 return remR3PhysReadS8(addr);
5736}
5737RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5738{
5739 return remR3PhysReadU8(addr);
5740}
5741void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5742{
5743 remR3PhysWriteU8(addr, val);
5744}
5745RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5746{
5747 return remR3PhysReadS16(addr);
5748}
5749RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5750{
5751 return remR3PhysReadU16(addr);
5752}
5753void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5754{
5755 remR3PhysWriteU16(addr, val);
5756}
5757RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5758{
5759 return remR3PhysReadS32(addr);
5760}
5761RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5762{
5763 return remR3PhysReadU32(addr);
5764}
5765void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5766{
5767 remR3PhysWriteU32(addr, val);
5768}
5769uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5770{
5771 return remR3PhysReadU64(addr);
5772}
5773void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5774{
5775 remR3PhysWriteU64(addr, val);
5776}
5777#endif /* VBOX */
5778
5779#if !defined(CONFIG_USER_ONLY)
5780/* try to fill the TLB and return an exception if error. If retaddr is
5781 NULL, it means that the function was called in C code (i.e. not
5782 from generated code or from helper.c) */
5783/* XXX: fix it to restore all registers */
5784void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5785{
5786 TranslationBlock *tb;
5787 int ret;
5788 uintptr_t pc;
5789 CPUX86State *saved_env;
5790
5791 /* XXX: hack to restore env in all cases, even if not called from
5792 generated code */
5793 saved_env = env;
5794 env = cpu_single_env;
5795
5796 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5797 if (ret) {
5798 if (retaddr) {
5799 /* now we have a real cpu fault */
5800 pc = (uintptr_t)retaddr;
5801 tb = tb_find_pc(pc);
5802 if (tb) {
5803 /* the PC is inside the translated code. It means that we have
5804 a virtual CPU fault */
5805 cpu_restore_state(tb, env, pc, NULL);
5806 }
5807 }
5808 raise_exception_err(env->exception_index, env->error_code);
5809 }
5810 env = saved_env;
5811}
5812#endif
5813
5814#ifdef VBOX
5815
5816/**
5817 * Correctly computes the eflags.
5818 * @returns eflags.
5819 * @param env1 CPU environment.
5820 */
5821uint32_t raw_compute_eflags(CPUX86State *env1)
5822{
5823 CPUX86State *savedenv = env;
5824 uint32_t efl;
5825 env = env1;
5826 efl = compute_eflags();
5827 env = savedenv;
5828 return efl;
5829}
5830
5831/**
5832 * Reads byte from virtual address in guest memory area.
5833 * XXX: is it working for any addresses? swapped out pages?
5834 * @returns read data byte.
5835 * @param env1 CPU environment.
5836 * @param pvAddr GC Virtual address.
5837 */
5838uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5839{
5840 CPUX86State *savedenv = env;
5841 uint8_t u8;
5842 env = env1;
5843 u8 = ldub_kernel(addr);
5844 env = savedenv;
5845 return u8;
5846}
5847
5848/**
5849 * Reads word from virtual address in guest memory area.
5850 * XXX: is it working for any addresses? swapped out pages?
5851 * @returns read data word.
5852 * @param env1 CPU environment.
5853 * @param pvAddr GC Virtual address.
5854 */
5855uint16_t read_word(CPUX86State *env1, target_ulong addr)
5856{
5857 CPUX86State *savedenv = env;
5858 uint16_t u16;
5859 env = env1;
5860 u16 = lduw_kernel(addr);
5861 env = savedenv;
5862 return u16;
5863}
5864
5865/**
5866 * Reads dword from virtual address in guest memory area.
5867 * XXX: is it working for any addresses? swapped out pages?
5868 * @returns read data dword.
5869 * @param env1 CPU environment.
5870 * @param pvAddr GC Virtual address.
5871 */
5872uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5873{
5874 CPUX86State *savedenv = env;
5875 uint32_t u32;
5876 env = env1;
5877 u32 = ldl_kernel(addr);
5878 env = savedenv;
5879 return u32;
5880}
5881
5882/**
5883 * Writes byte to virtual address in guest memory area.
5884 * XXX: is it working for any addresses? swapped out pages?
5885 *
5886 * @param env1 CPU environment.
5887 * @param pvAddr GC Virtual address.
5888 * @param val byte value
5889 */
5890void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5891{
5892 CPUX86State *savedenv = env;
5893 env = env1;
5894 stb(addr, val);
5895 env = savedenv;
5896}
5897
5898void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5899{
5900 CPUX86State *savedenv = env;
5901 env = env1;
5902 stw(addr, val);
5903 env = savedenv;
5904}
5905
5906void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5907{
5908 CPUX86State *savedenv = env;
5909 env = env1;
5910 stl(addr, val);
5911 env = savedenv;
5912}
5913
5914/**
5915 * Correctly loads a selector into a segment register, updating the internal
5916 * qemu data/caches.
5917 * @param env1 CPU environment.
5918 * @param seg_reg Segment register.
5919 * @param selector Selector to load.
5920 */
5921void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5922{
5923 CPUX86State *savedenv = env;
5924#ifdef FORCE_SEGMENT_SYNC
5925 jmp_buf old_buf;
5926#endif
5927
5928 env = env1;
5929
5930 if ( env->eflags & X86_EFL_VM
5931 || !(env->cr[0] & X86_CR0_PE))
5932 {
5933 load_seg_vm(seg_reg, selector);
5934
5935 env = savedenv;
5936
5937 /* Successful sync. */
5938 Assert(env1->segs[seg_reg].newselector == 0);
5939 }
5940 else
5941 {
5942        /* For some reason this works even without saving/restoring the jump buffer; since this
5943           code is time critical, let's not do that. */
5944#ifdef FORCE_SEGMENT_SYNC
5945 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5946#endif
5947 if (setjmp(env1->jmp_env) == 0)
5948 {
5949 if (seg_reg == R_CS)
5950 {
5951 uint32_t e1, e2;
5952 e1 = e2 = 0;
5953 load_segment(&e1, &e2, selector);
5954 cpu_x86_load_seg_cache(env, R_CS, selector,
5955 get_seg_base(e1, e2),
5956 get_seg_limit(e1, e2),
5957 e2);
5958 }
5959 else
5960 helper_load_seg(seg_reg, selector);
5961            /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5962               the loading of 0 selectors, which in turn led to subtle problems like #3588. */
5963
5964 env = savedenv;
5965
5966 /* Successful sync. */
5967 Assert(env1->segs[seg_reg].newselector == 0);
5968 }
5969 else
5970 {
5971 env = savedenv;
5972
5973 /* Postpone sync until the guest uses the selector. */
5974 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5975 env1->segs[seg_reg].newselector = selector;
5976 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5977 env1->exception_index = -1;
5978 env1->error_code = 0;
5979 env1->old_exception = -1;
5980 }
5981#ifdef FORCE_SEGMENT_SYNC
5982 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5983#endif
5984 }
5985
5986}
5987
5988DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5989{
5990 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5991}
5992
5993
5994int emulate_single_instr(CPUX86State *env1)
5995{
5996 TranslationBlock *tb;
5997 TranslationBlock *current;
5998 int flags;
5999 uint8_t *tc_ptr;
6000 target_ulong old_eip;
6001
6002 /* ensures env is loaded! */
6003 CPUX86State *savedenv = env;
6004 env = env1;
6005
6006 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
6007
6008 current = env->current_tb;
6009 env->current_tb = NULL;
6010 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
6011
6012 /*
6013 * Translate only one instruction.
6014 */
6015 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
6016 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
6017 env->segs[R_CS].base, flags, 0);
6018
6019 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
6020
6021
6022 /* tb_link_phys: */
6023 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
6024 tb->jmp_next[0] = NULL;
6025 tb->jmp_next[1] = NULL;
6026 Assert(tb->jmp_next[0] == NULL);
6027 Assert(tb->jmp_next[1] == NULL);
6028 if (tb->tb_next_offset[0] != 0xffff)
6029 tb_reset_jump(tb, 0);
6030 if (tb->tb_next_offset[1] != 0xffff)
6031 tb_reset_jump(tb, 1);
6032
6033 /*
6034 * Execute it using emulation
6035 */
6036 old_eip = env->eip;
6037 env->current_tb = tb;
6038
6039 /*
6040     * eip remains the same for repeated instructions; no idea why qemu doesn't do the jump inside the generated code.
6041     * Perhaps not a very safe hack.
6042 */
6043 while (old_eip == env->eip)
6044 {
6045 tc_ptr = tb->tc_ptr;
6046
6047#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
6048 int fake_ret;
6049 tcg_qemu_tb_exec(tc_ptr, fake_ret);
6050#else
6051 tcg_qemu_tb_exec(tc_ptr);
6052#endif
6053
6054 /*
6055 * Exit once we detect an external interrupt and interrupts are enabled
6056 */
6057 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
6058 || ( (env->eflags & IF_MASK)
6059 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
6060 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
6061 )
6062 {
6063 break;
6064 }
6065 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
6066 tlb_flush(env, true);
6067 }
6068 }
6069 env->current_tb = current;
6070
6071 tb_phys_invalidate(tb, -1);
6072 tb_free(tb);
6073/*
6074 Assert(tb->tb_next_offset[0] == 0xffff);
6075 Assert(tb->tb_next_offset[1] == 0xffff);
6076 Assert(tb->tb_next[0] == 0xffff);
6077 Assert(tb->tb_next[1] == 0xffff);
6078 Assert(tb->jmp_next[0] == NULL);
6079 Assert(tb->jmp_next[1] == NULL);
6080 Assert(tb->jmp_first == NULL); */
6081
6082 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
6083
6084 /*
6085 * Execute the next instruction when we encounter instruction fusing.
6086 */
6087 if (env->hflags & HF_INHIBIT_IRQ_MASK)
6088 {
6089 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
6090 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6091 emulate_single_instr(env);
6092 }
6093
6094 env = savedenv;
6095 return 0;
6096}
6097
6098/**
6099 * Correctly loads a new ldtr selector.
6100 *
6101 * @param env1 CPU environment.
6102 * @param selector Selector to load.
6103 */
6104void sync_ldtr(CPUX86State *env1, int selector)
6105{
6106 CPUX86State *saved_env = env;
6107 if (setjmp(env1->jmp_env) == 0)
6108 {
6109 env = env1;
6110 helper_lldt(selector);
6111 env = saved_env;
6112 }
6113 else
6114 {
6115 env = saved_env;
6116#ifdef VBOX_STRICT
6117 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6118#endif
6119 }
6120}
6121
6122int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6123 uint32_t *esp_ptr, int dpl)
6124{
6125 int type, index, shift;
6126
6127 CPUX86State *savedenv = env;
6128 env = env1;
6129
6130 if (!(env->tr.flags & DESC_P_MASK))
6131 cpu_abort(env, "invalid tss");
6132 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6133 if ((type & 7) != 1)
6134 cpu_abort(env, "invalid tss type %d", type);
6135 shift = type >> 3;
6136 index = (dpl * 4 + 2) << shift;
6137 if (index + (4 << shift) - 1 > env->tr.limit)
6138 {
6139 env = savedenv;
6140 return 0;
6141 }
6142 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6143
6144 if (shift == 0) {
6145 *esp_ptr = lduw_kernel(env->tr.base + index);
6146 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6147 } else {
6148 *esp_ptr = ldl_kernel(env->tr.base + index);
6149 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6150 }
6151
6152 env = savedenv;
6153 return 1;
6154}
6155
6156//*****************************************************************************
6157// Needs to be at the bottom of the file (overriding macros)
6158
6159static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6160{
6161#ifdef USE_X86LDOUBLE
6162 CPU86_LDoubleU tmp;
6163 tmp.l.lower = *(uint64_t const *)ptr;
6164 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6165 return tmp.d;
6166#else
6167# error "Busted FPU saving/restoring!"
6168 return *(CPU86_LDouble *)ptr;
6169#endif
6170}
6171
6172static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6173{
6174#ifdef USE_X86LDOUBLE
6175 CPU86_LDoubleU tmp;
6176 tmp.d = f;
6177 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6178 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6179 *(uint16_t *)(ptr + 10) = 0;
6180 *(uint32_t *)(ptr + 12) = 0;
6181 AssertCompile(sizeof(long double) > 8);
6182#else
6183# error "Busted FPU saving/restoring!"
6184 *(CPU86_LDouble *)ptr = f;
6185#endif
6186}
6187
6188#undef stw
6189#undef stl
6190#undef stq
6191#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6192#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6193#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6194
6195//*****************************************************************************
6196void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6197{
6198 int fpus, fptag, i, nb_xmm_regs;
6199 CPU86_LDouble tmp;
6200 uint8_t *addr;
6201 int data64 = !!(env->hflags & HF_LMA_MASK);
6202
6203 if (env->cpuid_features & CPUID_FXSR)
6204 {
6205 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6206 fptag = 0;
6207 for(i = 0; i < 8; i++) {
6208 fptag |= (env->fptags[i] << i);
6209 }
6210 stw(ptr, env->fpuc);
6211 stw(ptr + 2, fpus);
6212 stw(ptr + 4, fptag ^ 0xff);
6213
6214 addr = ptr + 0x20;
6215 for(i = 0;i < 8; i++) {
6216 tmp = ST(i);
6217 helper_fstt_raw(tmp, addr);
6218 addr += 16;
6219 }
6220
6221 if (env->cr[4] & CR4_OSFXSR_MASK) {
6222 /* XXX: finish it */
6223 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6224 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6225 nb_xmm_regs = 8 << data64;
6226 addr = ptr + 0xa0;
6227 for(i = 0; i < nb_xmm_regs; i++) {
6228#if __GNUC__ < 4
6229 stq(addr, env->xmm_regs[i].XMM_Q(0));
6230 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6231#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6232 stl(addr, env->xmm_regs[i].XMM_L(0));
6233 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6234 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6235 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6236#endif
6237 addr += 16;
6238 }
6239 }
6240 }
6241 else
6242 {
6243 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6244 int fptag;
6245
6246 fp->FCW = env->fpuc;
6247 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6248 fptag = 0;
6249 for (i=7; i>=0; i--) {
6250 fptag <<= 2;
6251 if (env->fptags[i]) {
6252 fptag |= 3;
6253 } else {
6254 /* the FPU automatically computes it */
6255 }
6256 }
6257 fp->FTW = fptag;
6258
6259 for(i = 0;i < 8; i++) {
6260 tmp = ST(i);
6261 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6262 }
6263 }
6264}
6265
6266//*****************************************************************************
6267#undef lduw
6268#undef ldl
6269#undef ldq
6270#define lduw(a) *(uint16_t *)(a)
6271#define ldl(a) *(uint32_t *)(a)
6272#define ldq(a) *(uint64_t *)(a)
6273//*****************************************************************************
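// The inverse of restore_raw_fp_state: reads an FXSAVE (or legacy FSAVE)
// image at 'ptr' back into the emulated FPU/SSE state in 'env'.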
6274void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6275{
6276 int i, fpus, fptag, nb_xmm_regs;
6277 CPU86_LDouble tmp;
6278 uint8_t *addr;
6279 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6280
6281 if (env->cpuid_features & CPUID_FXSR)
6282 {
6283 env->fpuc = lduw(ptr);
6284 fpus = lduw(ptr + 2);
6285 fptag = lduw(ptr + 4);
6286 env->fpstt = (fpus >> 11) & 7;
6287 env->fpus = fpus & ~0x3800;
6288 fptag ^= 0xff;
6289 for(i = 0;i < 8; i++) {
6290 env->fptags[i] = ((fptag >> i) & 1);
6291 }
6292
6293 addr = ptr + 0x20;
6294 for(i = 0;i < 8; i++) {
6295 tmp = helper_fldt_raw(addr);
6296 ST(i) = tmp;
6297 addr += 16;
6298 }
6299
6300 if (env->cr[4] & CR4_OSFXSR_MASK) {
6301 /* XXX: finish it, endianness */
6302 env->mxcsr = ldl(ptr + 0x18);
6303 //ldl(ptr + 0x1c);
6304 nb_xmm_regs = 8 << data64;
6305 addr = ptr + 0xa0;
6306 for(i = 0; i < nb_xmm_regs; i++) {
6307#if HC_ARCH_BITS == 32
6308 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6309 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6310 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6311 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6312 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6313#else
6314 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6315 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6316#endif
6317 addr += 16;
6318 }
6319 }
6320 }
6321 else
6322 {
6323 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6324 int fptag, j;
6325
6326 env->fpuc = fp->FCW;
6327 env->fpstt = (fp->FSW >> 11) & 7;
6328 env->fpus = fp->FSW & ~0x3800;
6329 fptag = fp->FTW;
6330 for(i = 0;i < 8; i++) {
6331 env->fptags[i] = ((fptag & 3) == 3);
6332 fptag >>= 2;
6333 }
6334 j = env->fpstt;
6335 for(i = 0;i < 8; i++) {
6336 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6337 ST(i) = tmp;
6338 }
6339 }
6340}
6341//*****************************************************************************
6342//*****************************************************************************
6343
6344#endif /* VBOX */
6345
6346/* Secure Virtual Machine helpers */
6347
6348#if defined(CONFIG_USER_ONLY)
6349
6350void helper_vmrun(int aflag, int next_eip_addend)
6351{
6352}
6353void helper_vmmcall(void)
6354{
6355}
6356void helper_vmload(int aflag)
6357{
6358}
6359void helper_vmsave(int aflag)
6360{
6361}
6362void helper_stgi(void)
6363{
6364}
6365void helper_clgi(void)
6366{
6367}
6368void helper_skinit(void)
6369{
6370}
6371void helper_invlpga(int aflag)
6372{
6373}
6374void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6375{
6376}
6377void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6378{
6379}
6380
6381void helper_svm_check_io(uint32_t port, uint32_t param,
6382 uint32_t next_eip_addend)
6383{
6384}
6385#else
6386
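/* The VMCB stores segment attributes in a packed 12-bit form (type, S, DPL
   and P in bits 0-7; AVL, L, D/B and G in bits 8-11). These two helpers
   convert between that encoding and the SegmentCache flags layout. */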
6387static inline void svm_save_seg(target_phys_addr_t addr,
6388 const SegmentCache *sc)
6389{
6390 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6391 sc->selector);
6392 stq_phys(addr + offsetof(struct vmcb_seg, base),
6393 sc->base);
6394 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6395 sc->limit);
6396 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6397 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6398}
6399
6400static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6401{
6402 unsigned int flags;
6403
6404 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6405 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6406 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6407 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6408 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6409}
6410
6411static inline void svm_load_seg_cache(target_phys_addr_t addr,
6412 CPUState *env, int seg_reg)
6413{
6414 SegmentCache sc1, *sc = &sc1;
6415 svm_load_seg(addr, sc);
6416 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6417 sc->base, sc->limit, sc->flags);
6418}
6419
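/* VMRUN: save the current (host) state into the hsave page, cache the
   intercept bitmaps in 'env', load the guest state from the VMCB pointed
   to by rAX, and inject a pending event from EVENTINJ if one is valid. */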
6420void helper_vmrun(int aflag, int next_eip_addend)
6421{
6422 target_ulong addr;
6423 uint32_t event_inj;
6424 uint32_t int_ctl;
6425
6426 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6427
6428 if (aflag == 2)
6429 addr = EAX;
6430 else
6431 addr = (uint32_t)EAX;
6432
6433 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6434
6435 env->vm_vmcb = addr;
6436
6437 /* save the current CPU state in the hsave page */
6438 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6439 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6440
6441 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6442 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6443
6444 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6445 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6446 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6447 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6448 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6449 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6450
6451 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6452 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6453
6454 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6455 &env->segs[R_ES]);
6456 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6457 &env->segs[R_CS]);
6458 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6459 &env->segs[R_SS]);
6460 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6461 &env->segs[R_DS]);
6462
6463 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6464 EIP + next_eip_addend);
6465 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6466 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6467
6468 /* load the interception bitmaps so we do not need to access the
6469 vmcb in svm mode */
6470 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6471 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6472 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6473 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6474 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6475 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6476
6477 /* enable intercepts */
6478 env->hflags |= HF_SVMI_MASK;
6479
6480 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6481
6482 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6483 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6484
6485 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6486 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6487
6488 /* clear exit_info_2 so we behave like the real hardware */
6489 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6490
6491 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6492 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6493 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6494 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6495 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6496 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6497 if (int_ctl & V_INTR_MASKING_MASK) {
6498 env->v_tpr = int_ctl & V_TPR_MASK;
6499 env->hflags2 |= HF2_VINTR_MASK;
6500 if (env->eflags & IF_MASK)
6501 env->hflags2 |= HF2_HIF_MASK;
6502 }
6503
6504 cpu_load_efer(env,
6505 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6506 env->eflags = 0;
6507 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6508 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6509 CC_OP = CC_OP_EFLAGS;
6510
6511 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6512 env, R_ES);
6513 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6514 env, R_CS);
6515 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6516 env, R_SS);
6517 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6518 env, R_DS);
6519
6520 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6521 env->eip = EIP;
6522 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6523 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6524 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6525 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6526 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6527
6528 /* FIXME: guest state consistency checks */
6529
6530 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6531 case TLB_CONTROL_DO_NOTHING:
6532 break;
6533 case TLB_CONTROL_FLUSH_ALL_ASID:
6534 /* FIXME: this is not 100% correct but should work for now */
6535 tlb_flush(env, 1);
6536 break;
6537 }
6538
6539 env->hflags2 |= HF2_GIF_MASK;
6540
6541 if (int_ctl & V_IRQ_MASK) {
6542 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6543 }
6544
6545 /* maybe we need to inject an event */
6546 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6547 if (event_inj & SVM_EVTINJ_VALID) {
6548 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6549 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6550 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6551
6552 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6553 /* FIXME: need to implement valid_err */
6554 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6555 case SVM_EVTINJ_TYPE_INTR:
6556 env->exception_index = vector;
6557 env->error_code = event_inj_err;
6558 env->exception_is_int = 0;
6559 env->exception_next_eip = -1;
6560 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6561 /* XXX: is it always correct? */
6562 do_interrupt(vector, 0, 0, 0, 1);
6563 break;
6564 case SVM_EVTINJ_TYPE_NMI:
6565 env->exception_index = EXCP02_NMI;
6566 env->error_code = event_inj_err;
6567 env->exception_is_int = 0;
6568 env->exception_next_eip = EIP;
6569 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6570 cpu_loop_exit();
6571 break;
6572 case SVM_EVTINJ_TYPE_EXEPT:
6573 env->exception_index = vector;
6574 env->error_code = event_inj_err;
6575 env->exception_is_int = 0;
6576 env->exception_next_eip = -1;
6577 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6578 cpu_loop_exit();
6579 break;
6580 case SVM_EVTINJ_TYPE_SOFT:
6581 env->exception_index = vector;
6582 env->error_code = event_inj_err;
6583 env->exception_is_int = 1;
6584 env->exception_next_eip = EIP;
6585 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6586 cpu_loop_exit();
6587 break;
6588 }
6589 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6590 }
6591}
6592
6593void helper_vmmcall(void)
6594{
6595 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6596 raise_exception(EXCP06_ILLOP);
6597}
6598
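/* VMLOAD/VMSAVE transfer the "hidden" processor state (FS, GS, TR, LDTR,
   KernelGSBase and the star/sysenter MSRs) between the CPU and the VMCB
   addressed by rAX; helper_vmsave below mirrors helper_vmload. */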
6599void helper_vmload(int aflag)
6600{
6601 target_ulong addr;
6602 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6603
6604 if (aflag == 2)
6605 addr = EAX;
6606 else
6607 addr = (uint32_t)EAX;
6608
6609 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6610 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6611 env->segs[R_FS].base);
6612
6613 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6614 env, R_FS);
6615 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6616 env, R_GS);
6617 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6618 &env->tr);
6619 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6620 &env->ldt);
6621
6622#ifdef TARGET_X86_64
6623 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6624 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6625 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6626 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6627#endif
6628 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6629 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6630 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6631 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6632}
6633
6634void helper_vmsave(int aflag)
6635{
6636 target_ulong addr;
6637 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6638
6639 if (aflag == 2)
6640 addr = EAX;
6641 else
6642 addr = (uint32_t)EAX;
6643
6644 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6645 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6646 env->segs[R_FS].base);
6647
6648 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6649 &env->segs[R_FS]);
6650 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6651 &env->segs[R_GS]);
6652 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6653 &env->tr);
6654 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6655 &env->ldt);
6656
6657#ifdef TARGET_X86_64
6658 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6659 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6660 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6661 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6662#endif
6663 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6664 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6665 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6666 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6667}
6668
6669void helper_stgi(void)
6670{
6671 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6672 env->hflags2 |= HF2_GIF_MASK;
6673}
6674
6675void helper_clgi(void)
6676{
6677 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6678 env->hflags2 &= ~HF2_GIF_MASK;
6679}
6680
6681void helper_skinit(void)
6682{
6683 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6684 /* XXX: not implemented */
6685 raise_exception(EXCP06_ILLOP);
6686}
6687
6688void helper_invlpga(int aflag)
6689{
6690 target_ulong addr;
6691 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6692
6693 if (aflag == 2)
6694 addr = EAX;
6695 else
6696 addr = (uint32_t)EAX;
6697
6698 /* XXX: could use the ASID to see whether the flush is
6699 actually needed */
6700 tlb_flush_page(env, addr);
6701}
6702
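/* Checks whether the intercept identified by the SVM exit code 'type' is
   enabled in the cached intercept bitmaps and, if so, triggers a #VMEXIT;
   MSR accesses additionally consult the MSR permission bitmap. */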
6703void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6704{
6705 if (likely(!(env->hflags & HF_SVMI_MASK)))
6706 return;
6707#ifndef VBOX
6708 switch(type) {
6709 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6710 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6711 helper_vmexit(type, param);
6712 }
6713 break;
6714 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6715 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6716 helper_vmexit(type, param);
6717 }
6718 break;
6719 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6720 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6721 helper_vmexit(type, param);
6722 }
6723 break;
6724 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6725 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6726 helper_vmexit(type, param);
6727 }
6728 break;
6729 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6730 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6731 helper_vmexit(type, param);
6732 }
6733 break;
6734 case SVM_EXIT_MSR:
6735 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6736 /* FIXME: this should be read in at vmrun (faster this way?) */
6737 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6738 uint32_t t0, t1;
6739 switch((uint32_t)ECX) {
6740 case 0 ... 0x1fff:
6741 t0 = (ECX * 2) % 8;
6742 t1 = ECX / 8;
6743 break;
6744 case 0xc0000000 ... 0xc0001fff:
6745 t0 = (8192 + ECX - 0xc0000000) * 2;
6746 t1 = (t0 / 8);
6747 t0 %= 8;
6748 break;
6749 case 0xc0010000 ... 0xc0011fff:
6750 t0 = (16384 + ECX - 0xc0010000) * 2;
6751 t1 = (t0 / 8);
6752 t0 %= 8;
6753 break;
6754 default:
6755 helper_vmexit(type, param);
6756 t0 = 0;
6757 t1 = 0;
6758 break;
6759 }
6760 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6761 helper_vmexit(type, param);
6762 }
6763 break;
6764 default:
6765 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6766 helper_vmexit(type, param);
6767 }
6768 break;
6769 }
6770#else /* VBOX */
6771 AssertMsgFailed(("We shouldn't be here, HM handles this differently!"));
6772#endif /* VBOX */
6773}
6774
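/* Consults the I/O permission bitmap for the given port and raises an
   SVM_EXIT_IOIO #VMEXIT (with the next EIP stored in exit_info_2) if the
   access is intercepted. */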
6775void helper_svm_check_io(uint32_t port, uint32_t param,
6776 uint32_t next_eip_addend)
6777{
6778 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6779 /* FIXME: this should be read in at vmrun (faster this way?) */
6780 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6781 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6782 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6783 /* next EIP */
6784 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6785 env->eip + next_eip_addend);
6786 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6787 }
6788 }
6789}
6790
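/* #VMEXIT: write the guest state and exit information back into the VMCB,
   restore the host state from the hsave page and leave the CPU loop. */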
6791/* Note: currently only 32 bits of exit_code are used */
6792void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6793{
6794 uint32_t int_ctl;
6795
6796 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6797 exit_code, exit_info_1,
6798 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6799 EIP);
6800
6801 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6802 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6803 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6804 } else {
6805 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6806 }
6807
6808 /* Save the VM state in the vmcb */
6809 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6810 &env->segs[R_ES]);
6811 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6812 &env->segs[R_CS]);
6813 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6814 &env->segs[R_SS]);
6815 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6816 &env->segs[R_DS]);
6817
6818 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6819 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6820
6821 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6822 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6823
6824 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6825 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6826 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6827 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6828 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6829
6830 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6831 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6832 int_ctl |= env->v_tpr & V_TPR_MASK;
6833 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6834 int_ctl |= V_IRQ_MASK;
6835 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6836
6837 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6838 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6839 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6840 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6841 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6842 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6843 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6844
6845 /* Reload the host state from vm_hsave */
6846 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6847 env->hflags &= ~HF_SVMI_MASK;
6848 env->intercept = 0;
6849 env->intercept_exceptions = 0;
6850 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6851 env->tsc_offset = 0;
6852
6853 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6854 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6855
6856 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6857 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6858
6859 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6860 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6861 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6862 /* we need to set the efer after the crs so the hidden flags get
6863 set properly */
6864 cpu_load_efer(env,
6865 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6866 env->eflags = 0;
6867 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6868 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6869 CC_OP = CC_OP_EFLAGS;
6870
6871 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6872 env, R_ES);
6873 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6874 env, R_CS);
6875 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6876 env, R_SS);
6877 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6878 env, R_DS);
6879
6880 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6881 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6882 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6883
6884 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6885 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6886
6887 /* other setups */
6888 cpu_x86_set_cpl(env, 0);
6889 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6890 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6891
6892 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6893 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6894 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6895 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6896 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6897
6898 env->hflags2 &= ~HF2_GIF_MASK;
6899 /* FIXME: Resets the current ASID register to zero (host ASID). */
6900
6901 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6902
6903 /* Clears the TSC_OFFSET inside the processor. */
6904
6905 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6906 from the page table indicated by the host's CR3. If the PDPEs contain
6907 illegal state, the processor causes a shutdown. */
6908
6909 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6910 env->cr[0] |= CR0_PE_MASK;
6911 env->eflags &= ~VM_MASK;
6912
6913 /* Disables all breakpoints in the host DR7 register. */
6914
6915 /* Checks the reloaded host state for consistency. */
6916
6917 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6918 host's code segment or non-canonical (in the case of long mode), a
6919 #GP fault is delivered inside the host. */
6920
6921 /* remove any pending exception */
6922 env->exception_index = -1;
6923 env->error_code = 0;
6924 env->old_exception = -1;
6925
6926 cpu_loop_exit();
6927}
6928
6929#endif
6930
6931/* MMX/SSE */
6932 /* XXX: optimize by storing fpstt and fptags in the static cpu state */
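/* Entering MMX mode resets the x87 stack top and marks all eight registers
   as valid (tag 0); EMMS marks them all empty again (tag 1). */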
6933void helper_enter_mmx(void)
6934{
6935 env->fpstt = 0;
6936 *(uint32_t *)(env->fptags) = 0;
6937 *(uint32_t *)(env->fptags + 4) = 0;
6938}
6939
6940void helper_emms(void)
6941{
6942 /* set to empty state */
6943 *(uint32_t *)(env->fptags) = 0x01010101;
6944 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6945}
6946
6947/* XXX: suppress */
6948void helper_movq(void *d, void *s)
6949{
6950 *(uint64_t *)d = *(uint64_t *)s;
6951}
6952
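/* ops_sse.h is instantiated twice: SHIFT 0 generates the 64-bit MMX
   variants, SHIFT 1 the 128-bit SSE variants. helper_template.h below is
   instantiated once per operand size (8/16/32 and, on x86_64, 64 bits). */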
6953#define SHIFT 0
6954#include "ops_sse.h"
6955
6956#define SHIFT 1
6957#include "ops_sse.h"
6958
6959#define SHIFT 0
6960#include "helper_template.h"
6961#undef SHIFT
6962
6963#define SHIFT 1
6964#include "helper_template.h"
6965#undef SHIFT
6966
6967#define SHIFT 2
6968#include "helper_template.h"
6969#undef SHIFT
6970
6971#ifdef TARGET_X86_64
6972
6973#define SHIFT 3
6974#include "helper_template.h"
6975#undef SHIFT
6976
6977#endif
6978
6979/* bit operations */
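/* Note: helper_bsf assumes a non-zero operand; with t0 == 0 the loop below
   would never terminate. Architecturally, BSF with a zero source only sets
   ZF and leaves the destination undefined, so the caller must handle that
   case itself. */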
6980target_ulong helper_bsf(target_ulong t0)
6981{
6982 int count;
6983 target_ulong res;
6984
6985 res = t0;
6986 count = 0;
6987 while ((res & 1) == 0) {
6988 count++;
6989 res >>= 1;
6990 }
6991 return count;
6992}
6993
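/* helper_lzcnt implements LZCNT for an operand of 'wordsize' bits when
   wordsize > 0 (returning wordsize for a zero input); with wordsize == 0 it
   returns the bit index of the most significant set bit, which is what
   helper_bsr uses. */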
6994target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6995{
6996 int count;
6997 target_ulong res, mask;
6998
6999 if (wordsize > 0 && t0 == 0) {
7000 return wordsize;
7001 }
7002 res = t0;
7003 count = TARGET_LONG_BITS - 1;
7004 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
7005 while ((res & mask) == 0) {
7006 count--;
7007 res <<= 1;
7008 }
7009 if (wordsize > 0) {
7010 return wordsize - 1 - count;
7011 }
7012 return count;
7013}
7014
7015target_ulong helper_bsr(target_ulong t0)
7016{
7017 return helper_lzcnt(t0, 0);
7018}
7019
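/* Lazy flag evaluation: CC_OP records which operation last updated the
   flags, and the dispatchers below (helper_cc_compute_all and
   helper_cc_compute_c) recompute either the full EFLAGS set or just CF from
   CC_SRC/CC_DST on demand. */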
7020static int compute_all_eflags(void)
7021{
7022 return CC_SRC;
7023}
7024
7025static int compute_c_eflags(void)
7026{
7027 return CC_SRC & CC_C;
7028}
7029
7030uint32_t helper_cc_compute_all(int op)
7031{
7032 switch (op) {
7033 default: /* should never happen */ return 0;
7034
7035 case CC_OP_EFLAGS: return compute_all_eflags();
7036
7037 case CC_OP_MULB: return compute_all_mulb();
7038 case CC_OP_MULW: return compute_all_mulw();
7039 case CC_OP_MULL: return compute_all_mull();
7040
7041 case CC_OP_ADDB: return compute_all_addb();
7042 case CC_OP_ADDW: return compute_all_addw();
7043 case CC_OP_ADDL: return compute_all_addl();
7044
7045 case CC_OP_ADCB: return compute_all_adcb();
7046 case CC_OP_ADCW: return compute_all_adcw();
7047 case CC_OP_ADCL: return compute_all_adcl();
7048
7049 case CC_OP_SUBB: return compute_all_subb();
7050 case CC_OP_SUBW: return compute_all_subw();
7051 case CC_OP_SUBL: return compute_all_subl();
7052
7053 case CC_OP_SBBB: return compute_all_sbbb();
7054 case CC_OP_SBBW: return compute_all_sbbw();
7055 case CC_OP_SBBL: return compute_all_sbbl();
7056
7057 case CC_OP_LOGICB: return compute_all_logicb();
7058 case CC_OP_LOGICW: return compute_all_logicw();
7059 case CC_OP_LOGICL: return compute_all_logicl();
7060
7061 case CC_OP_INCB: return compute_all_incb();
7062 case CC_OP_INCW: return compute_all_incw();
7063 case CC_OP_INCL: return compute_all_incl();
7064
7065 case CC_OP_DECB: return compute_all_decb();
7066 case CC_OP_DECW: return compute_all_decw();
7067 case CC_OP_DECL: return compute_all_decl();
7068
7069 case CC_OP_SHLB: return compute_all_shlb();
7070 case CC_OP_SHLW: return compute_all_shlw();
7071 case CC_OP_SHLL: return compute_all_shll();
7072
7073 case CC_OP_SARB: return compute_all_sarb();
7074 case CC_OP_SARW: return compute_all_sarw();
7075 case CC_OP_SARL: return compute_all_sarl();
7076
7077#ifdef TARGET_X86_64
7078 case CC_OP_MULQ: return compute_all_mulq();
7079
7080 case CC_OP_ADDQ: return compute_all_addq();
7081
7082 case CC_OP_ADCQ: return compute_all_adcq();
7083
7084 case CC_OP_SUBQ: return compute_all_subq();
7085
7086 case CC_OP_SBBQ: return compute_all_sbbq();
7087
7088 case CC_OP_LOGICQ: return compute_all_logicq();
7089
7090 case CC_OP_INCQ: return compute_all_incq();
7091
7092 case CC_OP_DECQ: return compute_all_decq();
7093
7094 case CC_OP_SHLQ: return compute_all_shlq();
7095
7096 case CC_OP_SARQ: return compute_all_sarq();
7097#endif
7098 }
7099}
7100
7101uint32_t helper_cc_compute_c(int op)
7102{
7103 switch (op) {
7104 default: /* should never happen */ return 0;
7105
7106 case CC_OP_EFLAGS: return compute_c_eflags();
7107
7108 case CC_OP_MULB: return compute_c_mull();
7109 case CC_OP_MULW: return compute_c_mull();
7110 case CC_OP_MULL: return compute_c_mull();
7111
7112 case CC_OP_ADDB: return compute_c_addb();
7113 case CC_OP_ADDW: return compute_c_addw();
7114 case CC_OP_ADDL: return compute_c_addl();
7115
7116 case CC_OP_ADCB: return compute_c_adcb();
7117 case CC_OP_ADCW: return compute_c_adcw();
7118 case CC_OP_ADCL: return compute_c_adcl();
7119
7120 case CC_OP_SUBB: return compute_c_subb();
7121 case CC_OP_SUBW: return compute_c_subw();
7122 case CC_OP_SUBL: return compute_c_subl();
7123
7124 case CC_OP_SBBB: return compute_c_sbbb();
7125 case CC_OP_SBBW: return compute_c_sbbw();
7126 case CC_OP_SBBL: return compute_c_sbbl();
7127
7128 case CC_OP_LOGICB: return compute_c_logicb();
7129 case CC_OP_LOGICW: return compute_c_logicw();
7130 case CC_OP_LOGICL: return compute_c_logicl();
7131
7132 case CC_OP_INCB: return compute_c_incl();
7133 case CC_OP_INCW: return compute_c_incl();
7134 case CC_OP_INCL: return compute_c_incl();
7135
7136 case CC_OP_DECB: return compute_c_incl();
7137 case CC_OP_DECW: return compute_c_incl();
7138 case CC_OP_DECL: return compute_c_incl();
7139
7140 case CC_OP_SHLB: return compute_c_shlb();
7141 case CC_OP_SHLW: return compute_c_shlw();
7142 case CC_OP_SHLL: return compute_c_shll();
7143
7144 case CC_OP_SARB: return compute_c_sarl();
7145 case CC_OP_SARW: return compute_c_sarl();
7146 case CC_OP_SARL: return compute_c_sarl();
7147
7148#ifdef TARGET_X86_64
7149 case CC_OP_MULQ: return compute_c_mull();
7150
7151 case CC_OP_ADDQ: return compute_c_addq();
7152
7153 case CC_OP_ADCQ: return compute_c_adcq();
7154
7155 case CC_OP_SUBQ: return compute_c_subq();
7156
7157 case CC_OP_SBBQ: return compute_c_sbbq();
7158
7159 case CC_OP_LOGICQ: return compute_c_logicq();
7160
7161 case CC_OP_INCQ: return compute_c_incl();
7162
7163 case CC_OP_DECQ: return compute_c_incl();
7164
7165 case CC_OP_SHLQ: return compute_c_shlq();
7166
7167 case CC_OP_SARQ: return compute_c_sarl();
7168#endif
7169 }
7170}