VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@37689

Last change on this file since 37689 was 37689, checked in by vboxsync, 14 years ago

recompiler: Merged in changes from 0.13.0.

  • Property svn:eol-style set to native
File size: 52.2 KB
/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

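/* Exit the CPU loop: clear current_tb and longjmp back to the
   setjmp(env->jmp_env) point in cpu_exec(). */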
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
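        /* Since we longjmp out of the signal handler rather than
           returning through sigreturn, the signal mask that was blocked
           on handler entry has to be restored by hand. */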
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if async event occurs before
           the TB starts executing. */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

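/* Second-level TB lookup: walk the physical-PC hash chain for a block
   matching pc/cs_base/flags (also checking the second page for blocks
   that span a page boundary), and translate a new block if none is
   found. */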
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

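/* First-level TB lookup: probe the per-CPU tb_jmp_cache, indexed by
   virtual PC, and fall back to tb_find_slow() on a miss. */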
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

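/* Invoked when the loop is about to return EXCP_DEBUG: if the debug
   exception was not raised by a watchpoint, clear any stale
   BP_WATCHPOINT_HIT flags before calling the registered handler. */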
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

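/* Set asynchronously (hence the volatile sig_atomic_t) to make
   cpu_exec() stop at the next loop iteration; it is mirrored into
   env->exit_request on entry below. */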
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret VBOX_ONLY(= 0), interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
#ifndef VBOX
    uintptr_t next_tb;
#else /* VBOX */
    unsigned long next_tb;
#endif /* VBOX */

# ifndef VBOX
    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;
# endif /* !VBOX */

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it. */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
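    /* Note: on i386 the condition codes stay in the lazy CC_SRC/CC_OP
       representation used by the generated code from here on; eflags is
       converted back to the architectural format by the matching code at
       the end of this function. */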
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif /* !VBOX */

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
#ifdef VBOX
            env->current_tb = NULL; /* probably not needed, but whatever... */

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }
#endif

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
#ifdef VBOX /* because of the above stuff */
                    env->exception_index = -1;
#endif
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
# ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt: vec=%#x int=%d pc=%04x:%RGv\n", env->exception_index, env->exception_is_int,
                         env->segs[R_CS].selector, (RTGCPTR)env->exception_next_eip));
# endif /* VBOX */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
# ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
# endif /* VBOX */
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

# ifndef VBOX
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
# endif /* !VBOX */

            next_tb = 0; /* force lookup of first TB */
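            /* next_tb carries the pointer of the TB we just left, with
               the low 2 bits encoding which of its jump slots was taken
               (or the value 2 for an instruction-count exit, see below);
               0 means "do not chain", forcing a fresh lookup. */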
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
# ifdef VBOX
                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HWACC)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
# ifdef IEM_VERIFICATION_MODE
                        env->exception_index = ret = EXCP_SINGLE_INSTR;
                        cpu_loop_exit();
# endif
                    }
# endif /* VBOX */

# ifndef VBOX /** @todo reconcile our code with the following... */
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
# else /* VBOX */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
# endif /* VBOX */
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
#ifndef VBOX
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
#else /* VBOX */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
#endif /* VBOX */
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_RC) {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT)) {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~(CPU_INTERRUPT_EXTERNAL_EXIT));
                        env->exit_request = 1;
                    }
#endif
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }

#ifdef VBOX
                /*
                 * Check if the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);
#endif /* VBOX */

#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
#ifdef VBOX
                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
#endif /*VBOX*/
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
#ifndef VBOX
                if (next_tb != 0 && tb->page_addr[1] == -1) {
#else /* VBOX */
                if (next_tb != 0 && !(tb->cflags & CF_RAW_MODE) && tb->page_addr[1] == -1) {
#endif /* VBOX */
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
#ifdef VBOX
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
#endif

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during emulate single instruction).
           Note! Don't use env1->pVM here, the code wouldn't run with
           gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (    !(env->interrupt_request & (  CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                            | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            &&  (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                 || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

# ifndef VBOX /* we might be using elsewhere, we only have one. */
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
# endif
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif
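/* What to do once a genuine guest fault has been identified: on i386
   the guest exception is raised directly, on other targets we simply
   exit the CPU loop. */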

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
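/* Returns 1 if the fault was handled (a write to a page that was only
   protected for TB invalidation, or a guest MMU fault that could be
   serviced), 0 if it was not an MMU fault at all; when a real guest
   exception must be delivered, EXCEPTION_ACTION does not return. */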
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)   ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)       (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)         ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)    ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)    ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)      ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context) /* Link register */
# define LR_sig(context)                REG_sig(lr, context)  /* User's integer exception register */
# define CR_sig(context)                REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
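    /* The major opcode is in bits 31:26 of every Alpha instruction;
       all of the opcodes listed below are stores, so matching one
       means the faulting access was a write. */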
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
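    /* Format 3 (memory) instructions have op (bits 31:30) == 3; the op3
       field in bits 24:19 selects the operation, and the cases below are
       the store and atomic variants. */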
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were. */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions. */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster. */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */