VirtualBox

source: vbox/trunk/src/recompiler/cpu-all.h@36146

Last change on this file since 36146 was 36146, checked in by vboxsync, 14 years ago

rem: build fix; removed obsolete header section.

/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#ifndef CPU_ALL_H
#define CPU_ALL_H

#ifdef VBOX
# ifndef LOG_GROUP
#  define LOG_GROUP LOG_GROUP_REM
# endif
# include <VBox/log.h>
# include <VBox/vmm/pgm.h> /* PGM_DYNAMIC_RAM_ALLOC */
# include <stdio.h>        /* FILE */
#endif

#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"
#include "softfloat.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif
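
/* Example: emulating a big endian target (e.g. SPARC) on a little endian
   x86 host defines exactly one of WORDS_BIGENDIAN / TARGET_WORDS_BIGENDIAN,
   so BSWAP_NEEDED is set and the tswap*() helpers below byte swap between
   host and target order; when the two endiannesses match, the helpers
   compile to no-ops. */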

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;
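
/* Example (assuming an IEEE 754 float64 bit layout): storing 1.0 through
   the d member gives ll == 0x3FF0000000000000, so l.upper == 0x3FF00000
   and l.lower == 0 on any host, because the struct order above is flipped
   to match the host byte order. */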

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 * f      : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 * u      : unsigned
 * s      : signed
 *
 * size is:
 * b: 8 bits
 * w: 16 bits
 * l: 32 bits
 * q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 * r      : reversed target cpu endianness (not implemented yet)
 * be     : big endian (not implemented yet)
 * le     : little endian (not implemented yet)
 *
 * access_type is:
 * raw    : host memory access
 * user   : user mode access using soft MMU
 * kernel : kernel mode access using soft MMU
 */
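
/* For example, combining the fields above: ldub_raw(p) is an unsigned
   8 bit integer load from host memory, ldsw_le_p(p) is a signed 16 bit
   little endian load, and stfq_be_p(p, v) stores a 64 bit float in big
   endian byte order. */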

#ifdef VBOX
void remAbort(int rc, const char *pszTip) __attribute__((__noreturn__));

void remR3PhysRead(RTGCPHYS SrcGCPhys, void *pvDst, unsigned cb);
RTCCUINTREG remR3PhysReadU8(RTGCPHYS SrcGCPhys);
RTCCINTREG remR3PhysReadS8(RTGCPHYS SrcGCPhys);
RTCCUINTREG remR3PhysReadU16(RTGCPHYS SrcGCPhys);
RTCCINTREG remR3PhysReadS16(RTGCPHYS SrcGCPhys);
RTCCUINTREG remR3PhysReadU32(RTGCPHYS SrcGCPhys);
RTCCINTREG remR3PhysReadS32(RTGCPHYS SrcGCPhys);
uint64_t remR3PhysReadU64(RTGCPHYS SrcGCPhys);
int64_t remR3PhysReadS64(RTGCPHYS SrcGCPhys);
void remR3PhysWrite(RTGCPHYS DstGCPhys, const void *pvSrc, unsigned cb);
void remR3PhysWriteU8(RTGCPHYS DstGCPhys, uint8_t val);
void remR3PhysWriteU16(RTGCPHYS DstGCPhys, uint16_t val);
void remR3PhysWriteU32(RTGCPHYS DstGCPhys, uint32_t val);
void remR3PhysWriteU64(RTGCPHYS DstGCPhys, uint64_t val);

#ifndef REM_PHYS_ADDR_IN_TLB
void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable);
#endif

#endif /* VBOX */

#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)

DECLINLINE(uint8_t) ldub_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU8((uintptr_t)ptr);
}

DECLINLINE(int8_t) ldsb_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadS8((uintptr_t)ptr);
}

DECLINLINE(void) stb_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU8((uintptr_t)ptr, v);
}

DECLINLINE(uint32_t) lduw_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU16((uintptr_t)ptr);
}

DECLINLINE(int32_t) ldsw_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadS16((uintptr_t)ptr);
}

DECLINLINE(void) stw_le_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU16((uintptr_t)ptr, v);
}

DECLINLINE(uint32_t) ldl_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU32((uintptr_t)ptr);
}

DECLINLINE(void) stl_le_p(void *ptr, int v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU32((uintptr_t)ptr, v);
}

DECLINLINE(void) stq_le_p(void *ptr, uint64_t v)
{
    VBOX_CHECK_ADDR(ptr);
    remR3PhysWriteU64((uintptr_t)ptr, v);
}

DECLINLINE(uint64_t) ldq_le_p(void *ptr)
{
    VBOX_CHECK_ADDR(ptr);
    return remR3PhysReadU64((uintptr_t)ptr);
}

#undef VBOX_CHECK_ADDR

/* float access */

DECLINLINE(float32) ldfl_le_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

DECLINLINE(void) stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

DECLINLINE(float64) ldfq_le_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p((uint8_t *)ptr + 4);
    return u.d;
}

DECLINLINE(void) stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p((uint8_t *)ptr + 4, u.l.upper);
}

#else /* !VBOX */

static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting: bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}
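
/* Byte order example: with p pointing at the bytes {0x34, 0x12},
   lduw_le_p(p) returns 0x1234 on any host, since the bytes are assembled
   explicitly rather than read through a uint16_t pointer. */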

static inline int ldsw_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p((uint8_t *)ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif
#endif /* !VBOX */

#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}

#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
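
/* Endian selection example: for a big endian target such as SPARC
   (TARGET_WORDS_BIGENDIAN defined), ldl_p resolves to ldl_be_p; for a
   little endian target such as x86 it resolves to ldl_le_p. */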

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ((target_ulong)((unsigned long)(x) - GUEST_BASE))

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)
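
/* Example: with the alternative GUEST_BASE of 0x20000000 shown above,
   g2h(0x1000) yields host address 0x20001000 and h2g maps it back;
   with GUEST_BASE 0 both macros are identity mappings. */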

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)


#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
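
/* Worked example (assuming TARGET_PAGE_BITS is 12, i.e. 4 KiB pages):
   TARGET_PAGE_SIZE is 0x1000, TARGET_PAGE_MASK is ~0xfff, and
   TARGET_PAGE_ALIGN(0x12345) rounds up to 0x13000. */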

/* ??? These should be the larger of unsigned long and target_ulong. */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED 0x0020

void page_dump(FILE *f);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
void page_unprotect_range(target_ulong data, target_ulong data_size);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f,
                         int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                         int flags);

void cpu_abort(CPUState *env, const char *fmt, ...)
#ifndef VBOX
    __attribute__ ((__format__ (__printf__, 2, 3)))
#endif
    __attribute__ ((__noreturn__));
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int64_t qemu_icount;
extern int use_icount;

#define CPU_INTERRUPT_EXIT   0x01  /* wants exit from main loop */
#define CPU_INTERRUPT_HARD   0x02  /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04  /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08  /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10  /* Fast interrupt pending. */
#define CPU_INTERRUPT_HALT   0x20  /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40  /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80  /* Debug event occurred. */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending. */
#define CPU_INTERRUPT_NMI    0x200 /* NMI pending. */
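
/* Usage example: the flags form a bitmask, so several requests can be
   pending at once; cpu_interrupt(env, CPU_INTERRUPT_HARD) below sets a
   request flag and cpu_reset_interrupt(env, CPU_INTERRUPT_HARD) clears it. */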

#ifdef VBOX
/** Executes a single instruction. cpu_exec() will normally return EXCP_SINGLE_INSTR. */
# define CPU_INTERRUPT_SINGLE_INSTR 0x0400
/** Executing a CPU_INTERRUPT_SINGLE_INSTR request, quit the cpu_loop. (for exceptions and suchlike) */
# define CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT 0x0800
/** VM execution was interrupted by VMR3Reset, VMR3Suspend or VMR3PowerOff. */
# define CPU_INTERRUPT_RC 0x1000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
# define CPU_INTERRUPT_EXTERNAL_EXIT 0x2000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
# define CPU_INTERRUPT_EXTERNAL_HARD 0x4000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
# define CPU_INTERRUPT_EXTERNAL_TIMER 0x8000
/** Exit current TB to process an external interrupt request (also in op.c!!) */
# define CPU_INTERRUPT_EXTERNAL_DMA 0x10000
#endif /* VBOX */
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
void cpu_watchpoint_remove_all(CPUState *env);
int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
void cpu_breakpoint_remove_all(CPUState *env);

#define SSTEP_ENABLE  0x1 /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2 /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

/* IO ports API */

/* NOTE: as these functions may even be used when there is an ISA
   bridge on non-x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif

/* address in the RAM (different from a physical address) */
#ifdef USE_KQEMU
typedef uint32_t ram_addr_t;
#else
typedef unsigned long ram_addr_t;
#endif

/* memory API */

#ifndef VBOX
extern ram_addr_t phys_ram_size;
extern int phys_ram_fd;
extern uint8_t *phys_ram_base;
extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;
#else /* VBOX */
extern RTGCPHYS phys_ram_size;
/** This is required for bounds checking the phys_ram_dirty accesses. */
extern RTGCPHYS phys_ram_dirty_size;
extern uint8_t *phys_ram_dirty;
#endif /* VBOX */

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags. The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available. */

#define IO_MEM_SHIFT 3
#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

#define IO_MEM_RAM        (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM        (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY   (3 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD (1)
#define IO_MEM_SUBPAGE (2)
#define IO_MEM_SUBWIDTH (4)
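
/* Encoding example: an io_index registered with cpu_register_io_memory
   is shifted left by IO_MEM_SHIFT, leaving the low 3 bits free for the
   IO_MEM_ROMD / IO_MEM_SUBPAGE / IO_MEM_SUBWIDTH flags; with 4 KiB pages
   (TARGET_PAGE_BITS == 12) that allows 1 << 9 == 512 entries. */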

/* Flags stored in the low bits of the TLB virtual address. These are
   defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page. The iotlb entry will
   contain the page physical address. */
#define TLB_NOTDIRTY (1 << 4)
/* Set if TLB entry is an IO callback. */
#define TLB_MMIO (1 << 5)

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset);
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc(ram_addr_t);
void qemu_ram_free(ram_addr_t addr);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define KQEMU_DIRTY_FLAG     0x04
#define MIGRATION_DIRTY_FLAG 0x08
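
/* Dirty tracking example: phys_ram_dirty keeps one byte per target page,
   indexed by addr >> TARGET_PAGE_BITS; a page counts as fully dirty when
   its byte is 0xff (all flag bits set), which is exactly what
   cpu_physical_memory_is_dirty() below tests. */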

/* read dirty bit (return 0 or 1) */
#ifndef VBOX
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
#else
DECLINLINE(int) cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));*/
        return 0;
    }
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
#endif

#ifndef VBOX
static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}
#else
DECLINLINE(int) cpu_physical_memory_get_dirty(ram_addr_t addr,
                                              int dirty_flags)
{
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_get_dirty: %RGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_get_dirty: %RGp\n", (RTGCPHYS)addr));*/
        return 0xff & dirty_flags; /** @todo I don't think this is the right thing to return, fix! */
    }
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}
#endif

#ifndef VBOX
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
#else
DECLINLINE(void) cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
    {
        Log(("cpu_physical_memory_set_dirty: %RGp\n", (RTGCPHYS)addr));
        /*AssertMsgFailed(("cpu_physical_memory_set_dirty: %RGp\n", (RTGCPHYS)addr));*/
        return;
    }
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
#endif

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);

int cpu_physical_memory_get_dirty_tracking(void);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/*******************************************/
/* host CPU ticks (if available) */

#if defined(__powerpc__)

static inline uint32_t get_tbl(void)
{
    uint32_t tbl;
    asm volatile("mftb %0" : "=r" (tbl));
    return tbl;
}

static inline uint32_t get_tbu(void)
{
    uint32_t tbl;
    asm volatile("mftbu %0" : "=r" (tbl));
    return tbl;
}

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t l, h, h1;
    /* NOTE: we test if wrapping has occurred */
    do {
        h = get_tbu();
        l = get_tbl();
        h1 = get_tbu();
    } while (h != h1);
    return ((int64_t)h << 32) | l;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks(void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    /* fixed: the original operand reference %1 was out of range for a
       single-output asm statement */
    __asm__ __volatile__("rdhwr %0, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value. This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks(void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif
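
/* Usage sketch: callers bracket a region of interest, e.g.
       int64_t t0 = cpu_get_real_ticks();
       ... work ...
       int64_t elapsed = cpu_get_real_ticks() - t0;
   which is what profile_getclock() below relies on. */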

/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;
#endif

#ifdef VBOX
void tb_invalidate_virt(CPUState *env, uint32_t eip);
#endif /* VBOX */

#endif /* CPU_ALL_H */