VirtualBox

source: vbox/trunk/src/recompiler/qemu-lock.h@ 36125

Last change on this file since 36125 was 36125, checked in by vboxsync, 14 years ago

recompiler: Removing traces of attempts at making the recompiler compile with the microsoft compiler. (untested)

  • Property svn:eol-style set to native
File size: 6.7 KB
/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

/* Locking primitives. Most of this code should be redundant -
   system emulation doesn't need/use locking, NPTL userspace uses
   pthread mutexes, and non-NPTL userspace isn't threadsafe anyway.
   In either case a spinlock is probably the wrong kind of lock.
   Spinlocks are only good if you know another CPU has the lock and is
   likely to release it soon. In environments where you have more threads
   than physical CPUs (the extreme case being a single CPU host) a spinlock
   simply wastes CPU until the OS decides to preempt it. */
#if defined(USE_NPTL)

#include <pthread.h>
#define spin_lock pthread_mutex_lock
#define spin_unlock pthread_mutex_unlock
#define spinlock_t pthread_mutex_t
#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
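
/* With NPTL the spin_* names below map directly onto pthread mutex calls, and
   SPIN_LOCK_UNLOCKED is a static initializer (PTHREAD_MUTEX_INITIALIZER), so a
   lock declared as "static spinlock_t l = SPIN_LOCK_UNLOCKED;" works the same
   way in either configuration. */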

#else

#if defined(__hppa__)

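/* PA-RISC's ldcw instruction needs a 16-byte aligned lock word, so reserve
   four ints here and let testandset() pick the aligned one at run time (see
   the alignment note above the hppa testandset() further down). */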
typedef int spinlock_t[4];

#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }

static inline void resetlock (spinlock_t *p)
{
    (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
}

#else

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

static inline void resetlock (spinlock_t *p)
{
    *p = SPIN_LOCK_UNLOCKED;
}

#endif

#ifdef VBOX
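/* ASMAtomicCmpXchgU32() stores 1 only if *p was 0 and reports whether the
   exchange happened, so this returns 0 when the lock has just been acquired
   and 1 when it was already held - the same convention as the inline-assembly
   variants below. */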
DECLINLINE(int) testandset (int *p)
{
    return ASMAtomicCmpXchgU32((volatile uint32_t *)p, 1, 0) ? 0 : 1;
}
#elif defined(__powerpc__)
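/* lwarx/stwcx. reservation loop: load-reserve the lock word, bail out with the
   non-zero old value if it is already taken, otherwise conditionally store 1
   and retry if the reservation was lost. Returns 0 on success. */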
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
        "0:    lwarx %0,0,%1\n"
        "      xor. %0,%3,%0\n"
        "      bne 1f\n"
        "      stwcx. %2,0,%1\n"
        "      bne- 0b\n"
        "1:    "
        : "=&r" (ret)
        : "r" (p), "r" (1), "r" (0)
        : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
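/* lock; cmpxchgl compares *p with EAX (readval, initially 0): if they match it
   stores 1 into *p, otherwise it loads the current *p into EAX. Either way
   readval ends up holding the old lock value, so 0 means the lock was taken by
   this call. The same sequence is used for x86_64 below. */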
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__x86_64__)
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__s390__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#elif defined(__alpha__)
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
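/* swp atomically exchanges the constant 1 with *spinlock and returns the
   previous value, so a result of 0 means this caller acquired the lock. */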
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (p)
                         : "cc","memory");
    return ret;
}
#elif defined(__hppa__)

/* Because malloc only guarantees 8-byte alignment for malloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration. So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore. */
#define __PA_LDCW_ALIGNMENT 16
static inline void *ldcw_align (void *p) {
    unsigned long a = (unsigned long)p;
    a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
    return (void *)a;
}

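/* ldcw atomically loads the 16-byte aligned lock word and clears it to zero.
   The lock is free while the word is non-zero (hence SPIN_LOCK_UNLOCKED being
   all ones and resetlock() storing 1s), so !ret follows the usual convention:
   0 when the lock was acquired, 1 when somebody else already holds it. */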
static inline int testandset (spinlock_t *p)
{
    unsigned int ret;
    p = ldcw_align(p);
    __asm__ __volatile__("ldcw 0(%1),%0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory" );
    return !ret;
}

#elif defined(__ia64)

#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#elif defined(__mips__)
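/* Classic ll/sc retry loop: load-link the old lock value into ret, then try to
   store-conditional 1; if another CPU broke the link the sc writes 0 to $1 and
   the loop retries. ret holds the value seen before the store, so 0 means the
   lock was free and is now held by this caller. */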
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "    .set push      \n"
        "    .set noat      \n"
        "    .set mips2     \n"
        "1:  li   $1, 1     \n"
        "    ll   %0, %1    \n"
        "    sc   $1, %1    \n"
        "    beqz $1, 1b    \n"
        "    .set pop       "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
#else
#error unimplemented CPU support
#endif

#if defined(CONFIG_USER_ONLY)
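/* Only the user-mode emulation build can have multiple threads contending
   here; as the comment at the top notes, system emulation doesn't use locking,
   so the #else branch below turns these calls into no-ops. */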
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    resetlock(lock);
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

#endif
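
/* Illustrative usage sketch (not part of the original header): a caller
   protecting a shared structure with this API. The names tb_list_lock,
   tb_insert(), tb_try_insert() and TranslationBlock are hypothetical here;
   only spinlock_t, SPIN_LOCK_UNLOCKED, spin_lock(), spin_trylock() and
   spin_unlock() come from this file.

       static spinlock_t tb_list_lock = SPIN_LOCK_UNLOCKED;

       static void tb_insert(TranslationBlock *tb)
       {
           spin_lock(&tb_list_lock);
           ...add tb to the shared list...
           spin_unlock(&tb_list_lock);
       }

       static int tb_try_insert(TranslationBlock *tb)
       {
           if (!spin_trylock(&tb_list_lock))
               return 0;            // lock busy, caller retries later
           ...add tb to the shared list...
           spin_unlock(&tb_list_lock);
           return 1;
       }

   Under USE_NPTL these calls become pthread mutex operations; in the non-NPTL
   user-mode build they busy-wait on testandset(); in the system emulation
   build they compile away entirely. */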