
xen/include/asm-x86/bitops.h @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation of the virtual disk
file passed through the config file. This validation ensures that we
do not go too far with initialization, such as spawning qemu, when
there are potentially fundamental issues with the disk configuration.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author  Kamala Narasimhan <kamala.narasimhan@gmail.com>
date    Tue Jan 25 18:09:49 2011 +0000

#ifndef _X86_BITOPS_H
#define _X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <xen/config.h>

/*
 * We specify the memory operand as both input and output because the memory
 * operand is both read from and written to. Since the operand is in fact a
 * word array, we also specify "memory" in the clobbers list to indicate that
 * words other than the one directly addressed by the memory operand may be
 * modified. We don't use "+m" because the gcc manual says that it should be
 * used only when the constraint allows the operand to reside in a register.
 */

#define ADDR (*(volatile long *) addr)
#define CONST_ADDR (*(const volatile long *) addr)

extern void __bitop_bad_size(void);
#define bitop_bad_size(addr) (sizeof(*(addr)) < 4)
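
/*
 * Each bit operation below pairs an inline function with a same-named
 * function-like macro. The macro checks the operand size and then calls
 * the function; because function-like macros are not expanded recursively,
 * the call inside the macro body resolves to the inline function. An
 * under-sized operand therefore becomes a call to the never-defined
 * __bitop_bad_size(), which fails at link time.
 */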

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile void *addr)
{
    asm volatile (
        "lock; btsl %1,%0"
        : "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
}
#define set_bit(nr, addr) ({                            \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    set_bit(nr, addr);                                  \
})

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile void *addr)
{
    asm volatile (
        "btsl %1,%0"
        : "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
}
#define __set_bit(nr, addr) ({                          \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    __set_bit(nr, addr);                                \
})
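
/*
 * Usage sketch (illustrative only; 'irq_pending' is a hypothetical bitmap,
 * not part of this header). set_bit() is safe against concurrent updates
 * to the same word; __set_bit() is only safe when the caller already
 * serializes access, e.g. under a lock:
 *
 *     static unsigned long irq_pending[4];
 *
 *     set_bit(68, irq_pending);    // atomic read-modify-write
 *     __set_bit(68, irq_pending);  // non-atomic; caller must serialize
 */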

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.
 */
static inline void clear_bit(int nr, volatile void *addr)
{
    asm volatile (
        "lock; btrl %1,%0"
        : "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
}
#define clear_bit(nr, addr) ({                          \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    clear_bit(nr, addr);                                \
})

/**
 * __clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(int nr, volatile void *addr)
{
    asm volatile (
        "btrl %1,%0"
        : "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
}
#define __clear_bit(nr, addr) ({                        \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    __clear_bit(nr, addr);                              \
})

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile void *addr)
{
    asm volatile (
        "btcl %1,%0"
        : "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
}
#define __change_bit(nr, addr) ({                       \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    __change_bit(nr, addr);                             \
})

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile void *addr)
{
    asm volatile (
        "lock; btcl %1,%0"
        : "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
}
#define change_bit(nr, addr) ({                         \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    change_bit(nr, addr);                               \
})

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile void *addr)
{
    int oldbit;

    asm volatile (
        "lock; btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
    return oldbit;
}
#define test_and_set_bit(nr, addr) ({                   \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    test_and_set_bit(nr, addr);                         \
})
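
/*
 * Usage sketch (illustrative; 'pending_work' is hypothetical). The atomic
 * read-modify-write makes test_and_set_bit() usable as a claim flag:
 * exactly one of several racing CPUs observes the old value as 0:
 *
 *     static unsigned long pending_work;
 *
 *     if ( !test_and_set_bit(0, &pending_work) )
 *     {
 *         // we won the race: the bit was 0 and is now 1
 *     }
 */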

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile void *addr)
{
    int oldbit;

    asm volatile (
        "btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
    return oldbit;
}
#define __test_and_set_bit(nr, addr) ({                 \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    __test_and_set_bit(nr, addr);                       \
})

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile void *addr)
{
    int oldbit;

    asm volatile (
        "lock; btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
    return oldbit;
}
#define test_and_clear_bit(nr, addr) ({                 \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    test_and_clear_bit(nr, addr);                       \
})

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile void *addr)
{
    int oldbit;

    asm volatile (
        "btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
    return oldbit;
}
#define __test_and_clear_bit(nr, addr) ({               \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    __test_and_clear_bit(nr, addr);                     \
})

/* WARNING: non-atomic and may be reordered! */
static inline int __test_and_change_bit(int nr, volatile void *addr)
{
    int oldbit;

    asm volatile (
        "btcl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
    return oldbit;
}
#define __test_and_change_bit(nr, addr) ({              \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    __test_and_change_bit(nr, addr);                    \
})

/**
 * test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile void *addr)
{
    int oldbit;

    asm volatile (
        "lock; btcl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR)
        : "Ir" (nr), "m" (ADDR) : "memory");
    return oldbit;
}
#define test_and_change_bit(nr, addr) ({                \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    test_and_change_bit(nr, addr);                      \
})

static inline int constant_test_bit(int nr, const volatile void *addr)
{
    return ((1U << (nr & 31)) &
            (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile void *addr)
{
    int oldbit;

    asm volatile (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit)
        : "m" (CONST_ADDR), "Ir" (nr) : "memory" );
    return oldbit;
}

#define test_bit(nr, addr) ({                           \
    if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
    (__builtin_constant_p(nr) ?                         \
     constant_test_bit((nr), (addr)) :                  \
     variable_test_bit((nr), (addr)));                  \
})
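
/*
 * Usage sketch (illustrative; 'cpu_feature' and 'bit' are hypothetical).
 * test_bit() picks the constant-folded C version when @nr is a
 * compile-time constant and the "btl"-based version otherwise:
 *
 *     static unsigned int cpu_feature[8];
 *     unsigned int bit;
 *
 *     if ( test_bit(25, cpu_feature) )   // constant @nr: constant_test_bit()
 *         ...
 *     if ( test_bit(bit, cpu_feature) )  // variable @nr: variable_test_bit()
 *         ...
 */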

extern unsigned int __find_first_bit(
    const unsigned long *addr, unsigned int size);
extern unsigned int __find_next_bit(
    const unsigned long *addr, unsigned int size, unsigned int offset);
extern unsigned int __find_first_zero_bit(
    const unsigned long *addr, unsigned int size);
extern unsigned int __find_next_zero_bit(
    const unsigned long *addr, unsigned int size, unsigned int offset);

static inline unsigned int __scanbit(unsigned long val, unsigned long max)
{
    asm ( "bsf %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max) );
    return (unsigned int)val;
}
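
/*
 * __scanbit() behaviour, as a worked sketch: "bsf" leaves its destination
 * undefined and sets ZF when the input is zero, so the "cmovz" replaces
 * the result with @max in that case:
 *
 *     __scanbit(0x28, 64);    // -> 3  (lowest set bit of 0b101000)
 *     __scanbit(0, 64);       // -> 64 (input zero, returns max)
 */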

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr,size)                               \
    ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?   \
      (__scanbit(*(const unsigned long *)addr, size)) :         \
      __find_first_bit(addr,size)))

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @off: The bit number to start searching at
 * @size: The maximum size to search
 */
#define find_next_bit(addr,size,off)                                         \
    ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?                \
      ((off) + (__scanbit((*(const unsigned long *)addr) >> (off), size))) : \
      __find_next_bit(addr,size,off)))
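
/*
 * Usage sketch (illustrative; 'map' is hypothetical): visiting every set
 * bit of a bitmap, assuming the usual convention that the search helpers
 * return @size when no (further) set bit exists:
 *
 *     unsigned long map[2];
 *     unsigned int bit;
 *
 *     for ( bit = find_first_bit(map, 128);
 *           bit < 128;
 *           bit = find_next_bit(map, 128, bit + 1) )
 *     {
 *         // 'bit' is set in map[]
 *     }
 */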

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
#define find_first_zero_bit(addr,size)                          \
    ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?   \
      (__scanbit(~*(const unsigned long *)addr, size)) :        \
      __find_first_zero_bit(addr,size)))

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @off: The bit number to start searching at
 * @size: The maximum size to search
 */
#define find_next_zero_bit(addr,size,off)                                          \
    ((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?                      \
      ((off) + (__scanbit(~(((*(const unsigned long *)addr)) >> (off)), size))) :  \
      __find_next_zero_bit(addr,size,off)))

/**
 * find_first_set_bit - find the first set bit in @word
 * @word: the word to search
 *
 * Returns the bit-number of the first set bit. The input must *not* be zero.
 */
static inline unsigned int find_first_set_bit(unsigned long word)
{
    asm ( "bsf %1,%0" : "=r" (word) : "r" (word) );
    return (unsigned int)word;
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs routines.
 */
static inline int ffs(unsigned long x)
{
    long r;

    asm ( "bsf %1,%0\n\t"
          "jnz 1f\n\t"
          "mov $-1,%0\n"
          "1:" : "=r" (r) : "rm" (x));
    return (int)r + 1;
}

/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 */
static inline int fls(unsigned long x)
{
    long r;

    asm ( "bsr %1,%0\n\t"
          "jnz 1f\n\t"
          "mov $-1,%0\n"
          "1:" : "=r" (r) : "rm" (x));
    return (int)r + 1;
}
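
/*
 * Worked examples (the values follow directly from the definitions above):
 *
 *     ffs(0x18) == 4     // lowest set bit is bit 3; ffs is 1-based
 *     fls(0x18) == 5     // highest set bit is bit 4; fls is 1-based
 *     ffs(0)    == 0     // bsf sets ZF, jnz falls through, r becomes -1
 *     fls(0)    == 0
 */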

/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)
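
/*
 * Worked example: hweight8(0xb3) == 5, since 0xb3 == 0b10110011 has five
 * bits set. The generic_hweightN() helpers (defined elsewhere in the
 * tree) compute this with shift-and-mask arithmetic rather than a
 * popcount instruction.
 */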

#endif /* _X86_BITOPS_H */