debuggers.hg

view xen/include/asm-x86/atomic.h @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation of the virtual disk
file passed through the config file. This validation ensures that we
do not get too far into initialization (e.g. spawning qemu) while
there are potentially fundamental problems with the configuration.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents 533d6e5c0099
children
line source
1 #ifndef __ARCH_X86_ATOMIC__
2 #define __ARCH_X86_ATOMIC__
4 #include <xen/config.h>
5 #include <asm/system.h>
/*
 * build_atomic_read(name, size, type, reg, barrier):
 * Expands to a static inline function 'name' performing a single,
 * correctly-sized MOV load from *addr.  'size' is the instruction
 * suffix ("b"/"w"/"l"/"q"), 'reg' the output register constraint,
 * and 'barrier' an optional trailing clobber list (may be empty).
 * Relies on a naturally aligned MOV being atomic on x86, so no
 * LOCK prefix is needed for plain loads.
 */
#define build_atomic_read(name, size, type, reg, barrier) \
static inline type name(const volatile type *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type *)addr) barrier); return ret; }
/*
 * build_atomic_write(name, size, type, reg, barrier):
 * Counterpart of build_atomic_read: expands to a static inline
 * function 'name' performing a single, correctly-sized MOV store of
 * 'val' to *addr.  An aligned MOV store is atomic on x86, so no
 * LOCK prefix is required here either.
 */
#define build_atomic_write(name, size, type, reg, barrier) \
static inline void name(volatile type *addr, type val) \
{ asm volatile("mov" size " %1,%0": "=m" (*(volatile type *)addr) \
:reg (val) barrier); }
/*
 * Atomic single-instruction loads.  The 8-bit variant uses "=q" so the
 * destination is a register with a byte sub-register; wider variants
 * may use any general-purpose register ("=r").
 */
build_atomic_read(atomic_read8, "b", uint8_t, "=q", )
build_atomic_read(atomic_read16, "w", uint16_t, "=r", )
build_atomic_read(atomic_read32, "l", uint32_t, "=r", )
build_atomic_read(atomic_read_int, "l", int, "=r", )

/* Atomic single-instruction stores (input constraints, hence no '='). */
build_atomic_write(atomic_write8, "b", uint8_t, "q", )
build_atomic_write(atomic_write16, "w", uint16_t, "r", )
build_atomic_write(atomic_write32, "l", uint32_t, "r", )
build_atomic_write(atomic_write_int, "l", int, "r", )
#ifdef __x86_64__
/* On 64-bit builds a single MOVQ suffices for atomic 64-bit accesses. */
build_atomic_read(atomic_read64, "q", uint64_t, "=r", )
build_atomic_write(atomic_write64, "q", uint64_t, "r", )
#else
/*
 * 32-bit x86 has no single-instruction 64-bit MOV, so fall back to
 * CMPXCHG8B (via __cmpxchg8b, defined elsewhere).
 *
 * Read: cmpxchg8b with old == new == 0 either observes a non-zero
 * value (compare fails, memory untouched) or swaps 0 for 0 — in both
 * cases memory is unchanged and the current value is returned.
 */
static inline uint64_t atomic_read64(const volatile uint64_t *addr)
{
    uint64_t *__addr = (uint64_t *)addr;
    return __cmpxchg8b(__addr, 0, 0);
}
/*
 * Write: retry the compare-and-swap, refreshing our notion of the old
 * value each time another writer races with us, until the swap to
 * 'val' succeeds.
 */
static inline void atomic_write64(volatile uint64_t *addr, uint64_t val)
{
    uint64_t old = *addr, new, *__addr = (uint64_t *)addr;
    while ( (new = __cmpxchg8b(__addr, old, val)) != old )
        old = new;
}
#endif

/* The builder macros are implementation details — not for users. */
#undef build_atomic_read
#undef build_atomic_write
/*
 * NB. I've pushed the volatile qualifier into the operations. This allows
 * fast accessors such as _atomic_read() and _atomic_set() which don't give
 * the compiler a fit.
 */
/* Wrapping the counter in a struct prevents accidental direct access. */
typedef struct { int counter; } atomic_t;

/* Static initializer: atomic_t a = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i) { (i) }
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 *
 * _atomic_read() takes an atomic_t by value and reads the counter
 * directly (no volatile access); atomic_read() takes a pointer and
 * goes through the single-MOV volatile accessor.
 */
#define _atomic_read(v) ((v).counter)
#define atomic_read(v) atomic_read_int(&((v)->counter))
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 *
 * _atomic_set() takes an atomic_t by value (an lvalue) and assigns the
 * counter directly; atomic_set() takes a pointer and goes through the
 * single-MOV volatile store accessor.
 */
#define _atomic_set(v,i) (((v).counter) = (i))
#define atomic_set(v,i) atomic_write_int(&((v)->counter), (i))
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  The LOCK prefix makes the read-modify-write
 * atomic with respect to other CPUs.  Note there is no "memory" clobber,
 * so this is not a compiler barrier for unrelated memory accesses.
 */
static inline void atomic_add(int i, atomic_t *v)
{
    asm volatile (
        "lock; addl %1,%0"
        : "=m" (*(volatile int *)&v->counter)
        : "ir" (i), "m" (*(volatile int *)&v->counter) );
}
/**
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.  LOCK-prefixed read-modify-write;
 * like atomic_add(), it carries no "memory" clobber and so is not a
 * compiler barrier.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
    asm volatile (
        "lock; subl %1,%0"
        : "=m" (*(volatile int *)&v->counter)
        : "ir" (i), "m" (*(volatile int *)&v->counter) );
}
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 *
 * SETE captures ZF from the locked SUB into @c (0 or 1).  Unlike the
 * plain arithmetic ops above, this one has a "memory" clobber, making
 * it a compiler barrier as well.
 */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
    unsigned char c;

    asm volatile (
        "lock; subl %2,%0; sete %1"
        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
        : "ir" (i), "m" (*(volatile int *)&v->counter) : "memory" );
    return c;
}
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 via a LOCK-prefixed INC.  No "memory"
 * clobber, so not a compiler barrier.
 */
static inline void atomic_inc(atomic_t *v)
{
    asm volatile (
        "lock; incl %0"
        : "=m" (*(volatile int *)&v->counter)
        : "m" (*(volatile int *)&v->counter) );
}
/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 via a LOCK-prefixed DEC.  No "memory"
 * clobber, so not a compiler barrier.
 */
static inline void atomic_dec(atomic_t *v)
{
    asm volatile (
        "lock; decl %0"
        : "=m" (*(volatile int *)&v->counter)
        : "m" (*(volatile int *)&v->counter) );
}
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 *
 * SETE captures ZF from the locked DEC.  The "memory" clobber makes
 * this a compiler barrier, unlike atomic_dec().
 */
static inline int atomic_dec_and_test(atomic_t *v)
{
    unsigned char c;

    asm volatile (
        "lock; decl %0; sete %1"
        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
        : "m" (*(volatile int *)&v->counter) : "memory" );
    return c != 0;
}
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 *
 * SETE captures ZF from the locked INC.  The "memory" clobber makes
 * this a compiler barrier, unlike atomic_inc().
 */
static inline int atomic_inc_and_test(atomic_t *v)
{
    unsigned char c;

    asm volatile (
        "lock; incl %0; sete %1"
        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
        : "m" (*(volatile int *)&v->counter) : "memory" );
    return c != 0;
}
/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 *
 * SETS captures SF (the sign flag) from the locked ADD into @c.
 * The "memory" clobber makes this a compiler barrier.
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
    unsigned char c;

    asm volatile (
        "lock; addl %2,%0; sets %1"
        : "=m" (*(volatile int *)&v->counter), "=qm" (c)
        : "ir" (i), "m" (*(volatile int *)&v->counter) : "memory" );
    return c;
}
211 static inline atomic_t atomic_compareandswap(
212 atomic_t old, atomic_t new, atomic_t *v)
213 {
214 atomic_t rc;
215 rc.counter = __cmpxchg(&v->counter, old.counter, new.counter, sizeof(int));
216 return rc;
217 }
219 #endif /* __ARCH_X86_ATOMIC__ */