debuggers.hg

view xen/include/asm-x86/system.h @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation of the virtual disk
file passed through the config file. The validation ensures that we
don't go too far with initialization, such as spawning qemu, when the
configuration has potentially fundamental problems.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents 04f15c2de8fa
children
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <xen/lib.h>
#include <asm/bitops.h>

#define read_segment_register(name)                             \
({  u16 __sel;                                                  \
    asm volatile ( "movw %%" STR(name) ",%0" : "=r" (__sel) ); \
    __sel;                                                      \
})
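
/*
 * Usage sketch: the argument is a bare segment register name, which the
 * macro stringifies into the movw operand, e.g.
 *
 *     u16 fs_sel = read_segment_register(fs);
 *
 * ('fs_sel' is just an illustrative local variable name.)
 */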

#define wbinvd() \
    asm volatile ( "wbinvd" : : : "memory" )

#define clflush(a) \
    asm volatile ( "clflush (%0)" : : "r"(a) )

#define nop() \
    asm volatile ( "nop" )

#define xchg(ptr,v) \
    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#if defined(__i386__)
# include <asm/x86_32/system.h>
#elif defined(__x86_64__)
# include <asm/x86_64/system.h>
#endif

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *   but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static always_inline unsigned long __xchg(
    unsigned long x, volatile void *ptr, int size)
{
    switch ( size )
    {
    case 1:
        asm volatile ( "xchgb %b0,%1"
                       : "=q" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
    case 2:
        asm volatile ( "xchgw %w0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
#if defined(__i386__)
    case 4:
        asm volatile ( "xchgl %0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
#elif defined(__x86_64__)
    case 4:
        asm volatile ( "xchgl %k0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
    case 8:
        asm volatile ( "xchgq %0,%1"
                       : "=r" (x)
                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
                       : "memory" );
        break;
#endif
    }
    return x;
}
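
/*
 * Usage sketch for the xchg() wrapper above (the variable names here are
 * purely illustrative):
 *
 *     static unsigned long lock_word;
 *     ...
 *     unsigned long old = xchg(&lock_word, 1UL);
 *
 * xchg() returns the previous contents of *ptr, so old == 0 means this
 * caller performed the 0 -> 1 transition atomically and now "owns" the
 * notional lock.
 */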

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
static always_inline unsigned long __cmpxchg(
    volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    unsigned long prev;
    switch ( size )
    {
    case 1:
        asm volatile ( "lock; cmpxchgb %b1,%2"
                       : "=a" (prev)
                       : "q" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
    case 2:
        asm volatile ( "lock; cmpxchgw %w1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
#if defined(__i386__)
    case 4:
        asm volatile ( "lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
#elif defined(__x86_64__)
    case 4:
        asm volatile ( "lock; cmpxchgl %k1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
    case 8:
        asm volatile ( "lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
                         "0" (old)
                       : "memory" );
        return prev;
#endif
    }
    return old;
}

#define __HAVE_ARCH_CMPXCHG

#define cmpxchgptr(ptr,o,n) ({                                          \
    const __typeof__(**(ptr)) *__o = (o);                               \
    __typeof__(**(ptr)) *__n = (n);                                     \
    ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)__o,            \
                                   (unsigned long)__n,sizeof(*(ptr)))); \
})
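
/*
 * Usage sketch for cmpxchgptr() (names are purely illustrative): install a
 * pointer only if the slot is still empty.
 *
 *     static struct widget *slot;
 *     struct widget *prev = cmpxchgptr(&slot, NULL, new_widget);
 *
 * The macro returns the value the slot held before the operation, so
 * prev == NULL means this caller's new_widget was stored; any other value
 * means another CPU got there first and the slot is left unchanged.
 */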

/*
 * Both Intel and AMD agree that, from a programmer's viewpoint:
 *  Loads cannot be reordered relative to other loads.
 *  Stores cannot be reordered relative to other stores.
 *
 * Intel64 Architecture Memory Ordering White Paper
 * <http://developer.intel.com/products/processor/manuals/318147.pdf>
 *
 * AMD64 Architecture Programmer's Manual, Volume 2: System Programming
 * <http://www.amd.com/us-en/assets/content_type/\
 *  white_papers_and_tech_docs/24593.pdf>
 */
#define rmb()           barrier()
#define wmb()           barrier()

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif
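
/*
 * Ordering sketch using the macros above (hypothetical 'data' and 'ready'
 * variables shared between two CPUs):
 *
 *     writer:                        reader:
 *         data = compute();              while ( !ready )
 *         smp_wmb();                         spin;
 *         ready = 1;                     smp_rmb();
 *                                        consume(data);
 *
 * The smp_wmb() keeps the store to 'data' ahead of the store to 'ready',
 * and the smp_rmb() keeps the load of 'ready' ahead of the load of 'data',
 * so the reader never sees stale data once it observes ready == 1.
 */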

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define local_irq_disable()     asm volatile ( "cli" : : : "memory" )
#define local_irq_enable()      asm volatile ( "sti" : : : "memory" )

/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()     asm volatile ( "sti; hlt" : : : "memory" )
/* used when interrupts are already enabled or to shutdown the processor */
#define halt()          asm volatile ( "hlt" : : : "memory" )

#define local_save_flags(x)                                      \
({                                                               \
    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
    asm volatile ( "pushf" __OS " ; pop" __OS " %0" : "=g" (x)); \
})
#define local_irq_save(x)                                        \
({                                                               \
    local_save_flags(x);                                         \
    local_irq_disable();                                         \
})
#define local_irq_restore(x)                                     \
({                                                               \
    BUILD_BUG_ON(sizeof(x) != sizeof(long));                     \
    asm volatile ( "push" __OS " %0 ; popf" __OS                 \
                   : : "g" (x) : "memory", "cc" );               \
})
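
/*
 * Usage sketch (the flags variable must be an unsigned long, which the
 * BUILD_BUG_ON checks above enforce; do_critical_work is illustrative):
 *
 *     unsigned long flags;
 *
 *     local_irq_save(flags);       saves EFLAGS, then executes cli
 *     do_critical_work();          runs with interrupts off on this CPU
 *     local_irq_restore(flags);    puts EFLAGS.IF back to its saved state
 *
 * Unlike a bare local_irq_enable(), local_irq_restore() does not turn
 * interrupts back on if they were already disabled when the flags were
 * saved, so these pairs nest safely.
 */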

static inline int local_irq_is_enabled(void)
{
    unsigned long flags;
    local_save_flags(flags);
    return !!(flags & (1<<9)); /* EFLAGS_IF */
}

#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002

void trap_init(void);
void percpu_traps_init(void);
void subarch_percpu_traps_init(void);

#endif