debuggers.hg

view xen/common/spinlock.c @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some basic validation of the virtual disk file
passed through the config file. The validation ensures that we do not
proceed too far with initialization, such as spawning qemu, when the
configuration has potentially fundamental problems.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents 0b88ccf6332d
children
#include <xen/lib.h>
#include <xen/config.h>
#include <xen/irq.h>
#include <xen/smp.h>
#include <xen/time.h>
#include <xen/spinlock.h>
#include <xen/guest_access.h>
#include <xen/preempt.h>
#include <public/sysctl.h>
#include <asm/processor.h>

#ifndef NDEBUG

static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);

/*
 * Sanity-check lock usage: a lock must be consistently taken either
 * always with IRQs disabled or always with IRQs enabled, otherwise a
 * deadlock is possible (an IRQ handler could spin forever on a lock
 * already held by the context it interrupted).  The first use of a
 * lock records its IRQ-safety in debug->irq_safe (initially -1); any
 * later mismatch is a bug.
 */
static void check_lock(struct lock_debug *debug)
{
    int irq_safe = !local_irq_is_enabled();

    if ( unlikely(atomic_read(&spin_debug) <= 0) )
        return;

    /* A few places take liberties with this. */
    /* BUG_ON(in_irq() && !irq_safe); */

    if ( unlikely(debug->irq_safe != irq_safe) )
    {
        int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
        BUG_ON(seen == !irq_safe);
    }
}
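
/*
 * Illustrative sketch (not part of the original file) of the pattern
 * check_lock() is designed to catch; names here are hypothetical:
 *
 *     spin_lock(&d->lock);        // first use: IRQs enabled, irq_safe = 0
 *     ...
 *     spin_lock_irq(&d->lock);    // later use: IRQs disabled, irq_safe = 1
 *                                 // -> BUG_ON() fires in check_lock()
 *
 * The mixed usage is the dangerous part: had an interrupt handler taken
 * d->lock, the IRQs-enabled holder could be interrupted by it, and the
 * handler would then spin forever on a lock its own CPU holds.
 */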
void spin_debug_enable(void)
{
    atomic_inc(&spin_debug);
}

void spin_debug_disable(void)
{
    atomic_dec(&spin_debug);
}

#else /* defined(NDEBUG) */

#define check_lock(l) ((void)0)

#endif
#ifdef LOCK_PROFILE

/*
 * Profiling hooks, compiled out unless LOCK_PROFILE is defined:
 *   REL   - account hold time on release
 *   VAR   - per-call variable recording when blocking began
 *   BLOCK - note the time of the first failed acquisition attempt
 *   GOT   - account block time once the lock is finally acquired
 */
#define LOCK_PROFILE_REL                                                  \
    lock->profile.time_hold += NOW() - lock->profile.time_locked;        \
    lock->profile.lock_cnt++;
#define LOCK_PROFILE_VAR    s_time_t block = 0
#define LOCK_PROFILE_BLOCK  block = block ? : NOW();
#define LOCK_PROFILE_GOT                                                  \
    lock->profile.time_locked = NOW();                                    \
    if (block)                                                            \
    {                                                                     \
        lock->profile.time_block += lock->profile.time_locked - block;   \
        lock->profile.block_cnt++;                                        \
    }

#else

#define LOCK_PROFILE_REL
#define LOCK_PROFILE_VAR
#define LOCK_PROFILE_BLOCK
#define LOCK_PROFILE_GOT

#endif
/*
 * Acquire with a test-and-test-and-set loop: the inner read-only spin
 * on _raw_spin_is_locked() avoids hammering the cache line with atomic
 * operations while the lock is held by another CPU.
 */
void _spin_lock(spinlock_t *lock)
{
    LOCK_PROFILE_VAR;

    check_lock(&lock->debug);
    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
    {
        LOCK_PROFILE_BLOCK;
        while ( likely(_raw_spin_is_locked(&lock->raw)) )
            cpu_relax();
    }
    LOCK_PROFILE_GOT;
    preempt_disable();
}
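
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 *
 *     static DEFINE_SPINLOCK(my_lock);
 *
 *     spin_lock(&my_lock);
 *     // ... critical section, local IRQ state unchanged ...
 *     spin_unlock(&my_lock);
 *
 * Callers that can also take the lock from interrupt context must use
 * the _irq/_irqsave variants below instead.
 */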
/*
 * As _spin_lock(), but IRQs are disabled across the critical section.
 * They are briefly re-enabled while waiting, so interrupt latency is
 * bounded by the critical section, not by the time spent spinning.
 */
void _spin_lock_irq(spinlock_t *lock)
{
    LOCK_PROFILE_VAR;

    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    check_lock(&lock->debug);
    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
    {
        LOCK_PROFILE_BLOCK;
        local_irq_enable();
        while ( likely(_raw_spin_is_locked(&lock->raw)) )
            cpu_relax();
        local_irq_disable();
    }
    LOCK_PROFILE_GOT;
    preempt_disable();
}
unsigned long _spin_lock_irqsave(spinlock_t *lock)
{
    unsigned long flags;
    LOCK_PROFILE_VAR;

    local_irq_save(flags);
    check_lock(&lock->debug);
    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
    {
        LOCK_PROFILE_BLOCK;
        local_irq_restore(flags);
        while ( likely(_raw_spin_is_locked(&lock->raw)) )
            cpu_relax();
        local_irq_save(flags);
    }
    LOCK_PROFILE_GOT;
    preempt_disable();
    return flags;
}
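
/*
 * Usage sketch for the irqsave variant (hypothetical caller): unlike
 * _spin_lock_irq(), it is safe in contexts where IRQs may already be
 * disabled, since the previous IRQ state is saved and later restored.
 *
 *     unsigned long flags;
 *
 *     spin_lock_irqsave(&my_lock, flags);
 *     // ... critical section, safe against local IRQs ...
 *     spin_unlock_irqrestore(&my_lock, flags);
 */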
void _spin_unlock(spinlock_t *lock)
{
    preempt_enable();
    LOCK_PROFILE_REL;
    _raw_spin_unlock(&lock->raw);
}

void _spin_unlock_irq(spinlock_t *lock)
{
    preempt_enable();
    LOCK_PROFILE_REL;
    _raw_spin_unlock(&lock->raw);
    local_irq_enable();
}

void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
    preempt_enable();
    LOCK_PROFILE_REL;
    _raw_spin_unlock(&lock->raw);
    local_irq_restore(flags);
}
int _spin_is_locked(spinlock_t *lock)
{
    check_lock(&lock->debug);
    return _raw_spin_is_locked(&lock->raw);
}

int _spin_trylock(spinlock_t *lock)
{
    check_lock(&lock->debug);
    if ( !_raw_spin_trylock(&lock->raw) )
        return 0;
#ifdef LOCK_PROFILE
    lock->profile.time_locked = NOW();
#endif
    preempt_disable();
    return 1;
}
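
/*
 * Trylock usage sketch (hypothetical caller): useful when blocking is
 * not acceptable, e.g. to avoid lock-order inversions.
 *
 *     if ( spin_trylock(&my_lock) )
 *     {
 *         // ... got the lock without waiting ...
 *         spin_unlock(&my_lock);
 *     }
 *     else
 *     {
 *         // ... back off and retry later ...
 *     }
 */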
/*
 * Wait for any current holder of the lock to release it, without
 * acquiring the lock ourselves.  Useful to flush concurrent critical
 * sections, e.g. before tearing down the data the lock protects.
 */
void _spin_barrier(spinlock_t *lock)
{
#ifdef LOCK_PROFILE
    s_time_t block = NOW();
    u64      loop = 0;

    check_lock(&lock->debug);
    do { mb(); loop++; } while ( _raw_spin_is_locked(&lock->raw) );
    /* The loop always runs once; more than once means we really waited. */
    if ( loop > 1 )
    {
        lock->profile.time_block += NOW() - block;
        lock->profile.block_cnt++;
    }
#else
    check_lock(&lock->debug);
    do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
#endif
    mb();
}

void _spin_barrier_irq(spinlock_t *lock)
{
    unsigned long flags;
    local_irq_save(flags);
    _spin_barrier(lock);
    local_irq_restore(flags);
}
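
/*
 * Barrier usage sketch (hypothetical, names illustrative): a writer
 * that has already made a structure unreachable can wait out holders
 * still inside their critical sections before freeing it.
 *
 *     list_del(&elem->list);        // unpublish under some other lock
 *     spin_barrier(&elem->lock);    // wait for in-flight holders
 *     xfree(elem);                  // now nobody can be using elem
 */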
int _spin_trylock_recursive(spinlock_t *lock)
{
    int cpu = smp_processor_id();

    /* Don't allow overflow of recurse_cpu field. */
    BUILD_BUG_ON(NR_CPUS > 0xfffu);

    check_lock(&lock->debug);

    if ( likely(lock->recurse_cpu != cpu) )
    {
        if ( !spin_trylock(lock) )
            return 0;
        lock->recurse_cpu = cpu;
    }

    /* We support only fairly shallow recursion, else the counter overflows. */
    ASSERT(lock->recurse_cnt < 0xfu);
    lock->recurse_cnt++;

    return 1;
}

void _spin_lock_recursive(spinlock_t *lock)
{
    while ( !spin_trylock_recursive(lock) )
        cpu_relax();
}

void _spin_unlock_recursive(spinlock_t *lock)
{
    if ( likely(--lock->recurse_cnt == 0) )
    {
        /* 0xfffu marks the lock as owned by no CPU. */
        lock->recurse_cpu = 0xfffu;
        spin_unlock(lock);
    }
}
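
/*
 * Recursive-lock usage sketch (hypothetical): the same CPU may
 * re-acquire a lock it already holds, e.g. when a function taking the
 * lock can be reached both directly and from a callee that already
 * holds it.  Acquisitions and releases must balance exactly.
 *
 *     spin_lock_recursive(&d->lock);
 *     helper(d);                        // may itself take d->lock
 *     spin_unlock_recursive(&d->lock);
 */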
/*
 * Read-write lock variants.  These mirror the spinlock operations
 * above: multiple readers may hold the lock concurrently, while a
 * writer is exclusive.
 */
void _read_lock(rwlock_t *lock)
{
    check_lock(&lock->debug);
    _raw_read_lock(&lock->raw);
    preempt_disable();
}

void _read_lock_irq(rwlock_t *lock)
{
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    check_lock(&lock->debug);
    _raw_read_lock(&lock->raw);
    preempt_disable();
}

unsigned long _read_lock_irqsave(rwlock_t *lock)
{
    unsigned long flags;
    local_irq_save(flags);
    check_lock(&lock->debug);
    _raw_read_lock(&lock->raw);
    preempt_disable();
    return flags;
}

void _read_unlock(rwlock_t *lock)
{
    preempt_enable();
    _raw_read_unlock(&lock->raw);
}

void _read_unlock_irq(rwlock_t *lock)
{
    preempt_enable();
    _raw_read_unlock(&lock->raw);
    local_irq_enable();
}

void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
    preempt_enable();
    _raw_read_unlock(&lock->raw);
    local_irq_restore(flags);
}

void _write_lock(rwlock_t *lock)
{
    check_lock(&lock->debug);
    _raw_write_lock(&lock->raw);
    preempt_disable();
}

void _write_lock_irq(rwlock_t *lock)
{
    ASSERT(local_irq_is_enabled());
    local_irq_disable();
    check_lock(&lock->debug);
    _raw_write_lock(&lock->raw);
    preempt_disable();
}

unsigned long _write_lock_irqsave(rwlock_t *lock)
{
    unsigned long flags;
    local_irq_save(flags);
    check_lock(&lock->debug);
    _raw_write_lock(&lock->raw);
    preempt_disable();
    return flags;
}

int _write_trylock(rwlock_t *lock)
{
    check_lock(&lock->debug);
    if ( !_raw_write_trylock(&lock->raw) )
        return 0;
    preempt_disable();
    return 1;
}

void _write_unlock(rwlock_t *lock)
{
    preempt_enable();
    _raw_write_unlock(&lock->raw);
}

void _write_unlock_irq(rwlock_t *lock)
{
    preempt_enable();
    _raw_write_unlock(&lock->raw);
    local_irq_enable();
}

void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
    preempt_enable();
    _raw_write_unlock(&lock->raw);
    local_irq_restore(flags);
}

int _rw_is_locked(rwlock_t *lock)
{
    check_lock(&lock->debug);
    return _raw_rw_is_locked(&lock->raw);
}

int _rw_is_write_locked(rwlock_t *lock)
{
    check_lock(&lock->debug);
    return _raw_rw_is_write_locked(&lock->raw);
}
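
/*
 * Read-write lock usage sketch (hypothetical caller): readers may run
 * concurrently, a writer excludes everyone.
 *
 *     static DEFINE_RWLOCK(my_rwlock);
 *
 *     read_lock(&my_rwlock);
 *     // ... look up, no modification ...
 *     read_unlock(&my_rwlock);
 *
 *     write_lock(&my_rwlock);
 *     // ... modify the protected structure ...
 *     write_unlock(&my_rwlock);
 */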
#ifdef LOCK_PROFILE

struct lock_profile_anc {
    struct lock_profile_qhead *head_q;   /* first head of this type */
    char                      *name;     /* descriptive string for print */
};

typedef void lock_profile_subfunc(
    struct lock_profile *, int32_t, int32_t, void *);

extern struct lock_profile *__lock_profile_start;
extern struct lock_profile *__lock_profile_end;

static s_time_t lock_profile_start;
static struct lock_profile_anc lock_profile_ancs[LOCKPROF_TYPE_N];
static struct lock_profile_qhead lock_profile_glb_q;
static spinlock_t lock_profile_lock = SPIN_LOCK_UNLOCKED;

/*
 * Apply 'sub' to every registered lock-profile element, holding the
 * profile lock across the whole walk.
 */
static void spinlock_profile_iterate(lock_profile_subfunc *sub, void *par)
{
    int i;
    struct lock_profile_qhead *hq;
    struct lock_profile *eq;

    spin_lock(&lock_profile_lock);
    for ( i = 0; i < LOCKPROF_TYPE_N; i++ )
        for ( hq = lock_profile_ancs[i].head_q; hq; hq = hq->head_q )
            for ( eq = hq->elem_q; eq; eq = eq->next )
                sub(eq, i, hq->idx, par);
    spin_unlock(&lock_profile_lock);
}
static void spinlock_profile_print_elem(struct lock_profile *data,
    int32_t type, int32_t idx, void *par)
{
    if ( type == LOCKPROF_TYPE_GLOBAL )
        printk("%s %s:\n", lock_profile_ancs[idx].name, data->name);
    else
        printk("%s %d %s:\n", lock_profile_ancs[idx].name, idx, data->name);
    printk("  lock:%12"PRId64"(%08X:%08X), block:%12"PRId64"(%08X:%08X)\n",
           data->lock_cnt, (u32)(data->time_hold >> 32), (u32)data->time_hold,
           data->block_cnt, (u32)(data->time_block >> 32),
           (u32)data->time_block);
}

void spinlock_profile_printall(unsigned char key)
{
    s_time_t now = NOW();
    s_time_t diff;

    diff = now - lock_profile_start;
    printk("Xen lock profile info SHOW (now = %08X:%08X, "
           "total = %08X:%08X)\n", (u32)(now >> 32), (u32)now,
           (u32)(diff >> 32), (u32)diff);
    spinlock_profile_iterate(spinlock_profile_print_elem, NULL);
}

static void spinlock_profile_reset_elem(struct lock_profile *data,
    int32_t type, int32_t idx, void *par)
{
    data->lock_cnt = 0;
    data->block_cnt = 0;
    data->time_hold = 0;
    data->time_block = 0;
}

void spinlock_profile_reset(unsigned char key)
{
    s_time_t now = NOW();

    if ( key != '\0' )
        printk("Xen lock profile info RESET (now = %08X:%08X)\n",
               (u32)(now >> 32), (u32)now);
    lock_profile_start = now;
    spinlock_profile_iterate(spinlock_profile_reset_elem, NULL);
}
typedef struct {
    xen_sysctl_lockprof_op_t *pc;
    int                       rc;
} spinlock_profile_ucopy_t;

static void spinlock_profile_ucopy_elem(struct lock_profile *data,
    int32_t type, int32_t idx, void *par)
{
    spinlock_profile_ucopy_t *p = par;
    xen_sysctl_lockprof_data_t elem;

    if ( p->rc )
        return;

    if ( p->pc->nr_elem < p->pc->max_elem )
    {
        safe_strcpy(elem.name, data->name);
        elem.type = type;
        elem.idx = idx;
        elem.lock_cnt = data->lock_cnt;
        elem.block_cnt = data->block_cnt;
        elem.lock_time = data->time_hold;
        elem.block_time = data->time_block;
        if ( copy_to_guest_offset(p->pc->data, p->pc->nr_elem, &elem, 1) )
            p->rc = -EFAULT;
    }

    /* Keep counting even once the guest buffer is full, so the caller
     * learns the total number of elements available. */
    if ( !p->rc )
        p->pc->nr_elem++;
}

/* Dom0 control of lock profiling. */
int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc)
{
    int rc = 0;
    spinlock_profile_ucopy_t par;

    switch ( pc->cmd )
    {
    case XEN_SYSCTL_LOCKPROF_reset:
        spinlock_profile_reset('\0');
        break;
    case XEN_SYSCTL_LOCKPROF_query:
        pc->nr_elem = 0;
        par.rc = 0;
        par.pc = pc;
        spinlock_profile_iterate(spinlock_profile_ucopy_elem, &par);
        pc->time = NOW() - lock_profile_start;
        rc = par.rc;
        break;
    default:
        rc = -EINVAL;
        break;
    }

    return rc;
}
void _lock_profile_register_struct(
    int32_t type, struct lock_profile_qhead *qhead, int32_t idx, char *name)
{
    qhead->idx = idx;
    spin_lock(&lock_profile_lock);
    qhead->head_q = lock_profile_ancs[type].head_q;
    lock_profile_ancs[type].head_q = qhead;
    lock_profile_ancs[type].name = name;
    spin_unlock(&lock_profile_lock);
}

void _lock_profile_deregister_struct(
    int32_t type, struct lock_profile_qhead *qhead)
{
    struct lock_profile_qhead **q;

    spin_lock(&lock_profile_lock);
    for ( q = &lock_profile_ancs[type].head_q; *q; q = &(*q)->head_q )
    {
        if ( *q == qhead )
        {
            *q = qhead->head_q;
            break;
        }
    }
    spin_unlock(&lock_profile_lock);
}
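
/*
 * Registration sketch (hypothetical; field names are illustrative): a
 * per-object queue head is linked in when the object is created and
 * unlinked on teardown, so its locks appear in the profile dumps above.
 *
 *     _lock_profile_register_struct(LOCKPROF_TYPE_PERDOM,
 *                                   &d->profile_head, d->domain_id,
 *                                   "Domain");
 *     ...
 *     _lock_profile_deregister_struct(LOCKPROF_TYPE_PERDOM,
 *                                     &d->profile_head);
 */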
static int __init lock_prof_init(void)
{
    struct lock_profile **q;

    /* Link all statically declared global locks into the global queue. */
    for ( q = &__lock_profile_start; q < &__lock_profile_end; q++ )
    {
        (*q)->next = lock_profile_glb_q.elem_q;
        lock_profile_glb_q.elem_q = *q;
    }

    _lock_profile_register_struct(
        LOCKPROF_TYPE_GLOBAL, &lock_profile_glb_q,
        0, "Global lock");

    return 0;
}
__initcall(lock_prof_init);

#endif /* LOCK_PROFILE */