debuggers.hg

annotate xen/common/spinlock.c @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation on the virtual disk
file passed through the config file. This validation ensures that we
don't get far into initialization (such as spawning qemu) when the
disk configuration has a potentially fundamental problem.
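
As an illustration only (validate_virtual_disk and its behaviour below
are hypothetical sketches, not the patch's actual code), the kind of
early check described above amounts to roughly:

    #include <sys/stat.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical sketch: reject an obviously bad disk path before any
     * expensive setup (such as spawning qemu) is attempted. */
    static int validate_virtual_disk(const char *file)
    {
        struct stat st;

        if ( stat(file, &st) != 0 )
        {
            fprintf(stderr, "disk image %s: %s\n", file, strerror(errno));
            return -1;
        }
        if ( !S_ISREG(st.st_mode) && !S_ISBLK(st.st_mode) )
        {
            fprintf(stderr, "disk image %s: not a regular file or block device\n",
                    file);
            return -1;
        }
        return 0;
    }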

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents 0b88ccf6332d
children
rev   line source
keir@20350 1 #include <xen/lib.h>
keir@18697 2 #include <xen/config.h>
keir@18744 3 #include <xen/irq.h>
keir@18697 4 #include <xen/smp.h>
keir@20350 5 #include <xen/time.h>
keir@18697 6 #include <xen/spinlock.h>
keir@20350 7 #include <xen/guest_access.h>
keir@22446 8 #include <xen/preempt.h>
keir@20350 9 #include <public/sysctl.h>
keir@19516 10 #include <asm/processor.h>
keir@18697 11
keir@18744 12 #ifndef NDEBUG
keir@18744 13
keir@18744 14 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
keir@18744 15
keir@18744 16 static void check_lock(struct lock_debug *debug)
keir@18744 17 {
keir@18744 18     int irq_safe = !local_irq_is_enabled();
keir@18744 19
keir@18744 20     if ( unlikely(atomic_read(&spin_debug) <= 0) )
keir@18744 21         return;
keir@18744 22
keir@18744 23     /* A few places take liberties with this. */
keir@18744 24     /* BUG_ON(in_irq() && !irq_safe); */
keir@18744 25
keir@18744 26     if ( unlikely(debug->irq_safe != irq_safe) )
keir@18744 27     {
keir@18744 28         int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
keir@18744 29         BUG_ON(seen == !irq_safe);
keir@18744 30     }
keir@18744 31 }
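/*
 * check_lock() enforces that a given lock is always acquired with a
 * consistent IRQ state: irq_safe is latched on first use (the cmpxchg
 * from -1) and any later mismatch is a BUG. Mixing the two states can
 * deadlock on a single CPU, e.g.:
 *
 *     spin_lock(&l);                    <- taken with IRQs enabled
 *         ... interrupt arrives ...
 *         handler: spin_lock(&l);       <- spins forever; the holder is us
 *
 * Locks that may be taken from IRQ context must therefore always be
 * acquired via the _irq/_irqsave variants below.
 */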
keir@18744 32
keir@18744 33 void spin_debug_enable(void)
keir@18744 34 {
keir@18744 35     atomic_inc(&spin_debug);
keir@18744 36 }
keir@18744 37
keir@18744 38 void spin_debug_disable(void)
keir@18744 39 {
keir@18744 40     atomic_dec(&spin_debug);
keir@18744 41 }
keir@18744 42
keir@18744 43 #else /* defined(NDEBUG) */
keir@18744 44
keir@18744 45 #define check_lock(l) ((void)0)
keir@18744 46
keir@18744 47 #endif
keir@18744 48
keir@20350 49 #ifdef LOCK_PROFILE
keir@20350 50
keir@20350 51 #define LOCK_PROFILE_REL \
keir@20350 52     lock->profile.time_hold += NOW() - lock->profile.time_locked; \
keir@20350 53     lock->profile.lock_cnt++;
keir@20350 54 #define LOCK_PROFILE_VAR s_time_t block = 0
keir@20350 55 #define LOCK_PROFILE_BLOCK block = block ? : NOW();
keir@20350 56 #define LOCK_PROFILE_GOT \
keir@20350 57     lock->profile.time_locked = NOW(); \
keir@20350 58     if ( block ) \
keir@20350 59     { \
keir@20350 60         lock->profile.time_block += lock->profile.time_locked - block; \
keir@20350 61         lock->profile.block_cnt++; \
keir@20350 62     }
keir@20350 63
keir@20350 64 #else
keir@20350 65
keir@20350 66 #define LOCK_PROFILE_REL
keir@20350 67 #define LOCK_PROFILE_VAR
keir@20350 68 #define LOCK_PROFILE_BLOCK
keir@20350 69 #define LOCK_PROFILE_GOT
keir@20350 70
keir@20350 71 #endif
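/*
 * With LOCK_PROFILE enabled, each lock accumulates:
 *   time_hold/lock_cnt   - total time held and number of releases
 *                          (accounted in LOCK_PROFILE_REL on unlock);
 *   time_block/block_cnt - total time spent spinning and number of
 *                          contended acquisitions (timed from the first
 *                          failed trylock until the lock is obtained).
 * Without LOCK_PROFILE the macros expand to nothing, so the fast path
 * is unchanged.
 */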
keir@20350 72
keir@18697 73 void _spin_lock(spinlock_t *lock)
keir@18697 74 {
keir@20350 75     LOCK_PROFILE_VAR;
keir@20350 76
keir@18744 77     check_lock(&lock->debug);
keir@19516 78     while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
keir@20350 79     {
keir@20350 80         LOCK_PROFILE_BLOCK;
keir@19516 81         while ( likely(_raw_spin_is_locked(&lock->raw)) )
keir@19516 82             cpu_relax();
keir@20350 83     }
keir@20350 84     LOCK_PROFILE_GOT;
keir@22446 85     preempt_disable();
keir@18697 86 }
keir@18697 87
keir@18697 88 void _spin_lock_irq(spinlock_t *lock)
keir@18697 89 {
keir@20350 90     LOCK_PROFILE_VAR;
keir@20350 91
keir@18735 92     ASSERT(local_irq_is_enabled());
keir@18697 93     local_irq_disable();
keir@18744 94     check_lock(&lock->debug);
keir@19516 95     while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
keir@19516 96     {
keir@20350 97         LOCK_PROFILE_BLOCK;
keir@19516 98         local_irq_enable();
keir@19516 99         while ( likely(_raw_spin_is_locked(&lock->raw)) )
keir@19516 100             cpu_relax();
keir@19516 101         local_irq_disable();
keir@19516 102     }
keir@20350 103     LOCK_PROFILE_GOT;
keir@22446 104     preempt_disable();
keir@18697 105 }
keir@18697 106
keir@18697 107 unsigned long _spin_lock_irqsave(spinlock_t *lock)
keir@18697 108 {
keir@18697 109     unsigned long flags;
keir@20350 110     LOCK_PROFILE_VAR;
keir@20350 111
keir@18697 112     local_irq_save(flags);
keir@18744 113     check_lock(&lock->debug);
keir@19516 114     while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
keir@19516 115     {
keir@20350 116         LOCK_PROFILE_BLOCK;
keir@19516 117         local_irq_restore(flags);
keir@19516 118         while ( likely(_raw_spin_is_locked(&lock->raw)) )
keir@19516 119             cpu_relax();
keir@19516 120         local_irq_save(flags);
keir@19516 121     }
keir@20350 122     LOCK_PROFILE_GOT;
keir@22446 123     preempt_disable();
keir@18697 124     return flags;
keir@18697 125 }
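/*
 * Typical usage of the irqsave variant, via the wrapper macros in
 * xen/spinlock.h (a sketch, assuming the usual spin_lock_irqsave(l, f)
 * macro form):
 *
 *     unsigned long flags;
 *     spin_lock_irqsave(&lock, flags);
 *     ... critical section, IRQs off ...
 *     spin_unlock_irqrestore(&lock, flags);
 *
 * Unlike _spin_lock_irq(), which asserts that IRQs are enabled on
 * entry, this is safe in either state: the prior state is saved in
 * flags and restored on unlock.
 */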
keir@18697 126
keir@18697 127 void _spin_unlock(spinlock_t *lock)
keir@18697 128 {
keir@22446 129     preempt_enable();
keir@20350 130     LOCK_PROFILE_REL;
keir@18697 131     _raw_spin_unlock(&lock->raw);
keir@18697 132 }
keir@18697 133
keir@18697 134 void _spin_unlock_irq(spinlock_t *lock)
keir@18697 135 {
keir@22446 136     preempt_enable();
keir@20350 137     LOCK_PROFILE_REL;
keir@18697 138     _raw_spin_unlock(&lock->raw);
keir@18697 139     local_irq_enable();
keir@18697 140 }
keir@18697 141
keir@18697 142 void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
keir@18697 143 {
keir@22446 144     preempt_enable();
keir@20350 145     LOCK_PROFILE_REL;
keir@18697 146     _raw_spin_unlock(&lock->raw);
keir@18697 147     local_irq_restore(flags);
keir@18697 148 }
keir@18697 149
keir@18697 150 int _spin_is_locked(spinlock_t *lock)
keir@18697 151 {
keir@18744 152     check_lock(&lock->debug);
keir@18697 153     return _raw_spin_is_locked(&lock->raw);
keir@18697 154 }
keir@18697 155
keir@18697 156 int _spin_trylock(spinlock_t *lock)
keir@18697 157 {
keir@18744 158     check_lock(&lock->debug);
keir@22441 159     if ( !_raw_spin_trylock(&lock->raw) )
keir@22441 160         return 0;
keir@22441 161 #ifdef LOCK_PROFILE
keir@20350 162     lock->profile.time_locked = NOW();
keir@22441 163 #endif
keir@22446 164     preempt_disable();
keir@20350 165     return 1;
keir@18697 166 }
keir@18697 167
keir@18697 168 void _spin_barrier(spinlock_t *lock)
keir@18697 169 {
keir@20350 170 #ifdef LOCK_PROFILE
keir@20350 171     s_time_t block = NOW();
keir@20350 172     u64 loop = 0;
keir@20350 173
keir@20350 174     check_lock(&lock->debug);
keir@20350 175     do { mb(); loop++; } while ( _raw_spin_is_locked(&lock->raw) );
keir@20350 176     if ( loop > 1 )
keir@20350 177     {
keir@20350 178         lock->profile.time_block += NOW() - block;
keir@20350 179         lock->profile.block_cnt++;
keir@20350 180     }
keir@20350 181 #else
keir@18744 182     check_lock(&lock->debug);
keir@18697 183     do { mb(); } while ( _raw_spin_is_locked(&lock->raw) );
keir@20350 184 #endif
keir@18697 185     mb();
keir@18697 186 }
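/*
 * _spin_barrier() never takes the lock: it waits until any holder at
 * the time of the call has released it. Together with the final mb()
 * this guarantees that a critical section which was in progress when
 * the barrier started has completed, and its writes are visible,
 * before the caller proceeds.
 */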
keir@18697 187
keir@18734 188 void _spin_barrier_irq(spinlock_t *lock)
keir@18734 189 {
keir@18734 190     unsigned long flags;
keir@18734 191     local_irq_save(flags);
keir@18734 192     _spin_barrier(lock);
keir@18734 193     local_irq_restore(flags);
keir@18734 194 }
keir@18734 195
keir@21430 196 int _spin_trylock_recursive(spinlock_t *lock)
keir@18697 197 {
keir@18697 198     int cpu = smp_processor_id();
keir@18699 199
keir@18699 200     /* Don't allow overflow of recurse_cpu field. */
keir@18699 201     BUILD_BUG_ON(NR_CPUS > 0xfffu);
keir@18699 202
keir@18744 203     check_lock(&lock->debug);
keir@18744 204
keir@18697 205     if ( likely(lock->recurse_cpu != cpu) )
keir@18697 206     {
keir@21430 207         if ( !spin_trylock(lock) )
keir@21430 208             return 0;
keir@18697 209         lock->recurse_cpu = cpu;
keir@18697 210     }
keir@18699 211
keir@18699 212     /* We support only fairly shallow recursion, else the counter overflows. */
keir@18699 213     ASSERT(lock->recurse_cnt < 0xfu);
keir@18697 214     lock->recurse_cnt++;
keir@21430 215
keir@21430 216     return 1;
keir@21430 217 }
keir@21430 218
keir@21430 219 void _spin_lock_recursive(spinlock_t *lock)
keir@21430 220 {
keir@21430 221     while ( !spin_trylock_recursive(lock) )
keir@21430 222         cpu_relax();
keir@18697 223 }
keir@18697 224
keir@18697 225 void _spin_unlock_recursive(spinlock_t *lock)
keir@18697 226 {
keir@18697 227     if ( likely(--lock->recurse_cnt == 0) )
keir@18697 228     {
keir@18699 229         lock->recurse_cpu = 0xfffu;
keir@18697 230         spin_unlock(lock);
keir@18697 231     }
keir@18697 232 }
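/*
 * Recursive locks may be re-taken by the CPU that already holds them:
 * recurse_cpu records the owner (0xfff meaning unowned, hence the
 * NR_CPUS check above) and recurse_cnt the nesting depth, e.g.:
 *
 *     spin_lock_recursive(&l);     depth 1, raw lock actually taken
 *     spin_lock_recursive(&l);     depth 2, no raw lock operation
 *     spin_unlock_recursive(&l);   depth 1
 *     spin_unlock_recursive(&l);   depth 0, raw lock released
 */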
keir@18697 233
keir@18697 234 void _read_lock(rwlock_t *lock)
keir@18697 235 {
keir@18744 236     check_lock(&lock->debug);
keir@18697 237     _raw_read_lock(&lock->raw);
keir@22446 238     preempt_disable();
keir@18697 239 }
keir@18697 240
keir@18697 241 void _read_lock_irq(rwlock_t *lock)
keir@18697 242 {
keir@18735 243     ASSERT(local_irq_is_enabled());
keir@18697 244     local_irq_disable();
keir@18744 245     check_lock(&lock->debug);
keir@18697 246     _raw_read_lock(&lock->raw);
keir@22446 247     preempt_disable();
keir@18697 248 }
keir@18697 249
keir@18697 250 unsigned long _read_lock_irqsave(rwlock_t *lock)
keir@18697 251 {
keir@18697 252     unsigned long flags;
keir@18697 253     local_irq_save(flags);
keir@18744 254     check_lock(&lock->debug);
keir@18697 255     _raw_read_lock(&lock->raw);
keir@22446 256     preempt_disable();
keir@18697 257     return flags;
keir@18697 258 }
keir@18697 259
keir@18697 260 void _read_unlock(rwlock_t *lock)
keir@18697 261 {
keir@22446 262     preempt_enable();
keir@18697 263     _raw_read_unlock(&lock->raw);
keir@18697 264 }
keir@18697 265
keir@18697 266 void _read_unlock_irq(rwlock_t *lock)
keir@18697 267 {
keir@22446 268     preempt_enable();
keir@18697 269     _raw_read_unlock(&lock->raw);
keir@18697 270     local_irq_enable();
keir@18697 271 }
keir@18697 272
keir@18697 273 void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
keir@18697 274 {
keir@22446 275     preempt_enable();
keir@18697 276     _raw_read_unlock(&lock->raw);
keir@18697 277     local_irq_restore(flags);
keir@18697 278 }
keir@18697 279
keir@18697 280 void _write_lock(rwlock_t *lock)
keir@18697 281 {
keir@18744 282     check_lock(&lock->debug);
keir@18697 283     _raw_write_lock(&lock->raw);
keir@22446 284     preempt_disable();
keir@18697 285 }
keir@18697 286
keir@18697 287 void _write_lock_irq(rwlock_t *lock)
keir@18697 288 {
keir@18735 289     ASSERT(local_irq_is_enabled());
keir@18697 290     local_irq_disable();
keir@18744 291     check_lock(&lock->debug);
keir@18697 292     _raw_write_lock(&lock->raw);
keir@22446 293     preempt_disable();
keir@18697 294 }
keir@18697 295
keir@18697 296 unsigned long _write_lock_irqsave(rwlock_t *lock)
keir@18697 297 {
keir@18697 298     unsigned long flags;
keir@18697 299     local_irq_save(flags);
keir@18744 300     check_lock(&lock->debug);
keir@18697 301     _raw_write_lock(&lock->raw);
keir@22446 302     preempt_disable();
keir@18697 303     return flags;
keir@18697 304 }
keir@18697 305
keir@19684 306 int _write_trylock(rwlock_t *lock)
keir@19684 307 {
keir@19684 308     check_lock(&lock->debug);
keir@22441 309     if ( !_raw_write_trylock(&lock->raw) )
keir@22441 310         return 0;
keir@22446 311     preempt_disable();
keir@22441 312     return 1;
keir@19684 313 }
keir@19684 314
keir@18697 315 void _write_unlock(rwlock_t *lock)
keir@18697 316 {
keir@22446 317     preempt_enable();
keir@18697 318     _raw_write_unlock(&lock->raw);
keir@18697 319 }
keir@18697 320
keir@18697 321 void _write_unlock_irq(rwlock_t *lock)
keir@18697 322 {
keir@22446 323     preempt_enable();
keir@18697 324     _raw_write_unlock(&lock->raw);
keir@18697 325     local_irq_enable();
keir@18697 326 }
keir@18697 327
keir@18697 328 void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
keir@18697 329 {
keir@22446 330     preempt_enable();
keir@18697 331     _raw_write_unlock(&lock->raw);
keir@18697 332     local_irq_restore(flags);
keir@18697 333 }
keir@18953 334
keir@18953 335 int _rw_is_locked(rwlock_t *lock)
keir@18953 336 {
keir@18953 337     check_lock(&lock->debug);
keir@18953 338     return _raw_rw_is_locked(&lock->raw);
keir@18953 339 }
keir@19684 340
keir@19684 341 int _rw_is_write_locked(rwlock_t *lock)
keir@19684 342 {
keir@19684 343     check_lock(&lock->debug);
keir@19684 344     return _raw_rw_is_write_locked(&lock->raw);
keir@19684 345 }
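/*
 * The rwlock wrappers mirror the spinlock ones: any number of readers
 * may hold the lock concurrently, while a writer excludes both readers
 * and other writers, e.g.:
 *
 *     read_lock(&rw);   ... shared read-only access ...  read_unlock(&rw);
 *     write_lock(&rw);  ... exclusive access ...         write_unlock(&rw);
 *
 * As with spinlocks, the _irq/_irqsave variants additionally disable
 * interrupts for the duration of the critical section.
 */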
keir@20350 346
keir@20350 347 #ifdef LOCK_PROFILE
keir@20354 348
keir@20350 349 struct lock_profile_anc {
keir@20350 350     struct lock_profile_qhead *head_q; /* first head of this type */
keir@20350 351     char *name;                        /* descriptive string for print */
keir@20350 352 };
keir@20350 353
keir@20354 354 typedef void lock_profile_subfunc(
keir@20354 355     struct lock_profile *, int32_t, int32_t, void *);
keir@20350 356
keir@20350 357 extern struct lock_profile *__lock_profile_start;
keir@20350 358 extern struct lock_profile *__lock_profile_end;
keir@20350 359
keir@20354 360 static s_time_t lock_profile_start;
keir@20350 361 static struct lock_profile_anc lock_profile_ancs[LOCKPROF_TYPE_N];
keir@20350 362 static struct lock_profile_qhead lock_profile_glb_q;
keir@20350 363 static spinlock_t lock_profile_lock = SPIN_LOCK_UNLOCKED;
keir@20350 364
keir@20350 365 static void spinlock_profile_iterate(lock_profile_subfunc *sub, void *par)
keir@20350 366 {
keir@20354 367     int i;
keir@20350 368     struct lock_profile_qhead *hq;
keir@20350 369     struct lock_profile *eq;
keir@20350 370
keir@20350 371     spin_lock(&lock_profile_lock);
keir@20354 372     for ( i = 0; i < LOCKPROF_TYPE_N; i++ )
keir@20354 373         for ( hq = lock_profile_ancs[i].head_q; hq; hq = hq->head_q )
keir@20354 374             for ( eq = hq->elem_q; eq; eq = eq->next )
keir@20350 375                 sub(eq, i, hq->idx, par);
keir@20350 376     spin_unlock(&lock_profile_lock);
keir@20350 377 }
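/*
 * Profiling data forms a two-level list per lock type: each entry of
 * lock_profile_ancs heads a chain of qheads (one per profiled
 * structure), and each qhead chains the lock_profile records of the
 * locks inside that structure. The iterator visits every record while
 * holding lock_profile_lock.
 */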
keir@20350 378
keir@20350 379 static void spinlock_profile_print_elem(struct lock_profile *data,
keir@20350 380     int32_t type, int32_t idx, void *par)
keir@20350 381 {
keir@20354 382     if ( type == LOCKPROF_TYPE_GLOBAL )
keir@20350 383         printk("%s %s:\n", lock_profile_ancs[idx].name, data->name);
keir@20350 384     else
keir@20350 385         printk("%s %d %s:\n", lock_profile_ancs[idx].name, idx, data->name);
keir@20354 386     printk(" lock:%12"PRId64"(%08X:%08X), block:%12"PRId64"(%08X:%08X)\n",
keir@20354 387            data->lock_cnt, (u32)(data->time_hold >> 32), (u32)data->time_hold,
keir@20354 388            data->block_cnt, (u32)(data->time_block >> 32),
keir@20354 389            (u32)data->time_block);
keir@20350 390 }
keir@20350 391
keir@20350 392 void spinlock_profile_printall(unsigned char key)
keir@20350 393 {
keir@20350 394     s_time_t now = NOW();
keir@20350 395     s_time_t diff;
keir@20350 396
keir@20350 397     diff = now - lock_profile_start;
keir@20350 398     printk("Xen lock profile info SHOW (now = %08X:%08X, "
keir@20350 399            "total = %08X:%08X)\n", (u32)(now>>32), (u32)now,
keir@20350 400            (u32)(diff>>32), (u32)diff);
keir@20350 401     spinlock_profile_iterate(spinlock_profile_print_elem, NULL);
keir@20350 402 }
keir@20350 403
keir@20350 404 static void spinlock_profile_reset_elem(struct lock_profile *data,
keir@20350 405     int32_t type, int32_t idx, void *par)
keir@20350 406 {
keir@20350 407     data->lock_cnt = 0;
keir@20350 408     data->block_cnt = 0;
keir@20350 409     data->time_hold = 0;
keir@20350 410     data->time_block = 0;
keir@20350 411 }
keir@20350 412
keir@20350 413 void spinlock_profile_reset(unsigned char key)
keir@20350 414 {
keir@20350 415     s_time_t now = NOW();
keir@20350 416
keir@20350 417     if ( key != '\0' )
keir@20350 418         printk("Xen lock profile info RESET (now = %08X:%08X)\n",
keir@20350 419                (u32)(now>>32), (u32)now);
keir@20350 420     lock_profile_start = now;
keir@20350 421     spinlock_profile_iterate(spinlock_profile_reset_elem, NULL);
keir@20350 422 }
keir@20350 423
keir@20350 424 typedef struct {
keir@20350 425     xen_sysctl_lockprof_op_t *pc;
keir@20350 426     int rc;
keir@20350 427 } spinlock_profile_ucopy_t;
keir@20350 428
keir@20350 429 static void spinlock_profile_ucopy_elem(struct lock_profile *data,
keir@20350 430     int32_t type, int32_t idx, void *par)
keir@20350 431 {
keir@20354 432     spinlock_profile_ucopy_t *p = par;
keir@20350 433     xen_sysctl_lockprof_data_t elem;
keir@20350 434
keir@20354 435     if ( p->rc )
keir@20350 436         return;
keir@20350 437
keir@20354 438     if ( p->pc->nr_elem < p->pc->max_elem )
keir@20350 439     {
keir@20350 440         safe_strcpy(elem.name, data->name);
keir@20350 441         elem.type = type;
keir@20350 442         elem.idx = idx;
keir@20350 443         elem.lock_cnt = data->lock_cnt;
keir@20350 444         elem.block_cnt = data->block_cnt;
keir@20350 445         elem.lock_time = data->time_hold;
keir@20350 446         elem.block_time = data->time_block;
keir@20354 447         if ( copy_to_guest_offset(p->pc->data, p->pc->nr_elem, &elem, 1) )
keir@20350 448             p->rc = -EFAULT;
keir@20350 449     }
keir@20354 450
keir@20354 451     if ( !p->rc )
keir@20354 452         p->pc->nr_elem++;
keir@20350 453 }
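/*
 * Note that nr_elem keeps counting past max_elem: a query whose buffer
 * is too small still reports the total number of records, so the caller
 * can retry with an adequately sized buffer.
 */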
keir@20350 454
keir@20350 455 /* Dom0 control of lock profiling */
keir@20350 456 int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc)
keir@20350 457 {
keir@20354 458     int rc = 0;
keir@20350 459     spinlock_profile_ucopy_t par;
keir@20350 460
keir@20354 461     switch ( pc->cmd )
keir@20350 462     {
keir@20350 463     case XEN_SYSCTL_LOCKPROF_reset:
keir@20350 464         spinlock_profile_reset('\0');
keir@20350 465         break;
keir@20350 466     case XEN_SYSCTL_LOCKPROF_query:
keir@20354 467         pc->nr_elem = 0;
keir@20354 468         par.rc = 0;
keir@20354 469         par.pc = pc;
keir@20350 470         spinlock_profile_iterate(spinlock_profile_ucopy_elem, &par);
keir@20350 471         pc->time = NOW() - lock_profile_start;
keir@20354 472         rc = par.rc;
keir@20350 473         break;
keir@20350 474     default:
keir@20350 475         rc = -EINVAL;
keir@20350 476         break;
keir@20350 477     }
keir@20354 478
keir@20350 479     return rc;
keir@20350 480 }
keir@20350 481
keir@20354 482 void _lock_profile_register_struct(
keir@20354 483     int32_t type, struct lock_profile_qhead *qhead, int32_t idx, char *name)
keir@20350 484 {
keir@20350 485     qhead->idx = idx;
keir@20350 486     spin_lock(&lock_profile_lock);
keir@20350 487     qhead->head_q = lock_profile_ancs[type].head_q;
keir@20350 488     lock_profile_ancs[type].head_q = qhead;
keir@20350 489     lock_profile_ancs[type].name = name;
keir@20350 490     spin_unlock(&lock_profile_lock);
keir@20350 491 }
keir@20350 492
keir@20354 493 void _lock_profile_deregister_struct(
keir@20354 494     int32_t type, struct lock_profile_qhead *qhead)
keir@20350 495 {
keir@20350 496     struct lock_profile_qhead **q;
keir@20350 497
keir@20350 498     spin_lock(&lock_profile_lock);
keir@20354 499     for ( q = &lock_profile_ancs[type].head_q; *q; q = &(*q)->head_q )
keir@20350 500     {
keir@20354 501         if ( *q == qhead )
keir@20350 502         {
keir@20350 503             *q = qhead->head_q;
keir@20350 504             break;
keir@20350 505         }
keir@20350 506     }
keir@20350 507     spin_unlock(&lock_profile_lock);
keir@20350 508 }
keir@20350 509
keir@20350 510 static int __init lock_prof_init(void)
keir@20350 511 {
keir@20350 512     struct lock_profile **q;
keir@20350 513
keir@20354 514     for ( q = &__lock_profile_start; q < &__lock_profile_end; q++ )
keir@20350 515     {
keir@20350 516         (*q)->next = lock_profile_glb_q.elem_q;
keir@20354 517         lock_profile_glb_q.elem_q = *q;
keir@20350 518     }
keir@20354 519
keir@20354 520     _lock_profile_register_struct(
keir@20354 521         LOCKPROF_TYPE_GLOBAL, &lock_profile_glb_q,
keir@20350 522         0, "Global lock");
keir@20354 523
keir@20350 524     return 0;
keir@20350 525 }
keir@20350 526 __initcall(lock_prof_init);
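/*
 * __lock_profile_start and __lock_profile_end delimit a linker-provided
 * section that collects the lock_profile records of statically defined
 * locks; lock_prof_init() strings them onto the global queue and
 * registers it under the "Global lock" group.
 */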
keir@20354 527
keir@20354 528 #endif /* LOCK_PROFILE */