debuggers.hg

annotate xen/common/domctl.c @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation of the virtual disk
file passed through the config file. This validation ensures that we
don't go too far into initialization (such as spawning qemu) while
the configuration has potentially fundamental problems.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents 2208a036f8d9
children
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/sched-if.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <xsm/xsm.h>

static DEFINE_SPINLOCK(domctl_lock);

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);

int cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    if ( copy_bytes != 0 )
        if ( copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes) )
            return -EFAULT;

    for ( i = copy_bytes; i < guest_bytes; i++ )
        if ( copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1) )
            return -EFAULT;

    return 0;
}

int xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    memset(bytemap, 0, sizeof(bytemap));

    if ( copy_bytes != 0 )
    {
        if ( copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes) )
            return -EFAULT;
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);

    return 0;
}
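
The two helpers above translate between Xen's long-based cpumask_t and the
byte-granular bitmap a guest supplies, truncating to the smaller of the two
sizes and masking any trailing bits beyond nr_cpus. A minimal user-space
sketch of the same truncation and masking logic (memcpy() stands in for
copy_from_guest(), and the bitmap stays byte-granular rather than being
repacked into longs):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NR_CPUS 16

static void guest_bitmap_to_bytemap(uint8_t *bytemap, size_t map_size,
                                    const uint8_t *guest,
                                    unsigned int nr_cpus)
{
    unsigned int guest_bytes = (nr_cpus + 7) / 8;
    unsigned int copy_bytes = guest_bytes < map_size ? guest_bytes
                                                     : (unsigned int)map_size;

    memset(bytemap, 0, map_size);
    memcpy(bytemap, guest, copy_bytes);      /* copy_from_guest() stand-in */

    /* Clear bits beyond nr_cpus in the final partial byte, exactly as
     * xenctl_cpumap_to_cpumask() does. */
    if ( (nr_cpus & 7) && (guest_bytes <= map_size) )
        bytemap[guest_bytes - 1] &= ~(0xff << (nr_cpus & 7));
}

int main(void)
{
    uint8_t guest[2] = { 0xff, 0xff };       /* guest claims CPUs 0-15 */
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    guest_bitmap_to_bytemap(bytemap, sizeof(bytemap), guest, 12);
    printf("%02x %02x\n", bytemap[0], bytemap[1]);  /* ff 0f: bits 12-15 masked */
    return 0;
}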

static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}

void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;
    info->ssidref = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = (info->nr_online_vcpus ? flags : 0) |
        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying : 0) |
        (d->is_shut_down ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller ? XEN_DOMINF_paused : 0) |
        (d->debugger_attached ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    xsm_security_domaininfo(d, info);

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shr_pages = atomic_read(&d->shr_pages);
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);
    BUG_ON(SHARED_M2P(info->shared_info_frame));

    info->cpupool = d->cpupool ? d->cpupool->cpupool_id : CPUPOOLID_NONE;

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
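
getdomaininfo() folds per-vcpu runstates into a single flags word:
XEN_DOMINF_blocked is cleared as soon as one online vcpu is unblocked,
XEN_DOMINF_running is set if any vcpu is running, and the shutdown reason
code sits above the flag bits. A hedged toolstack-side decoding sketch,
assuming a libxc development environment where <xenctrl.h> pulls in the
public XEN_DOMINF_* definitions from xen/include/public/domctl.h:

#include <stdio.h>
#include <xenctrl.h>

static void print_dominfo_flags(unsigned int flags)
{
    /* Low bits are state flags; the shutdown reason code sits above them. */
    printf("dying=%d shutdown=%d paused=%d blocked=%d running=%d hvm=%d\n",
           !!(flags & XEN_DOMINF_dying),
           !!(flags & XEN_DOMINF_shutdown),
           !!(flags & XEN_DOMINF_paused),
           !!(flags & XEN_DOMINF_blocked),
           !!(flags & XEN_DOMINF_running),
           !!(flags & XEN_DOMINF_hvm_guest));
    if ( flags & XEN_DOMINF_shutdown )
        printf("shutdown code: %u\n",
               (flags >> XEN_DOMINF_shutdownshift) & XEN_DOMINF_shutdownmask);
}

int main(void)
{
    /* Fabricated example value built from the public flag macros. */
    print_dominfo_flags(XEN_DOMINF_running | XEN_DOMINF_hvm_guest);
    return 0;
}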

static unsigned int default_vcpu0_location(cpumask_t *online)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int i, cpu, nr_cpus, *cnt;
    cpumask_t cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    nr_cpus = last_cpu(cpu_online_map) + 1;
    cnt = xmalloc_array(unsigned int, nr_cpus);
    if ( cnt )
    {
        memset(cnt, 0, nr_cpus * sizeof(*cnt));

        rcu_read_lock(&domlist_read_lock);
        for_each_domain ( d )
            for_each_vcpu ( d, v )
                if ( !test_bit(_VPF_down, &v->pause_flags)
                     && ((cpu = v->processor) < nr_cpus) )
                    cnt[cpu]++;
        rcu_read_unlock(&domlist_read_lock);
    }

    /*
     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
     * favour high numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(per_cpu(cpu_sibling_map, 0));
    if ( cpus_weight(per_cpu(cpu_sibling_map, 0)) > 1 )
        cpu = next_cpu(cpu, per_cpu(cpu_sibling_map, 0));
    cpu_exclude_map = per_cpu(cpu_sibling_map, 0);
    for_each_cpu_mask(i, *online)
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(per_cpu(cpu_sibling_map, i))) &&
             (cpus_weight(per_cpu(cpu_sibling_map, i)) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, per_cpu(cpu_sibling_map, i));
        if ( !cnt || cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    xfree(cnt);

    return cpu;
}
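
default_vcpu0_location() counts vcpus per physical CPU, then scans the
online map, skipping primary hyperthreads and sibling sets it has already
considered; the "<=" comparison deliberately lets a later, higher-numbered
CPU win ties. A simplified standalone model of that tie-breaking, assuming
a fabricated 2-way HT layout (CPUs 2k and 2k+1 are siblings) and made-up
load counts:

#include <stdio.h>

#define NCPU 8

int main(void)
{
    /* cnt[i]: vcpus currently placed on CPU i (fabricated numbers). */
    unsigned int cnt[NCPU] = { 3, 1, 2, 1, 4, 1, 0, 1 };
    unsigned int i, cpu = 1;   /* start at CPU 0's secondary thread */

    for ( i = 0; i < NCPU; i++ )
    {
        if ( (i & 1) == 0 )    /* primary hyperthread: never auto-place */
            continue;
        if ( cnt[i] <= cnt[cpu] )
            cpu = i;           /* "<=": the higher CPU wins ties */
    }
    printf("vcpu0 -> CPU %u\n", cpu);   /* CPUs 1,3,5,7 tie; prints CPU 7 */
    return 0;
}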

bool_t domctl_lock_acquire(void)
{
    /*
     * Caller may try to pause its own VCPUs. We must prevent deadlock
     * against other non-domctl routines which try to do the same.
     */
    if ( !spin_trylock(&current->domain->hypercall_deadlock_mutex) )
        return 0;

    /*
     * Trylock here is paranoia if we have multiple privileged domains. Then
     * we could have one domain trying to pause another which is spinning
     * on domctl_lock -- results in deadlock.
     */
    if ( spin_trylock(&domctl_lock) )
        return 1;

    spin_unlock(&current->domain->hypercall_deadlock_mutex);
    return 0;
}

void domctl_lock_release(void)
{
    spin_unlock(&domctl_lock);
    spin_unlock(&current->domain->hypercall_deadlock_mutex);
}
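
Both trylocks fail soft: rather than spinning, do_domctl() below turns a
failed acquisition into a hypercall continuation, so the calling VCPU can
be descheduled and the domctl retried later. The usage pattern, excerpted
from do_domctl():

    if ( !domctl_lock_acquire() )
        return hypercall_create_continuation(
            __HYPERVISOR_domctl, "h", u_domctl);

    /* ... operate on the target domain ... */

    domctl_lock_release();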

long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    switch ( op->cmd )
    {
    case XEN_DOMCTL_ioport_mapping:
    case XEN_DOMCTL_memory_mapping:
    case XEN_DOMCTL_bind_pt_irq:
    case XEN_DOMCTL_unbind_pt_irq: {
        struct domain *d;
        bool_t is_priv = IS_PRIV(current->domain);
        if ( !is_priv && ((d = rcu_lock_domain_by_id(op->domain)) != NULL) )
        {
            is_priv = IS_PRIV_FOR(current->domain, d);
            rcu_unlock_domain(d);
        }
        if ( !is_priv )
            return -EPERM;
        break;
    }
    default:
        if ( !IS_PRIV(current->domain) )
            return -EPERM;
        break;
    }

    if ( !domctl_lock_acquire() )
        return hypercall_create_continuation(
            __HYPERVISOR_domctl, "h", u_domctl);

    switch ( op->cmd )
    {

    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_setvcpucontext(d);
        if ( ret )
            goto svc_out;

        ret = -EINVAL;
        if ( (d == current->domain) || /* no domain_pause() */
             (vcpu >= d->max_vcpus) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            vcpu_reset(v);
            ret = 0;
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

#ifdef CONFIG_COMPAT
        if ( !is_pv_32on64_vcpu(v) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#else
        ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_pausedomain(d);
            if ( ret )
                goto pausedomain_out;

            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
        pausedomain_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_unpausedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_unpause_by_systemcontroller(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_resumedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_resume(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags &
             ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap |
               XEN_DOMCTL_CDF_s3_integrity | XEN_DOMCTL_CDF_oos_off)) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap )
            domcr_flags |= DOMCRF_hap;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_s3_integrity )
            domcr_flags |= DOMCRF_s3_integrity;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_oos_off )
            domcr_flags |= DOMCRF_oos_off;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;
        cpumask_t *online;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( (d == current->domain) || /* no domain_pause() */
             (max > MAX_VIRT_CPUS) ||
             (is_hvm_domain(d) && (max > MAX_HVM_VCPUS)) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = xsm_max_vcpus(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        /* Until Xenoprof can dynamically grow its vcpu-s array... */
        if ( d->xenoprof )
        {
            rcu_unlock_domain(d);
            ret = -EAGAIN;
            break;
        }

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max < d->max_vcpus) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        /*
         * For now don't allow increasing the vcpu count from a non-zero
         * value: This code and all readers of d->vcpu would otherwise need
         * to be converted to use RCU, but at present there's no tools side
         * code path that would issue such a request.
         */
        ret = -EBUSY;
        if ( (d->max_vcpus > 0) && (max > d->max_vcpus) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        online = (d->cpupool == NULL) ? &cpu_online_map : &d->cpupool->cpu_valid;
        if ( max > d->max_vcpus )
        {
            struct vcpu **vcpus;

            BUG_ON(d->vcpu != NULL);
            BUG_ON(d->max_vcpus != 0);

            if ( (vcpus = xmalloc_array(struct vcpu *, max)) == NULL )
                goto maxvcpu_out;
            memset(vcpus, 0, max * sizeof(*vcpus));

            /* Install vcpu array /then/ update max_vcpus. */
            d->vcpu = vcpus;
            wmb();
            d->max_vcpus = max;
        }

        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location(online) :
                cycle_cpu(d->vcpu[i-1]->processor, *online);

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_destroydomain(d) ? : domain_kill(d);
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_vcpuaffinity(op->cmd, d);
        if ( ret )
            goto vcpuaffinity_out;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            ret = xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            if ( !ret )
                ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            ret = cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_scheduler(d);
        if ( ret )
            goto scheduler_op_out;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    scheduler_op_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom = op->domain;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
            if ( d->domain_id >= dom )
                break;

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        ret = xsm_getdomaininfo(d);
        if ( ret )
            goto getdomaininfo_out;

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getdomaininfo_out:
        rcu_read_unlock(&domlist_read_lock);
    }
    break;

    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpucontext(d);
        if ( ret )
            goto getvcpucontext_out;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= d->max_vcpus )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

#ifdef CONFIG_COMPAT
        if ( !is_pv_32on64_vcpu(v) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#else
        ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpuinfo(d);
        if ( ret )
            goto getvcpuinfo_out;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= d->max_vcpus )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainmaxmem(d);
        if ( ret )
            goto max_mem_out;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        /*
         * NB. We removed a check that new_max >= current tot_pages; this means
         * that the domain will now be allowed to "ratchet" down to new_max. In
         * the meantime, while tot > max, all new allocations are disallowed.
         */
        d->max_pages = new_max;
        ret = 0;
        spin_unlock(&d->page_alloc_lock);

    max_mem_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainhandle(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( d == current->domain ) /* no domain_pause() */
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = xsm_setdebugging(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( pirq >= d->nr_pirqs )
            ret = -EINVAL;
        else if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_domain_settime(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_set_target:
    {
        struct domain *d, *e;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -ESRCH;
        e = get_domain_by_id(op->u.set_target.target);
        if ( e == NULL )
            goto set_target_out;

        ret = -EINVAL;
        if ( (d == e) || (d->target != NULL) )
        {
            put_domain(e);
            goto set_target_out;
        }

        ret = xsm_set_target(d, e);
        if ( ret )
        {
            put_domain(e);
            goto set_target_out;
        }

        /* Hold reference on @e until we destroy @d. */
        d->target = e;

        ret = 0;

    set_target_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_subscribe:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d != NULL )
        {
            d->suspend_evtchn = op->u.subscribe.port;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    case XEN_DOMCTL_disable_migrate:
    {
        struct domain *d;
        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) != NULL )
        {
            d->disable_migrate = op->u.disable_migrate.disable;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    domctl_lock_release();

    return ret;
}
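
For context, the cases above are normally reached through libxc wrappers
that marshal struct xen_domctl and issue the hypercall on the toolstack's
behalf. A hedged sketch, assuming the Xen 4.1-era libxc API
(xc_interface_open/xc_domain_pause/xc_domain_unpause), which lands in the
XEN_DOMCTL_pausedomain and XEN_DOMCTL_unpausedomain cases:

#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

int main(int argc, char **argv)
{
    uint32_t domid = (argc > 1) ? atoi(argv[1]) : 1;
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);

    if ( xch == NULL )
    {
        fprintf(stderr, "cannot open Xen control interface\n");
        return 1;
    }

    /* Each call becomes one do_domctl() invocation in the hypervisor. */
    if ( xc_domain_pause(xch, domid) )
        perror("xc_domain_pause");
    else if ( xc_domain_unpause(xch, domid) )
        perror("xc_domain_unpause");

    xc_interface_close(xch);
    return 0;
}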

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */