debuggers.hg

annotate xen/common/domain.c @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation on the virtual disk
file passed through the config file. This validation ensures that we
don't go too far with the initialization like spawn qemu and more
while there could be some potentially fundamental issues.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents e8acb9753ff1
children 700ac6445812
rev   line source
kaf24@1787 1 /******************************************************************************
kaf24@1787 2 * domain.c
kaf24@1787 3 *
kaf24@1787 4 * Generic domain-handling functions.
kaf24@1787 5 */
kaf24@1488 6
kaf24@1248 7 #include <xen/config.h>
kfraser@13560 8 #include <xen/compat.h>
kaf24@1248 9 #include <xen/init.h>
kaf24@1248 10 #include <xen/lib.h>
keir@19688 11 #include <xen/ctype.h>
kaf24@1248 12 #include <xen/errno.h>
kaf24@1248 13 #include <xen/sched.h>
cl349@5285 14 #include <xen/domain.h>
kaf24@1248 15 #include <xen/mm.h>
kaf24@1248 16 #include <xen/event.h>
kaf24@1248 17 #include <xen/time.h>
kaf24@1544 18 #include <xen/console.h>
kaf24@4373 19 #include <xen/softirq.h>
keir@21242 20 #include <xen/tasklet.h>
kaf24@5394 21 #include <xen/domain_page.h>
kaf24@8486 22 #include <xen/rangeset.h>
kaf24@9166 23 #include <xen/guest_access.h>
kaf24@9197 24 #include <xen/hypercall.h>
kaf24@9511 25 #include <xen/delay.h>
kaf24@10989 26 #include <xen/shutdown.h>
kaf24@11029 27 #include <xen/percpu.h>
kfraser@12405 28 #include <xen/multicall.h>
kfraser@14074 29 #include <xen/rcupdate.h>
keir@22442 30 #include <xen/wait.h>
keir@18917 31 #include <acpi/cpufreq/cpufreq.h>
kaf24@5394 32 #include <asm/debugger.h>
kaf24@7234 33 #include <public/sched.h>
keir@20350 34 #include <public/sysctl.h>
kaf24@7199 35 #include <public/vcpu.h>
kfraser@15846 36 #include <xsm/xsm.h>
keir@19342 37 #include <xen/trace.h>
keir@19684 38 #include <xen/tmem.h>
kaf24@919 39
/* Linux config option: propagated to domain0. */
/* xen_processor_pmbits: bitmask of PM features Xen controls (Cx, Px, ...). */
unsigned int xen_processor_pmbits = XEN_PROCESSOR_PM_PX;

/* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */
static bool_t opt_dom0_vcpus_pin;
boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin);

/* Set Xen as the default cpufreq controller. */
enum cpufreq_controller cpufreq_controller = FREQCTL_xen;
/*
 * Parse the "cpufreq=" boot option.  Recognised values:
 *   "dom0-kernel"  - dom0's kernel controls Px states (pins dom0 vcpus),
 *   "none"         - no Px-state control at all,
 *   "xen[,<opts>]" - Xen controls cpufreq; optional opts are forwarded to
 *                    cpufreq_cmdline_parse().
 */
static void __init setup_cpufreq_option(char *str)
{
    char *arg;

    if ( !strcmp(str, "dom0-kernel") )
    {
        xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_dom0_kernel;
        /* dom0 drives Px transitions: its vcpus must stay pinned. */
        opt_dom0_vcpus_pin = 1;
        return;
    }

    if ( !strcmp(str, "none") )
    {
        xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX;
        cpufreq_controller = FREQCTL_none;
        return;
    }

    /* Split "xen,opts" or "xen:opts" into the keyword and its arguments. */
    if ( (arg = strpbrk(str, ",:")) != NULL )
        *arg++ = '\0';

    if ( !strcmp(str, "xen") )
        if ( arg && *arg )
            cpufreq_cmdline_parse(arg);
}
custom_param("cpufreq", setup_cpufreq_option);
keir@17225 78
/* Protect updates/reads (resp.) of domain_list and domain_hash. */
DEFINE_SPINLOCK(domlist_update_lock);
DEFINE_RCU_READ_LOCK(domlist_read_lock);

/* Hash table mapping domid -> struct domain, for fast lookup by ID. */
#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
static struct domain *domain_hash[DOMAIN_HASH_SIZE];
/* Singly-linked list of all (non-idle) domains, kept sorted by domid. */
struct domain *domain_list;

struct domain *dom0;

/* Per-pcpu idle vcpus. */
struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;

/* Placeholder vcpu_info for vcpus beyond the legacy shared-info limit. */
vcpu_info_t dummy_vcpu_info;
kaf24@12062 94 int current_domain_id(void)
kaf24@12062 95 {
kaf24@12062 96 return current->domain->domain_id;
kaf24@12062 97 }
kaf24@12062 98
/*
 * Mark domain @d fully shut down once every vcpu has been paused for
 * shutdown, then notify the observer: the suspend event channel if this is
 * a suspend, otherwise dom0 via VIRQ_DOM_EXC.  Caller must hold
 * d->shutdown_lock.
 */
static void __domain_finalise_shutdown(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(!spin_is_locked(&d->shutdown_lock));

    if ( d->is_shut_down )
        return;

    /* Shutdown is complete only when no vcpu remains unpaused. */
    for_each_vcpu ( d, v )
        if ( !v->paused_for_shutdown )
            return;

    d->is_shut_down = 1;
    if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn )
        evtchn_send(d, d->suspend_evtchn);
    else
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
kfraser@14739 118
/*
 * If @v's domain is shutting down, pause @v for shutdown (cancelling any
 * deferral) and re-check whether the whole domain can now be finalised.
 */
static void vcpu_check_shutdown(struct vcpu *v)
{
    struct domain *d = v->domain;

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        if ( !v->paused_for_shutdown )
            vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
        v->defer_shutdown = 0;
        __domain_finalise_shutdown(d);
    }

    spin_unlock(&d->shutdown_lock);
}
kfraser@14739 136
/*
 * Allocate and initialise vcpu @vcpu_id of domain @d, initially placed on
 * pcpu @cpu_id.  Returns the new vcpu, or NULL on failure (all partially
 * acquired resources are released).  The vcpu is linked into d->vcpu[] and
 * the domain's ordered next_in_list chain before returning.
 */
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    /* Idle-domain vcpu0 may be (re)allocated; all others must be fresh. */
    BUG_ON((!is_idle_domain(d) || vcpu_id) && d->vcpu[vcpu_id]);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;

    spin_lock_init(&v->virq_lock);

    tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);

    if ( is_idle_domain(d) )
    {
        v->runstate.state = RUNSTATE_running;
    }
    else
    {
        /* Guest vcpus start offline until explicitly brought up. */
        v->runstate.state = RUNSTATE_offline;
        v->runstate.state_entry_time = NOW();
        set_bit(_VPF_down, &v->pause_flags);
        /* Legacy vcpus share shared_info; later ones get a dummy slot. */
        v->vcpu_info = ((vcpu_id < XEN_LEGACY_MAX_VCPUS)
                        ? (vcpu_info_t *)&shared_info(d, vcpu_info[vcpu_id])
                        : &dummy_vcpu_info);
        init_waitqueue_vcpu(v);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
    {
        destroy_waitqueue_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    if ( vcpu_initialise(v) != 0 )
    {
        /* Unwind in reverse order of initialisation. */
        sched_destroy_vcpu(v);
        destroy_waitqueue_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
    {
        /* Link after the nearest lower-numbered existing vcpu. */
        int prev_id = v->vcpu_id - 1;
        while ( (prev_id >= 0) && (d->vcpu[prev_id] == NULL) )
            prev_id--;
        BUG_ON(prev_id < 0);
        v->next_in_list = d->vcpu[prev_id]->next_in_list;
        d->vcpu[prev_id]->next_in_list = v;
    }

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    domain_update_node_affinity(d);

    return v;
}
kaf24@10281 202
/* Extra pirq headroom beyond nr_irqs_gsi, per domain type. */
static unsigned int __read_mostly extra_dom0_irqs = 256;
static unsigned int __read_mostly extra_domU_irqs = 32;
/* Parse "extra_guest_irqs=<domU>[,<dom0>]". */
static void __init parse_extra_guest_irqs(const char *s)
{
    if ( isdigit(*s) )
        extra_domU_irqs = simple_strtoul(s, &s, 0);
    if ( *s == ',' && isdigit(*++s) )
        extra_dom0_irqs = simple_strtoul(s, &s, 0);
}
custom_param("extra_guest_irqs", parse_extra_guest_irqs);
keir@19688 213
kfraser@14947 214 struct domain *domain_create(
kfraser@14947 215 domid_t domid, unsigned int domcr_flags, ssidref_t ssidref)
iap10@274 216 {
kaf24@1628 217 struct domain *d, **pd;
keir@21575 218 enum { INIT_xsm = 1u<<0, INIT_watchdog = 1u<<1, INIT_rangeset = 1u<<2,
keir@21575 219 INIT_evtchn = 1u<<3, INIT_gnttab = 1u<<4, INIT_arch = 1u<<5 };
kfraser@15503 220 int init_status = 0;
keir@21258 221 int poolid = CPUPOOLID_NONE;
iap10@274 222
keir@17922 223 if ( (d = alloc_domain_struct()) == NULL )
kaf24@1161 224 return NULL;
iap10@274 225
keir@17922 226 d->domain_id = domid;
keir@17922 227
keir@20350 228 lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid, "Domain");
keir@20350 229
keir@17922 230 if ( xsm_alloc_security_domain(d) != 0 )
keir@17922 231 goto fail;
keir@17922 232 init_status |= INIT_xsm;
keir@17922 233
keir@21575 234 watchdog_domain_init(d);
keir@21575 235 init_status |= INIT_watchdog;
keir@21575 236
keir@17922 237 atomic_set(&d->refcnt, 1);
keir@20350 238 spin_lock_init_prof(d, domain_lock);
keir@20350 239 spin_lock_init_prof(d, page_alloc_lock);
keir@17922 240 spin_lock_init(&d->hypercall_deadlock_mutex);
keir@19170 241 INIT_PAGE_LIST_HEAD(&d->page_list);
keir@19170 242 INIT_PAGE_LIST_HEAD(&d->xenpage_list);
keir@17922 243
keir@21959 244 spin_lock_init(&d->node_affinity_lock);
keir@21959 245
keir@21556 246 spin_lock_init(&d->shutdown_lock);
keir@21556 247 d->shutdown_code = -1;
keir@21556 248
kfraser@12234 249 if ( domcr_flags & DOMCRF_hvm )
kfraser@12234 250 d->is_hvm = 1;
kfraser@12234 251
keir@20391 252 if ( domid == 0 )
keir@20391 253 {
keir@20391 254 d->is_pinned = opt_dom0_vcpus_pin;
keir@20391 255 d->disable_migrate = 1;
keir@20391 256 }
keir@17225 257
keir@21419 258 rangeset_domain_initialise(d);
keir@21419 259 init_status |= INIT_rangeset;
keir@21419 260
keir@21419 261 d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
keir@21419 262 d->irq_caps = rangeset_new(d, "Interrupts", 0);
keir@21419 263 if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
keir@21419 264 goto fail;
keir@21419 265
keir@17922 266 if ( domcr_flags & DOMCRF_dummy )
keir@17922 267 return d;
keir@17922 268
kaf24@8649 269 if ( !is_idle_domain(d) )
kaf24@8649 270 {
kfraser@15846 271 if ( xsm_domain_create(d, ssidref) != 0 )
kfraser@15846 272 goto fail;
kfraser@15846 273
kfraser@14677 274 d->is_paused_by_controller = 1;
kfraser@14677 275 atomic_inc(&d->pause_count);
kfraser@14677 276
keir@20181 277 if ( domid )
keir@20181 278 d->nr_pirqs = nr_irqs_gsi + extra_domU_irqs;
keir@20181 279 else
keir@20181 280 d->nr_pirqs = nr_irqs_gsi + extra_dom0_irqs;
keir@22036 281 if ( d->nr_pirqs > nr_irqs )
keir@22036 282 d->nr_pirqs = nr_irqs;
keir@20181 283
keir@19713 284 d->pirq_to_evtchn = xmalloc_array(u16, d->nr_pirqs);
keir@19713 285 d->pirq_mask = xmalloc_array(
keir@19713 286 unsigned long, BITS_TO_LONGS(d->nr_pirqs));
keir@19713 287 if ( (d->pirq_to_evtchn == NULL) || (d->pirq_mask == NULL) )
keir@19713 288 goto fail;
keir@19713 289 memset(d->pirq_to_evtchn, 0, d->nr_pirqs * sizeof(*d->pirq_to_evtchn));
keir@19713 290 bitmap_zero(d->pirq_mask, d->nr_pirqs);
keir@19713 291
kaf24@8649 292 if ( evtchn_init(d) != 0 )
kfraser@15503 293 goto fail;
kfraser@15503 294 init_status |= INIT_evtchn;
kfraser@14677 295
kaf24@8649 296 if ( grant_table_create(d) != 0 )
kfraser@15503 297 goto fail;
kfraser@15503 298 init_status |= INIT_gnttab;
keir@21258 299
keir@21258 300 poolid = 0;
kaf24@8649 301 }
kaf24@8649 302
keir@16969 303 if ( arch_domain_create(d, domcr_flags) != 0 )
kfraser@15503 304 goto fail;
kfraser@15503 305 init_status |= INIT_arch;
kaf24@8649 306
keir@21258 307 if ( cpupool_add_domain(d, poolid) != 0 )
keir@21258 308 goto fail;
keir@21258 309
kfraser@12284 310 if ( sched_init_domain(d) != 0 )
kfraser@15503 311 goto fail;
kfraser@12284 312
kaf24@8537 313 if ( !is_idle_domain(d) )
kaf24@1161 314 {
kfraser@14074 315 spin_lock(&domlist_update_lock);
kfraser@10280 316 pd = &domain_list; /* NB. domain_list maintained in order of domid. */
kaf24@4836 317 for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
kaf24@4915 318 if ( (*pd)->domain_id > d->domain_id )
kaf24@1161 319 break;
kaf24@4836 320 d->next_in_list = *pd;
kfraser@10280 321 d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
kfraser@14074 322 rcu_assign_pointer(*pd, d);
kfraser@14074 323 rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
kfraser@14074 324 spin_unlock(&domlist_update_lock);
kaf24@1161 325 }
iap10@274 326
kaf24@1628 327 return d;
kaf24@8497 328
kfraser@15503 329 fail:
kfraser@15856 330 d->is_dying = DOMDYING_dead;
kfraser@15503 331 atomic_set(&d->refcnt, DOMAIN_DESTROYED);
kfraser@15503 332 if ( init_status & INIT_arch )
kfraser@15503 333 arch_domain_destroy(d);
kfraser@15503 334 if ( init_status & INIT_gnttab )
kaf24@8649 335 grant_table_destroy(d);
kfraser@15503 336 if ( init_status & INIT_evtchn )
keir@20768 337 {
kaf24@8649 338 evtchn_destroy(d);
keir@20768 339 evtchn_destroy_final(d);
keir@20768 340 }
keir@17922 341 if ( init_status & INIT_rangeset )
keir@17922 342 rangeset_domain_destroy(d);
keir@21575 343 if ( init_status & INIT_watchdog )
keir@21575 344 watchdog_domain_destroy(d);
keir@17922 345 if ( init_status & INIT_xsm )
keir@17922 346 xsm_free_security_domain(d);
keir@19713 347 xfree(d->pirq_mask);
keir@19713 348 xfree(d->pirq_to_evtchn);
keir@17922 349 free_domain_struct(d);
kaf24@8497 350 return NULL;
iap10@274 351 }
iap10@274 352
kaf24@414 353
/*
 * Recompute @d's NUMA node affinity as the set of online nodes whose cpus
 * intersect the union of all its vcpus' cpu affinities.
 */
void domain_update_node_affinity(struct domain *d)
{
    cpumask_t cpumask = CPU_MASK_NONE;
    nodemask_t nodemask = NODE_MASK_NONE;
    struct vcpu *v;
    unsigned int node;

    spin_lock(&d->node_affinity_lock);

    /* Union of every vcpu's cpu affinity mask. */
    for_each_vcpu ( d, v )
        cpus_or(cpumask, cpumask, v->cpu_affinity);

    for_each_online_node ( node )
        if ( cpus_intersects(node_to_cpumask(node), cpumask) )
            node_set(node, nodemask);

    d->node_affinity = nodemask;
    spin_unlock(&d->node_affinity_lock);
}
keir@21959 373
keir@21959 374
/*
 * Look up domain @dom and take a general reference on it.  Returns NULL if
 * the domain does not exist or its refcount could not be raised (i.e. it
 * is being destroyed).  Caller must drop the reference with put_domain().
 */
struct domain *get_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
        {
            /* get_domain() fails if the domain is already dying. */
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    return d;
}
iap10@274 397
iap10@274 398
/*
 * Look up domain @dom and return it under an RCU read lock (no refcount
 * taken).  Returns NULL if not found.  Caller must release with
 * rcu_unlock_domain().
 */
struct domain *rcu_lock_domain_by_id(domid_t dom)
{
    struct domain *d = NULL;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
        {
            rcu_lock_domain(d);
            break;
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    return d;
}
kfraser@14075 420
/*
 * RCU-lock domain @dom (or the current domain for DOMID_SELF) into *@d,
 * enforcing that the caller is privileged over the target.
 * Returns 0 on success, -ESRCH if no such domain, -EPERM if the caller
 * lacks privilege (in which case the lock is released before returning).
 */
int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d)
{
    if ( dom == DOMID_SELF )
    {
        *d = rcu_lock_current_domain();
        return 0;
    }

    if ( (*d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    if ( !IS_PRIV_FOR(current->domain, *d) )
    {
        rcu_unlock_domain(*d);
        return -EPERM;
    }

    return 0;
}
kfraser@14075 440
/*
 * Advance domain @d through its destruction state machine
 * (alive -> dying -> dead).  Resource relinquishment is preemptible:
 * returns -EAGAIN when the caller should retry to continue teardown.
 * Returns -EINVAL if a domain attempts to kill itself, 0 otherwise.
 */
int domain_kill(struct domain *d)
{
    int rc = 0;

    if ( d == current->domain )
        return -EINVAL;

    /* Protected by domctl_lock. */
    switch ( d->is_dying )
    {
    case DOMDYING_alive:
        domain_pause(d);
        d->is_dying = DOMDYING_dying;
        /* Wait for any in-flight holders of domain_lock to drain. */
        spin_barrier(&d->domain_lock);
        evtchn_destroy(d);
        gnttab_release_mappings(d);
        tmem_destroy(d->tmem);
        d->tmem = NULL;
        /* fallthrough */
    case DOMDYING_dying:
        rc = domain_relinquish_resources(d);
        if ( rc != 0 )
        {
            /* Only -EAGAIN (continuation) is a legitimate failure here. */
            BUG_ON(rc != -EAGAIN);
            break;
        }
        d->is_dying = DOMDYING_dead;
        put_domain(d);
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);
        /* fallthrough */
    case DOMDYING_dead:
        break;
    }

    return rc;
}
kaf24@414 477
kaf24@414 478
/*
 * Mark domain @d as crashed and shut it down with SHUTDOWN_crash.
 * Diagnostics are printed unless the domain is already shutting down;
 * register state is dumped only for a self-crash (current's regs are
 * the crashing context).
 */
void __domain_crash(struct domain *d)
{
    if ( d->is_shutting_down )
    {
        /* Print nothing: the domain is already shutting down. */
    }
    else if ( d == current->domain )
    {
        printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
               d->domain_id, current->vcpu_id, smp_processor_id());
        show_execution_state(guest_cpu_user_regs());
    }
    else
    {
        printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
               d->domain_id, current->domain->domain_id, smp_processor_id());
    }

    domain_shutdown(d, SHUTDOWN_crash);
}
cl349@4377 499
cl349@4377 500
/*
 * Crash the current domain and never return: after cancelling any shutdown
 * deferral, spin in the softirq loop until this vcpu is descheduled.
 */
void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);

    vcpu_end_shutdown_deferral(current);

    /* Never returns: wait here until the scheduler takes us off the cpu. */
    for ( ; ; )
        do_softirq();
}
kaf24@1485 510
sos22@1989 511
/*
 * Shut down domain @d with shutdown code @reason.  For dom0 the request
 * is forwarded to dom0_shutdown().  Vcpus that registered a shutdown
 * deferral are left running, except for crashes which override deferrals.
 */
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    spin_lock(&d->shutdown_lock);

    /* Only the first requested shutdown code is recorded. */
    if ( d->shutdown_code == -1 )
        d->shutdown_code = reason;
    reason = d->shutdown_code;

    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    if ( d->is_shutting_down )
    {
        spin_unlock(&d->shutdown_lock);
        return;
    }

    d->is_shutting_down = 1;

    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */

    for_each_vcpu ( d, v )
    {
        if ( reason == SHUTDOWN_crash )
            v->defer_shutdown = 0; /* a crash overrides any deferral */
        else if ( v->defer_shutdown )
            continue;
        vcpu_pause_nosync(v);
        v->paused_for_shutdown = 1;
    }

    __domain_finalise_shutdown(d);

    spin_unlock(&d->shutdown_lock);
}
kfraser@14739 549
/*
 * Clear @d's shutdown state and unpause any vcpus that were paused for
 * shutdown, resuming normal execution.
 */
void domain_resume(struct domain *d)
{
    struct vcpu *v;

    /*
     * Some code paths assume that shutdown status does not get reset under
     * their feet (e.g., some assertions make this assumption).
     */
    domain_pause(d);

    spin_lock(&d->shutdown_lock);

    d->is_shutting_down = d->is_shut_down = 0;
    d->shutdown_code = -1; /* back to "no shutdown requested" */

    for_each_vcpu ( d, v )
    {
        if ( v->paused_for_shutdown )
            vcpu_unpause(v);
        v->paused_for_shutdown = 0;
    }

    spin_unlock(&d->shutdown_lock);

    domain_unpause(d);
}
kfraser@14739 576
/*
 * Request that @v not be paused by a concurrent domain shutdown.
 * Returns the resulting deferral status: 1 if the deferral holds, 0 if a
 * shutdown raced in and the deferral was cancelled.
 */
int vcpu_start_shutdown_deferral(struct vcpu *v)
{
    if ( v->defer_shutdown )
        return 1;

    v->defer_shutdown = 1;
    smp_mb(); /* set deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);

    return v->defer_shutdown;
}
kfraser@14739 589
/*
 * Drop @v's shutdown deferral; if the domain has meanwhile begun shutting
 * down, pause this vcpu for shutdown now.
 */
void vcpu_end_shutdown_deferral(struct vcpu *v)
{
    v->defer_shutdown = 0;
    smp_mb(); /* clear deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);
}
tlh20@461 597
/*
 * Pause the current domain on behalf of a debugger and notify dom0 via
 * VIRQ_DEBUGGER.  Uses nosync sleeps since the caller is one of the
 * domain's own vcpus.
 */
void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    atomic_inc(&d->pause_count);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}
kaf24@5356 612
kfraser@14224 613 /* Complete domain destroy after RCU readers are not holding old references. */
kfraser@14074 614 static void complete_domain_destroy(struct rcu_head *head)
kfraser@14074 615 {
kfraser@14074 616 struct domain *d = container_of(head, struct domain, rcu);
kfraser@15226 617 struct vcpu *v;
kfraser@15226 618 int i;
kfraser@15226 619
keir@19826 620 for ( i = d->max_vcpus - 1; i >= 0; i-- )
kfraser@15226 621 {
kfraser@15226 622 if ( (v = d->vcpu[i]) == NULL )
kfraser@15226 623 continue;
keir@21219 624 tasklet_kill(&v->continue_hypercall_tasklet);
kfraser@15226 625 vcpu_destroy(v);
kfraser@15226 626 sched_destroy_vcpu(v);
keir@22442 627 destroy_waitqueue_vcpu(v);
kfraser@15226 628 }
kfraser@14074 629
kfraser@14074 630 grant_table_destroy(d);
kfraser@14074 631
kfraser@14074 632 arch_domain_destroy(d);
kfraser@14074 633
keir@21575 634 watchdog_domain_destroy(d);
keir@21575 635
keir@18944 636 rangeset_domain_destroy(d);
keir@18944 637
keir@21258 638 cpupool_rm_domain(d);
keir@21258 639
kfraser@15226 640 sched_destroy_domain(d);
kfraser@15226 641
keir@18886 642 /* Free page used by xen oprofile buffer. */
keir@18886 643 free_xenoprof_pages(d);
keir@18886 644
keir@19826 645 for ( i = d->max_vcpus - 1; i >= 0; i-- )
kfraser@15245 646 if ( (v = d->vcpu[i]) != NULL )
kfraser@15245 647 free_vcpu_struct(v);
kfraser@15245 648
keir@17357 649 if ( d->target != NULL )
keir@16894 650 put_domain(d->target);
keir@16894 651
keir@20768 652 evtchn_destroy_final(d);
keir@20768 653
keir@19713 654 xfree(d->pirq_mask);
keir@19713 655 xfree(d->pirq_to_evtchn);
keir@19713 656
keir@17922 657 xsm_free_security_domain(d);
keir@17922 658 free_domain_struct(d);
kfraser@14074 659
kfraser@14074 660 send_guest_global_virq(dom0, VIRQ_DOM_EXC);
kfraser@14074 661 }
kaf24@5356 662
iap10@274 663 /* Release resources belonging to task @p. */
kaf24@8649 664 void domain_destroy(struct domain *d)
iap10@274 665 {
kaf24@1580 666 struct domain **pd;
kaf24@2382 667 atomic_t old, new;
kaf24@1543 668
kfraser@14677 669 BUG_ON(!d->is_dying);
kaf24@1543 670
kaf24@8649 671 /* May be already destroyed, or get_domain() can race us. */
kaf24@2382 672 _atomic_set(old, 0);
kaf24@8649 673 _atomic_set(new, DOMAIN_DESTROYED);
kaf24@2382 674 old = atomic_compareandswap(old, new, &d->refcnt);
kaf24@2382 675 if ( _atomic_read(old) != 0 )
kaf24@1543 676 return;
kaf24@406 677
kaf24@1543 678 /* Delete from task list and task hashtable. */
keir@19342 679 TRACE_1D(TRC_SCHED_DOM_REM, d->domain_id);
kfraser@14074 680 spin_lock(&domlist_update_lock);
kaf24@2844 681 pd = &domain_list;
kaf24@1580 682 while ( *pd != d )
kaf24@4836 683 pd = &(*pd)->next_in_list;
kfraser@14074 684 rcu_assign_pointer(*pd, d->next_in_list);
kaf24@4915 685 pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
kaf24@1580 686 while ( *pd != d )
kaf24@4836 687 pd = &(*pd)->next_in_hashbucket;
kfraser@14074 688 rcu_assign_pointer(*pd, d->next_in_hashbucket);
kfraser@14074 689 spin_unlock(&domlist_update_lock);
kaf24@8486 690
kfraser@14224 691 /* Schedule RCU asynchronous completion of domain destroy. */
kfraser@14074 692 call_rcu(&d->rcu, complete_domain_destroy);
iap10@274 693 }
iap10@274 694
/*
 * Pause vcpu @v and wait until it is fully descheduled.  Must not be
 * called on the current vcpu (it could never complete the sync).
 */
void vcpu_pause(struct vcpu *v)
{
    ASSERT(v != current);
    atomic_inc(&v->pause_count);
    vcpu_sleep_sync(v);
}
cl349@5284 701
/* Pause vcpu @v without waiting for it to be descheduled. */
void vcpu_pause_nosync(struct vcpu *v)
{
    atomic_inc(&v->pause_count);
    vcpu_sleep_nosync(v);
}
ack@13046 707
/* Drop one pause reference on @v; wake it when the count hits zero. */
void vcpu_unpause(struct vcpu *v)
{
    if ( atomic_dec_and_test(&v->pause_count) )
        vcpu_wake(v);
}
kfraser@10679 713
/*
 * Pause every vcpu of domain @d, synchronously waiting for each to be
 * descheduled.  Must not be called on the caller's own domain.
 */
void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    atomic_inc(&d->pause_count);

    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}
cl349@5284 725
/* Drop one pause reference on @d; wake all vcpus when it reaches zero. */
void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    if ( atomic_dec_and_test(&d->pause_count) )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}
cl349@5284 734
/*
 * Toolstack-initiated pause of @d.  At most one controller pause reference
 * is held: if one already exists, the extra reference is dropped again.
 */
void domain_pause_by_systemcontroller(struct domain *d)
{
    domain_pause(d);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}
cl349@5284 741
/* Release the toolstack's pause reference on @d, if one is held. */
void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}
cl349@5284 747
/*
 * Load initial guest context @ctxt into vcpu @vcpuid of domain @d.
 * The vcpu must not already be initialised.  Returns the result of
 * arch_set_info_guest() (0 on success).
 */
int boot_vcpu(struct domain *d, int vcpuid, vcpu_guest_context_u ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(v->is_initialised);

    return arch_set_info_guest(v, ctxt);
}
kaf24@7199 756
/*
 * Reset vcpu @v to its pristine (down, uninitialised) state, clearing
 * architectural state, FPU flags, polling state and pending async
 * exceptions.  The vcpu is paused around the reset.
 */
void vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;

    vcpu_pause(v);
    domain_lock(d);

    arch_vcpu_reset(v);

    /* The vcpu comes back offline; it needs VCPUOP_initialise/up again. */
    set_bit(_VPF_down, &v->pause_flags);

    clear_bit(v->vcpu_id, d->poll_mask);
    v->poll_evtchn = 0;

    v->fpu_initialised = 0;
    v->fpu_dirtied     = 0;
    v->is_initialised  = 0;
#ifdef VCPU_TRAP_LAST
    v->async_exception_mask = 0;
    memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
#endif
    cpus_clear(v->cpu_affinity_tmp);
    clear_bit(_VPF_blocked, &v->pause_flags);

    domain_unlock(v->domain);
    vcpu_unpause(v);
}
kfraser@13560 784
kfraser@13560 785
/*
 * VCPUOP_* hypercall dispatcher: operate on vcpu @vcpuid of the calling
 * domain.  @arg is a guest handle whose type depends on @cmd.  Returns 0
 * or a positive result on success, -errno on failure; unknown commands
 * fall through to arch_do_vcpu_op().
 */
long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        /* A vcpu with only dummy vcpu_info cannot be initialised. */
        if ( v->vcpu_info == &dummy_vcpu_info )
            return -EINVAL;

        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
            return -ENOMEM;

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            return -EFAULT;
        }

        domain_lock(d);
        rc = -EEXIST;
        if ( !v->is_initialised )
            rc = boot_vcpu(d, vcpuid, ctxt);
        domain_unlock(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        if ( !v->is_initialised )
            return -EINVAL;

        /* Wake only on the down->up transition. */
        if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
            vcpu_wake(v);

        break;

    case VCPUOP_down:
        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VPF_down, &v->pause_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    case VCPUOP_set_periodic_timer:
    {
        struct vcpu_set_periodic_timer set;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        /* Reject periods below 1ms to bound the timer-interrupt rate. */
        if ( set.period_ns < MILLISECS(1) )
            return -EINVAL;

        v->periodic_period = set.period_ns;
        vcpu_force_reschedule(v);

        break;
    }

    case VCPUOP_stop_periodic_timer:
        v->periodic_period = 0;
        vcpu_force_reschedule(v);
        break;

    case VCPUOP_set_singleshot_timer:
    {
        struct vcpu_set_singleshot_timer set;

        /* Only a vcpu may arm its own single-shot timer. */
        if ( v != current )
            return -EINVAL;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        /* Optionally reject timeouts already in the past. */
        if ( (set.flags & VCPU_SSHOTTMR_future) &&
             (set.timeout_abs_ns < NOW()) )
            return -ETIME;

        migrate_timer(&v->singleshot_timer, smp_processor_id());
        set_timer(&v->singleshot_timer, set.timeout_abs_ns);

        break;
    }

    case VCPUOP_stop_singleshot_timer:
        if ( v != current )
            return -EINVAL;

        stop_timer(&v->singleshot_timer);

        break;

#ifdef VCPU_TRAP_NMI
    case VCPUOP_send_nmi:
        if ( !guest_handle_is_null(arg) )
            return -EINVAL;

        /* Kick only if an NMI was not already pending. */
        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);

        break;
#endif

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}
cl349@2964 917
cl349@2486 918 long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
cl349@2486 919 {
cl349@2486 920 if ( type > MAX_VMASST_TYPE )
cl349@2486 921 return -EINVAL;
cl349@2486 922
cl349@2486 923 switch ( cmd )
cl349@2486 924 {
cl349@2486 925 case VMASST_CMD_enable:
cl349@2486 926 set_bit(type, &p->vm_assist);
cl349@2486 927 return 0;
cl349@2486 928 case VMASST_CMD_disable:
cl349@2486 929 clear_bit(type, &p->vm_assist);
cl349@2486 930 return 0;
cl349@2486 931 }
cl349@2486 932
cl349@2486 933 return -ENOSYS;
cl349@2486 934 }
kaf24@3952 935
keir@21212 936 struct migrate_info {
keir@21212 937 long (*func)(void *data);
keir@21212 938 void *data;
keir@21212 939 struct vcpu *vcpu;
keir@21219 940 unsigned int cpu;
keir@21212 941 unsigned int nest;
keir@21212 942 };
keir@21212 943
keir@21212 944 static DEFINE_PER_CPU(struct migrate_info *, continue_info);
keir@21212 945
keir@21212 946 static void continue_hypercall_tasklet_handler(unsigned long _info)
keir@21212 947 {
keir@21212 948 struct migrate_info *info = (struct migrate_info *)_info;
keir@21212 949 struct vcpu *v = info->vcpu;
keir@21212 950
keir@21244 951 /* Wait for vcpu to sleep so that we can access its register state. */
keir@21244 952 vcpu_sleep_sync(v);
keir@21212 953
keir@21212 954 this_cpu(continue_info) = info;
keir@21219 955 return_reg(v) = (info->cpu == smp_processor_id())
keir@21219 956 ? info->func(info->data) : -EINVAL;
keir@21212 957 this_cpu(continue_info) = NULL;
keir@21212 958
keir@21212 959 if ( info->nest-- == 0 )
keir@21212 960 {
keir@21212 961 xfree(info);
keir@21212 962 vcpu_unpause(v);
keir@21219 963 put_domain(v->domain);
keir@21212 964 }
keir@21212 965 }
keir@21212 966
keir@21223 967 int continue_hypercall_on_cpu(
keir@21223 968 unsigned int cpu, long (*func)(void *data), void *data)
keir@21212 969 {
keir@21212 970 struct migrate_info *info;
keir@21212 971
keir@21219 972 if ( (cpu >= NR_CPUS) || !cpu_online(cpu) )
keir@21219 973 return -EINVAL;
keir@21219 974
keir@21212 975 info = this_cpu(continue_info);
keir@21212 976 if ( info == NULL )
keir@21212 977 {
keir@21219 978 struct vcpu *curr = current;
keir@21219 979
keir@21212 980 info = xmalloc(struct migrate_info);
keir@21212 981 if ( info == NULL )
keir@21212 982 return -ENOMEM;
keir@21212 983
keir@21212 984 info->vcpu = curr;
keir@21212 985 info->nest = 0;
keir@21212 986
keir@21219 987 tasklet_kill(
keir@21219 988 &curr->continue_hypercall_tasklet);
keir@21212 989 tasklet_init(
keir@21212 990 &curr->continue_hypercall_tasklet,
keir@21212 991 continue_hypercall_tasklet_handler,
keir@21212 992 (unsigned long)info);
keir@21212 993
keir@21219 994 get_knownalive_domain(curr->domain);
keir@21212 995 vcpu_pause_nosync(curr);
keir@21212 996 }
keir@21212 997 else
keir@21212 998 {
keir@21212 999 BUG_ON(info->nest != 0);
keir@21212 1000 info->nest++;
keir@21212 1001 }
keir@21212 1002
keir@21212 1003 info->func = func;
keir@21212 1004 info->data = data;
keir@21219 1005 info->cpu = cpu;
keir@21212 1006
keir@21219 1007 tasklet_schedule_on_cpu(&info->vcpu->continue_hypercall_tasklet, cpu);
keir@21212 1008
keir@21212 1009 /* Dummy return value will be overwritten by tasklet. */
keir@21212 1010 return 0;
keir@21212 1011 }
keir@21212 1012
kaf24@3952 1013 /*
kaf24@3952 1014 * Local variables:
kaf24@3952 1015 * mode: C
kaf24@3952 1016 * c-set-style: "BSD"
kaf24@3952 1017 * c-basic-offset: 4
kaf24@3952 1018 * tab-width: 4
kaf24@3952 1019 * indent-tabs-mode: nil
kaf24@4026 1020 * End:
kaf24@3952 1021 */