annotate xen/common/cpupool.c @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation of the virtual disk
file passed through the config file. The validation ensures that we
do not get as far as spawning qemu and the like while the
configuration may still have fundamental problems.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents aee9a8f63aae
children
/******************************************************************************
 * cpupool.c
 *
 * Generic cpupool-handling functions.
 *
 * Cpupools are a feature to have configurable scheduling domains. Each
 * cpupool runs its own scheduler on a dedicated set of physical cpus.
 * A domain is bound to one cpupool at any time, but it can be moved to
 * another cpupool.
 *
 * (C) 2009, Juergen Gross, Fujitsu Technology Solutions
 */

#include <xen/lib.h>
#include <xen/init.h>
#include <xen/cpumask.h>
#include <xen/percpu.h>
#include <xen/sched.h>
#include <xen/sched-if.h>
#include <xen/cpu.h>

#define for_each_cpupool(ptr)    \
    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))

struct cpupool *cpupool0;            /* Initial cpupool with Dom0 */
cpumask_t cpupool_free_cpus;         /* cpus not in any cpupool */

static struct cpupool *cpupool_list; /* linked list, sorted by poolid */

static int cpupool_moving_cpu = -1;
static struct cpupool *cpupool_cpu_moving = NULL;
static cpumask_t cpupool_locked_cpus = CPU_MASK_NONE;

static DEFINE_SPINLOCK(cpupool_lock);

DEFINE_PER_CPU(struct cpupool *, cpupool);

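/*
 * debug output is compiled out by default; the no-op macro discards its
 * arguments without evaluating them.
 */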
#define cpupool_dprintk(x...) ((void)0)

static struct cpupool *alloc_cpupool_struct(void)
{
    return xmalloc(struct cpupool);
}

static void free_cpupool_struct(struct cpupool *c)
{
    xfree(c);
}

/*
 * find a cpupool by its id. to be called with cpupool lock held.
 * if exact is not specified, the first cpupool with an id larger than or
 * equal to the searched id is returned.
 * returns NULL if not found.
 */
static struct cpupool *cpupool_find_by_id(int id, int exact)
{
    struct cpupool **q;

    ASSERT(spin_is_locked(&cpupool_lock));

    for_each_cpupool(q)
        if ( (*q)->cpupool_id >= id )
            break;

    /* end of list reached without finding an id >= the searched one */
    if ( *q == NULL )
        return NULL;

    return (!exact || ((*q)->cpupool_id == id)) ? *q : NULL;
}

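/*
 * look up a cpupool and take a reference on it. with exact == 0 the first
 * pool with an id >= poolid is returned. the caller must drop the
 * reference via cpupool_put().
 */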
static struct cpupool *__cpupool_get_by_id(int poolid, int exact)
{
    struct cpupool *c;

    spin_lock(&cpupool_lock);
    c = cpupool_find_by_id(poolid, exact);
    if ( c != NULL )
        atomic_inc(&c->refcnt);
    spin_unlock(&cpupool_lock);
    return c;
}

struct cpupool *cpupool_get_by_id(int poolid)
{
    return __cpupool_get_by_id(poolid, 1);
}

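/*
 * drop a reference to a cpupool. when the last reference is gone the
 * scheduler instance is released and the pool structure is freed.
 */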
void cpupool_put(struct cpupool *pool)
{
    if ( !atomic_dec_and_test(&pool->refcnt) )
        return;
    scheduler_free(pool->sched);
    free_cpupool_struct(pool);
}

/*
 * create a new cpupool with specified poolid and scheduler
 * returns pointer to new cpupool structure if okay, NULL otherwise
 * possible failures:
 * - no memory
 * - poolid already used
 * - unknown scheduler
 */
static struct cpupool *cpupool_create(
    int poolid, unsigned int sched_id, int *perr)
{
    struct cpupool *c;
    struct cpupool **q;
    int last = 0;

    *perr = -ENOMEM;
    if ( (c = alloc_cpupool_struct()) == NULL )
        return NULL;
    memset(c, 0, sizeof(*c));

    /* One reference for caller, one reference for cpupool_destroy(). */
    atomic_set(&c->refcnt, 2);

    cpupool_dprintk("cpupool_create(pool=%d,sched=%u)\n", poolid, sched_id);

    spin_lock(&cpupool_lock);

    for_each_cpupool(q)
    {
        last = (*q)->cpupool_id;
        if ( (poolid != CPUPOOLID_NONE) && (last >= poolid) )
            break;
    }
    if ( *q != NULL )
    {
        if ( (*q)->cpupool_id == poolid )
        {
            spin_unlock(&cpupool_lock);
            free_cpupool_struct(c);
            *perr = -EEXIST;
            return NULL;
        }
        c->next = *q;
    }

    c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
    if ( poolid == 0 )
    {
        c->sched = scheduler_get_default();
    }
    else
    {
        c->sched = scheduler_alloc(sched_id, perr);
        if ( c->sched == NULL )
        {
            spin_unlock(&cpupool_lock);
            free_cpupool_struct(c);
            return NULL;
        }
    }

    *q = c;

    spin_unlock(&cpupool_lock);

    cpupool_dprintk("Created cpupool %d with scheduler %s (%s)\n",
                    c->cpupool_id, c->sched->name, c->sched->opt_name);

    *perr = 0;
    return c;
}

/*
 * destroys the given cpupool
 * returns 0 on success, a negative error code otherwise
 * possible failures:
 * - pool still in use
 * - cpus still assigned to pool
 * - pool not in list
 */
static int cpupool_destroy(struct cpupool *c)
{
    struct cpupool **q;

    spin_lock(&cpupool_lock);
    for_each_cpupool(q)
        if ( *q == c )
            break;
    if ( *q != c )
    {
        spin_unlock(&cpupool_lock);
        return -ENOENT;
    }
    if ( (c->n_dom != 0) || cpus_weight(c->cpu_valid) )
    {
        spin_unlock(&cpupool_lock);
        return -EBUSY;
    }
    *q = c->next;
    spin_unlock(&cpupool_lock);

    cpupool_dprintk("cpupool_destroy(pool=%d)\n", c->cpupool_id);

    cpupool_put(c);

    return 0;
}

/*
 * assign a specific cpu to a cpupool
 * cpupool_lock must be held
 */
static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
{
    if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
        return -EBUSY;
    per_cpu(cpupool, cpu) = c;
    schedule_cpu_switch(cpu, c);
    cpu_clear(cpu, cpupool_free_cpus);
    if ( cpupool_moving_cpu == cpu )
    {
        cpupool_moving_cpu = -1;
        cpupool_put(cpupool_cpu_moving);
        cpupool_cpu_moving = NULL;
    }
    cpu_set(cpu, c->cpu_valid);
    return 0;
}

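/*
 * helper for the second phase of cpu unassignment. it is scheduled via
 * continue_hypercall_on_cpu() on a cpu other than the one being removed,
 * so cpu_disable_scheduler() never runs on the departing cpu itself.
 */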
static long cpupool_unassign_cpu_helper(void *info)
{
    int cpu = cpupool_moving_cpu;
    long ret;

    spin_lock(&cpupool_lock);
    ret = cpu_disable_scheduler(cpu);
    cpu_set(cpu, cpupool_free_cpus);
    if ( !ret )
    {
        schedule_cpu_switch(cpu, NULL);
        per_cpu(cpupool, cpu) = NULL;
        cpupool_moving_cpu = -1;
        cpupool_put(cpupool_cpu_moving);
        cpupool_cpu_moving = NULL;
    }
    spin_unlock(&cpupool_lock);

    cpupool_dprintk("cpupool_unassign_cpu(cpu=%d) ret %ld\n", cpu, ret);

    return ret;
}

/*
 * unassign a specific cpu from a cpupool
 * we must be sure not to run on the cpu to be unassigned! to achieve this
 * the main functionality is performed via continue_hypercall_on_cpu on a
 * specific cpu.
 * if the cpu to be removed is the last one of the cpupool no active domain
 * must be bound to the cpupool. dying domains are moved to cpupool0 as they
 * might be zombies.
 * possible failures:
 * - last cpu and still active domains in cpupool
 * - cpu just being unplugged
 */
int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
{
    int work_cpu;
    int ret;
    struct domain *d;

    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
                    c->cpupool_id, cpu);

    spin_lock(&cpupool_lock);
    ret = -EBUSY;
    if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
        goto out;
    if ( cpu_isset(cpu, cpupool_locked_cpus) )
        goto out;

    ret = 0;
    if ( !cpu_isset(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
        goto out;

    if ( (c->n_dom > 0) && (cpus_weight(c->cpu_valid) == 1) &&
         (cpu != cpupool_moving_cpu) )
    {
        for_each_domain(d)
        {
            if ( d->cpupool != c )
                continue;
            if ( !d->is_dying )
            {
                ret = -EBUSY;
                break;
            }
            c->n_dom--;
            ret = sched_move_domain(d, cpupool0);
            if ( ret )
            {
                c->n_dom++;
                break;
            }
            cpupool0->n_dom++;
        }
        if ( ret )
            goto out;
    }
    cpupool_moving_cpu = cpu;
    atomic_inc(&c->refcnt);
    cpupool_cpu_moving = c;
    cpu_clear(cpu, c->cpu_valid);
    spin_unlock(&cpupool_lock);

    work_cpu = smp_processor_id();
    if ( work_cpu == cpu )
    {
        work_cpu = first_cpu(cpupool0->cpu_valid);
        if ( work_cpu == cpu )
            work_cpu = next_cpu(cpu, cpupool0->cpu_valid);
    }
    return continue_hypercall_on_cpu(work_cpu, cpupool_unassign_cpu_helper, c);

 out:
    spin_unlock(&cpupool_lock);
    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
                    c->cpupool_id, cpu, ret);
    return ret;
}

/*
 * add a new domain to a cpupool
 * possible failures:
 * - pool does not exist
 * - no cpu assigned to pool
 */
int cpupool_add_domain(struct domain *d, int poolid)
{
    struct cpupool *c;
    int rc = 1;
    int n_dom = 0;

    if ( poolid == CPUPOOLID_NONE )
        return 0;
    spin_lock(&cpupool_lock);
    c = cpupool_find_by_id(poolid, 1);
    if ( (c != NULL) && cpus_weight(c->cpu_valid) )
    {
        c->n_dom++;
        n_dom = c->n_dom;
        d->cpupool = c;
        rc = 0;
    }
    spin_unlock(&cpupool_lock);
    cpupool_dprintk("cpupool_add_domain(dom=%d,pool=%d) n_dom %d rc %d\n",
                    d->domain_id, poolid, n_dom, rc);
    return rc;
}

/*
 * remove a domain from a cpupool
 */
void cpupool_rm_domain(struct domain *d)
{
    int cpupool_id;
    int n_dom;

    if ( d->cpupool == NULL )
        return;
    spin_lock(&cpupool_lock);
    cpupool_id = d->cpupool->cpupool_id;
    d->cpupool->n_dom--;
    n_dom = d->cpupool->n_dom;
    d->cpupool = NULL;
    spin_unlock(&cpupool_lock);
    cpupool_dprintk("cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
                    d->domain_id, cpupool_id, n_dom);
    return;
}

/*
 * called to add a new cpu to pool admin
 * we add a hotplugged cpu to cpupool0 so that it can then be added to dom0
 */
static void cpupool_cpu_add(unsigned int cpu)
{
    spin_lock(&cpupool_lock);
    cpu_clear(cpu, cpupool_locked_cpus);
    cpu_set(cpu, cpupool_free_cpus);
    cpupool_assign_cpu_locked(cpupool0, cpu);
    spin_unlock(&cpupool_lock);
}

/*
 * called to remove a cpu from pool admin
 * the cpu to be removed is locked to avoid removing it from dom0
 * returns failure if not in pool0
 */
static int cpupool_cpu_remove(unsigned int cpu)
{
    int ret = 0;

    spin_lock(&cpupool_lock);
    if ( !cpu_isset(cpu, cpupool0->cpu_valid) )
        ret = -EBUSY;
    else
        cpu_set(cpu, cpupool_locked_cpus);
    spin_unlock(&cpupool_lock);

    return ret;
}

/*
 * do cpupool related sysctl operations
 */
int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
{
    int ret;
    struct cpupool *c;

    switch ( op->op )
    {

    case XEN_SYSCTL_CPUPOOL_OP_CREATE:
    {
        int poolid;

        poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ?
            CPUPOOLID_NONE : op->cpupool_id;
        c = cpupool_create(poolid, op->sched_id, &ret);
        if ( c != NULL )
        {
            op->cpupool_id = c->cpupool_id;
            cpupool_put(c);
        }
    }
    break;

    case XEN_SYSCTL_CPUPOOL_OP_DESTROY:
    {
        c = cpupool_get_by_id(op->cpupool_id);
        ret = -ENOENT;
        if ( c == NULL )
            break;
        ret = cpupool_destroy(c);
        cpupool_put(c);
    }
    break;

    case XEN_SYSCTL_CPUPOOL_OP_INFO:
    {
        c = __cpupool_get_by_id(op->cpupool_id, 0);
        ret = -ENOENT;
        if ( c == NULL )
            break;
        op->cpupool_id = c->cpupool_id;
        op->sched_id = c->sched->sched_id;
        op->n_dom = c->n_dom;
        ret = cpumask_to_xenctl_cpumap(&op->cpumap, &c->cpu_valid);
        cpupool_put(c);
    }
    break;

    case XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
    {
        unsigned cpu;

        cpu = op->cpu;
        cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d)\n",
                        op->cpupool_id, cpu);
        spin_lock(&cpupool_lock);
        if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
            cpu = first_cpu(cpupool_free_cpus);
        ret = -EINVAL;
        if ( cpu >= NR_CPUS )
            goto addcpu_out;
        ret = -EBUSY;
        if ( !cpu_isset(cpu, cpupool_free_cpus) )
            goto addcpu_out;
        c = cpupool_find_by_id(op->cpupool_id, 0);
        ret = -ENOENT;
        if ( c == NULL )
            goto addcpu_out;
        ret = cpupool_assign_cpu_locked(c, cpu);
    addcpu_out:
        spin_unlock(&cpupool_lock);
        cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
                        op->cpupool_id, cpu, ret);
    }
    break;

    case XEN_SYSCTL_CPUPOOL_OP_RMCPU:
    {
        unsigned cpu;

        c = __cpupool_get_by_id(op->cpupool_id, 0);
        ret = -ENOENT;
        if ( c == NULL )
            break;
        cpu = op->cpu;
        if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
            cpu = last_cpu(c->cpu_valid);
        ret = (cpu < NR_CPUS) ? cpupool_unassign_cpu(c, cpu) : -EINVAL;
        cpupool_put(c);
    }
    break;

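    /*
     * move a domain from its current cpupool to another one.
     * domain 0 may not be moved.
     */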
    case XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
    {
        struct domain *d;

        ret = -EINVAL;
        if ( op->domid == 0 )
            break;
        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domid);
        if ( d == NULL )
            break;
        if ( d->cpupool == NULL )
        {
            ret = -EINVAL;
            rcu_unlock_domain(d);
            break;
        }
        if ( op->cpupool_id == d->cpupool->cpupool_id )
        {
            ret = 0;
            rcu_unlock_domain(d);
            break;
        }
        cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d\n",
                        d->domain_id, op->cpupool_id);
        ret = -ENOENT;
        spin_lock(&cpupool_lock);
        c = cpupool_find_by_id(op->cpupool_id, 1);
        if ( (c != NULL) && cpus_weight(c->cpu_valid) )
        {
            d->cpupool->n_dom--;
            ret = sched_move_domain(d, c);
            if ( ret )
                d->cpupool->n_dom++;
            else
                c->n_dom++;
        }
        spin_unlock(&cpupool_lock);
        cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d ret %d\n",
                        d->domain_id, op->cpupool_id, ret);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_SYSCTL_CPUPOOL_OP_FREEINFO:
    {
        ret = cpumask_to_xenctl_cpumap(
            &op->cpumap, &cpupool_free_cpus);
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

void schedule_dump(struct cpupool *c);

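/*
 * dump scheduling info for the idle cpupool (NULL) and for every
 * configured cpupool, with interrupts disabled while printing.
 */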
void dump_runq(unsigned char key)
{
    unsigned long flags;
    s_time_t now = NOW();
    struct cpupool **c;

    spin_lock(&cpupool_lock);
    local_irq_save(flags);

    printk("sched_smt_power_savings: %s\n",
           sched_smt_power_savings ? "enabled" : "disabled");
    printk("NOW=0x%08X%08X\n", (u32)(now>>32), (u32)now);

    printk("Idle cpupool:\n");
    schedule_dump(NULL);

    for_each_cpupool(c)
    {
        printk("Cpupool %d:\n", (*c)->cpupool_id);
        schedule_dump(*c);
    }

    local_irq_restore(flags);
    spin_unlock(&cpupool_lock);
}

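/*
 * cpu hotplug notifier: a cpu coming online (or failing to go down) is
 * put back into cpupool0; a cpu about to go down is locked against
 * cpupool moves first.
 */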
static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    int rc = 0;

    switch ( action )
    {
    case CPU_DOWN_FAILED:
    case CPU_ONLINE:
        cpupool_cpu_add(cpu);
        break;
    case CPU_DOWN_PREPARE:
        rc = cpupool_cpu_remove(cpu);
        break;
    default:
        break;
    }

    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback
};

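/*
 * pre-SMP initialisation: create cpupool0 (with the default scheduler),
 * add the boot cpu to it and register the hotplug notifier before the
 * secondary cpus come up.
 */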
static int __init cpupool_presmp_init(void)
{
    int err;
    void *cpu = (void *)(long)smp_processor_id();

    cpupool0 = cpupool_create(0, 0, &err);
    BUG_ON(cpupool0 == NULL);
    cpupool_put(cpupool0);
    cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
    register_cpu_notifier(&cpu_nfb);
    return 0;
}
presmp_initcall(cpupool_presmp_init);

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */