view xen/include/xen/sched.h @ 21959:581ebaa7e2da (debuggers.hg)

numa: Attempt more efficient NUMA allocation in hypervisor by default.

1. Try to allocate from nodes containing CPUs which a guest can be
   scheduled on.
2. Remember which node we allocated from last, and round-robin
   allocations among above-mentioned nodes.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Aug 04 15:35:28 2010 +0100 (2010-08-04)
parents db35740574a5
children cf70ef051a82
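The two steps above are reflected in the node_affinity, last_alloc_node and
node_affinity_lock fields added to struct domain below, together with the
domain_update_node_affinity() declaration. A minimal sketch of the round-robin
node selection the commit describes follows, assuming a hypothetical
next_alloc_node() helper on the allocation path (only the struct domain fields
are real; the helper itself is illustrative, not the actual page-allocator
code):

/* Illustrative only: pick the next node to allocate from for domain d. */
static unsigned int next_alloc_node(struct domain *d)
{
    unsigned int node;

    spin_lock(&d->node_affinity_lock);
    /* Advance round-robin from the node used last time... */
    node = next_node(d->last_alloc_node, d->node_affinity);
    /* ...wrapping back to the first node the domain's VCPUs can run on. */
    if ( node >= MAX_NUMNODES )
        node = first_node(d->node_affinity);
    d->last_alloc_node = node;
    spin_unlock(&d->node_affinity_lock);

    return node;
}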
line source
#ifndef __SCHED_H__
#define __SCHED_H__

#include <xen/config.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/smp.h>
#include <xen/shared.h>
#include <public/xen.h>
#include <public/domctl.h>
#include <public/sysctl.h>
#include <public/vcpu.h>
#include <public/xsm/acm.h>
#include <xen/time.h>
#include <xen/timer.h>
#include <xen/grant_table.h>
#include <xen/rangeset.h>
#include <xen/domain.h>
#include <xen/xenoprof.h>
#include <xen/rcupdate.h>
#include <xen/irq.h>
#include <xen/mm.h>
#include <xen/tasklet.h>
#include <public/mem_event.h>
#include <xen/cpumask.h>
#include <xen/nodemask.h>

#ifdef CONFIG_COMPAT
#include <compat/vcpu.h>
DEFINE_XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t);
#endif

/* A global pointer to the initial domain (DOM0). */
extern struct domain *dom0;

#ifndef CONFIG_COMPAT
#define BITS_PER_EVTCHN_WORD(d) BITS_PER_LONG
#else
#define BITS_PER_EVTCHN_WORD(d) (has_32bit_shinfo(d) ? 32 : BITS_PER_LONG)
#endif
#define MAX_EVTCHNS(d) (BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d))
#define EVTCHNS_PER_BUCKET 128
#define NR_EVTCHN_BUCKETS  (NR_EVENT_CHANNELS / EVTCHNS_PER_BUCKET)

struct evtchn
{
#define ECS_FREE         0 /* Channel is available for use.                  */
#define ECS_RESERVED     1 /* Channel is reserved.                           */
#define ECS_UNBOUND      2 /* Channel is waiting to bind to a remote domain. */
#define ECS_INTERDOMAIN  3 /* Channel is bound to another domain.            */
#define ECS_PIRQ         4 /* Channel is bound to a physical IRQ line.       */
#define ECS_VIRQ         5 /* Channel is bound to a virtual IRQ line.        */
#define ECS_IPI          6 /* Channel is bound to a virtual IPI line.        */
    u8  state;             /* ECS_* */
    u8  consumer_is_xen;   /* Consumed by Xen or by guest? */
    u16 notify_vcpu_id;    /* VCPU for local delivery notification */
    union {
        struct {
            domid_t remote_domid;
        } unbound;     /* state == ECS_UNBOUND */
        struct {
            u16            remote_port;
            struct domain *remote_dom;
        } interdomain; /* state == ECS_INTERDOMAIN */
        struct {
            u16 irq;
            u16 next_port;
            u16 prev_port;
        } pirq;        /* state == ECS_PIRQ */
        u16 virq;      /* state == ECS_VIRQ */
    } u;
#ifdef FLASK_ENABLE
    void *ssid;
#endif
};

int  evtchn_init(struct domain *d); /* from domain_create */
void evtchn_destroy(struct domain *d); /* from domain_kill */
void evtchn_destroy_final(struct domain *d); /* from complete_domain_destroy */

struct vcpu
{
    int              vcpu_id;

    int              processor;

    vcpu_info_t     *vcpu_info;

    struct domain   *domain;

    struct vcpu     *next_in_list;

    s_time_t         periodic_period;
    s_time_t         periodic_last_event;
    struct timer     periodic_timer;
    struct timer     singleshot_timer;

    struct timer     poll_timer;    /* timeout for SCHEDOP_poll */

    void            *sched_priv;    /* scheduler-specific data */

    struct vcpu_runstate_info runstate;
#ifndef CONFIG_COMPAT
# define runstate_guest(v) ((v)->runstate_guest)
    XEN_GUEST_HANDLE(vcpu_runstate_info_t) runstate_guest; /* guest address */
#else
# define runstate_guest(v) ((v)->runstate_guest.native)
    union {
        XEN_GUEST_HANDLE(vcpu_runstate_info_t) native;
        XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t) compat;
    } runstate_guest; /* guest address */
#endif
    /* Last time the VCPU was scheduled out. */
    uint64_t         last_run_time;
    /* Has the FPU been initialised? */
    bool_t           fpu_initialised;
    /* Has the FPU been used since it was last saved? */
    bool_t           fpu_dirtied;
    /* Initialization completed for this VCPU? */
    bool_t           is_initialised;
    /* Currently running on a CPU? */
    bool_t           is_running;
    /* VCPU should wake fast (do not deep sleep the CPU). */
    bool_t           is_urgent;

#ifdef VCPU_TRAP_LAST
#define VCPU_TRAP_NONE    0
    struct {
        bool_t           pending;
        uint8_t          old_mask;
    }                async_exception_state[VCPU_TRAP_LAST];
#define async_exception_state(t) async_exception_state[(t)-1]
    uint8_t          async_exception_mask;
#endif

    /* Require shutdown to be deferred for some asynchronous operation? */
    bool_t           defer_shutdown;
    /* VCPU is paused following shutdown request (d->is_shutting_down)? */
    bool_t           paused_for_shutdown;

    /*
     * > 0: a single port is being polled;
     * = 0: nothing is being polled (vcpu should be clear in d->poll_mask);
     * < 0: multiple ports may be being polled.
     */
    int              poll_evtchn;

    /* (over-)protected by ->domain->event_lock */
    int              pirq_evtchn_head;

    unsigned long    pause_flags;
    atomic_t         pause_count;

    /* IRQ-safe virq_lock protects against delivering VIRQ to stale evtchn. */
    u16              virq_to_evtchn[NR_VIRQS];
    spinlock_t       virq_lock;

    /* Bitmask of CPUs on which this VCPU may run. */
    cpumask_t        cpu_affinity;
    /* Used to change affinity temporarily. */
    cpumask_t        cpu_affinity_tmp;

    /* Bitmask of CPUs which are holding onto this VCPU's state. */
    cpumask_t        vcpu_dirty_cpumask;

    /* Tasklet for continue_hypercall_on_cpu(). */
    struct tasklet   continue_hypercall_tasklet;

    struct arch_vcpu arch;
};

/* Per-domain lock can be recursively acquired in fault handlers. */
#define domain_lock(d) spin_lock_recursive(&(d)->domain_lock)
#define domain_unlock(d) spin_unlock_recursive(&(d)->domain_lock)
#define domain_is_locked(d) spin_is_locked(&(d)->domain_lock)

/* Memory event */
struct mem_event_domain
{
    /* ring lock */
    spinlock_t ring_lock;
    /* shared page */
    mem_event_shared_page_t *shared_page;
    /* shared ring page */
    void *ring_page;
    /* front-end ring */
    mem_event_front_ring_t front_ring;
    /* if domain has been paused due to ring contention */
    bool_t paused;
    int paused_vcpus[MAX_VIRT_CPUS];
    /* the memory event mode */
    unsigned long mode;
    /* domain to receive memory events */
    struct domain *domain;
    /* enabled? */
    bool_t enabled;
    /* event channel port (vcpu0 only) */
    int xen_port;
};

struct domain
{
    domid_t          domain_id;

    shared_info_t   *shared_info;     /* shared data area */

    spinlock_t       domain_lock;

    spinlock_t       page_alloc_lock; /* protects all the following fields  */
    struct page_list_head page_list;  /* linked list, of size tot_pages     */
    struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
    unsigned int     tot_pages;       /* number of pages currently possessed */
    unsigned int     max_pages;       /* maximum value for tot_pages        */
    atomic_t         shr_pages;       /* number of shared pages             */
    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */

    unsigned int     max_vcpus;

    /* Scheduling. */
    void            *sched_priv;    /* scheduler-specific data */
    struct cpupool  *cpupool;

    struct domain   *next_in_list;
    struct domain   *next_in_hashbucket;

    struct list_head rangesets;
    spinlock_t       rangesets_lock;

    /* Event channel information. */
    struct evtchn   *evtchn[NR_EVTCHN_BUCKETS];
    spinlock_t       event_lock;

    struct grant_table *grant_table;

    /*
     * Interrupt to event-channel mappings. Updates should be protected by the
     * domain's event-channel spinlock. Read accesses can also synchronise on
     * the lock, but races don't usually matter.
     */
    unsigned int     nr_pirqs;
    u16             *pirq_to_evtchn;
    unsigned long   *pirq_mask;

    /* I/O capabilities (access to IRQs and memory-mapped I/O). */
    struct rangeset *iomem_caps;
    struct rangeset *irq_caps;

    /* Is this an HVM guest? */
    bool_t           is_hvm;
    /* Does this guest need iommu mappings? */
    bool_t           need_iommu;
    /* Is this guest fully privileged (aka dom0)? */
    bool_t           is_privileged;
    /* Which guest this guest has privileges on */
    struct domain   *target;
    /* Is this guest being debugged by dom0? */
    bool_t           debugger_attached;
    /* Is this guest dying (i.e., a zombie)? */
    enum { DOMDYING_alive, DOMDYING_dying, DOMDYING_dead } is_dying;
    /* Domain is paused by controller software? */
    bool_t           is_paused_by_controller;
    /* Domain's VCPUs are pinned 1:1 to physical CPUs? */
    bool_t           is_pinned;

    /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
#if MAX_VIRT_CPUS <= BITS_PER_LONG
    DECLARE_BITMAP(poll_mask, MAX_VIRT_CPUS);
#else
    unsigned long   *poll_mask;
#endif

    /* Guest has shut down (inc. reason code)? */
    spinlock_t       shutdown_lock;
    bool_t           is_shutting_down; /* in process of shutting down? */
    bool_t           is_shut_down;     /* fully shut down? */
    int              shutdown_code;

    /* If this is not 0, send suspend notification here instead of
     * raising DOM_EXC */
    int              suspend_evtchn;

    atomic_t         pause_count;

    unsigned long    vm_assist;

    atomic_t         refcnt;

    struct vcpu    **vcpu;

    /* Bitmask of CPUs which are holding onto this domain's state. */
    cpumask_t        domain_dirty_cpumask;

    struct arch_domain arch;

    void *ssid; /* sHype security subject identifier */

    /* Control-plane tools handle for this domain. */
    xen_domain_handle_t handle;

    /* OProfile support. */
    struct xenoprof *xenoprof;
    int32_t time_offset_seconds;

    /* Domain watchdog. */
#define NR_DOMAIN_WATCHDOG_TIMERS 2
    spinlock_t watchdog_lock;
    uint32_t watchdog_inuse_map;
    struct timer watchdog_timer[NR_DOMAIN_WATCHDOG_TIMERS];

    struct rcu_head rcu;

    /*
     * Hypercall deadlock avoidance lock. Used if a hypercall might
     * cause a deadlock. Acquirers don't spin waiting; they preempt.
     */
    spinlock_t hypercall_deadlock_mutex;

    /* transcendent memory, auto-allocated on first tmem op by each domain */
    void *tmem;

    struct lock_profile_qhead profile_head;
    /* Non-migratable and non-restorable? */
    bool_t disable_migrate;
    /* Memory paging support */
    struct mem_event_domain mem_event;

    /* Currently computed from union of all vcpu cpu-affinity masks. */
    nodemask_t node_affinity;
    unsigned int last_alloc_node;
    spinlock_t node_affinity_lock;
};

struct domain_setup_info
{
    /* Initialised by caller. */
    unsigned long image_addr;
    unsigned long image_len;
    /* Initialised by loader: Public. */
    unsigned long v_start;
    unsigned long v_end;
    unsigned long v_kernstart;
    unsigned long v_kernend;
    unsigned long v_kernentry;
#define PAEKERN_no           0
#define PAEKERN_yes          1
#define PAEKERN_extended_cr3 2
#define PAEKERN_bimodal      3
    unsigned int  pae_kernel;
    /* Initialised by loader: Private. */
    unsigned long elf_paddr_offset;
    unsigned int  load_symtab;
    unsigned long symtab_addr;
    unsigned long symtab_len;
};

/* Protect updates/reads (resp.) of domain_list and domain_hash. */
extern spinlock_t domlist_update_lock;
extern rcu_read_lock_t domlist_read_lock;

extern struct vcpu *idle_vcpu[NR_CPUS];
#define IDLE_DOMAIN_ID    (0x7FFFU)
#define is_idle_domain(d) ((d)->domain_id == IDLE_DOMAIN_ID)
#define is_idle_vcpu(v)   (is_idle_domain((v)->domain))

#define DOMAIN_DESTROYED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) \
  if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destroy(_d)

/*
 * Use this when you don't have an existing reference to @d. It returns
 * FALSE if @d is being destroyed.
 */
static always_inline int get_domain(struct domain *d)
{
    atomic_t old, new, seen = d->refcnt;
    do
    {
        old = seen;
        if ( unlikely(_atomic_read(old) & DOMAIN_DESTROYED) )
            return 0;
        _atomic_set(new, _atomic_read(old) + 1);
        seen = atomic_compareandswap(old, new, &d->refcnt);
    }
    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    return 1;
}
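/*
 * Illustration (not part of sched.h): the usual caller pattern for the
 * refcounting above.  get_domain() fails once DOMAIN_DESTROYED is set in
 * d->refcnt, so callers must be ready to back off; put_domain() drops the
 * reference and invokes domain_destroy() when the count reaches zero.
 * The error value returned on failure is up to the caller, e.g.:
 *
 *     if ( !get_domain(d) )
 *         return -ESRCH;
 *     ... use d ...
 *     put_domain(d);
 */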
/*
 * Use this when you already have, or are borrowing, a reference to @d.
 * In this case we know that @d cannot be destroyed under our feet.
 */
static inline void get_knownalive_domain(struct domain *d)
{
    atomic_inc(&d->refcnt);
    ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
}

void domain_update_node_affinity(struct domain *d);

struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, ssidref_t ssidref);
/* DOMCRF_hvm: Create an HVM domain, as opposed to a PV domain. */
#define _DOMCRF_hvm   0
#define DOMCRF_hvm    (1U<<_DOMCRF_hvm)
/* DOMCRF_hap: Create a domain with hardware-assisted paging. */
#define _DOMCRF_hap   1
#define DOMCRF_hap    (1U<<_DOMCRF_hap)
/* DOMCRF_s3_integrity: Create a domain with tboot memory integrity protection
   by tboot */
#define _DOMCRF_s3_integrity 2
#define DOMCRF_s3_integrity  (1U<<_DOMCRF_s3_integrity)
/* DOMCRF_dummy: Create a dummy domain (not scheduled; not on domain list) */
#define _DOMCRF_dummy 3
#define DOMCRF_dummy  (1U<<_DOMCRF_dummy)
/* DOMCRF_oos_off: don't use out-of-sync optimization for shadow page tables */
#define _DOMCRF_oos_off 4
#define DOMCRF_oos_off  (1U<<_DOMCRF_oos_off)

/*
 * rcu_lock_domain_by_id() is more efficient than get_domain_by_id().
 * This is the preferred function if the returned domain reference
 * is short lived, but it cannot be used if the domain reference needs
 * to be kept beyond the current scope (e.g., across a softirq).
 * The returned domain reference must be discarded using rcu_unlock_domain().
 */
struct domain *rcu_lock_domain_by_id(domid_t dom);

/*
 * As above function, but accounts for current domain context:
 * - Translates target DOMID_SELF into caller's domain id; and
 * - Checks that caller has permission to act on the target domain.
 */
int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d);

/* Finish a RCU critical region started by rcu_lock_domain_by_id(). */
static inline void rcu_unlock_domain(struct domain *d)
{
    rcu_read_unlock(&domlist_read_lock);
}
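/*
 * Illustration (not part of sched.h): the short-lived lookup pattern the
 * comments above describe, e.g. inside a hypercall handler given a domid:
 *
 *     struct domain *d = rcu_lock_domain_by_id(domid);
 *     if ( d == NULL )
 *         return -ESRCH;
 *     ... use d within this scope only ...
 *     rcu_unlock_domain(d);
 *
 * The pointer is only valid inside the RCU read section; a reference that
 * must outlive it needs get_domain_by_id()/put_domain() instead.
 */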
static inline struct domain *rcu_lock_domain(struct domain *d)
{
    rcu_read_lock(d);
    return d;
}

static inline struct domain *rcu_lock_current_domain(void)
{
    return rcu_lock_domain(current->domain);
}

struct domain *get_domain_by_id(domid_t dom);
void domain_destroy(struct domain *d);
int domain_kill(struct domain *d);
void domain_shutdown(struct domain *d, u8 reason);
void domain_resume(struct domain *d);
void domain_pause_for_debugger(void);

int vcpu_start_shutdown_deferral(struct vcpu *v);
void vcpu_end_shutdown_deferral(struct vcpu *v);

/*
 * Mark specified domain as crashed. This function always returns, even if the
 * caller is the specified domain. The domain is not synchronously descheduled
 * from any processor.
 */
void __domain_crash(struct domain *d);
#define domain_crash(d) do {                                              \
    printk("domain_crash called from %s:%d\n", __FILE__, __LINE__);      \
    __domain_crash(d);                                                    \
} while (0)

/*
 * Mark current domain as crashed and synchronously deschedule from the local
 * processor. This function never returns.
 */
void __domain_crash_synchronous(void) __attribute__((noreturn));
#define domain_crash_synchronous() do {                                   \
    printk("domain_crash_sync called from %s:%d\n", __FILE__, __LINE__);  \
    __domain_crash_synchronous();                                         \
} while (0)

#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
int  sched_init_vcpu(struct vcpu *v, unsigned int processor);
void sched_destroy_vcpu(struct vcpu *v);
int  sched_init_domain(struct domain *d);
void sched_destroy_domain(struct domain *d);
int sched_move_domain(struct domain *d, struct cpupool *c);
long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
long sched_adjust_global(struct xen_sysctl_scheduler_op *);
int  sched_id(void);
void sched_tick_suspend(void);
void sched_tick_resume(void);
void vcpu_wake(struct vcpu *d);
void vcpu_sleep_nosync(struct vcpu *d);
void vcpu_sleep_sync(struct vcpu *d);

/*
 * Force synchronisation of given VCPU's state. If it is currently descheduled,
 * this call will ensure that all its state is committed to memory and that
 * no CPU is using critical state (e.g., page tables) belonging to the VCPU.
 */
void sync_vcpu_execstate(struct vcpu *v);

/* As above, for any lazy state being held on the local CPU. */
void sync_local_execstate(void);

/*
 * Called by the scheduler to switch to another VCPU. This function must
 * call context_saved(@prev) when the local CPU is no longer running in
 * @prev's context, and that context is saved to memory. Alternatively, if
 * implementing lazy context switching, it suffices to ensure that invoking
 * sync_vcpu_execstate() will switch and commit @prev's state.
 */
void context_switch(
    struct vcpu *prev,
    struct vcpu *next);

/*
 * As described above, context_switch() must call this function when the
 * local CPU is no longer running in @prev's context, and @prev's context is
 * saved to memory. Alternatively, if implementing lazy context switching,
 * ensure that invoking sync_vcpu_execstate() will switch and commit @prev.
 */
void context_saved(struct vcpu *prev);

/* Called by the scheduler to continue running the current VCPU. */
void continue_running(
    struct vcpu *same);

void startup_cpu_idle_loop(void);

/*
 * Creates a continuation to resume the current hypercall. The caller should
 * return immediately, propagating the value returned from this invocation.
 * The format string specifies the types and number of hypercall arguments.
 * It contains one character per argument as follows:
 *  'i' [unsigned] {char, int}
 *  'l' [unsigned] long
 *  'h' guest handle (XEN_GUEST_HANDLE(foo))
 */
unsigned long hypercall_create_continuation(
    unsigned int op, const char *format, ...);

#define hypercall_preempt_check() (unlikely(    \
        softirq_pending(smp_processor_id()) |   \
        local_events_need_delivery()            \
    ))
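/*
 * Illustration (not part of sched.h): how a long-running hypercall typically
 * combines the two primitives above.  The opcode and "lh" argument list are
 * for the example only; each handler passes its own op and arguments:
 *
 *     if ( hypercall_preempt_check() )
 *         return hypercall_create_continuation(
 *             __HYPERVISOR_memory_op, "lh", cmd, arg);
 *
 * The handler returns the continuation value to its caller immediately; the
 * guest later re-enters the hypercall with the re-packed arguments.
 */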
extern struct domain *domain_list;

/* Caller must hold the domlist_read_lock or domlist_update_lock. */
#define for_each_domain(_d)                     \
 for ( (_d) = rcu_dereference(domain_list);     \
       (_d) != NULL;                            \
       (_d) = rcu_dereference((_d)->next_in_list )) \

#define for_each_vcpu(_d,_v)                    \
 for ( (_v) = (_d)->vcpu ? (_d)->vcpu[0] : NULL; \
       (_v) != NULL;                            \
       (_v) = (_v)->next_in_list )
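/*
 * Illustration (not part of sched.h): walking every VCPU in the system with
 * the iterators above, under the RCU read lock as the comment requires:
 *
 *     struct domain *d;
 *     struct vcpu   *v;
 *
 *     rcu_read_lock(&domlist_read_lock);
 *     for_each_domain ( d )
 *         for_each_vcpu ( d, v )
 *             ... inspect v ...
 *     rcu_read_unlock(&domlist_read_lock);
 */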
/*
 * Per-VCPU pause flags.
 */
/* Domain is blocked waiting for an event. */
#define _VPF_blocked         0
#define VPF_blocked          (1UL<<_VPF_blocked)
/* VCPU is offline. */
#define _VPF_down            1
#define VPF_down             (1UL<<_VPF_down)
/* VCPU is blocked awaiting an event to be consumed by Xen. */
#define _VPF_blocked_in_xen  2
#define VPF_blocked_in_xen   (1UL<<_VPF_blocked_in_xen)
/* VCPU affinity has changed: migrating to a new CPU. */
#define _VPF_migrating       3
#define VPF_migrating        (1UL<<_VPF_migrating)

static inline int vcpu_runnable(struct vcpu *v)
{
    return !(v->pause_flags |
             atomic_read(&v->pause_count) |
             atomic_read(&v->domain->pause_count));
}

void vcpu_unblock(struct vcpu *v);
void vcpu_pause(struct vcpu *v);
void vcpu_pause_nosync(struct vcpu *v);
void domain_pause(struct domain *d);
void vcpu_unpause(struct vcpu *v);
void domain_unpause(struct domain *d);
void domain_pause_by_systemcontroller(struct domain *d);
void domain_unpause_by_systemcontroller(struct domain *d);
void cpu_init(void);

struct scheduler;

struct scheduler *scheduler_get_default(void);
struct scheduler *scheduler_alloc(unsigned int sched_id, int *perr);
void scheduler_free(struct scheduler *sched);
void schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
void vcpu_force_reschedule(struct vcpu *v);
int cpu_disable_scheduler(unsigned int cpu);
int vcpu_set_affinity(struct vcpu *v, cpumask_t *affinity);

void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate);
uint64_t get_cpu_idle_time(unsigned int cpu);

/*
 * Used by idle loop to decide whether there is work to do:
 *  (1) Run softirqs; or (2) Play dead; or (3) Run tasklets.
 */
#define cpu_is_haltable(cpu)                    \
    (!softirq_pending(cpu) &&                   \
     cpu_online(cpu) &&                         \
     !per_cpu(tasklet_work_to_do, cpu))

void watchdog_domain_init(struct domain *d);
void watchdog_domain_destroy(struct domain *d);

#define IS_PRIV(_d) ((_d)->is_privileged)
#define IS_PRIV_FOR(_d, _t) (IS_PRIV(_d) || ((_d)->target && (_d)->target == (_t)))

#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))

#define is_hvm_domain(d) ((d)->is_hvm)
#define is_hvm_vcpu(v)   (is_hvm_domain(v->domain))
#define need_iommu(d)    ((d)->need_iommu)

void set_vcpu_migration_delay(unsigned int delay);
unsigned int get_vcpu_migration_delay(void);

extern int sched_smt_power_savings;

extern enum cpufreq_controller {
    FREQCTL_none, FREQCTL_dom0_kernel, FREQCTL_xen
} cpufreq_controller;

#define CPUPOOLID_NONE    -1

struct cpupool *cpupool_get_by_id(int poolid);
void cpupool_put(struct cpupool *pool);
int cpupool_add_domain(struct domain *d, int poolid);
void cpupool_rm_domain(struct domain *d);
int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
#define num_cpupool_cpus(c) (cpus_weight((c)->cpu_valid))

#endif /* __SCHED_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */