view xen/include/xen/sched.h @ 3129:e0351a3744a5

bitkeeper revision 1.1159.187.4 (41a471c8NjyQJy-vepqpb8H7LdzHzA)

Allow preemption of long-running hypercalls for softirq processing.
author kaf24@scramble.cl.cam.ac.uk
date Wed Nov 24 11:34:32 2004 +0000 (2004-11-24)
parents fef4b77be191
children 2754a2ed61c3 2fae9947de6f b013a6b30d9e
line source
#ifndef __SCHED_H__
#define __SCHED_H__

#define STACK_SIZE (2*PAGE_SIZE)

#include <xen/config.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <xen/cache.h>
#include <asm/regs.h>
#include <xen/smp.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <public/xen.h>
#include <public/dom0_ops.h>
#include <xen/list.h>
#include <xen/time.h>
#include <xen/ac_timer.h>
#include <xen/delay.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <xen/grant_table.h>
#include <xen/irq_cpustat.h>

extern unsigned long volatile jiffies;
extern rwlock_t domlist_lock;

struct domain;

/* A global pointer to the initial domain (DOM0). */
extern struct domain *dom0;

typedef struct event_channel_st
{
#define ECS_FREE         0 /* Channel is available for use.                  */
#define ECS_UNBOUND      1 /* Channel is waiting to bind to a remote domain. */
#define ECS_INTERDOMAIN  2 /* Channel is bound to another domain.            */
#define ECS_PIRQ         3 /* Channel is bound to a physical IRQ line.       */
#define ECS_VIRQ         4 /* Channel is bound to a virtual IRQ line.        */
    u16 state;
    union {
        struct {
            domid_t remote_domid;
        } __attribute__ ((packed)) unbound; /* state == ECS_UNBOUND */
        struct {
            u16 remote_port;
            struct domain *remote_dom;
        } __attribute__ ((packed)) interdomain; /* state == ECS_INTERDOMAIN */
        u16 pirq; /* state == ECS_PIRQ */
        u16 virq; /* state == ECS_VIRQ */
    } u;
} event_channel_t;
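
/*
 * Editor's illustrative note (not in the original revision): the union
 * member that is valid is selected by 'state'.  A channel bound to the
 * timer virtual IRQ, for example, would have state == ECS_VIRQ and
 * u.virq == VIRQ_TIMER.
 */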

int  init_event_channels(struct domain *d);
void destroy_event_channels(struct domain *d);

struct domain
{
    u32 processor;

    shared_info_t *shared_info;

    domid_t  id;
    s_time_t create_time;

    spinlock_t       page_alloc_lock; /* protects all the following fields  */
    struct list_head page_list;       /* linked list, of size tot_pages     */
    struct list_head xenpage_list;    /* linked list, of size xenheap_pages */
    unsigned int     tot_pages;       /* number of pages currently possessed */
    unsigned int     max_pages;       /* maximum value for tot_pages        */
    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap    */

    /* Scheduling. */
    int              shutdown_code; /* code value from OS (if DF_SHUTDOWN)   */
    s_time_t         lastschd;      /* time this domain was last scheduled   */
    s_time_t         lastdeschd;    /* time this domain was last descheduled */
    s_time_t         cpu_time;      /* total CPU time received till now      */
    s_time_t         wokenup;       /* time domain got woken up              */
    struct ac_timer  timer;         /* one-shot timer for timeout values     */
    void            *sched_priv;    /* scheduler-specific data               */

    struct mm_struct mm;

    struct thread_struct thread;
    struct domain *next_list, *next_hash;

    /* Event channel information. */
    event_channel_t *event_channel;
    unsigned int     max_event_channel;
    spinlock_t       event_channel_lock;

    grant_table_t *grant_table;

    /*
     * Interrupt to event-channel mappings. Updates should be protected by the
     * domain's event-channel spinlock. Read accesses can also synchronise on
     * the lock, but races don't usually matter.
     */
#define NR_PIRQS 128 /* Put this somewhere sane! */
    u16 pirq_to_evtchn[NR_PIRQS];
    u16 virq_to_evtchn[NR_VIRQS];
    u32 pirq_mask[NR_PIRQS/32];

    /* Physical I/O */
    spinlock_t       pcidev_lock;
    struct list_head pcidev_list;

    unsigned long flags;
    unsigned long vm_assist;

    atomic_t refcnt;
    atomic_t pausecnt;
};

struct domain_setup_info
{
    unsigned long v_start;
    unsigned long v_kernstart;
    unsigned long v_kernend;
    unsigned long v_kernentry;

    unsigned int  use_writable_pagetables;
};

#include <asm/uaccess.h> /* for KERNEL_DS */

extern struct domain idle0_task;

extern struct domain *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID   (0x7FFFU)
#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->flags))

void free_domain_struct(struct domain *d);
struct domain *alloc_domain_struct(void);

#define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) do {                                             \
        if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destruct(_d);  \
    } while ( 0 )

/*
 * Use this when you don't have an existing reference to @d. It returns
 * FALSE if @d is being destructed.
 */
static always_inline int get_domain(struct domain *d)
{
    atomic_t old, new, seen = d->refcnt;
    do
    {
        old = seen;
        if ( unlikely(_atomic_read(old) & DOMAIN_DESTRUCTED) )
            return 0;
        _atomic_set(new, _atomic_read(old) + 1);
        seen = atomic_compareandswap(old, new, &d->refcnt);
    }
    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    return 1;
}
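
/*
 * Illustrative usage sketch (not in the original revision): a caller
 * without a reference must check get_domain()'s result before touching
 * @d, and drop the reference with put_domain() when done:
 *
 *     if ( get_domain(d) )
 *     {
 *         ... use d safely; it cannot be destructed meanwhile ...
 *         put_domain(d);
 *     }
 */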

/*
 * Use this when you already have, or are borrowing, a reference to @d.
 * In this case we know that @d cannot be destructed under our feet.
 */
static inline void get_knownalive_domain(struct domain *d)
{
    atomic_inc(&d->refcnt);
    ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
}

extern struct domain *do_createdomain(
    domid_t dom_id, unsigned int cpu);
extern int construct_dom0(struct domain *d,
                          unsigned long alloc_start,
                          unsigned long alloc_end,
                          char *image_start, unsigned long image_len,
                          char *initrd_start, unsigned long initrd_len,
                          char *cmdline);
extern int final_setup_guestos(struct domain *d, dom0_builddomain_t *);

struct domain *find_domain_by_id(domid_t dom);
struct domain *find_last_domain(void);
extern void domain_destruct(struct domain *d);
extern void domain_kill(struct domain *d);
extern void domain_crash(void);
extern void domain_shutdown(u8 reason);

void new_thread(struct domain *d,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info);

extern unsigned long wait_init_idle;
#define init_idle() clear_bit(smp_processor_id(), &wait_init_idle)

#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
void sched_add_domain(struct domain *d);
void sched_rem_domain(struct domain *d);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int  sched_id(void);
void init_idle_task(void);
void domain_wake(struct domain *d);
void domain_sleep(struct domain *d);

void __enter_scheduler(void);

extern void switch_to(struct domain *prev,
                      struct domain *next);

void domain_init(void);

int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */

void startup_cpu_idle_loop(void);
void continue_cpu_idle_loop(void);

void continue_nonidle_task(void);

void hypercall_create_continuation(unsigned int op, unsigned int nr_args, ...);
#define hypercall_may_preempt(_op, _nr_args, _args...)                  \
    do {                                                                \
        if ( unlikely(softirq_pending(smp_processor_id())) ) {         \
            hypercall_create_continuation(_op , _nr_args , ##_args);   \
            return _op;                                                 \
        } } while ( 0 )
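
/*
 * Illustrative sketch (not in the original revision) of the preemption
 * pattern this macro supports: a long-running hypercall polls for pending
 * softirqs at a restart-safe point and, if any are pending, records a
 * continuation and returns.  'do_example_op', '__HYPERVISOR_example_op'
 * and 'process_one_unit' are hypothetical names.
 *
 *     long do_example_op(unsigned long nr_units, unsigned long done)
 *     {
 *         for ( ; done < nr_units; done++ )
 *         {
 *             process_one_unit(done);   <-- one bounded unit of work
 *             hypercall_may_preempt(__HYPERVISOR_example_op,
 *                                   2, nr_units, done);
 *         }
 *         return 0;
 *     }
 *
 * Passing the updated 'done' into the continuation means the restarted
 * hypercall resumes where it left off rather than repeating work.
 */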

/* The domain_hash and domain_list structures are protected by domlist_lock. */
#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
extern struct domain *domain_hash[DOMAIN_HASH_SIZE];
extern struct domain *domain_list;

#define for_each_domain(_p) \
 for ( (_p) = domain_list; (_p) != NULL; (_p) = (_p)->next_list )
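
/*
 * Illustrative only (not in the original revision): traversal should be
 * bracketed by domlist_lock, e.g.
 *
 *     read_lock(&domlist_lock);
 *     for_each_domain ( d )
 *         printk("dom%u\n", d->id);
 *     read_unlock(&domlist_lock);
 */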

#define DF_DONEFPUINIT  0 /* Has the FPU been initialised for this task?  */
#define DF_USEDFPU      1 /* Has this task used the FPU since last save?  */
#define DF_GUEST_STTS   2 /* Has the guest OS requested 'stts'?           */
#define DF_CONSTRUCTED  3 /* Has the guest OS been fully built yet?       */
#define DF_IDLETASK     4 /* Is this one of the per-CPU idle domains?     */
#define DF_PRIVILEGED   5 /* Is this domain privileged?                   */
#define DF_PHYSDEV      6 /* May this domain do IO to physical devices?   */
#define DF_BLOCKED      7 /* Domain is blocked waiting for an event.      */
#define DF_CTRLPAUSE    8 /* Domain is paused by controller software.     */
#define DF_SHUTDOWN     9 /* Guest shut itself down for some reason.      */
#define DF_CRASHED     10 /* Domain crashed inside Xen, cannot continue.  */
#define DF_DYING       11 /* Death rattle.                                */
#define DF_RUNNING     12 /* Currently running on a CPU.                  */
#define DF_CPUPINNED   13 /* Disables auto-migration.                     */
#define DF_MIGRATED    14 /* Domain migrated between CPUs.                */

static inline int domain_runnable(struct domain *d)
{
    return ( (atomic_read(&d->pausecnt) == 0) &&
             !(d->flags & ((1<<DF_BLOCKED)|(1<<DF_CTRLPAUSE)|
                           (1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
}

static inline void domain_pause(struct domain *d)
{
    ASSERT(d != current);
    atomic_inc(&d->pausecnt);
    domain_sleep(d);
}

static inline void domain_unpause(struct domain *d)
{
    ASSERT(d != current);
    if ( atomic_dec_and_test(&d->pausecnt) )
        domain_wake(d);
}

static inline void domain_unblock(struct domain *d)
{
    if ( test_and_clear_bit(DF_BLOCKED, &d->flags) )
        domain_wake(d);
}

static inline void domain_pause_by_systemcontroller(struct domain *d)
{
    ASSERT(d != current);
    if ( !test_and_set_bit(DF_CTRLPAUSE, &d->flags) )
        domain_sleep(d);
}

static inline void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bit(DF_CTRLPAUSE, &d->flags) )
        domain_wake(d);
}
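
/*
 * Illustrative only (not in the original revision): control software
 * typically brackets operations that must not race with a running guest:
 *
 *     domain_pause_by_systemcontroller(d);
 *     ... inspect or modify d while it is descheduled ...
 *     domain_unpause_by_systemcontroller(d);
 */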

#define IS_PRIV(_d)            (test_bit(DF_PRIVILEGED, &(_d)->flags))
#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->flags))

#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))

#include <xen/slab.h>
#include <asm/domain.h>

#endif /* __SCHED_H__ */