debuggers.hg -- xen/include/xen/sched.h @ 2633:fe2f4bbcf869

bitkeeper revision 1.1159.99.4 (41626f06VquclgVVpIeHy9z2K3jW-A)

Rationalise scheduler locking. A bit more conservative now, but much
simpler! I only applied this to the basic BVT scheduler -- the others
are still unsafe and have been removed from the basic build.

author    kaf24@freefall.cl.cam.ac.uk
date      Tue Oct 05 09:53:10 2004 +0000 (2004-10-05)
parents   46859bdc5411
children  92fff25bf21e

#ifndef __SCHED_H__
#define __SCHED_H__

#define STACK_SIZE (2*PAGE_SIZE)
#define MAX_DOMAIN_NAME 16

#include <xen/config.h>
#include <xen/types.h>
#include <xen/spinlock.h>
#include <asm/ptrace.h>
#include <xen/smp.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <hypervisor-ifs/hypervisor-if.h>
#include <hypervisor-ifs/dom0_ops.h>
#include <xen/list.h>
#include <xen/time.h>
#include <xen/ac_timer.h>
#include <xen/delay.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include <xen/grant_table.h>

extern unsigned long volatile jiffies;
extern rwlock_t tasklist_lock;

struct domain;

typedef struct event_channel_st
{
#define ECS_FREE         0 /* Channel is available for use.                */
#define ECS_UNBOUND      1 /* Channel is not bound to a particular source. */
#define ECS_INTERDOMAIN  2 /* Channel is bound to another domain.          */
#define ECS_PIRQ         3 /* Channel is bound to a physical IRQ line.     */
#define ECS_VIRQ         4 /* Channel is bound to a virtual IRQ line.      */
    u16 state;
    union {
        struct {
            u16 port;
            struct domain *dom;
        } __attribute__ ((packed)) remote; /* state == ECS_INTERDOMAIN */
        u16 pirq; /* state == ECS_PIRQ */
        u16 virq; /* state == ECS_VIRQ */
    } u;
} event_channel_t;

int  init_event_channels(struct domain *d);
void destroy_event_channels(struct domain *d);
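
/*
 * Illustrative sketch (not part of the original header): how 'state'
 * discriminates the union in event_channel_t. evtchn_state_name() is a
 * hypothetical helper built only from the definitions above.
 */
static inline const char *evtchn_state_name(event_channel_t *chn)
{
    switch ( chn->state )
    {
    case ECS_FREE:        return "free";
    case ECS_UNBOUND:     return "unbound";
    case ECS_INTERDOMAIN: return "interdomain"; /* chn->u.remote is valid */
    case ECS_PIRQ:        return "pirq";        /* chn->u.pirq is valid   */
    case ECS_VIRQ:        return "virq";        /* chn->u.virq is valid   */
    default:              return "unknown";
    }
}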

struct domain
{
    /*
     * DO NOT CHANGE THE ORDER OF THE FOLLOWING.
     * Their offsets are hardcoded in entry.S
     */

    u32 processor;               /* 00: current processor */

    /* An unsafe pointer into a shared data area. */
    shared_info_t *shared_info;  /* 04: shared data area */

    /*
     * Return vectors pushed to us by guest OS.
     * The stack frame for events is exactly that of an x86 hardware interrupt.
     * The stack frame for a failsafe callback is augmented with saved values
     * for segment registers %ds, %es, %fs and %gs:
     *  %ds, %es, %fs, %gs, %eip, %cs, %eflags [, %oldesp, %oldss]
     */
    unsigned long event_selector;    /* 08: entry CS  */
    unsigned long event_address;     /* 12: entry EIP */

    /* Saved DS,ES,FS,GS immediately before return to guest OS. */
    unsigned long failsafe_selectors[4]; /* 16-32 */

    /*
     * END OF FIRST CACHELINE. Stuff above is touched a lot!
     */

    unsigned long failsafe_selector; /* 32: entry CS  */
    unsigned long failsafe_address;  /* 36: entry EIP */

    /*
     * From here on things can be added and shuffled without special attention
     */

    domid_t  domain;
    char     name[MAX_DOMAIN_NAME];
    s_time_t create_time;

    spinlock_t       page_alloc_lock; /* protects all the following fields   */
    struct list_head page_list;       /* linked list, of size tot_pages      */
    struct list_head xenpage_list;    /* linked list, of size xenheap_pages  */
    unsigned int     tot_pages;       /* number of pages currently possessed */
    unsigned int     max_pages;       /* maximum value for tot_pages         */
    unsigned int     xenheap_pages;   /* # pages allocated from Xen heap     */

    /* Scheduling. */
    int       shutdown_code;  /* code value from OS (if DF_SHUTDOWN)   */
    s_time_t  lastschd;       /* time this domain was last scheduled   */
    s_time_t  lastdeschd;     /* time this domain was last descheduled */
    s_time_t  cpu_time;       /* total CPU time received till now      */
    s_time_t  wokenup;        /* time domain got woken up              */
    struct ac_timer timer;    /* one-shot timer for timeout values     */
    void     *sched_priv;     /* scheduler-specific data               */

    struct mm_struct mm;

    struct thread_struct thread;
    struct domain *next_list, *next_hash;

    /* Event channel information. */
    event_channel_t *event_channel;
    unsigned int     max_event_channel;
    spinlock_t       event_channel_lock;

    grant_table_t *grant_table;

    /*
     * Interrupt to event-channel mappings. Updates should be protected by the
     * domain's event-channel spinlock. Read accesses can also synchronise on
     * the lock, but races don't usually matter. (An illustrative update
     * helper is sketched below, after this struct.)
     */
#define NR_PIRQS 128 /* Put this somewhere sane! */
    u16 pirq_to_evtchn[NR_PIRQS];
    u16 virq_to_evtchn[NR_VIRQS];
    u32 pirq_mask[NR_PIRQS/32];

    /* Physical I/O */
    spinlock_t pcidev_lock;
    struct list_head pcidev_list;

    /* The following IO bitmap stuff is x86-dependent. */
    u64 io_bitmap_sel; /* Selector to tell us which parts of the IO bitmap
                        * are "interesting" (i.e. have clear bits) */

    /* Handy macro - number of longwords of the IO bitmap, per selector bit. */
#define IOBMP_SELBIT_LWORDS ( IO_BITMAP_SIZE / 64 )
    unsigned long *io_bitmap; /* Pointer to task's IO bitmap or NULL */

    unsigned long flags;
    unsigned long vm_assist;

    atomic_t refcnt;
    atomic_t pausecnt;
};
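
/*
 * Illustrative sketch (not part of the original header): updating a
 * PIRQ -> event-channel binding under the locking discipline described in
 * the interrupt-mapping comment inside struct domain above. The helper
 * set_pirq_evtchn() is hypothetical.
 */
static inline void set_pirq_evtchn(struct domain *d, int pirq, u16 port)
{
    ASSERT(pirq < NR_PIRQS);
    spin_lock(&d->event_channel_lock);   /* writers must take the lock */
    d->pirq_to_evtchn[pirq] = port;
    spin_unlock(&d->event_channel_lock);
    /* Readers may look without the lock; such races don't usually matter. */
}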

struct domain_setup_info
{
    unsigned long v_start;
    unsigned long v_kernstart;
    unsigned long v_kernend;
    unsigned long v_kernentry;

    unsigned int use_writable_pagetables;
};

#include <asm/uaccess.h> /* for KERNEL_DS */

extern struct domain idle0_task;

extern struct domain *idle_task[NR_CPUS];
#define IDLE_DOMAIN_ID   (0x7FFFU)
#define is_idle_task(_p) (test_bit(DF_IDLETASK, &(_p)->flags))

void free_domain_struct(struct domain *d);
struct domain *alloc_domain_struct(void);

#define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) do {                                        \
    if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destruct(_d); \
} while ( 0 )

/*
 * Use this when you don't have an existing reference to @d. It returns
 * FALSE if @d is being destructed.
 */
static always_inline int get_domain(struct domain *d)
{
    atomic_t old, new, seen = d->refcnt;
    do
    {
        old = seen;
        if ( unlikely(_atomic_read(old) & DOMAIN_DESTRUCTED) )
            return 0;
        _atomic_set(new, _atomic_read(old) + 1);
        seen = atomic_compareandswap(old, new, &d->refcnt);
    }
    while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
    return 1;
}
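
/*
 * Illustrative sketch (not part of the original header): the get/put
 * pattern around a domain pointer for which no reference is yet held.
 * try_use_domain() is a hypothetical helper.
 */
static inline int try_use_domain(struct domain *d)
{
    if ( !get_domain(d) )
        return 0;   /* @d is being destructed: do not touch it */
    /* ... safe to use @d here: our reference pins it ... */
    put_domain(d);  /* may invoke domain_destruct() on the last reference */
    return 1;
}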

/*
 * Use this when you already have, or are borrowing, a reference to @d.
 * In this case we know that @d cannot be destructed under our feet.
 */
static inline void get_knownalive_domain(struct domain *d)
{
    atomic_inc(&d->refcnt);
    ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
}

extern struct domain *do_createdomain(
    domid_t dom_id, unsigned int cpu);
extern int construct_dom0(struct domain *d,
                          unsigned long alloc_start,
                          unsigned long alloc_end,
                          char *image_start, unsigned long image_len,
                          char *initrd_start, unsigned long initrd_len,
                          char *cmdline);
extern int final_setup_guestos(struct domain *d, dom0_builddomain_t *);

struct domain *find_domain_by_id(domid_t dom);
struct domain *find_last_domain(void);
extern void domain_destruct(struct domain *d);
extern void domain_kill(struct domain *d);
extern void domain_crash(void);
extern void domain_shutdown(u8 reason);

void new_thread(struct domain *d,
                unsigned long start_pc,
                unsigned long start_stack,
                unsigned long start_info);

extern unsigned long wait_init_idle;
#define init_idle() clear_bit(smp_processor_id(), &wait_init_idle)

#define set_current_state(_s) do { current->state = (_s); } while (0)
void scheduler_init(void);
void schedulers_start(void);
void sched_add_domain(struct domain *d);
void sched_rem_domain(struct domain *d);
long sched_ctl(struct sched_ctl_cmd *);
long sched_adjdom(struct sched_adjdom_cmd *);
int  sched_id(void);
void init_idle_task(void);
void domain_wake(struct domain *d);
void domain_sleep(struct domain *d);

void __enter_scheduler(void);

extern void switch_to(struct domain *prev,
                      struct domain *next);

void domain_init(void);

int idle_cpu(int cpu); /* Is CPU 'cpu' idle right now? */

void startup_cpu_idle_loop(void);
void continue_cpu_idle_loop(void);

void continue_nonidle_task(void);

/* task_hash and task_list are protected by the tasklist_lock. */
#define TASK_HASH_SIZE 256
#define TASK_HASH(_id) ((int)(_id)&(TASK_HASH_SIZE-1))
extern struct domain *task_hash[TASK_HASH_SIZE];
extern struct domain *task_list;

#define for_each_domain(_p) \
    for ( (_p) = task_list; (_p) != NULL; (_p) = (_p)->next_list )
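
/*
 * Illustrative sketch (not part of the original header): walking the
 * domain list under the tasklist_lock, as the comment above requires.
 * count_domains() is a hypothetical helper.
 */
static inline int count_domains(void)
{
    struct domain *d;
    int n = 0;
    read_lock(&tasklist_lock);
    for_each_domain ( d )
        n++;
    read_unlock(&tasklist_lock);
    return n;
}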

#define DF_DONEFPUINIT  0 /* Has the FPU been initialised for this task? */
#define DF_USEDFPU      1 /* Has this task used the FPU since last save? */
#define DF_GUEST_STTS   2 /* Has the guest OS requested 'stts'?          */
#define DF_CONSTRUCTED  3 /* Has the guest OS been fully built yet?      */
#define DF_IDLETASK     4 /* Is this one of the per-CPU idle domains?    */
#define DF_PRIVILEGED   5 /* Is this domain privileged?                  */
#define DF_PHYSDEV      6 /* May this domain do IO to physical devices?  */
#define DF_BLOCKED      7 /* Domain is blocked waiting for an event.     */
#define DF_CTRLPAUSE    8 /* Domain is paused by controller software.    */
#define DF_SHUTDOWN     9 /* Guest shut itself down for some reason.     */
#define DF_CRASHED     10 /* Domain crashed inside Xen, cannot continue. */
#define DF_DYING       11 /* Death rattle.                               */
#define DF_RUNNING     12 /* Currently running on a CPU.                 */
#define DF_CPUPINNED   13 /* Disables auto-migration.                    */
#define DF_MIGRATED    14 /* Domain migrated between CPUs.               */

static inline int domain_runnable(struct domain *d)
{
    return ( (atomic_read(&d->pausecnt) == 0) &&
             !(d->flags & ((1<<DF_BLOCKED)|(1<<DF_CTRLPAUSE)|
                           (1<<DF_SHUTDOWN)|(1<<DF_CRASHED))) );
}

static inline void domain_pause(struct domain *d)
{
    ASSERT(d != current);
    atomic_inc(&d->pausecnt);
    domain_sleep(d);
}

static inline void domain_unpause(struct domain *d)
{
    ASSERT(d != current);
    if ( atomic_dec_and_test(&d->pausecnt) )
        domain_wake(d);
}

static inline void domain_unblock(struct domain *d)
{
    if ( test_and_clear_bit(DF_BLOCKED, &d->flags) )
        domain_wake(d);
}

static inline void domain_pause_by_systemcontroller(struct domain *d)
{
    ASSERT(d != current);
    if ( !test_and_set_bit(DF_CTRLPAUSE, &d->flags) )
        domain_sleep(d);
}

static inline void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bit(DF_CTRLPAUSE, &d->flags) )
        domain_wake(d);
}
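
/*
 * Illustrative sketch (not part of the original header): bracketing an
 * inspection of another domain's state with pause/unpause. While paused,
 * pausecnt != 0, so domain_runnable() is false and the target cannot be
 * scheduled. sample_cpu_time() is a hypothetical helper.
 */
static inline s_time_t sample_cpu_time(struct domain *d)
{
    s_time_t t;
    domain_pause(d);    /* must not be called with d == current */
    t = d->cpu_time;
    domain_unpause(d);  /* wakes @d once pausecnt drops back to zero */
    return t;
}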

#define IS_PRIV(_d) (test_bit(DF_PRIVILEGED, &(_d)->flags))
#define IS_CAPABLE_PHYSDEV(_d) (test_bit(DF_PHYSDEV, &(_d)->flags))

#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))

#include <xen/slab.h>
#include <asm/domain.h>

#endif /* __SCHED_H__ */