debuggers.hg

view xen/common/domain.c @ 3705:4294cfa9fad3

bitkeeper revision 1.1159.212.95 (4204aa0ee0re5Xx1zWrJ9ejxzgRs3w)

Various cleanups. Remove PDB pending simpler GDB stub and/or NetBSD debugger.
Force emacs mode to appropriate tabbing in various files.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Sat Feb 05 11:12:14 2005 +0000 (2005-02-05)
parents 523e995bcc57
children d93748c50893
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
2 /******************************************************************************
3 * domain.c
4 *
5 * Generic domain-handling functions.
6 */
8 #include <xen/config.h>
9 #include <xen/init.h>
10 #include <xen/lib.h>
11 #include <xen/sched.h>
12 #include <xen/errno.h>
13 #include <xen/sched.h>
14 #include <xen/mm.h>
15 #include <xen/event.h>
16 #include <xen/time.h>
17 #include <xen/console.h>
18 #include <asm/shadow.h>
19 #include <public/dom0_ops.h>
20 #include <asm/domain_page.h>
22 /* Both these structures are protected by the domlist_lock. */
23 rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
24 struct domain *domain_hash[DOMAIN_HASH_SIZE];
25 struct domain *domain_list;
27 struct domain *dom0;
29 struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
30 {
31 struct domain *d, **pd;
32 struct exec_domain *ed;
34 if ( (d = alloc_domain_struct()) == NULL )
35 return NULL;
37 ed = d->exec_domain[0];
39 atomic_set(&d->refcnt, 1);
40 atomic_set(&ed->pausecnt, 0);
42 shadow_lock_init(ed);
44 d->id = dom_id;
45 ed->processor = cpu;
46 d->create_time = NOW();
48 memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread));
50 spin_lock_init(&d->time_lock);
52 spin_lock_init(&d->big_lock);
54 spin_lock_init(&d->page_alloc_lock);
55 INIT_LIST_HEAD(&d->page_list);
56 INIT_LIST_HEAD(&d->xenpage_list);
58 /* Per-domain PCI-device list. */
59 spin_lock_init(&d->pcidev_lock);
60 INIT_LIST_HEAD(&d->pcidev_list);
62 if ( (d->id != IDLE_DOMAIN_ID) &&
63 ((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
64 {
65 destroy_event_channels(d);
66 free_domain_struct(d);
67 return NULL;
68 }
70 arch_do_createdomain(ed);
72 sched_add_domain(ed);
74 if ( d->id != IDLE_DOMAIN_ID )
75 {
76 write_lock(&domlist_lock);
77 pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
78 for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_list )
79 if ( (*pd)->id > d->id )
80 break;
81 d->next_list = *pd;
82 *pd = d;
83 d->next_hash = domain_hash[DOMAIN_HASH(dom_id)];
84 domain_hash[DOMAIN_HASH(dom_id)] = d;
85 write_unlock(&domlist_lock);
86 }
88 return d;
89 }
92 struct domain *find_domain_by_id(domid_t dom)
93 {
94 struct domain *d;
96 read_lock(&domlist_lock);
97 d = domain_hash[DOMAIN_HASH(dom)];
98 while ( d != NULL )
99 {
100 if ( d->id == dom )
101 {
102 if ( unlikely(!get_domain(d)) )
103 d = NULL;
104 break;
105 }
106 d = d->next_hash;
107 }
108 read_unlock(&domlist_lock);
110 return d;
111 }
114 /* Return the most recently created domain. */
115 struct domain *find_last_domain(void)
116 {
117 struct domain *d, *dlast;
119 read_lock(&domlist_lock);
120 dlast = domain_list;
121 d = dlast->next_list;
122 while ( d != NULL )
123 {
124 if ( d->create_time > dlast->create_time )
125 dlast = d;
126 d = d->next_list;
127 }
128 if ( !get_domain(dlast) )
129 dlast = NULL;
130 read_unlock(&domlist_lock);
132 return dlast;
133 }
136 void domain_kill(struct domain *d)
137 {
138 struct exec_domain *ed;
140 domain_pause(d);
141 if ( !test_and_set_bit(DF_DYING, &d->d_flags) )
142 {
143 for_each_exec_domain(d, ed)
144 sched_rem_domain(ed);
145 domain_relinquish_memory(d);
146 put_domain(d);
147 }
148 }
151 void domain_crash(void)
152 {
153 struct domain *d = current->domain;
155 if ( d->id == 0 )
156 BUG();
158 set_bit(DF_CRASHED, &d->d_flags);
160 send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
162 __enter_scheduler();
163 BUG();
164 }
166 void domain_shutdown(u8 reason)
167 {
168 struct domain *d = current->domain;
170 if ( d->id == 0 )
171 {
172 extern void machine_restart(char *);
173 extern void machine_halt(void);
175 if ( reason == 0 )
176 {
177 printk("Domain 0 halted: halting machine.\n");
178 machine_halt();
179 }
180 else
181 {
182 printk("Domain 0 shutdown: rebooting machine.\n");
183 machine_restart(0);
184 }
185 }
187 d->shutdown_code = reason;
188 set_bit(DF_SHUTDOWN, &d->d_flags);
190 send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
192 __enter_scheduler();
193 }
195 unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
196 {
197 unsigned int alloc_pfns, nr_pages;
198 struct pfn_info *page;
200 nr_pages = (kbytes + ((PAGE_SIZE-1)>>10)) >> (PAGE_SHIFT - 10);
201 d->max_pages = nr_pages; /* this can now be controlled independently */
203 /* Grow the allocation if necessary. */
204 for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
205 {
206 if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
207 {
208 domain_relinquish_memory(d);
209 return -ENOMEM;
210 }
212 /* Initialise the machine-to-phys mapping for this page. */
213 set_machinetophys(page_to_pfn(page), alloc_pfns);
214 }
216 return 0;
217 }
/* Release resources belonging to task @p. */
void domain_destruct(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    /* Only a domain already marked dying may be destructed. */
    if ( !test_bit(DF_DYING, &d->d_flags) )
        BUG();

    /* May be already destructed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTRUCTED);
    /*
     * Atomically swap refcnt 0 -> DOMAIN_DESTRUCTED. A non-zero old value
     * means either someone still holds a reference or another CPU already
     * won this race and is/was destructing the domain: back off.
     */
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    write_lock(&domlist_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_list;
    *pd = d->next_list;
    pd = &domain_hash[DOMAIN_HASH(d->id)];
    while ( *pd != d )
        pd = &(*pd)->next_hash;
    *pd = d->next_hash;
    write_unlock(&domlist_lock);

    /* Unlinked and unreachable: tear down remaining resources. */
    destroy_event_channels(d);
    grant_table_destroy(d);

    free_perdomain_pt(d);
    free_xenheap_page((unsigned long)d->shared_info);

    free_domain_struct(d);
}
/*
 * final_setup_guestos is used for the final setup and launching of domains
 * other than domain 0, i.e. the domains that are built by the userspace
 * dom0 domain builder.
 */
263 int final_setup_guestos(struct domain *p, dom0_builddomain_t *builddomain)
264 {
265 int rc = 0;
266 full_execution_context_t *c;
268 if ( (c = xmalloc(full_execution_context_t)) == NULL )
269 return -ENOMEM;
271 if ( test_bit(DF_CONSTRUCTED, &p->d_flags) )
272 {
273 rc = -EINVAL;
274 goto out;
275 }
277 if ( copy_from_user(c, builddomain->ctxt, sizeof(*c)) )
278 {
279 rc = -EFAULT;
280 goto out;
281 }
283 if ( (rc = arch_final_setup_guestos(p->exec_domain[0],c)) != 0 )
284 goto out;
286 /* Set up the shared info structure. */
287 update_dom_time(p);
289 set_bit(DF_CONSTRUCTED, &p->d_flags);
291 out:
292 if ( c != NULL )
293 xfree(c);
294 return rc;
295 }
/*
 * do_boot_vcpu brings up an additional virtual CPU (beyond VCPU 0) in the
 * current domain, using an execution context supplied by the guest.
 */
/*
 * Bring up virtual CPU @vcpu of the current domain using the guest-
 * supplied execution context @ctxt. Allocates the exec-domain structure,
 * initialises it from the idle thread, registers it with the scheduler,
 * and wakes it if it was control-paused.
 *
 * Returns 0 on success; -EINVAL (bad vcpu index or already booted),
 * -ENOMEM, -EFAULT (bad user pointer), or the arch layer's error code.
 * On failure the newly allocated exec-domain structure is freed again.
 */
long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt)
{
    struct domain *d = current->domain;
    struct exec_domain *ed;
    int rc = 0;
    full_execution_context_t *c;

    if ( (vcpu >= MAX_VIRT_CPUS) || (d->exec_domain[vcpu] != NULL) )
        return -EINVAL;

    if ( alloc_exec_domain_struct(d, vcpu) == NULL )
        return -ENOMEM;

    if ( (c = xmalloc(full_execution_context_t)) == NULL )
    {
        rc = -ENOMEM;
        goto out;
    }

    if ( copy_from_user(c, ctxt, sizeof(*c)) )
    {
        rc = -EFAULT;
        goto out;
    }

    ed = d->exec_domain[vcpu];

    atomic_set(&ed->pausecnt, 0);
    shadow_lock_init(ed);

    /* Start from a copy of the idle thread state, as do_createdomain does. */
    memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread));

    arch_do_boot_vcpu(ed);

    sched_add_domain(ed);

    if ( (rc = arch_final_setup_guestos(ed, c)) != 0 ) {
        /* Undo the scheduler registration before the common cleanup. */
        sched_rem_domain(ed);
        goto out;
    }

    /* Set up the shared info structure. */
    update_dom_time(d);

    /* domain_unpause_by_systemcontroller */
    if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
        domain_wake(ed);

    xfree(c);
    return 0;

 out:
    /* 'c' may legitimately be NULL here if its allocation failed. */
    if ( c != NULL )
        xfree(c);
    /* Roll back the exec-domain allocation so the slot can be reused. */
    arch_free_exec_domain_struct(d->exec_domain[vcpu]);
    d->exec_domain[vcpu] = NULL;
    return rc;
}
361 long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
362 {
363 if ( type > MAX_VMASST_TYPE )
364 return -EINVAL;
366 switch ( cmd )
367 {
368 case VMASST_CMD_enable:
369 set_bit(type, &p->vm_assist);
370 if (vm_assist_info[type].enable)
371 (*vm_assist_info[type].enable)(p);
372 return 0;
373 case VMASST_CMD_disable:
374 clear_bit(type, &p->vm_assist);
375 if (vm_assist_info[type].disable)
376 (*vm_assist_info[type].disable)(p);
377 return 0;
378 }
380 return -ENOSYS;
381 }