debuggers.hg

view xen/common/domain.c @ 3701:523e995bcc57

bitkeeper revision 1.1159.212.93 (42049334yxlf_y0Z3UvBj3IoXK5ToA)

Merge scramble.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into scramble.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author kaf24@scramble.cl.cam.ac.uk
date Sat Feb 05 09:34:44 2005 +0000 (2005-02-05)
parents 0ef6e8e6e85d 61899d6ae2eb
children 4294cfa9fad3
line source
1 /******************************************************************************
2 * domain.c
3 *
4 * Generic domain-handling functions.
5 */
7 #include <xen/config.h>
8 #include <xen/init.h>
9 #include <xen/lib.h>
10 #include <xen/sched.h>
11 #include <xen/errno.h>
12 #include <xen/sched.h>
13 #include <xen/mm.h>
14 #include <xen/event.h>
15 #include <xen/time.h>
16 #include <xen/console.h>
17 #include <asm/shadow.h>
18 #include <public/dom0_ops.h>
19 #include <asm/domain_page.h>
21 /* Both these structures are protected by the domlist_lock. */
22 rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
/* Hash of domains keyed on DOMAIN_HASH(id); chained via d->next_hash. */
23 struct domain *domain_hash[DOMAIN_HASH_SIZE];
/* Singly-linked list of all domains, kept sorted by ascending id and
 * chained via d->next_list (see do_createdomain()). */
24 struct domain *domain_list;
/* The privileged control domain; notified via VIRQ_DOM_EXC on
 * crash/shutdown of other domains. */
26 struct domain *dom0;
28 struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
29 {
30 struct domain *d, **pd;
31 struct exec_domain *ed;
33 if ( (d = alloc_domain_struct()) == NULL )
34 return NULL;
36 ed = d->exec_domain[0];
38 atomic_set(&d->refcnt, 1);
39 atomic_set(&ed->pausecnt, 0);
41 shadow_lock_init(ed);
43 d->id = dom_id;
44 ed->processor = cpu;
45 d->create_time = NOW();
47 memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread));
49 spin_lock_init(&d->time_lock);
51 spin_lock_init(&d->big_lock);
53 spin_lock_init(&d->page_alloc_lock);
54 INIT_LIST_HEAD(&d->page_list);
55 INIT_LIST_HEAD(&d->xenpage_list);
57 /* Per-domain PCI-device list. */
58 spin_lock_init(&d->pcidev_lock);
59 INIT_LIST_HEAD(&d->pcidev_list);
61 if ( (d->id != IDLE_DOMAIN_ID) &&
62 ((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
63 {
64 destroy_event_channels(d);
65 free_domain_struct(d);
66 return NULL;
67 }
69 arch_do_createdomain(ed);
71 sched_add_domain(ed);
73 if ( d->id != IDLE_DOMAIN_ID )
74 {
75 write_lock(&domlist_lock);
76 pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
77 for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_list )
78 if ( (*pd)->id > d->id )
79 break;
80 d->next_list = *pd;
81 *pd = d;
82 d->next_hash = domain_hash[DOMAIN_HASH(dom_id)];
83 domain_hash[DOMAIN_HASH(dom_id)] = d;
84 write_unlock(&domlist_lock);
85 }
87 return d;
88 }
91 struct domain *find_domain_by_id(domid_t dom)
92 {
93 struct domain *d;
95 read_lock(&domlist_lock);
96 d = domain_hash[DOMAIN_HASH(dom)];
97 while ( d != NULL )
98 {
99 if ( d->id == dom )
100 {
101 if ( unlikely(!get_domain(d)) )
102 d = NULL;
103 break;
104 }
105 d = d->next_hash;
106 }
107 read_unlock(&domlist_lock);
109 return d;
110 }
113 /* Return the most recently created domain. */
114 struct domain *find_last_domain(void)
115 {
116 struct domain *d, *dlast;
118 read_lock(&domlist_lock);
119 dlast = domain_list;
120 d = dlast->next_list;
121 while ( d != NULL )
122 {
123 if ( d->create_time > dlast->create_time )
124 dlast = d;
125 d = d->next_list;
126 }
127 if ( !get_domain(dlast) )
128 dlast = NULL;
129 read_unlock(&domlist_lock);
131 return dlast;
132 }
/*
 * Tear down domain @d: pause it, then -- exactly once, guarded by the
 * DF_DYING flag -- remove each exec_domain from the scheduler, release
 * the domain's memory, and drop one reference.
 */
135 void domain_kill(struct domain *d)
136 {
137 struct exec_domain *ed;
139 domain_pause(d);
/* test_and_set_bit ensures the teardown body runs at most once even if
 * domain_kill() is invoked repeatedly. */
140 if ( !test_and_set_bit(DF_DYING, &d->d_flags) )
141 {
142 for_each_exec_domain(d, ed)
143 sched_rem_domain(ed);
144 domain_relinquish_memory(d);
/* Drop the reference set up by do_createdomain() (refcnt starts at 1). */
145 put_domain(d);
146 }
147 }
/*
 * Mark the currently-executing domain as crashed and deschedule it.
 * dom0 is notified via VIRQ_DOM_EXC so its controller can react.
 * Never returns; never legal for domain 0.
 */
150 void domain_crash(void)
151 {
152 struct domain *d = current->domain;
/* A crash of domain 0 is unrecoverable: stop the hypervisor. */
154 if ( d->id == 0 )
155 BUG();
157 set_bit(DF_CRASHED, &d->d_flags);
/* Tell the domain-0 control software that this domain died. */
159 send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
161 __enter_scheduler();
/* The scheduler must never switch back to a crashed domain. */
162 BUG();
163 }
165 void domain_shutdown(u8 reason)
166 {
167 struct domain *d = current->domain;
169 if ( d->id == 0 )
170 {
171 extern void machine_restart(char *);
172 extern void machine_halt(void);
174 if ( reason == 0 )
175 {
176 printk("Domain 0 halted: halting machine.\n");
177 machine_halt();
178 }
179 else
180 {
181 printk("Domain 0 shutdown: rebooting machine.\n");
182 machine_restart(0);
183 }
184 }
186 d->shutdown_code = reason;
187 set_bit(DF_SHUTDOWN, &d->d_flags);
189 send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
191 __enter_scheduler();
192 }
/*
 * Grow domain @d's memory allocation to @kbytes kilobytes, and set its
 * maximum allocation (d->max_pages) to the same value.  Returns 0 on
 * success.
 *
 * NOTE(review): the return type is unsigned int, yet -ENOMEM is
 * returned on failure; callers must test for non-zero rather than for a
 * negative value.  This should probably be plain int -- confirm callers
 * before changing the signature.
 */
194 unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
195 {
196 unsigned int alloc_pfns, nr_pages;
197 struct pfn_info *page;
/* Round kbytes up to a whole number of pages. */
199 nr_pages = (kbytes + ((PAGE_SIZE-1)>>10)) >> (PAGE_SHIFT - 10);
200 d->max_pages = nr_pages; /* this can now be controlled independently */
202 /* Grow the allocation if necessary. */
203 for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
204 {
205 if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
206 {
/* Allocation failed part-way: give back everything taken so far. */
207 domain_relinquish_memory(d);
208 return -ENOMEM;
209 }
211 /* Initialise the machine-to-phys mapping for this page. */
212 set_machinetophys(page_to_pfn(page), alloc_pfns);
213 }
215 return 0;
216 }
219 /* Release resources belonging to task @p. */
/*
 * Final destruction of a dying domain: unlink it from the global list
 * and hash, tear down event channels and grant table, and free its
 * pages and control structures.  Safe against concurrent callers and
 * against racing get_domain() via the refcnt compare-and-swap below.
 */
220 void domain_destruct(struct domain *d)
221 {
222 struct domain **pd;
223 atomic_t old, new;
/* Only a domain already marked dying may be destructed. */
225 if ( !test_bit(DF_DYING, &d->d_flags) )
226 BUG();
228 /* May be already destructed, or get_domain() can race us. */
/* Atomically swing refcnt 0 -> DOMAIN_DESTRUCTED; if it was not exactly
 * zero, someone else holds (or just took) a reference -- back off. */
229 _atomic_set(old, 0);
230 _atomic_set(new, DOMAIN_DESTRUCTED);
231 old = atomic_compareandswap(old, new, &d->refcnt);
232 if ( _atomic_read(old) != 0 )
233 return;
235 /* Delete from task list and task hashtable. */
236 write_lock(&domlist_lock);
237 pd = &domain_list;
238 while ( *pd != d )
239 pd = &(*pd)->next_list;
240 *pd = d->next_list;
241 pd = &domain_hash[DOMAIN_HASH(d->id)];
242 while ( *pd != d )
243 pd = &(*pd)->next_hash;
244 *pd = d->next_hash;
245 write_unlock(&domlist_lock);
247 destroy_event_channels(d);
248 grant_table_destroy(d);
250 free_perdomain_pt(d);
251 free_xenheap_page((unsigned long)d->shared_info);
253 free_domain_struct(d);
254 }
257 /*
258 * final_setup_guestos is used for final setup and launching of domains other
259 * than domain 0. ie. the domains that are being built by the userspace dom0
260 * domain builder.
261 */
262 int final_setup_guestos(struct domain *p, dom0_builddomain_t *builddomain)
263 {
264 int rc = 0;
265 full_execution_context_t *c;
267 if ( (c = xmalloc(full_execution_context_t)) == NULL )
268 return -ENOMEM;
270 if ( test_bit(DF_CONSTRUCTED, &p->d_flags) )
271 {
272 rc = -EINVAL;
273 goto out;
274 }
276 if ( copy_from_user(c, builddomain->ctxt, sizeof(*c)) )
277 {
278 rc = -EFAULT;
279 goto out;
280 }
282 if ( (rc = arch_final_setup_guestos(p->exec_domain[0],c)) != 0 )
283 goto out;
285 /* Set up the shared info structure. */
286 update_dom_time(p);
288 set_bit(DF_CONSTRUCTED, &p->d_flags);
290 out:
291 if ( c != NULL )
292 xfree(c);
293 return rc;
294 }
296 /*
297 * do_boot_vcpu is used to bring an additional virtual CPU online in an
298 * already-created domain, initialising the new exec_domain from the
299 * execution context supplied by the guest.
300 */
/*
 * Bring virtual CPU @vcpu of the current domain online, loading the
 * user-supplied execution context @ctxt into the new exec_domain.
 * Returns 0 on success or a negative errno-style value; on failure the
 * newly allocated exec_domain is freed again.
 */
301 long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt)
302 {
303 struct domain *d = current->domain;
304 struct exec_domain *ed;
305 int rc = 0;
306 full_execution_context_t *c;
/* Reject out-of-range VCPU ids and VCPUs that already exist. */
308 if ( (vcpu >= MAX_VIRT_CPUS) || (d->exec_domain[vcpu] != NULL) )
309 return -EINVAL;
311 if ( alloc_exec_domain_struct(d, vcpu) == NULL )
312 return -ENOMEM;
314 if ( (c = xmalloc(full_execution_context_t)) == NULL )
315 {
316 rc = -ENOMEM;
317 goto out;
318 }
320 if ( copy_from_user(c, ctxt, sizeof(*c)) )
321 {
322 rc = -EFAULT;
323 goto out;
324 }
326 ed = d->exec_domain[vcpu];
328 atomic_set(&ed->pausecnt, 0);
329 shadow_lock_init(ed);
/* Start from a copy of the idle thread state, as do_createdomain()
 * does for VCPU0. */
331 memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread));
333 arch_do_boot_vcpu(ed);
335 sched_add_domain(ed);
/* On arch setup failure, undo the scheduler registration before the
 * common cleanup below. */
337 if ( (rc = arch_final_setup_guestos(ed, c)) != 0 ) {
338 sched_rem_domain(ed);
339 goto out;
340 }
342 /* Set up the shared info structure. */
343 update_dom_time(d);
345 /* domain_unpause_by_systemcontroller */
346 if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
347 domain_wake(ed);
349 xfree(c);
350 return 0;
352 out:
/* c may legitimately be NULL here (xmalloc failure path). */
353 if ( c != NULL )
354 xfree(c);
/* Roll back the exec_domain allocation so a later retry is possible. */
355 arch_free_exec_domain_struct(d->exec_domain[vcpu]);
356 d->exec_domain[vcpu] = NULL;
357 return rc;
358 }
360 long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
361 {
362 if ( type > MAX_VMASST_TYPE )
363 return -EINVAL;
365 switch ( cmd )
366 {
367 case VMASST_CMD_enable:
368 set_bit(type, &p->vm_assist);
369 if (vm_assist_info[type].enable)
370 (*vm_assist_info[type].enable)(p);
371 return 0;
372 case VMASST_CMD_disable:
373 clear_bit(type, &p->vm_assist);
374 if (vm_assist_info[type].disable)
375 (*vm_assist_info[type].disable)(p);
376 return 0;
377 }
379 return -ENOSYS;
380 }