debuggers.hg

view xen/common/domain.c @ 3329:37cb59b9ddfd

bitkeeper revision 1.1159.1.484 (41c1a3e20WEWxhNQDQK6avGv36pVEA)

Remove per vcpu misdirect virq support.
author cl349@arcadians.cl.cam.ac.uk
date Thu Dec 16 15:04:02 2004 +0000 (2004-12-16)
parents 545088ce72b5
children 597555bad4b5
line source
1 /******************************************************************************
2 * domain.c
3 *
4 * Generic domain-handling functions.
5 */
7 #include <xen/config.h>
8 #include <xen/init.h>
9 #include <xen/lib.h>
10 #include <xen/sched.h>
11 #include <xen/errno.h>
12 #include <xen/sched.h>
13 #include <xen/mm.h>
14 #include <xen/event.h>
15 #include <xen/time.h>
16 #include <xen/console.h>
17 #include <asm/shadow.h>
18 #include <public/dom0_ops.h>
19 #include <asm/domain_page.h>
/* Both these structures are protected by the domlist_lock. */
rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
struct domain *domain_hash[DOMAIN_HASH_SIZE]; /* id -> domain lookup chains */
struct domain *domain_list;                   /* kept sorted by domain id    */
26 struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
27 {
28 struct domain *d, **pd;
29 struct exec_domain *ed;
31 if ( (d = alloc_domain_struct()) == NULL )
32 return NULL;
34 ed = d->exec_domain[0];
36 atomic_set(&d->refcnt, 1);
37 atomic_set(&ed->pausecnt, 0);
39 shadow_lock_init(ed);
41 d->id = dom_id;
42 ed->processor = cpu;
43 d->create_time = NOW();
45 memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread));
47 spin_lock_init(&d->time_lock);
49 spin_lock_init(&d->big_lock);
51 spin_lock_init(&d->page_alloc_lock);
52 INIT_LIST_HEAD(&d->page_list);
53 INIT_LIST_HEAD(&d->xenpage_list);
55 /* Per-domain PCI-device list. */
56 spin_lock_init(&d->pcidev_lock);
57 INIT_LIST_HEAD(&d->pcidev_list);
59 if ( (d->id != IDLE_DOMAIN_ID) &&
60 ((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
61 {
62 destroy_event_channels(d);
63 free_domain_struct(d);
64 return NULL;
65 }
67 arch_do_createdomain(ed);
69 sched_add_domain(ed);
71 if ( d->id != IDLE_DOMAIN_ID )
72 {
73 write_lock(&domlist_lock);
74 pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
75 for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_list )
76 if ( (*pd)->id > d->id )
77 break;
78 d->next_list = *pd;
79 *pd = d;
80 d->next_hash = domain_hash[DOMAIN_HASH(dom_id)];
81 domain_hash[DOMAIN_HASH(dom_id)] = d;
82 write_unlock(&domlist_lock);
83 }
85 return d;
86 }
89 struct domain *find_domain_by_id(domid_t dom)
90 {
91 struct domain *d;
93 read_lock(&domlist_lock);
94 d = domain_hash[DOMAIN_HASH(dom)];
95 while ( d != NULL )
96 {
97 if ( d->id == dom )
98 {
99 if ( unlikely(!get_domain(d)) )
100 d = NULL;
101 break;
102 }
103 d = d->next_hash;
104 }
105 read_unlock(&domlist_lock);
107 return d;
108 }
111 /* Return the most recently created domain. */
112 struct domain *find_last_domain(void)
113 {
114 struct domain *d, *dlast;
116 read_lock(&domlist_lock);
117 dlast = domain_list;
118 d = dlast->next_list;
119 while ( d != NULL )
120 {
121 if ( d->create_time > dlast->create_time )
122 dlast = d;
123 d = d->next_list;
124 }
125 if ( !get_domain(dlast) )
126 dlast = NULL;
127 read_unlock(&domlist_lock);
129 return dlast;
130 }
/*
 * Begin destruction of domain @d: pause it, remove all its exec domains
 * from the scheduler, release its memory, and drop the initial reference
 * taken at creation.  DF_DYING ensures the teardown runs at most once.
 */
void domain_kill(struct domain *d)
{
    struct exec_domain *ed;

    domain_pause(d);
    if ( !test_and_set_bit(DF_DYING, &d->d_flags) )  /* first killer only */
    {
        for_each_exec_domain(d, ed)
            sched_rem_domain(ed);
        domain_relinquish_memory(d);
        /* Drop the reference set by do_createdomain(); final teardown
         * happens in domain_destruct() when the refcount reaches zero. */
        put_domain(d);
    }
}
/*
 * Mark the currently-executing domain as crashed, notify the domain
 * controller (dom0) via VIRQ_DOM_EXC, and deschedule.  Never returns.
 */
void domain_crash(void)
{
    struct domain *d = current->domain;

    if ( d->id == 0 )
        BUG();  /* a crash of dom0 is unrecoverable */

    set_bit(DF_CRASHED, &d->d_flags);

    /* Ask dom0's controller to deal with the crashed domain. */
    send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);

    __enter_scheduler();
    BUG();  /* the scheduler must never resume a crashed domain */
}
163 void domain_shutdown(u8 reason)
164 {
165 struct domain *d = current->domain;
167 if ( d->id == 0 )
168 {
169 extern void machine_restart(char *);
170 extern void machine_halt(void);
172 if ( reason == 0 )
173 {
174 printk("Domain 0 halted: halting machine.\n");
175 machine_halt();
176 }
177 else
178 {
179 printk("Domain 0 shutdown: rebooting machine.\n");
180 machine_restart(0);
181 }
182 }
184 d->shutdown_code = reason;
185 set_bit(DF_SHUTDOWN, &d->d_flags);
187 send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
189 __enter_scheduler();
190 }
/*
 * Grow domain @d's heap allocation up to @kbytes kilobytes and set its
 * max_pages cap accordingly.  Returns 0 on success.
 *
 * NOTE(review): the return type is unsigned int, yet the failure path
 * returns -ENOMEM, which callers will see as a large positive value.
 * Callers comparing against 0 still work, but the signature should
 * probably be int -- confirm against call sites before changing.
 */
unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
{
    unsigned int alloc_pfns, nr_pages;
    struct pfn_info *page;

    /* Round kilobytes up to a whole number of pages. */
    nr_pages = (kbytes + ((PAGE_SIZE-1)>>10)) >> (PAGE_SHIFT - 10);
    d->max_pages = nr_pages; /* this can now be controlled independently */

    /* Grow the allocation if necessary. */
    for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
    {
        if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
        {
            /* On failure, release everything allocated so far. */
            domain_relinquish_memory(d);
            return -ENOMEM;
        }

        /* Initialise the machine_to_phys_mapping entry to a likely pfn. */
        machine_to_phys_mapping[page-frame_table] = alloc_pfns;
    }

    return 0;
}
/* Release resources belonging to task @p. */
void domain_destruct(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    if ( !test_bit(DF_DYING, &d->d_flags) )
        BUG();  /* must only be called after domain_kill() marked it dying */

    /*
     * May be already destructed, or get_domain() can race us: atomically
     * swing the refcount from 0 to DOMAIN_DESTRUCTED.  Only the caller
     * that wins this CAS (observed 0) proceeds with teardown.
     */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTRUCTED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    write_lock(&domlist_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_list;
    *pd = d->next_list;
    pd = &domain_hash[DOMAIN_HASH(d->id)];
    while ( *pd != d )
        pd = &(*pd)->next_hash;
    *pd = d->next_hash;
    write_unlock(&domlist_lock);

    /* Tear down remaining resources, then free the structure itself. */
    destroy_event_channels(d);
    grant_table_destroy(d);

    free_perdomain_pt(d);
    free_xenheap_page((unsigned long)d->shared_info);

    free_domain_struct(d);
}
255 /*
256 * final_setup_guestos is used for final setup and launching of domains other
257 * than domain 0. ie. the domains that are being built by the userspace dom0
258 * domain builder.
259 */
260 int final_setup_guestos(struct domain *p, dom0_builddomain_t *builddomain)
261 {
262 int rc = 0;
263 full_execution_context_t *c;
265 if ( (c = xmalloc(sizeof(*c))) == NULL )
266 return -ENOMEM;
268 if ( test_bit(DF_CONSTRUCTED, &p->d_flags) )
269 {
270 rc = -EINVAL;
271 goto out;
272 }
274 if ( copy_from_user(c, builddomain->ctxt, sizeof(*c)) )
275 {
276 rc = -EFAULT;
277 goto out;
278 }
280 if ( (rc = arch_final_setup_guestos(p->exec_domain[0],c)) != 0 )
281 goto out;
283 /* Set up the shared info structure. */
284 update_dom_time(p);
286 set_bit(DF_CONSTRUCTED, &p->d_flags);
288 out:
289 if ( c != NULL )
290 xfree(c);
291 return rc;
292 }
294 extern xmem_cache_t *exec_domain_struct_cachep;
296 /*
297 * final_setup_guestos is used for final setup and launching of domains other
298 * than domain 0. ie. the domains that are being built by the userspace dom0
299 * domain builder.
300 */
301 long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt)
302 {
303 struct domain *d = current->domain;
304 struct exec_domain *ed;
305 int rc = 0;
306 full_execution_context_t *c;
308 if ( d->exec_domain[vcpu] != NULL )
309 return EINVAL;
311 if ( alloc_exec_domain_struct(d, vcpu) == NULL )
312 return -ENOMEM;
314 if ( (c = xmalloc(sizeof(*c))) == NULL )
315 {
316 rc = -ENOMEM;
317 goto out;
318 }
320 if ( copy_from_user(c, ctxt, sizeof(*c)) )
321 {
322 rc = -EFAULT;
323 goto out;
324 }
326 ed = d->exec_domain[vcpu];
328 atomic_set(&ed->pausecnt, 0);
329 shadow_lock_init(ed);
331 memcpy(&ed->thread, &idle0_exec_domain.thread, sizeof(ed->thread));
333 /* arch_do_createdomain */
334 ed->thread.schedule_tail = d->exec_domain[0]->thread.schedule_tail;
335 ed->mm.perdomain_ptes = d->mm_perdomain_pt + (ed->eid << PDPT_VCPU_SHIFT);
337 sched_add_domain(ed);
339 if ( (rc = arch_final_setup_guestos(ed, c)) != 0 ) {
340 sched_rem_domain(ed);
341 goto out;
342 }
344 /* Set up the shared info structure. */
345 update_dom_time(d);
347 /* domain_unpause_by_systemcontroller */
348 if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
349 domain_wake(ed);
351 xfree(c);
352 return 0;
354 out:
355 if ( c != NULL )
356 xfree(c);
357 xmem_cache_free(exec_domain_struct_cachep, d->exec_domain[vcpu]);
358 d->exec_domain[vcpu] = NULL;
359 return rc;
360 }
362 long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
363 {
364 if ( type > MAX_VMASST_TYPE )
365 return -EINVAL;
367 switch ( cmd )
368 {
369 case VMASST_CMD_enable:
370 set_bit(type, &p->vm_assist);
371 if (vm_assist_info[type].enable)
372 (*vm_assist_info[type].enable)(p);
373 return 0;
374 case VMASST_CMD_disable:
375 clear_bit(type, &p->vm_assist);
376 if (vm_assist_info[type].disable)
377 (*vm_assist_info[type].disable)(p);
378 return 0;
379 }
381 return -ENOSYS;
382 }