debuggers.hg

view xen/arch/powerpc/domain.c @ 10989:16aa4b417c6b

[XEN] Clean up shutdown handling and ignore opt_noreboot if dom0
shuts down cleanly. The option is intended only to retain information
on the local console in case of a crash.

Based on a patch from Muli Ben-Yehuda <muli@il.ibm.com>

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Aug 07 15:35:06 2006 +0100 (2006-08-07)
parents 050de6b53961
children 3c361a48697f
line source
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15 *
16 * Copyright (C) IBM Corp. 2005, 2006
17 *
18 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
19 */
21 #include <stdarg.h>
22 #include <xen/config.h>
23 #include <xen/lib.h>
24 #include <xen/sched.h>
25 #include <xen/mm.h>
26 #include <xen/serial.h>
27 #include <xen/domain.h>
28 #include <xen/console.h>
29 #include <xen/shutdown.h>
30 #include <asm/htab.h>
31 #include <asm/current.h>
32 #include <asm/hcalls.h>
34 extern void idle_loop(void);
/*
 * Decode the next hypercall argument from a va_list, selected by a
 * one-character format code.  Evaluates (GCC statement expression) to the
 * argument widened to unsigned long, and advances BOTH the format pointer
 * and the va_list as side effects — callers must pass a real lvalue for
 * 'fmt'.
 *   'i' -> unsigned int
 *   'l' -> unsigned long
 *   'p' -> void * (plain pointer)
 *   'h' -> void * ('h' presumably denotes a guest handle; decoded the same
 *          way as 'p' here — TODO confirm against users of the format)
 * Any other code is a caller bug and triggers BUG().
 */
#define next_arg(fmt, args) ({ \
    unsigned long __arg; \
    switch ( *(fmt)++ ) \
    { \
    case 'i': __arg = (unsigned long)va_arg(args, unsigned int); break; \
    case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break; \
    case 'p': __arg = (unsigned long)va_arg(args, void *); break; \
    case 'h': __arg = (unsigned long)va_arg(args, void *); break; \
    default: __arg = 0; BUG(); \
    } \
    __arg; \
})
49 unsigned long hypercall_create_continuation(unsigned int op,
50 const char *format, ...)
51 {
52 struct cpu_user_regs *regs = guest_cpu_user_regs();
53 const char *p = format;
54 va_list args;
55 int gprnum = 4;
56 int i;
58 va_start(args, format);
60 regs->pc -= 4; /* re-execute 'sc' */
62 for (i = 0; *p != '\0'; i++) {
63 regs->gprs[gprnum++] = next_arg(p, args);
64 }
66 va_end(args);
68 /* As luck would have it, we use the same register for hcall opcodes and
69 * for hcall return values. The return value from this function is placed
70 * in r3 on return, so modifying regs->gprs[3] would have no effect. */
71 return XEN_MARK(op);
72 }
/*
 * Architecture-specific part of domain creation.
 * The idle domain only gets a zeroed xenheap page as shared info.  Real
 * domains additionally get a hard-coded 64MB real-mode area (RMA), a hash
 * page table, shared info placed at a fixed offset inside the RMA, and one
 * 16MB large-page size.  Returns 0 on success.
 */
int arch_domain_create(struct domain *d)
{
    if (d->domain_id == IDLE_DOMAIN_ID) {
        d->shared_info = (void *)alloc_xenheap_page();
        /* NOTE(review): allocation result is not checked — clear_page()
         * would fault if alloc_xenheap_page() failed. */
        clear_page(d->shared_info);

        return 0;
    }

    /* XXX the hackage... hardcode 64M domains */
    d->arch.rma_base = (64<<20) * (d->domain_id + 1);
    d->arch.rma_size = (64<<20);

    printk("clearing RMO: 0x%lx[0x%lx]\n", d->arch.rma_base, d->arch.rma_size);
    memset((void*)d->arch.rma_base, 0, d->arch.rma_size);

    htab_alloc(d, LOG_DEFAULT_HTAB_BYTES);

    /* shared_info lives at a fixed slot within the domain's RMA */
    d->shared_info = (shared_info_t *)
        (rma_addr(&d->arch, RMA_SHARED_INFO) + d->arch.rma_base);

    d->arch.large_page_sizes = 1;
    d->arch.large_page_shift[0] = 24; /* 16 M for 970s */

    return 0;
}
/* Architecture-specific teardown counterpart to arch_domain_create().
 * Not yet implemented on PowerPC: unimplemented() flags the call. */
void arch_domain_destroy(struct domain *d)
{
    unimplemented();
}
/* Halt the machine: announce it, switch the console to synchronous
 * output so the message is not lost, then spin forever. */
void machine_halt(void)
{
    printf("machine_halt called: spinning....\n");
    console_start_sync();
    for ( ; ; )
        ;
}
/* "Restart" the machine.  There is no real reboot path here yet: flush
 * the console synchronously and spin forever, leaving state on the
 * local console for inspection. */
void machine_restart(char * __unused)
{
    printf("machine_restart called: spinning....\n");
    console_start_sync();
    for ( ; ; )
        ;
}
121 struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
122 {
123 struct vcpu *v;
125 if ( (v = xmalloc(struct vcpu)) == NULL )
126 return NULL;
128 memset(v, 0, sizeof(*v));
129 v->vcpu_id = vcpu_id;
131 return v;
132 }
/* Free a vcpu structure.
 * The vcpu must already be the tail of its domain's vcpu list
 * (next_in_list == NULL, enforced by BUG_ON).  For any vcpu other than
 * vcpu 0, the predecessor's next_in_list link is cleared first so the
 * list never points at freed memory.  Assumes vcpu[vcpu_id - 1] exists
 * — presumably vcpus are created/destroyed in id order; verify against
 * callers. */
void free_vcpu_struct(struct vcpu *v)
{
    BUG_ON(v->next_in_list != NULL);
    if ( v->vcpu_id != 0 )
        v->domain->vcpu[v->vcpu_id - 1]->next_in_list = NULL;
    xfree(v);
}
/* Initialise vcpu 'v' from a guest-supplied context: copy the user
 * register block into the vcpu, mark the vcpu initialised, and run
 * per-cpu vcpu setup.  Always returns 0. */
int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_t *c)
{
    memcpy(&v->arch.ctxt, &c->user_regs, sizeof(c->user_regs));

    set_bit(_VCPUF_initialised, &v->vcpu_flags);

    cpu_init_vcpu(v);

    return 0;
}
153 void dump_pageframe_info(struct domain *d)
154 {
155 struct page_info *page;
157 printk("Memory pages belonging to domain %u:\n", d->domain_id);
159 if ( d->tot_pages >= 10 )
160 {
161 printk(" DomPage list too long to display\n");
162 }
163 else
164 {
165 list_for_each_entry ( page, &d->page_list, list )
166 {
167 printk(" DomPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
168 _p(page_to_maddr(page)), _p(page_to_mfn(page)),
169 page->count_info, page->u.inuse.type_info);
170 }
171 }
173 list_for_each_entry ( page, &d->xenpage_list, list )
174 {
175 printk(" XenPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
176 _p(page_to_maddr(page)), _p(page_to_mfn(page)),
177 page->count_info, page->u.inuse.type_info);
178 }
179 }
/*
 * Switch this physical CPU from vcpu 'prev' to vcpu 'next'.
 * Guest register state lives at the top of the hypervisor stack
 * (guest_cpu_user_regs()): prev's copy is saved off the stack into its
 * vcpu struct and replaced by next's.  SPR, FP and segment state are
 * saved/restored explicitly, the MMU is pointed at next's hash table,
 * and control finally jumps off this stack — this function never
 * returns to its caller.
 */
void context_switch(struct vcpu *prev, struct vcpu *next)
{
    struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;
    unsigned int cpu = smp_processor_id();

#if 0
    printf("%s: dom %x to dom %x\n", __func__, prev->domain->domain_id,
           next->domain->domain_id);
#endif

    /* Allow at most one CPU at a time to be dirty. */
    ASSERT(cpus_weight(dirty_mask) <= 1);
    if (unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)))
    {
        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
        /* NOTE(review): dirty_mask is a snapshot; re-testing the live
         * vcpu_dirty_cpumask here guards against it having been cleared
         * in the meantime — confirm against the x86 original. */
        if (!cpus_empty(next->vcpu_dirty_cpumask))
            flush_tlb_mask(next->vcpu_dirty_cpumask);
    }

    /* copy prev guest state off the stack into its vcpu */
    memcpy(&prev->arch.ctxt, stack_regs, sizeof(struct cpu_user_regs));

    set_current(next);

    /* copy next guest state onto the stack */
    memcpy(stack_regs, &next->arch.ctxt, sizeof(struct cpu_user_regs));

    /* save old domain state */
    save_sprs(prev);
    save_float(prev);
    save_segments(prev);

    /* prev's state is fully saved; it may be rescheduled elsewhere */
    context_saved(prev);

    /* load up new domain */
    load_sprs(next);
    load_float(next);
    load_segments(next);

    /* point the MMU at next's domain hash table, then flush this cpu */
    mtsdr1(next->domain->arch.htab.sdr1);
    local_flush_tlb(); /* XXX maybe flush_tlb_mask? */

    /* abandon the current stack frame: idle loop for the idle vcpu,
     * otherwise resume the guest */
    if (is_idle_vcpu(next)) {
        reset_stack_and_jump(idle_loop);
    }

    reset_stack_and_jump(full_resume);
    /* not reached */
}
/* Scheduler hook invoked when the same vcpu continues to run after a
 * scheduling decision; no PowerPC-specific state needs refreshing. */
void continue_running(struct vcpu *same)
{
    /* nothing to do */
}
/* Ensure a vcpu's execution state has been flushed out of any physical
 * cpu.  Not actually implemented here — it only logs the call. */
void sync_vcpu_execstate(struct vcpu *v)
{
    /* XXX for now, for domain destruction, make this non-fatal */
    printf("%s: called\n", __func__);
}
/* Release arch-specific resources during domain teardown.  Currently a
 * no-op on PowerPC (the open question in the original comment stands). */
void domain_relinquish_resources(struct domain *d)
{
    /* nothing to do? */
}
/* Hook for dumping architecture-specific domain state; nothing
 * PowerPC-specific is reported yet. */
void arch_dump_domain_info(struct domain *d)
{
}