
view xen/arch/x86/hvm/hvm.c @ 10949:b33c08de3d98

[HVM] Add a concept of HVM parameters to the hypervisor.

Each HVM domain has a space of HVM parameters associated with it,
and these can be manipulated via a new hvm_op hypercall. This means
that the hypervisor no longer needs to parse the hvm_info table, so
remove that code.
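
As an illustration of the new interface, here is a minimal sketch of driving
hvm_op from a guest (or, with an explicit domid, from a privileged toolstack).
The HYPERVISOR_hvm_op wrapper and the header path are assumptions; struct
xen_hvm_param, HVMOP_set_param/HVMOP_get_param, DOMID_SELF and HVM_NR_PARAMS
all appear in the code below.

    #include <xen/hvm/hvm_op.h>   /* assumed location of struct xen_hvm_param */

    /* Set one HVM parameter on the calling domain. Returns 0 on success. */
    static long set_hvm_param(uint32_t index, unsigned long value)
    {
        struct xen_hvm_param a = {
            .domid = DOMID_SELF,  /* privileged callers may name another domain */
            .index = index,       /* rejected with -EINVAL unless < HVM_NR_PARAMS */
            .value = value,
        };
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);  /* assumed guest wrapper */
    }

    /* Read one back; in this revision do_hvm_op returns the value itself
     * as the hypercall return code. */
    static long get_hvm_param(uint32_t index)
    {
        struct xen_hvm_param a = { .domid = DOMID_SELF, .index = index };
        return HYPERVISOR_hvm_op(HVMOP_get_param, &a);
    }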

Signed-off-by: Steven Smith <ssmith@xensource.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Aug 03 13:53:33 2006 +0100 (2006-08-03)
parents c8ee670ac87e
children 7ff6020e4758
line source
/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/shadow.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/hvm_info_table.h>
#include <xen/guest_access.h>
int hvm_enabled = 0;

unsigned int opt_hvm_debug_level = 0;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs;
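
/*
 * Invalidate the P2M entries covering a guest-physical MMIO range, so
 * that guest accesses to it fault and can be forwarded to the device
 * model. The loop stops at pfn 0xfffff, the top of the 32-bit
 * guest-physical address space.
 */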
static void hvm_zap_mmio_range(
    struct domain *d, unsigned long pfn, unsigned long nr_pfn)
{
    unsigned long i, val = INVALID_MFN;

    for ( i = 0; i < nr_pfn; i++ )
    {
        if ( pfn + i >= 0xfffff )
            break;

        __copy_to_user(&phys_to_machine_mapping[pfn + i], &val, sizeof(val));
    }
}
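
/*
 * Locate the I/O request shared page by scanning the E820 map that the
 * domain builder placed at E820_MAP_PAGE: an E820_SHARED_PAGE entry
 * names the page used to exchange ioreqs with the device model, and
 * every E820_IO range has its P2M entries zapped so accesses trap.
 * The shared page is mapped globally so it is reachable from any CPU.
 */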
static void hvm_map_io_shared_page(struct domain *d)
{
    int i;
    unsigned char e820_map_nr;
    struct e820entry *e820entry;
    unsigned char *p;
    unsigned long mfn;
    unsigned long gpfn = 0;

    local_flush_tlb_pge();

    mfn = get_mfn_from_gpfn(E820_MAP_PAGE >> PAGE_SHIFT);
    if ( mfn == INVALID_MFN )
    {
        printk("Cannot find E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page(mfn);
    if ( p == NULL )
    {
        printk("Cannot map E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    e820_map_nr = *(p + E820_MAP_NR_OFFSET);
    e820entry = (struct e820entry *)(p + E820_MAP_OFFSET);

    for ( i = 0; i < e820_map_nr; i++ )
    {
        if ( e820entry[i].type == E820_SHARED_PAGE )
            gpfn = (e820entry[i].addr >> PAGE_SHIFT);
        if ( e820entry[i].type == E820_IO )
            hvm_zap_mmio_range(
                d,
                e820entry[i].addr >> PAGE_SHIFT,
                e820entry[i].size >> PAGE_SHIFT);
    }

    if ( gpfn == 0 )
    {
        printk("Cannot get I/O request shared page"
               " from E820 memory map for HVM domain.\n");
        unmap_domain_page(p);
        domain_crash_synchronous();
    }
    unmap_domain_page(p);

    /* Initialise shared page. */
    mfn = get_mfn_from_gpfn(gpfn);
    if ( mfn == INVALID_MFN )
    {
        printk("Cannot find I/O request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page_global(mfn);
    if ( p == NULL )
    {
        printk("Cannot map I/O request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }
    d->arch.hvm_domain.shared_page_va = (unsigned long)p;
}
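
/*
 * One-time platform setup, performed in the context of the domain's
 * BSP (VCPU 0): direct-map shadow, ioreq shared page, virtual PIC,
 * optional IOAPIC support, and the platform timers.
 */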
void hvm_setup_platform(struct domain *d)
{
    struct hvm_domain *platform;
    struct vcpu *v = current;

    if ( !hvm_guest(v) || (v->vcpu_id != 0) )
        return;

    if ( shadow_direct_map_init(d) == 0 )
    {
        printk("Cannot allocate shadow direct map for HVM domain.\n");
        domain_crash_synchronous();
    }

    hvm_map_io_shared_page(d);

    platform = &d->arch.hvm_domain;
    pic_init(&platform->vpic, pic_irq_request, &platform->interrupt_request);
    register_pic_io_hook();

    if ( hvm_apic_support(d) )
    {
        spin_lock_init(&d->arch.hvm_domain.round_robin_lock);
        hvm_vioapic_init(d);
    }

    init_timer(&platform->pl_time.periodic_tm.timer,
               pt_timer_fn, v, v->processor);
    pit_init(v, cpu_khz);
}
void pic_irq_request(void *data, int level)
{
    int *interrupt_request = data;
    *interrupt_request = level;
}
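
/*
 * Fold PIC interrupt lines raised or cleared by the device model into
 * the virtual PIC. The pic_irr/pic_clear_irr bitmaps live in the shared
 * page, so each is snapshotted and zeroed atomically with cmpxchg();
 * a line raised concurrently by the device model is therefore never lost.
 */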
void hvm_pic_assist(struct vcpu *v)
{
    global_iodata_t *spg;
    u16 *virq_line, irqs;
    struct hvm_virpic *pic = &v->domain->arch.hvm_domain.vpic;

    spg = &get_sp(v->domain)->sp_global;
    virq_line = &spg->pic_clear_irr;
    if ( *virq_line )
    {
        do {
            irqs = *(volatile u16 *)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs_clear(pic, irqs);
    }

    virq_line = &spg->pic_irr;
    if ( *virq_line )
    {
        do {
            irqs = *(volatile u16 *)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs(pic, irqs);
    }
}
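
/* Guest time is the host TSC plus the vcpu's cached TSC offset. */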
u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}
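
/*
 * Select the next interrupt to inject: the local APIC has priority, and
 * the PIC is consulted only for VCPU 0, the only vcpu it is wired to.
 */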
int cpu_get_interrupt(struct vcpu *v, int *type)
{
    int intno;
    struct hvm_virpic *s = &v->domain->arch.hvm_domain.vpic;
    unsigned long flags;

    if ( (intno = cpu_get_apic_interrupt(v, type)) != -1 )
    {
        /* Set irq request if a PIC irq is still pending. */
        /* XXX: improve that */
        spin_lock_irqsave(&s->lock, flags);
        pic_update_irq(s);
        spin_unlock_irqrestore(&s->lock, flags);
        return intno;
    }

    /* Read the irq from the PIC. */
    if ( v->vcpu_id == 0 && (intno = cpu_get_pic_interrupt(v, type)) != -1 )
        return intno;

    return -1;
}
/*
 * Copy to/from guest linear addresses, one page at a time. If guest
 * paging is enabled the linear address is first translated via
 * gva_to_gpa(); otherwise it is treated directly as guest-physical.
 * Returns 1 on success, 0 if any page could not be translated.
 */
int
hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
{
    unsigned long gpa, mfn;
    char *addr;
    int count;

    while ( size > 0 )
    {
        count = PAGE_SIZE - (vaddr & ~PAGE_MASK);
        if ( count > size )
            count = size;

        if ( hvm_paging_enabled(current) )
        {
            gpa = gva_to_gpa(vaddr);
            mfn = get_mfn_from_gpfn(gpa >> PAGE_SHIFT);
        }
        else
            mfn = get_mfn_from_gpfn(vaddr >> PAGE_SHIFT);
        if ( mfn == INVALID_MFN )
            return 0;

        addr = (char *)map_domain_page(mfn) + (vaddr & ~PAGE_MASK);

        if ( dir == HVM_COPY_IN )
            memcpy(buf, addr, count);
        else
            memcpy(addr, buf, count);

        unmap_domain_page(addr);

        vaddr += count;
        buf += count;
        size -= count;
    }

    return 1;
}
/*
 * HVM-specific printbuf. Mostly used for hvmloader chit-chat. Characters
 * accumulate until a newline arrives or the buffer is nearly full, then
 * the whole line is emitted via printk().
 */
void hvm_print_line(struct vcpu *v, const char c)
{
    int *index = &v->domain->arch.hvm_domain.pbuf_index;
    char *pbuf = v->domain->arch.hvm_domain.pbuf;

    if ( *index == HVM_PBUF_SIZE - 2 || c == '\n' )
    {
        if ( *index == HVM_PBUF_SIZE - 2 )
            pbuf[(*index)++] = c;
        pbuf[*index] = '\0';
        printk("(GUEST: %u) %s\n", v->domain->domain_id, pbuf);
        *index = 0;
    }
    else
        pbuf[(*index)++] = c;
}
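
/*
 * Hypercalls from HVM guests are dispatched through a whitelist table.
 * Vectors outside the table yield -ENOSYS, and calls made from guest
 * ring 3 are rejected with -EPERM. Only the 32-bit path is implemented
 * so far; see the __x86_64__ stub below.
 */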
#if defined(__i386__)

typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x) [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
static hvm_hypercall_t *hvm_hypercall_table[] = {
    HYPERCALL(mmu_update),
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(update_va_mapping),
    HYPERCALL(event_channel_op_compat),
    HYPERCALL(xen_version),
    HYPERCALL(grant_table_op),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};
#undef HYPERCALL

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( ring_3(pregs) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( pregs->eax >= ARRAY_SIZE(hvm_hypercall_table) ||
         !hvm_hypercall_table[pregs->eax] )
    {
        DPRINTK("HVM vcpu %d:%d did a bad hypercall %d.\n",
                current->domain->domain_id, current->vcpu_id,
                pregs->eax);
        pregs->eax = -ENOSYS;
    }
    else
    {
        pregs->eax = hvm_hypercall_table[pregs->eax](
            pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
    }
}

#else /* __x86_64__ */

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    printk("not supported yet!\n");
}

#endif
/* Initialise a hypercall transfer page for an HVM domain using
   paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}
/*
 * Only called in HVM domain BSP context. When booting, vcpuid is always
 * equal to apic_id.
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *bsp = current, *v;
    struct domain *d = bsp->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    /* current must be HVM domain BSP */
    if ( !(hvm_guest(bsp) && bsp->vcpu_id == 0) )
    {
        printk("Not calling hvm_bringup_ap from BSP context.\n");
        domain_crash_synchronous();
    }

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
    {
        printk("Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
        printk("AP %d bringup failed in boot_vcpu %d.\n", vcpuid, rc);
    else
    {
        if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
            vcpu_wake(d->vcpu[vcpuid]);
        printk("AP %d bringup succeeded.\n", vcpuid);
    }

    xfree(ctxt);

    return rc;
}
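
/*
 * HVMOP_set_param/HVMOP_get_param: write or read one slot of the
 * per-domain HVM parameter array introduced by this changeset. A guest
 * may name itself with DOMID_SELF; naming any other domain requires a
 * privileged caller. Note that get_param hands the value back as the
 * hypercall return code.
 */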
long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
        {
            get_knownalive_domain(current->domain);
            d = current->domain;
        }
        else if ( IS_PRIV(current->domain) )
        {
            d = find_domain_by_id(a.domid);
            if ( !d )
                return -ESRCH;
        }
        else
        {
            return -EPERM;
        }

        if ( op == HVMOP_set_param )
        {
            rc = 0;
            d->arch.hvm_domain.params[a.index] = a.value;
        }
        else
        {
            rc = d->arch.hvm_domain.params[a.index];
        }

        put_domain(d);
        return rc;
    }

    default:
    {
        DPRINTK("Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
    }
    }

    return rc;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */