
view xen/arch/x86/hvm/hvm.c @ 10986:49dcd838b7df

[HVMLOADER] HVM loader initialises hypercall shim and uses
it to interrogate Xen version information. Also add support
for HVM hypercall execution on 64-bit host.

Signed-off-by: Steven Smith <ssmith@xensource.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Aug 04 20:30:12 2006 +0100 (2006-08-04)
parents bfe12b4d45d3
children 857e7b864bb0
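
For context on the shim: the public ABI lays out the hypercall transfer page as one 32-byte call stub per hypercall, so hypercall N is entered at byte offset N * 32, with arguments in the registers that hvm_do_hypercall() below unpacks. Here is a minimal, i386-flavoured sketch of how a guest such as hvmloader might interrogate the Xen version through the shim; hypercall_page is an assumed guest-side symbol for the registered page, not anything defined in this file:

    /* Illustrative only. Returns (major << 16) | minor, as defined by
     * XENVER_version; both constants come from the public headers. */
    static inline uint32_t example_get_xen_version(void *hypercall_page)
    {
        uint32_t ver;
        /* 32-bit HVM hypercall ABI: the stub supplies the hypercall
         * number; the first two arguments travel in ebx and ecx. */
        asm volatile ( "call *%1"
                       : "=a" (ver)
                       : "r" ((char *)hypercall_page +
                              __HYPERVISOR_xen_version * 32),
                         "b" (XENVER_version), "c" (0)
                       : "memory" );
        return ver;
    }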
/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/shadow.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/hvm_info_table.h>
#include <public/version.h>
#include <public/memory.h>

int hvm_enabled = 0;

unsigned int opt_hvm_debug_level = 0;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs;
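
/*
 * Invalidate the phys_to_machine entries covering a guest MMIO range
 * so the PFNs no longer map to machine memory. PFNs at or above
 * 0xfffff (the 4GB boundary) are ignored.
 */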
static void hvm_zap_mmio_range(
    struct domain *d, unsigned long pfn, unsigned long nr_pfn)
{
    unsigned long i, val = INVALID_MFN;

    ASSERT(d == current->domain);

    for ( i = 0; i < nr_pfn; i++ )
    {
        if ( pfn + i >= 0xfffff )
            break;

        __copy_to_user(&phys_to_machine_mapping[pfn + i], &val, sizeof (val));
    }
}
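
/* e820_foreach() callback: zap the mapping of any E820_IO region. */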
static void e820_zap_iommu_callback(struct domain *d,
                                    struct e820entry *e,
                                    void *ign)
{
    if ( e->type == E820_IO )
        hvm_zap_mmio_range(d, e->addr >> PAGE_SHIFT, e->size >> PAGE_SHIFT);
}
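
/*
 * Map the E820 memory map placed in the guest at E820_MAP_PAGE and
 * invoke cb on each entry. Crashes the domain if the page cannot be
 * found or mapped.
 */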
static void e820_foreach(struct domain *d,
                         void (*cb)(struct domain *d,
                                    struct e820entry *e,
                                    void *data),
                         void *data)
{
    int i;
    unsigned char e820_map_nr;
    struct e820entry *e820entry;
    unsigned char *p;
    unsigned long mfn;

    mfn = gmfn_to_mfn(d, E820_MAP_PAGE >> PAGE_SHIFT);
    if ( mfn == INVALID_MFN )
    {
        printk("Cannot find E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page(mfn);
    if ( p == NULL )
    {
        printk("Cannot map E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    e820_map_nr = *(p + E820_MAP_NR_OFFSET);
    e820entry = (struct e820entry *)(p + E820_MAP_OFFSET);

    for ( i = 0; i < e820_map_nr; i++ )
        cb(d, e820entry + i, data);

    unmap_domain_page(p);
}

static void hvm_zap_iommu_pages(struct domain *d)
{
    e820_foreach(d, e820_zap_iommu_callback, NULL);
}
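
/* e820_foreach() callback: record the MFN of the E820_SHARED_PAGE entry. */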
static void e820_map_io_shared_callback(struct domain *d,
                                        struct e820entry *e,
                                        void *data)
{
    unsigned long *mfn = data;
    if ( e->type == E820_SHARED_PAGE )
    {
        ASSERT(*mfn == INVALID_MFN);
        *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
    }
}
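
/*
 * Find the I/O request shared page advertised in the guest's E820 map
 * and map it globally, so device emulation can reach it from any CPU.
 * Idempotent: returns early if the page is already mapped.
 */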
void hvm_map_io_shared_page(struct vcpu *v)
{
    unsigned long mfn = INVALID_MFN;
    void *p;
    struct domain *d = v->domain;

    if ( d->arch.hvm_domain.shared_page_va )
        return;

    e820_foreach(d, e820_map_io_shared_callback, &mfn);

    if ( mfn == INVALID_MFN )
    {
        printk("Cannot find io request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page_global(mfn);
    if ( p == NULL )
    {
        printk("Cannot map io request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    d->arch.hvm_domain.shared_page_va = (unsigned long)p;
}
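
/*
 * One-time platform setup for an HVM domain, performed in the context
 * of VCPU 0: shadow direct map, IOMMU page zapping, the I/O shared
 * page, the virtual PIC/IOAPIC, and the platform timers.
 */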
void hvm_setup_platform(struct domain *d)
{
    struct hvm_domain *platform;
    struct vcpu *v = current;

    if ( !hvm_guest(v) || (v->vcpu_id != 0) )
        return;

    if ( shadow_direct_map_init(d) == 0 )
    {
        printk("Cannot allocate shadow direct map for HVM domain.\n");
        domain_crash_synchronous();
    }

    hvm_zap_iommu_pages(d);
    hvm_map_io_shared_page(v);

    platform = &d->arch.hvm_domain;
    pic_init(&platform->vpic, pic_irq_request, &platform->interrupt_request);
    register_pic_io_hook();

    if ( hvm_apic_support(d) )
    {
        spin_lock_init(&d->arch.hvm_domain.round_robin_lock);
        hvm_vioapic_init(d);
    }

    init_timer(&platform->pl_time.periodic_tm.timer,
               pt_timer_fn, v, v->processor);
    pit_init(v, cpu_khz);
}

void pic_irq_request(void *data, int level)
{
    int *interrupt_request = data;
    *interrupt_request = level;
}
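
/*
 * Transfer pending PIC interrupt-line state out of the shared I/O
 * page into the virtual PIC. Each cmpxchg loop atomically snapshots
 * and clears a u16 line mask, so bits raised concurrently by the
 * device model are never lost.
 */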
void hvm_pic_assist(struct vcpu *v)
{
    global_iodata_t *spg;
    u16 *virq_line, irqs;
    struct hvm_virpic *pic = &v->domain->arch.hvm_domain.vpic;

    spg = &get_sp(v->domain)->sp_global;
    virq_line = &spg->pic_clear_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile u16 *)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs_clear(pic, irqs);
    }
    virq_line = &spg->pic_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile u16 *)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs(pic, irqs);
    }
}
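
/* Guest time is the host TSC plus the per-VCPU cached TSC offset. */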
u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}
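
/*
 * Return the next pending interrupt vector for this VCPU, preferring
 * the local APIC; the PIC is consulted only for VCPU 0. Returns -1 if
 * nothing is pending.
 */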
int cpu_get_interrupt(struct vcpu *v, int *type)
{
    int intno;
    struct hvm_virpic *s = &v->domain->arch.hvm_domain.vpic;
    unsigned long flags;

    if ( (intno = cpu_get_apic_interrupt(v, type)) != -1 ) {
        /* set irq request if a PIC irq is still pending */
        /* XXX: improve that */
        spin_lock_irqsave(&s->lock, flags);
        pic_update_irq(s);
        spin_unlock_irqrestore(&s->lock, flags);
        return intno;
    }
    /* read the irq from the PIC */
    if ( v->vcpu_id == 0 && (intno = cpu_get_pic_interrupt(v, type)) != -1 )
        return intno;

    return -1;
}

/*
 * Copy from/to guest virtual address space. dir == HVM_COPY_IN reads
 * guest memory into buf; otherwise buf is written to guest memory.
 * Returns 1 on success, or 0 if a guest address cannot be translated.
 */
int
hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
{
    unsigned long mfn;
    char *addr;
    int count;

    while (size > 0) {
        count = PAGE_SIZE - (vaddr & ~PAGE_MASK);
        if (count > size)
            count = size;

        if (hvm_paging_enabled(current))
            mfn = gva_to_mfn(vaddr);
        else
            mfn = get_mfn_from_gpfn(vaddr >> PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            return 0;

        addr = (char *)map_domain_page(mfn) + (vaddr & ~PAGE_MASK);

        if (dir == HVM_COPY_IN)
            memcpy(buf, addr, count);
        else
            memcpy(addr, buf, count);

        unmap_domain_page(addr);

        vaddr += count;
        buf += count;
        size -= count;
    }

    return 1;
}
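
/*
 * Example usage (illustrative, not from this changeset): pulling a
 * 32-bit value out of the guest at linear address vaddr:
 *
 *     uint32_t val;
 *     if ( !hvm_copy(&val, vaddr, sizeof(val), HVM_COPY_IN) )
 *         ... handle translation failure ...
 */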

/*
 * HVM specific printbuf. Mostly used for hvmloader chit-chat.
 */
void hvm_print_line(struct vcpu *v, const char c)
{
    int *index = &v->domain->arch.hvm_domain.pbuf_index;
    char *pbuf = v->domain->arch.hvm_domain.pbuf;

    if (*index == HVM_PBUF_SIZE-2 || c == '\n') {
        if (*index == HVM_PBUF_SIZE-2)
            pbuf[(*index)++] = c;
        pbuf[*index] = '\0';
        printk("(GUEST: %u) %s\n", v->domain->domain_id, pbuf);
        *index = 0;
    } else
        pbuf[(*index)++] = c;
}
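
/*
 * Hypercall dispatch for HVM guests. The guest enters through a stub
 * in the hypercall transfer page, which leaves the hypercall number
 * in eax; the tables below map that number to a handler.
 */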
typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x)                                        \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
#define HYPERCALL_COMPAT32(x)                               \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x ## _compat32

#if defined(__i386__)

static hvm_hypercall_t *hvm_hypercall_table[] = {
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(xen_version),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
    {
        DPRINTK("HVM vcpu %d:%d did a bad hypercall %d.\n",
                current->domain->domain_id, current->vcpu_id,
                pregs->eax);
        pregs->eax = -ENOSYS;
        return;
    }

    pregs->eax = hvm_hypercall_table[pregs->eax](
        pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
}

#else /* defined(__x86_64__) */
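
/*
 * 32-on-64 thunk: a 32-bit guest's xen_add_to_physmap has a different
 * layout (all-32-bit fields), so unpack it into the native structure
 * before calling do_memory_op().
 */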
static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    extern long do_add_to_physmap(struct xen_add_to_physmap *xatp);
    long rc;

    switch ( cmd )
    {
    case XENMEM_add_to_physmap:
    {
        struct {
            domid_t domid;
            uint32_t space;
            uint32_t idx;
            uint32_t gpfn;
        } u;
        struct xen_add_to_physmap h;

        if ( copy_from_guest(&u, arg, 1) )
            return -EFAULT;

        h.domid = u.domid;
        h.space = u.space;
        h.idx = u.idx;
        h.gpfn = u.gpfn;

        this_cpu(guest_handles_in_xen_space) = 1;
        rc = do_memory_op(cmd, guest_handle_from_ptr(&h, void));
        this_cpu(guest_handles_in_xen_space) = 0;

        break;
    }

    default:
        DPRINTK("memory_op %d.\n", cmd);
        rc = -ENOSYS;
        break;
    }

    return rc;
}
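
/*
 * Two dispatch tables on 64-bit hosts: one for 64-bit guests and one
 * for 32-bit guests, whose memory_op arguments need the thunk above.
 */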
static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
    HYPERCALL_COMPAT32(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->rax = -EPERM;
        return;
    }

    pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
    if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
    {
        DPRINTK("HVM vcpu %d:%d did a bad hypercall %ld.\n",
                current->domain->domain_id, current->vcpu_id,
                pregs->rax);
        pregs->rax = -ENOSYS;
        return;
    }

    if ( current->domain->arch.ops->guest_paging_levels == PAGING_L4 )
    {
        pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
                                                       pregs->rsi,
                                                       pregs->rdx,
                                                       pregs->r10,
                                                       pregs->r8);
    }
    else
    {
        pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
                                                       (uint32_t)pregs->ecx,
                                                       (uint32_t)pregs->edx,
                                                       (uint32_t)pregs->esi,
                                                       (uint32_t)pregs->edi);
    }
}

#endif /* defined(__x86_64__) */
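
/*
 * Note: the vendor hook invoked below fills the transfer page with
 * one call stub (VMCALL on VMX, VMMCALL on SVM) per hypercall; the
 * public ABI fixes each stub at 32 bytes, so hypercall N is entered
 * at byte offset N * 32 (see the usage sketch near the top of this
 * page).
 */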
/*
 * Initialise a hypercall transfer page for an HVM domain using
 * paravirtualised drivers.
 */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}

/*
 * Bring up an application processor. Only called in HVM domain BSP
 * context; when booting, vcpuid is always equal to the APIC ID.
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *bsp = current, *v;
    struct domain *d = bsp->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    /* current must be HVM domain BSP */
    if ( !(hvm_guest(bsp) && bsp->vcpu_id == 0) ) {
        printk("Not calling hvm_bringup_ap from BSP context.\n");
        domain_crash_synchronous();
    }

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL ) {
        printk("Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
        printk("AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
    else {
        if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
            vcpu_wake(d->vcpu[vcpuid]);
        printk("AP %d bringup succeeded.\n", vcpuid);
    }

    xfree(ctxt);

    return rc;
}
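
/*
 * HVMOP hypercall handler. HVMOP_set_param stores a value in the
 * domain's parameter array; HVMOP_get_param returns the stored value
 * directly in the hypercall return register, with no copy back into
 * the guest's buffer.
 */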
long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
        {
            get_knownalive_domain(current->domain);
            d = current->domain;
        }
        else if ( IS_PRIV(current->domain) )
        {
            d = find_domain_by_id(a.domid);
            if ( !d )
                return -ESRCH;
        }
        else
        {
            return -EPERM;
        }

        if ( op == HVMOP_set_param )
        {
            rc = 0;
            d->arch.hvm_domain.params[a.index] = a.value;
        }
        else
        {
            rc = d->arch.hvm_domain.params[a.index];
        }

        put_domain(d);
        return rc;
    }

    default:
    {
        DPRINTK("Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
    }
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */