xen/arch/x86/hvm/hvm.c @ 10958:bfe12b4d45d3 (debuggers.hg)

[HVM] Make copy_{to,from}_guest work for HVM domains.
Signed-off-by: Steven Smith <ssmith@xensource.com>

author: kfraser@localhost.localdomain
date:   Thu Aug 03 15:22:25 2006 +0100

/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/shadow.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#if CONFIG_PAGING_LEVELS >= 3
#include <asm/shadow_64.h>
#endif
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/hvm_info_table.h>
#include <xen/guest_access.h>

int hvm_enabled = 0;

unsigned int opt_hvm_debug_level = 0;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs;

/* Write INVALID_MFN into the guest's phys-to-machine table for each pfn
   in an MMIO range, so the range is no longer backed by machine memory. */
static void hvm_zap_mmio_range(
    struct domain *d, unsigned long pfn, unsigned long nr_pfn)
{
    unsigned long i, val = INVALID_MFN;

    ASSERT(d == current->domain);

    for ( i = 0; i < nr_pfn; i++ )
    {
        if ( pfn + i >= 0xfffff )
            break;

        __copy_to_user(&phys_to_machine_mapping[pfn + i], &val, sizeof (val));
    }
}

static void e820_zap_iommu_callback(struct domain *d,
                                    struct e820entry *e,
                                    void *ign)
{
    if ( e->type == E820_IO )
        hvm_zap_mmio_range(d, e->addr >> PAGE_SHIFT, e->size >> PAGE_SHIFT);
}

static void e820_foreach(struct domain *d,
                         void (*cb)(struct domain *d,
                                    struct e820entry *e,
                                    void *data),
                         void *data)
{
    int i;
    unsigned char e820_map_nr;
    struct e820entry *e820entry;
    unsigned char *p;
    unsigned long mfn;

    mfn = gmfn_to_mfn(d, E820_MAP_PAGE >> PAGE_SHIFT);
    if ( mfn == INVALID_MFN )
    {
        printk("Cannot find E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page(mfn);
    if ( p == NULL )
    {
        printk("Cannot map E820 memory map page for HVM domain.\n");
        domain_crash_synchronous();
    }

    e820_map_nr = *(p + E820_MAP_NR_OFFSET);
    e820entry = (struct e820entry *)(p + E820_MAP_OFFSET);

    for ( i = 0; i < e820_map_nr; i++ )
        cb(d, e820entry + i, data);

    unmap_domain_page(p);
}

static void hvm_zap_iommu_pages(struct domain *d)
{
    e820_foreach(d, e820_zap_iommu_callback, NULL);
}

static void e820_map_io_shared_callback(struct domain *d,
                                        struct e820entry *e,
                                        void *data)
{
    unsigned long *mfn = data;
    if ( e->type == E820_SHARED_PAGE )
    {
        ASSERT(*mfn == INVALID_MFN);
        *mfn = gmfn_to_mfn(d, e->addr >> PAGE_SHIFT);
    }
}

void hvm_map_io_shared_page(struct vcpu *v)
{
    unsigned long mfn = INVALID_MFN;
    void *p;
    struct domain *d = v->domain;

    if ( d->arch.hvm_domain.shared_page_va )
        return;

    e820_foreach(d, e820_map_io_shared_callback, &mfn);

    if ( mfn == INVALID_MFN )
    {
        printk("Cannot find I/O request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    p = map_domain_page_global(mfn);
    if ( p == NULL )
    {
        printk("Cannot map I/O request shared page for HVM domain.\n");
        domain_crash_synchronous();
    }

    d->arch.hvm_domain.shared_page_va = (unsigned long)p;
}

void hvm_setup_platform(struct domain *d)
{
    struct hvm_domain *platform;
    struct vcpu *v = current;

    if ( !hvm_guest(v) || (v->vcpu_id != 0) )
        return;

    if ( shadow_direct_map_init(d) == 0 )
    {
        printk("Cannot allocate shadow direct map for HVM domain.\n");
        domain_crash_synchronous();
    }

    hvm_zap_iommu_pages(d);
    hvm_map_io_shared_page(v);

    platform = &d->arch.hvm_domain;
    pic_init(&platform->vpic, pic_irq_request, &platform->interrupt_request);
    register_pic_io_hook();

    if ( hvm_apic_support(d) )
    {
        spin_lock_init(&d->arch.hvm_domain.round_robin_lock);
        hvm_vioapic_init(d);
    }

    init_timer(&platform->pl_time.periodic_tm.timer,
               pt_timer_fn, v, v->processor);
    pit_init(v, cpu_khz);
}

void pic_irq_request(void *data, int level)
{
    int *interrupt_request = data;
    *interrupt_request = level;
}

void hvm_pic_assist(struct vcpu *v)
{
    global_iodata_t *spg;
    u16 *virq_line, irqs;
    struct hvm_virpic *pic = &v->domain->arch.hvm_domain.vpic;

    spg = &get_sp(v->domain)->sp_global;
    virq_line = &spg->pic_clear_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile u16 *)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs_clear(pic, irqs);
    }
    virq_line = &spg->pic_irr;
    if ( *virq_line ) {
        do {
            irqs = *(volatile u16 *)virq_line;
        } while ( (u16)cmpxchg(virq_line, irqs, 0) != irqs );
        do_pic_irqs(pic, irqs);
    }
}
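
/*
 * The two loops above implement an atomic fetch-and-clear of a 16-bit
 * IRQ mailbox word shared with the device model. A minimal sketch of
 * the same idiom factored into a standalone helper (fetch_and_clear_u16
 * is a hypothetical name, not part of this file):
 */
#if 0 /* illustrative sketch only */
static inline u16 fetch_and_clear_u16(u16 *word)
{
    u16 old;
    do {
        old = *(volatile u16 *)word;    /* snapshot the pending bits */
    } while ( (u16)cmpxchg(word, old, 0) != old );  /* retry if we raced */
    return old;
}
#endif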

u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}
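
/*
 * Guest time is the host TSC plus a per-vcpu cached offset, so setting
 * the guest clock reduces to recomputing that offset. A minimal sketch
 * of the inverse operation (hvm_set_guest_time is a hypothetical
 * counterpart, not defined in this file):
 */
#if 0 /* illustrative sketch only */
static void hvm_set_guest_time(struct vcpu *v, u64 guest_time)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    /* Pick the offset so that host_tsc + offset == guest_time. */
    v->arch.hvm_vcpu.cache_tsc_offset = guest_time - host_tsc;
}
#endif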

int cpu_get_interrupt(struct vcpu *v, int *type)
{
    int intno;
    struct hvm_virpic *s = &v->domain->arch.hvm_domain.vpic;
    unsigned long flags;

    if ( (intno = cpu_get_apic_interrupt(v, type)) != -1 ) {
        /* set irq request if a PIC irq is still pending */
        /* XXX: improve that */
        spin_lock_irqsave(&s->lock, flags);
        pic_update_irq(s);
        spin_unlock_irqrestore(&s->lock, flags);
        return intno;
    }
    /* read the irq from the PIC */
    if ( v->vcpu_id == 0 && (intno = cpu_get_pic_interrupt(v, type)) != -1 )
        return intno;

    return -1;
}
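
/*
 * Interrupt source priority above: the local APIC is polled first for
 * every vcpu; the legacy PIC is consulted only on vcpu 0, which is
 * where its output line is wired.
 */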

/*
 * Copy from/to guest virtual.
 */
int
hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
{
    unsigned long mfn;
    char *addr;
    int count;

    while (size > 0) {
        count = PAGE_SIZE - (vaddr & ~PAGE_MASK);
        if (count > size)
            count = size;

        if (hvm_paging_enabled(current))
            mfn = gva_to_mfn(vaddr);
        else
            mfn = get_mfn_from_gpfn(vaddr >> PAGE_SHIFT);
        if (mfn == INVALID_MFN)
            return 0;

        addr = (char *)map_domain_page(mfn) + (vaddr & ~PAGE_MASK);

        if (dir == HVM_COPY_IN)
            memcpy(buf, addr, count);
        else
            memcpy(addr, buf, count);

        unmap_domain_page(addr);

        vaddr += count;
        buf += count;
        size -= count;
    }

    return 1;
}
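
/*
 * Per the changeset description, this routine is the building block that
 * lets copy_{to,from}_guest work for HVM domains: guest buffers are named
 * by virtual address and must be translated and copied a page at a time.
 * A minimal sketch of directional wrappers (the wrapper names are
 * hypothetical, and HVM_COPY_OUT is assumed to be the counterpart of
 * HVM_COPY_IN):
 */
#if 0 /* illustrative sketch only */
static inline int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr,
                                           int size)
{
    /* Returns 1 on success, 0 if any guest address failed to translate. */
    return hvm_copy(buf, vaddr, size, HVM_COPY_IN);
}

static inline int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf,
                                         int size)
{
    return hvm_copy(buf, vaddr, size, HVM_COPY_OUT);
}
#endif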

/*
 * HVM specific printbuf. Mostly used for hvmloader chit-chat.
 */
void hvm_print_line(struct vcpu *v, const char c)
{
    int *index = &v->domain->arch.hvm_domain.pbuf_index;
    char *pbuf = v->domain->arch.hvm_domain.pbuf;

    if (*index == HVM_PBUF_SIZE-2 || c == '\n') {
        if (*index == HVM_PBUF_SIZE-2)
            pbuf[(*index)++] = c;
        pbuf[*index] = '\0';
        printk("(GUEST: %u) %s\n", v->domain->domain_id, pbuf);
        *index = 0;
    } else
        pbuf[(*index)++] = c;
}
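
/*
 * Characters arrive here one at a time via a port-I/O intercept set up
 * for the guest's debug port. A minimal guest-side sketch (the port
 * number 0xE9 is an illustrative assumption, not taken from this file):
 */
#if 0 /* illustrative sketch only */
static void guest_debug_puts(const char *s)
{
    while ( *s )
        outb(*s++, 0xe9);   /* each byte traps to the hypervisor */
}
#endif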

#if defined(__i386__)

typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x) [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
static hvm_hypercall_t *hvm_hypercall_table[] = {
    HYPERCALL(mmu_update),
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(update_va_mapping),
    HYPERCALL(event_channel_op_compat),
    HYPERCALL(xen_version),
    HYPERCALL(grant_table_op),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};
#undef HYPERCALL

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( ring_3(pregs) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( pregs->eax >= ARRAY_SIZE(hvm_hypercall_table) ||
         !hvm_hypercall_table[pregs->eax] )
    {
        DPRINTK("HVM vcpu %d:%d did a bad hypercall %d.\n",
                current->domain->domain_id, current->vcpu_id,
                pregs->eax);
        pregs->eax = -ENOSYS;
    }
    else
    {
        pregs->eax = hvm_hypercall_table[pregs->eax](
            pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
    }
}

#else /* __x86_64__ */

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    printk("not supported yet!\n");
}

#endif

/*
 * Initialise a hypercall transfer page for a VMX domain using
 * paravirtualised drivers.
 */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}
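
/*
 * hvm_funcs.init_hypercall_page() fills the page with one short stub per
 * hypercall number. On VMX the stubs would look roughly like the sketch
 * below (illustrative; the real encoding lives in the vendor-specific
 * init_hypercall_page implementation, and SVM would use vmmcall):
 *
 *     hypercall_page + nr * 32:
 *         mov  $nr, %eax
 *         vmcall
 *         ret
 *
 * Guests call into this page rather than hard-coding the trapping
 * instruction, keeping PV drivers portable across VMX and SVM.
 */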

/*
 * Only called in HVM-domain BSP context. When booting, vcpuid is
 * always equal to apic_id.
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *bsp = current, *v;
    struct domain *d = bsp->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    /* current must be the HVM domain's BSP */
    if ( !(hvm_guest(bsp) && bsp->vcpu_id == 0) ) {
        printk("Not calling hvm_bringup_ap from BSP context.\n");
        domain_crash_synchronous();
    }

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL ) {
        printk("Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
        printk("AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
    else {
        if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
            vcpu_wake(d->vcpu[vcpuid]);
        printk("AP %d bringup succeeded.\n", vcpuid);
    }

    xfree(ctxt);

    return rc;
}

long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
        {
            get_knownalive_domain(current->domain);
            d = current->domain;
        }
        else if ( IS_PRIV(current->domain) )
        {
            d = find_domain_by_id(a.domid);
            if ( !d )
                return -ESRCH;
        }
        else
        {
            return -EPERM;
        }

        if ( op == HVMOP_set_param )
        {
            rc = 0;
            d->arch.hvm_domain.params[a.index] = a.value;
        }
        else
        {
            rc = d->arch.hvm_domain.params[a.index];
        }

        put_domain(d);
        return rc;
    }

    default:
    {
        DPRINTK("Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
    }
    }

    return rc;
}
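
/*
 * Illustrative caller: a guest (or the toolstack via privcmd) reaching
 * the handler above to set a parameter would do roughly the following.
 * HYPERVISOR_hvm_op is the conventional guest-side wrapper name and is
 * an assumption here, not part of this file:
 */
#if 0 /* illustrative sketch only */
static long set_hvm_param_self(uint32_t index, uint64_t value)
{
    struct xen_hvm_param a = {
        .domid = DOMID_SELF,
        .index = index,
        .value = value,
    };
    return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
#endif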

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */