xen/arch/x86/hvm/hvm.c @ 13651:fde9e1d474b7 (debuggers.hg)

hvm: Define a global I/O access bitmap, allowing direct access to port 0x80.

Signed-off-by: Keir Fraser <keir@xensource.com>
Author:   kfraser@localhost.localdomain
Date:     Thu Jan 25 18:20:58 2007 +0000 (2007-01-25)
Parents:  d6d27c649fd6
Children: 271ffb1c12eb
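
The bitmap defined in this file uses one bit per I/O port: a set bit forces a vmexit (the access is then emulated), while a clear bit lets the guest touch the port directly. As a rough illustration (the helper below is hypothetical and not part of the file that follows), deciding whether a port is intercepted is an ordinary bit lookup:

    /* Hypothetical helper: nonzero iff accesses to `port' are intercepted. */
    static inline int hvm_port_is_intercepted(unsigned int port)
    {
        return test_bit(port, hvm_io_bitmap);
    }

hvm_enable() sets every bit and then clears bit 0x80, so only the PC debug port bypasses interception.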

/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/guest_access.h>
#include <xen/event.h>
#include <xen/shadow.h>
#include <asm/current.h>
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/shadow.h>
#include <asm/regs.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/msr.h>
#include <asm/mc146818rtc.h>
#include <asm/spinlock.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/vpt.h>
#include <asm/hvm/support.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/version.h>
#include <public/memory.h>

int hvm_enabled;

unsigned int opt_hvm_debug_level;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs;

/* I/O permission bitmap is globally shared by all HVM guests. */
char __attribute__ ((__section__ (".bss.page_aligned")))
    hvm_io_bitmap[3*PAGE_SIZE];

void hvm_enable(void)
{
    if ( hvm_enabled )
        return;

    /*
     * Allow direct access to the PC debug port (it is often used for I/O
     * delays, but the vmexits simply slow things down).
     */
    memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap));
    clear_bit(0x80, hvm_io_bitmap);

    hvm_enabled = 1;
}

void hvm_stts(struct vcpu *v)
{
    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
    if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
        hvm_funcs.stts(v);
}
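
/*
 * Guest time is maintained as a fixed offset from the host TSC: the guest
 * observes host_tsc + cache_tsc_offset. Setting the guest time to `gtime'
 * therefore just records gtime - host_tsc as the offset, and reading it
 * back adds the cached offset to the current host TSC.
 */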
void hvm_set_guest_time(struct vcpu *v, u64 gtime)
{
    u64 host_tsc;

    rdtscll(host_tsc);

    v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
}

u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}

void hvm_migrate_timers(struct vcpu *v)
{
    pit_migrate_timers(v);
    rtc_migrate_timers(v);
    hpet_migrate_timers(v);
    pmtimer_migrate_timers(v);
    if ( vcpu_vlapic(v)->pt.enabled )
        migrate_timer(&vcpu_vlapic(v)->pt.timer, v->processor);
}

void hvm_do_resume(struct vcpu *v)
{
    ioreq_t *p;

    hvm_stts(v);

    pt_thaw_time(v);

    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    while ( p->state != STATE_IOREQ_NONE )
    {
        switch ( p->state )
        {
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            hvm_io_assist(v);
            break;
        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
        case STATE_IOREQ_INPROCESS:
            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
                                      (p->state != STATE_IOREQ_READY) &&
                                      (p->state != STATE_IOREQ_INPROCESS));
            break;
        default:
            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
            domain_crash_synchronous();
        }
    }
}

int hvm_domain_initialise(struct domain *d)
{
    int rc;

    if ( !hvm_enabled )
    {
        gdprintk(XENLOG_WARNING, "Attempt to create an HVM guest "
                 "on a non-VT/AMD-V platform.\n");
        return -EINVAL;
    }

    spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
    spin_lock_init(&d->arch.hvm_domain.irq_lock);

    rc = shadow_enable(d, SHM2_refcounts|SHM2_translate|SHM2_external);
    if ( rc != 0 )
        return rc;

    vpic_init(d);
    vioapic_init(d);

    return 0;
}

void hvm_domain_destroy(struct domain *d)
{
    HVMStateEntry *se, *dse;
    pit_deinit(d);
    rtc_deinit(d);
    pmtimer_deinit(d);
    hpet_deinit(d);

    se = d->arch.hvm_domain.first_se;
    while (se) {
        dse = se;
        se = se->next;
        xfree(dse);
    }

    if ( d->arch.hvm_domain.shared_page_va )
        unmap_domain_page_global(
            (void *)d->arch.hvm_domain.shared_page_va);

    if ( d->arch.hvm_domain.buffered_io_va )
        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
}

int hvm_load_cpu_ctxt(hvm_domain_context_t *h, void *opaque, int version)
{
    struct vcpu *v = opaque;

    if ( hvm_funcs.load_cpu_ctxt(h, opaque, version) < 0 )
        return -EINVAL;

    /* Auxiliary processors should be woken immediately. */
    if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
        vcpu_wake(v);

    return 0;
}

int hvm_vcpu_initialise(struct vcpu *v)
{
    int rc;

    hvm_register_savevm(v->domain, "xen_hvm_cpu", v->vcpu_id, 1,
                        hvm_funcs.save_cpu_ctxt, hvm_load_cpu_ctxt,
                        (void *)v);

    if ( (rc = vlapic_init(v)) != 0 )
        return rc;

    if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
    {
        vlapic_destroy(v);
        return rc;
    }

    /* Create ioreq event channel. */
    v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
    if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
        get_vio(v->domain, v->vcpu_id)->vp_eport =
            v->arch.hvm_vcpu.xen_port;

    INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);

    if ( v->vcpu_id != 0 )
        return 0;

    pit_init(v, cpu_khz);
    rtc_init(v, RTC_PORT(0), RTC_IRQ);
    pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);
    hpet_init(v);

    /* Initialise the HVM shared page. */
    shpage_init(v->domain, get_sp(v->domain));

    /* Init guest TSC to start from zero. */
    hvm_set_guest_time(v, 0);

    return 0;
}

void hvm_vcpu_destroy(struct vcpu *v)
{
    vlapic_destroy(v);
    hvm_funcs.vcpu_destroy(v);

    /* Event channel is already freed by evtchn_destroy(). */
    /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
}

static void hvm_vcpu_down(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int online_count = 0;

    gdprintk(XENLOG_INFO, "DOM%d/VCPU%d: going offline.\n",
             d->domain_id, v->vcpu_id);

    /* Doesn't halt us immediately, but we'll never return to guest context. */
    set_bit(_VCPUF_down, &v->vcpu_flags);
    vcpu_sleep_nosync(v);

    /* Any other VCPUs online? ... */
    LOCK_BIGLOCK(d);
    for_each_vcpu ( d, v )
        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
            online_count++;
    UNLOCK_BIGLOCK(d);

    /* ... Shut down the domain if not. */
    if ( online_count == 0 )
    {
        gdprintk(XENLOG_INFO, "DOM%d: all CPUs offline -- powering off.\n",
                 d->domain_id);
        domain_shutdown(d, SHUTDOWN_poweroff);
    }
}
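
/*
 * I/O request handshake with the external device model: the VCPU fills in
 * its ioreq slot, marks it STATE_IOREQ_READY and notifies the device model
 * over the per-VCPU event channel; when the VCPU is next resumed,
 * hvm_do_resume() above waits until the state reaches STATE_IORESP_READY
 * and then completes the access with hvm_io_assist().
 */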
void hvm_send_assist_req(struct vcpu *v)
{
    ioreq_t *p;

    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    if ( unlikely(p->state != STATE_IOREQ_NONE) )
    {
        /* This indicates a bug in the device model. Crash the domain. */
        gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
        domain_crash_synchronous();
    }

    prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);

    /*
     * Following happens /after/ blocking and setting up ioreq contents.
     * prepare_wait_on_xen_event_channel() is an implicit barrier.
     */
    p->state = STATE_IOREQ_READY;
    notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
}

void hvm_hlt(unsigned long rflags)
{
    /*
     * If we halt with interrupts disabled, that's a pretty sure sign that we
     * want to shut down. In a real processor, NMIs are the only way to break
     * out of this.
     */
    if ( unlikely(!(rflags & X86_EFLAGS_IF)) )
        return hvm_vcpu_down();

    do_sched_op_compat(SCHEDOP_block, 0);
}

void hvm_triple_fault(void)
{
    struct vcpu *v = current;
    gdprintk(XENLOG_INFO, "Triple fault on VCPU%d - "
             "invoking HVM system reset.\n", v->vcpu_id);
    domain_shutdown(v->domain, SHUTDOWN_reboot);
}

/*
 * __hvm_copy():
 *  @buf  = hypervisor buffer
 *  @addr = guest address to copy to/from
 *  @size = number of bytes to copy
 *  @dir  = copy *to* guest (TRUE) or *from* guest (FALSE)?
 *  @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
 * Returns number of bytes failed to copy (0 == complete success).
 */
static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, int virt)
{
    unsigned long mfn;
    char *p;
    int count, todo;

    todo = size;
    while ( todo > 0 )
    {
        count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);

        if ( virt )
            mfn = get_mfn_from_gpfn(shadow_gva_to_gfn(current, addr));
        else
            mfn = get_mfn_from_gpfn(addr >> PAGE_SHIFT);

        if ( mfn == INVALID_MFN )
            return todo;

        p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);

        if ( dir )
            memcpy(p, buf, count); /* dir == TRUE:  *to* guest */
        else
            memcpy(buf, p, count); /* dir == FALSE: *from* guest */

        unmap_domain_page(p);

        addr += count;
        buf  += count;
        todo -= count;
    }

    return 0;
}

int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
{
    return __hvm_copy(buf, paddr, size, 1, 0);
}

int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
{
    return __hvm_copy(buf, paddr, size, 0, 0);
}

int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
{
    return __hvm_copy(buf, vaddr, size, 1, 1);
}

int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
{
    return __hvm_copy(buf, vaddr, size, 0, 1);
}
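
/*
 * Usage sketch (illustrative only; `gpa', `struct foo', `fill_foo' and
 * `handle_partial_copy' are hypothetical, not part of this file). The
 * wrappers above return the number of bytes that could NOT be copied, so
 * zero means complete success and a non-zero result means part of the
 * target range was not mapped:
 *
 *     struct foo f;
 *     fill_foo(&f);
 *     if ( hvm_copy_to_guest_phys(gpa, &f, sizeof(f)) != 0 )
 *         handle_partial_copy(gpa);
 */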

/* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
void hvm_print_line(struct vcpu *v, const char c)
{
    struct hvm_domain *hd = &v->domain->arch.hvm_domain;

    spin_lock(&hd->pbuf_lock);
    hd->pbuf[hd->pbuf_idx++] = c;
    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
    {
        if ( c != '\n' )
            hd->pbuf[hd->pbuf_idx++] = '\n';
        hd->pbuf[hd->pbuf_idx] = '\0';
        printk(XENLOG_G_DEBUG "HVM%u: %s", v->domain->domain_id, hd->pbuf);
        hd->pbuf_idx = 0;
    }
    spin_unlock(&hd->pbuf_lock);
}
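
/*
 * The X86_FEATURE_* constants encode a (word, bit) pair as word*32 + bit;
 * masking with 31 below extracts the bit position within the single 32-bit
 * register (ecx or edx) being adjusted.
 */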
void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
               unsigned int *ecx, unsigned int *edx)
{
    if ( !cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
    {
        cpuid(input, eax, ebx, ecx, edx);

        if ( input == 0x00000001 )
        {
            struct vcpu *v = current;

            clear_bit(X86_FEATURE_MWAIT & 31, ecx);

            if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
                clear_bit(X86_FEATURE_APIC & 31, edx);

#if CONFIG_PAGING_LEVELS >= 3
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_PAE & 31, edx);
            clear_bit(X86_FEATURE_PSE36 & 31, edx);
        }
        else if ( input == 0x80000001 )
        {
#if CONFIG_PAGING_LEVELS >= 3
            struct vcpu *v = current;
            if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
#endif
                clear_bit(X86_FEATURE_NX & 31, edx);
#ifdef __i386__
            /* Mask feature for Intel ia32e or AMD long mode. */
            clear_bit(X86_FEATURE_LAHF_LM & 31, ecx);

            clear_bit(X86_FEATURE_LM & 31, edx);
            clear_bit(X86_FEATURE_SYSCALL & 31, edx);
#endif
        }
    }
}

typedef unsigned long hvm_hypercall_t(
    unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

#define HYPERCALL(x)                                        \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
#define HYPERCALL_COMPAT32(x)                               \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x ## _compat32

#if defined(__i386__)

static hvm_hypercall_t *hvm_hypercall_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(multicall),
    HYPERCALL(xen_version),
    HYPERCALL(event_channel_op),
    HYPERCALL(hvm_op)
};

void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->eax = -EPERM;
        return;
    }

    if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %d.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->eax);
        pregs->eax = -ENOSYS;
        return;
    }

    pregs->eax = hvm_hypercall_table[pregs->eax](
        pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
}

#else /* defined(__x86_64__) */

static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    extern long do_add_to_physmap(struct xen_add_to_physmap *xatp);
    long rc;

    switch ( cmd )
    {
    case XENMEM_add_to_physmap:
    {
        struct {
            domid_t domid;
            uint32_t space;
            uint32_t idx;
            uint32_t gpfn;
        } u;
        struct xen_add_to_physmap h;

        if ( copy_from_guest(&u, arg, 1) )
            return -EFAULT;

        h.domid = u.domid;
        h.space = u.space;
        h.idx = u.idx;
        h.gpfn = u.gpfn;

        this_cpu(guest_handles_in_xen_space) = 1;
        rc = do_memory_op(cmd, guest_handle_from_ptr(&h, void));
        this_cpu(guest_handles_in_xen_space) = 0;

        break;
    }

    default:
        gdprintk(XENLOG_WARNING, "memory_op %d.\n", cmd);
        rc = -ENOSYS;
        break;
    }

    return rc;
}

static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
    HYPERCALL(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};

static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
    HYPERCALL_COMPAT32(memory_op),
    HYPERCALL(xen_version),
    HYPERCALL(hvm_op),
    HYPERCALL(event_channel_op)
};
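
/*
 * A 64-bit guest passes hypercall arguments in rdi, rsi, rdx, r10 and r8
 * and is dispatched through hvm_hypercall64_table; a 32-bit guest passes
 * them in ebx, ecx, edx, esi and edi and goes through the compat table.
 * The guest's paging mode (guest_levels == 4) distinguishes the two ABIs.
 */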
void hvm_do_hypercall(struct cpu_user_regs *pregs)
{
    if ( unlikely(ring_3(pregs)) )
    {
        pregs->rax = -EPERM;
        return;
    }

    pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
    if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
    {
        gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %ld.\n",
                 current->domain->domain_id, current->vcpu_id,
                 pregs->rax);
        pregs->rax = -ENOSYS;
        return;
    }

    if ( current->arch.shadow.mode->guest_levels == 4 )
    {
        pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
                                                       pregs->rsi,
                                                       pregs->rdx,
                                                       pregs->r10,
                                                       pregs->r8);
    }
    else
    {
        pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
                                                       (uint32_t)pregs->ecx,
                                                       (uint32_t)pregs->edx,
                                                       (uint32_t)pregs->esi,
                                                       (uint32_t)pregs->edi);
    }
}

#endif /* defined(__x86_64__) */

void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
{
    v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
    hvm_funcs.update_guest_cr3(v);
}

/* Initialise a hypercall transfer page for a VMX domain using
   paravirtualised drivers. */
void hvm_hypercall_page_initialise(struct domain *d,
                                   void *hypercall_page)
{
    hvm_funcs.init_hypercall_page(d, hypercall_page);
}

/*
 * Only called in HVM domain BSP context. When booting, vcpuid is always
 * equal to apic_id.
 */
int hvm_bringup_ap(int vcpuid, int trampoline_vector)
{
    struct vcpu *bsp = current, *v;
    struct domain *d = bsp->domain;
    struct vcpu_guest_context *ctxt;
    int rc = 0;

    BUG_ON(!is_hvm_domain(d));

    if ( bsp->vcpu_id != 0 )
    {
        gdprintk(XENLOG_ERR, "Not calling hvm_bringup_ap from BSP context.\n");
        domain_crash(bsp->domain);
        return -EINVAL;
    }

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
    {
        gdprintk(XENLOG_ERR,
                 "Failed to allocate memory in hvm_bringup_ap.\n");
        return -ENOMEM;
    }

    hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);

    /* Sync AP's TSC with BSP's. */
    v->arch.hvm_vcpu.cache_tsc_offset =
        v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);

    LOCK_BIGLOCK(d);
    rc = -EEXIST;
    if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        rc = boot_vcpu(d, vcpuid, ctxt);
    UNLOCK_BIGLOCK(d);

    if ( rc != 0 )
    {
        gdprintk(XENLOG_ERR,
                 "AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
        goto out;
    }

    if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
        vcpu_wake(d->vcpu[vcpuid]);
    gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);

 out:
    xfree(ctxt);
    return rc;
}

static int hvmop_set_pci_intx_level(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop)
{
    struct xen_hvm_set_pci_intx_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
        return -EINVAL;

    d = find_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_pci_intx_deassert(d, op.device, op.intx);
        break;
    case 1:
        hvm_pci_intx_assert(d, op.device, op.intx);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    put_domain(d);
    return rc;
}

static int hvmop_set_isa_irq_level(
    XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
{
    struct xen_hvm_set_isa_irq_level op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( op.isa_irq > 15 )
        return -EINVAL;

    d = find_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    switch ( op.level )
    {
    case 0:
        hvm_isa_irq_deassert(d, op.isa_irq);
        break;
    case 1:
        hvm_isa_irq_assert(d, op.isa_irq);
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    put_domain(d);
    return rc;
}

static int hvmop_set_pci_link_route(
    XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t) uop)
{
    struct xen_hvm_set_pci_link_route op;
    struct domain *d;
    int rc;

    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.link > 3) || (op.isa_irq > 15) )
        return -EINVAL;

    d = find_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = 0;
    hvm_set_pci_link_route(d, op.link, op.isa_irq);

 out:
    put_domain(d);
    return rc;
}

long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct domain *d;
        struct vcpu *v;
        unsigned long mfn;
        void *p;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
        {
            get_knownalive_domain(current->domain);
            d = current->domain;
        }
        else if ( IS_PRIV(current->domain) )
        {
            d = find_domain_by_id(a.domid);
            if ( d == NULL )
                return -ESRCH;
        }
        else
        {
            return -EPERM;
        }

        rc = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto param_fail;

        if ( op == HVMOP_set_param )
        {
            switch ( a.index )
            {
            case HVM_PARAM_IOREQ_PFN:
                if ( d->arch.hvm_domain.shared_page_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
                /* Initialise evtchn port info if VCPUs already created. */
                for_each_vcpu ( d, v )
                    get_vio(d, v->vcpu_id)->vp_eport =
                        v->arch.hvm_vcpu.xen_port;
                break;
            case HVM_PARAM_BUFIOREQ_PFN:
                if ( d->arch.hvm_domain.buffered_io_va )
                    goto param_fail;
                mfn = gmfn_to_mfn(d, a.value);
                if ( mfn == INVALID_MFN )
                    goto param_fail;
                p = map_domain_page_global(mfn);
                if ( p == NULL )
                    goto param_fail;
                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
                break;
            case HVM_PARAM_CALLBACK_IRQ:
                hvm_set_callback_via(d, a.value);
                break;
            }
            d->arch.hvm_domain.params[a.index] = a.value;
            rc = 0;
        }
        else
        {
            a.value = d->arch.hvm_domain.params[a.index];
            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
        }

    param_fail:
        put_domain(d);
        break;
    }

    case HVMOP_set_pci_intx_level:
        rc = hvmop_set_pci_intx_level(
            guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
        break;

    case HVMOP_set_isa_irq_level:
        rc = hvmop_set_isa_irq_level(
            guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
        break;

    case HVMOP_set_pci_link_route:
        rc = hvmop_set_pci_link_route(
            guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
        break;

    default:
    {
        gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
        break;
    }
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */