debuggers.hg

view xen/arch/x86/hvm/hvm.c @ 13692:47e26ced172a

[HVM] save/restore fix

* do not save a "down" vcpu's vmcs, to avoid a system crash
* make the hvm_ctxt buffer bigger, as all vmcs contexts sit in it; otherwise >=4 vcpus cause an overflow

Signed-off-by: Zhai Edwin <edwin.zhai@intel.com>
author kaf24@localhost.localdomain
date Sun Jan 28 09:46:09 2007 +0000 (2007-01-28)
parents 271ffb1c12eb
children 99d36a153024
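
The first point of the fix is implemented by hvm_save_cpu_ctxt()/hvm_load_cpu_ctxt() in the listing below. A minimal sketch of the per-VCPU record format they rely on, using only names that appear in this file:

    /* Record written by hvm_save_cpu_ctxt() for each VCPU:
     *
     *   [ 32-bit marker ][ arch-specific context, if present ]
     *
     * marker == HVM_VCPU_CTXT_MAGIC (0x85963130) -> context follows
     * marker == 0x0                              -> VCPU was "down"; nothing
     *                                               else is saved for it
     *
     * On restore, hvm_load_cpu_ctxt() returns 0 as soon as the marker does
     * not match, so a down VCPU's (absent) VMCS is never touched.
     */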
line source
1 /*
2 * hvm.c: Common hardware virtual machine abstractions.
3 *
4 * Copyright (c) 2004, Intel Corporation.
5 * Copyright (c) 2005, International Business Machines Corporation.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
18 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 */
21 #include <xen/config.h>
22 #include <xen/init.h>
23 #include <xen/lib.h>
24 #include <xen/trace.h>
25 #include <xen/sched.h>
26 #include <xen/irq.h>
27 #include <xen/softirq.h>
28 #include <xen/domain.h>
29 #include <xen/domain_page.h>
30 #include <xen/hypercall.h>
31 #include <xen/guest_access.h>
32 #include <xen/event.h>
33 #include <xen/shadow.h>
34 #include <asm/current.h>
35 #include <asm/e820.h>
36 #include <asm/io.h>
37 #include <asm/shadow.h>
38 #include <asm/regs.h>
39 #include <asm/cpufeature.h>
40 #include <asm/processor.h>
41 #include <asm/types.h>
42 #include <asm/msr.h>
43 #include <asm/mc146818rtc.h>
44 #include <asm/spinlock.h>
45 #include <asm/hvm/hvm.h>
46 #include <asm/hvm/vpt.h>
47 #include <asm/hvm/support.h>
48 #include <public/sched.h>
49 #include <public/hvm/ioreq.h>
50 #include <public/version.h>
51 #include <public/memory.h>
53 int hvm_enabled;
55 unsigned int opt_hvm_debug_level;
56 integer_param("hvm_debug", opt_hvm_debug_level);
58 struct hvm_function_table hvm_funcs;
60 /* I/O permission bitmap is globally shared by all HVM guests. */
61 char __attribute__ ((__section__ (".bss.page_aligned")))
62 hvm_io_bitmap[3*PAGE_SIZE];
64 void hvm_enable(void)
65 {
66 if ( hvm_enabled )
67 return;
69 /*
70 * Allow direct access to the PC debug port (it is often used for I/O
71 * delays, but the vmexits simply slow things down).
72 */
73 memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap));
74 clear_bit(0x80, hvm_io_bitmap);
76 hvm_enabled = 1;
77 }
79 void hvm_stts(struct vcpu *v)
80 {
81 /* FPU state already dirty? Then no need to setup_fpu() lazily. */
82 if ( !test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
83 hvm_funcs.stts(v);
84 }
86 void hvm_set_guest_time(struct vcpu *v, u64 gtime)
87 {
88 u64 host_tsc;
90 rdtscll(host_tsc);
92 v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
93 hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
94 }
96 u64 hvm_get_guest_time(struct vcpu *v)
97 {
98 u64 host_tsc;
100 rdtscll(host_tsc);
101 return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
102 }
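/*
 * Worked view of the offset arithmetic in the two helpers above:
 *
 *   set: cache_tsc_offset = gtime - host_tsc
 *   get: guest_time = host_tsc + cache_tsc_offset
 *                   = host_tsc + (gtime - host_tsc_at_set_time)
 *
 * The guest clock therefore advances at the host TSC rate but starts from
 * whatever was passed to hvm_set_guest_time() (0 at VCPU creation; see
 * hvm_vcpu_initialise() below).
 */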
104 void hvm_migrate_timers(struct vcpu *v)
105 {
106 pit_migrate_timers(v);
107 rtc_migrate_timers(v);
108 hpet_migrate_timers(v);
109 pmtimer_migrate_timers(v);
110 if ( vcpu_vlapic(v)->pt.enabled )
111 migrate_timer(&vcpu_vlapic(v)->pt.timer, v->processor);
112 }
114 void hvm_do_resume(struct vcpu *v)
115 {
116 ioreq_t *p;
118 hvm_stts(v);
120 pt_thaw_time(v);
122 /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
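/*
 * ioreq lifecycle, as driven by this loop and hvm_send_assist_req() below:
 *
 *   STATE_IOREQ_NONE      -> STATE_IOREQ_READY      (hvm_send_assist_req)
 *   STATE_IOREQ_READY     -> STATE_IOREQ_INPROCESS  (device model)
 *   STATE_IOREQ_INPROCESS -> STATE_IORESP_READY     (device model)
 *   STATE_IORESP_READY    -> STATE_IOREQ_NONE       (hvm_io_assist)
 */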
123 p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
124 while ( p->state != STATE_IOREQ_NONE )
125 {
126 switch ( p->state )
127 {
128 case STATE_IORESP_READY: /* IORESP_READY -> NONE */
129 hvm_io_assist(v);
130 break;
131 case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
132 case STATE_IOREQ_INPROCESS:
133 wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
134 (p->state != STATE_IOREQ_READY) &&
135 (p->state != STATE_IOREQ_INPROCESS));
136 break;
137 default:
138 gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
139 domain_crash_synchronous();
140 }
141 }
142 }
144 int hvm_domain_initialise(struct domain *d)
145 {
146 int rc;
148 if ( !hvm_enabled )
149 {
150 gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
151 "on a non-VT/AMDV platform.\n");
152 return -EINVAL;
153 }
155 spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
156 spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
157 spin_lock_init(&d->arch.hvm_domain.irq_lock);
159 rc = shadow_enable(d, SHM2_refcounts|SHM2_translate|SHM2_external);
160 if ( rc != 0 )
161 return rc;
163 vpic_init(d);
164 vioapic_init(d);
166 return 0;
167 }
169 void hvm_domain_destroy(struct domain *d)
170 {
171 HVMStateEntry *se, *dse;
172 pit_deinit(d);
173 rtc_deinit(d);
174 pmtimer_deinit(d);
175 hpet_deinit(d);
177 se = d->arch.hvm_domain.first_se;
178 while (se) {
179 dse = se;
180 se = se->next;
181 xfree(dse);
182 }
184 if ( d->arch.hvm_domain.shared_page_va )
185 unmap_domain_page_global(
186 (void *)d->arch.hvm_domain.shared_page_va);
188 if ( d->arch.hvm_domain.buffered_io_va )
189 unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
190 }
192 #define HVM_VCPU_CTXT_MAGIC 0x85963130
193 void hvm_save_cpu_ctxt(hvm_domain_context_t *h, void *opaque)
194 {
195 struct vcpu *v = opaque;
197 if ( test_bit(_VCPUF_down, &v->vcpu_flags) ) {
198 hvm_put_32u(h, 0x0);
199 return;
200 }
202 hvm_put_32u(h, HVM_VCPU_CTXT_MAGIC);
203 hvm_funcs.save_cpu_ctxt(h, opaque);
204 }
206 int hvm_load_cpu_ctxt(hvm_domain_context_t *h, void *opaque, int version)
207 {
208 struct vcpu *v = opaque;
210 if ( hvm_get_32u(h) != HVM_VCPU_CTXT_MAGIC )
211 return 0;
213 if ( hvm_funcs.load_cpu_ctxt(h, opaque, version) < 0 )
214 return -EINVAL;
216 /* Auxiliary processors should be woken immediately. */
217 if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
218 vcpu_wake(v);
220 return 0;
221 }
223 int hvm_vcpu_initialise(struct vcpu *v)
224 {
225 int rc;
227 hvm_register_savevm(v->domain, "xen_hvm_cpu", v->vcpu_id, 1,
228 hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
229 (void *)v);
231 if ( (rc = vlapic_init(v)) != 0 )
232 return rc;
234 if ( (rc = hvm_funcs.vcpu_initialise(v)) != 0 )
235 {
236 vlapic_destroy(v);
237 return rc;
238 }
240 /* Create ioreq event channel. */
241 v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
242 if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
243 get_vio(v->domain, v->vcpu_id)->vp_eport =
244 v->arch.hvm_vcpu.xen_port;
246 INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
248 if ( v->vcpu_id != 0 )
249 return 0;
251 pit_init(v, cpu_khz);
252 rtc_init(v, RTC_PORT(0), RTC_IRQ);
253 pmtimer_init(v, ACPI_PM_TMR_BLK_ADDRESS);
254 hpet_init(v);
256 /* Init the HVM shared page. */
257 shpage_init(v->domain, get_sp(v->domain));
259 /* Init guest TSC to start from zero. */
260 hvm_set_guest_time(v, 0);
262 return 0;
263 }
265 void hvm_vcpu_destroy(struct vcpu *v)
266 {
267 vlapic_destroy(v);
268 hvm_funcs.vcpu_destroy(v);
270 /* Event channel is already freed by evtchn_destroy(). */
271 /*free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);*/
272 }
274 static void hvm_vcpu_down(void)
275 {
276 struct vcpu *v = current;
277 struct domain *d = v->domain;
278 int online_count = 0;
280 gdprintk(XENLOG_INFO, "DOM%d/VCPU%d: going offline.\n",
281 d->domain_id, v->vcpu_id);
283 /* Doesn't halt us immediately, but we'll never return to guest context. */
284 set_bit(_VCPUF_down, &v->vcpu_flags);
285 vcpu_sleep_nosync(v);
287 /* Any other VCPUs online? ... */
288 LOCK_BIGLOCK(d);
289 for_each_vcpu ( d, v )
290 if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
291 online_count++;
292 UNLOCK_BIGLOCK(d);
294 /* ... Shut down the domain if not. */
295 if ( online_count == 0 )
296 {
297 gdprintk(XENLOG_INFO, "DOM%d: all CPUs offline -- powering off.\n",
298 d->domain_id);
299 domain_shutdown(d, SHUTDOWN_poweroff);
300 }
301 }
303 void hvm_send_assist_req(struct vcpu *v)
304 {
305 ioreq_t *p;
307 p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
308 if ( unlikely(p->state != STATE_IOREQ_NONE) )
309 {
310 /* This indicates a bug in the device model. Crash the domain. */
311 gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
312 domain_crash_synchronous();
313 }
315 prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
317 /*
318 * Following happens /after/ blocking and setting up ioreq contents.
319 * prepare_wait_on_xen_event_channel() is an implicit barrier.
320 */
321 p->state = STATE_IOREQ_READY;
322 notify_via_xen_event_channel(v->arch.hvm_vcpu.xen_port);
323 }
325 void hvm_hlt(unsigned long rflags)
326 {
327 /*
328 * If we halt with interrupts disabled, that's a pretty sure sign that we
329 * want to shut down. In a real processor, NMIs are the only way to break
330 * out of this.
331 */
332 if ( unlikely(!(rflags & X86_EFLAGS_IF)) )
333 return hvm_vcpu_down();
335 do_sched_op_compat(SCHEDOP_block, 0);
336 }
338 void hvm_triple_fault(void)
339 {
340 struct vcpu *v = current;
341 gdprintk(XENLOG_INFO, "Triple fault on VCPU%d - "
342 "invoking HVM system reset.\n", v->vcpu_id);
343 domain_shutdown(v->domain, SHUTDOWN_reboot);
344 }
346 /*
347 * __hvm_copy():
348 * @buf = hypervisor buffer
349 * @addr = guest address to copy to/from
350 * @size = number of bytes to copy
351 * @dir = copy *to* guest (TRUE) or *from* guest (FALSE)?
352 * @virt = addr is *virtual* (TRUE) or *guest physical* (FALSE)?
353 * Returns number of bytes failed to copy (0 == complete success).
354 */
355 static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, int virt)
356 {
357 unsigned long mfn;
358 char *p;
359 int count, todo;
361 todo = size;
362 while ( todo > 0 )
363 {
364 count = min_t(int, PAGE_SIZE - (addr & ~PAGE_MASK), todo);
366 if ( virt )
367 mfn = get_mfn_from_gpfn(shadow_gva_to_gfn(current, addr));
368 else
369 mfn = get_mfn_from_gpfn(addr >> PAGE_SHIFT);
371 if ( mfn == INVALID_MFN )
372 return todo;
374 p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
376 if ( dir )
377 memcpy(p, buf, count); /* dir == TRUE: *to* guest */
378 else
379 memcpy(buf, p, count); /* dir == FALSE: *from* guest */
381 unmap_domain_page(p);
383 addr += count;
384 buf += count;
385 todo -= count;
386 }
388 return 0;
389 }
391 int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size)
392 {
393 return __hvm_copy(buf, paddr, size, 1, 0);
394 }
396 int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size)
397 {
398 return __hvm_copy(buf, paddr, size, 0, 0);
399 }
401 int hvm_copy_to_guest_virt(unsigned long vaddr, void *buf, int size)
402 {
403 return __hvm_copy(buf, vaddr, size, 1, 1);
404 }
406 int hvm_copy_from_guest_virt(void *buf, unsigned long vaddr, int size)
407 {
408 return __hvm_copy(buf, vaddr, size, 0, 1);
409 }
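/*
 * Typical (hypothetical) caller of the wrappers above. The return value is
 * the number of bytes that could not be copied, so anything non-zero is
 * treated as a fault:
 *
 *     struct some_struct s;                        (illustrative type only)
 *     if ( hvm_copy_from_guest_virt(&s, vaddr, sizeof(s)) != 0 )
 *         return -EFAULT;
 */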
412 /* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
413 void hvm_print_line(struct vcpu *v, const char c)
414 {
415 struct hvm_domain *hd = &v->domain->arch.hvm_domain;
417 spin_lock(&hd->pbuf_lock);
418 hd->pbuf[hd->pbuf_idx++] = c;
419 if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
420 {
421 if ( c != '\n' )
422 hd->pbuf[hd->pbuf_idx++] = '\n';
423 hd->pbuf[hd->pbuf_idx] = '\0';
424 printk(XENLOG_G_DEBUG "HVM%u: %s", v->domain->domain_id, hd->pbuf);
425 hd->pbuf_idx = 0;
426 }
427 spin_unlock(&hd->pbuf_lock);
428 }
430 void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
431 unsigned int *ecx, unsigned int *edx)
432 {
433 if ( !cpuid_hypervisor_leaves(input, eax, ebx, ecx, edx) )
434 {
435 cpuid(input, eax, ebx, ecx, edx);
437 if ( input == 0x00000001 )
438 {
439 struct vcpu *v = current;
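/* X86_FEATURE_* constants encode (word * 32) + bit, so the "& 31" below
 * yields the bit position within this leaf's ECX/EDX word. */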
441 clear_bit(X86_FEATURE_MWAIT & 31, ecx);
443 if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
444 clear_bit(X86_FEATURE_APIC & 31, edx);
446 #if CONFIG_PAGING_LEVELS >= 3
447 if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
448 #endif
449 clear_bit(X86_FEATURE_PAE & 31, edx);
450 clear_bit(X86_FEATURE_PSE36 & 31, edx);
451 }
452 else if ( input == 0x80000001 )
453 {
454 #if CONFIG_PAGING_LEVELS >= 3
455 struct vcpu *v = current;
456 if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_PAE_ENABLED] )
457 #endif
458 clear_bit(X86_FEATURE_NX & 31, edx);
459 #ifdef __i386__
460 /* Mask feature for Intel ia32e or AMD long mode. */
461 clear_bit(X86_FEATURE_LAHF_LM & 31, ecx);
463 clear_bit(X86_FEATURE_LM & 31, edx);
464 clear_bit(X86_FEATURE_SYSCALL & 31, edx);
465 #endif
466 }
467 }
468 }
470 typedef unsigned long hvm_hypercall_t(
471 unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
473 #define HYPERCALL(x) \
474 [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
475 #define HYPERCALL_COMPAT32(x) \
476 [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x ## _compat32
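/*
 * Example expansion: HYPERCALL(memory_op) becomes the designated initialiser
 *
 *     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *) do_memory_op
 *
 * so each table below maps a hypercall number directly to its handler,
 * leaving unlisted entries NULL.
 */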
478 #if defined(__i386__)
480 static hvm_hypercall_t *hvm_hypercall_table[NR_hypercalls] = {
481 HYPERCALL(memory_op),
482 HYPERCALL(multicall),
483 HYPERCALL(xen_version),
484 HYPERCALL(event_channel_op),
485 HYPERCALL(hvm_op)
486 };
488 void hvm_do_hypercall(struct cpu_user_regs *pregs)
489 {
490 if ( unlikely(ring_3(pregs)) )
491 {
492 pregs->eax = -EPERM;
493 return;
494 }
496 if ( (pregs->eax >= NR_hypercalls) || !hvm_hypercall_table[pregs->eax] )
497 {
498 gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %d.\n",
499 current->domain->domain_id, current->vcpu_id,
500 pregs->eax);
501 pregs->eax = -ENOSYS;
502 return;
503 }
505 pregs->eax = hvm_hypercall_table[pregs->eax](
506 pregs->ebx, pregs->ecx, pregs->edx, pregs->esi, pregs->edi);
507 }
509 #else /* defined(__x86_64__) */
511 static long do_memory_op_compat32(int cmd, XEN_GUEST_HANDLE(void) arg)
512 {
513 extern long do_add_to_physmap(struct xen_add_to_physmap *xatp);
514 long rc;
516 switch ( cmd )
517 {
518 case XENMEM_add_to_physmap:
519 {
520 struct {
521 domid_t domid;
522 uint32_t space;
523 uint32_t idx;
524 uint32_t gpfn;
525 } u;
526 struct xen_add_to_physmap h;
528 if ( copy_from_guest(&u, arg, 1) )
529 return -EFAULT;
531 h.domid = u.domid;
532 h.space = u.space;
533 h.idx = u.idx;
534 h.gpfn = u.gpfn;
536 this_cpu(guest_handles_in_xen_space) = 1;
537 rc = do_memory_op(cmd, guest_handle_from_ptr(&h, void));
538 this_cpu(guest_handles_in_xen_space) = 0;
540 break;
541 }
543 default:
544 gdprintk(XENLOG_WARNING, "memory_op %d.\n", cmd);
545 rc = -ENOSYS;
546 break;
547 }
549 return rc;
550 }
552 static hvm_hypercall_t *hvm_hypercall64_table[NR_hypercalls] = {
553 HYPERCALL(memory_op),
554 HYPERCALL(xen_version),
555 HYPERCALL(hvm_op),
556 HYPERCALL(event_channel_op)
557 };
559 static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
560 HYPERCALL_COMPAT32(memory_op),
561 HYPERCALL(xen_version),
562 HYPERCALL(hvm_op),
563 HYPERCALL(event_channel_op)
564 };
566 void hvm_do_hypercall(struct cpu_user_regs *pregs)
567 {
568 if ( unlikely(ring_3(pregs)) )
569 {
570 pregs->rax = -EPERM;
571 return;
572 }
574 pregs->rax = (uint32_t)pregs->eax; /* mask in case compat32 caller */
575 if ( (pregs->rax >= NR_hypercalls) || !hvm_hypercall64_table[pregs->rax] )
576 {
577 gdprintk(XENLOG_WARNING, "HVM vcpu %d:%d did a bad hypercall %ld.\n",
578 current->domain->domain_id, current->vcpu_id,
579 pregs->rax);
580 pregs->rax = -ENOSYS;
581 return;
582 }
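/*
 * 64-bit guests (4 paging levels) pass hypercall arguments in
 * rdi/rsi/rdx/r10/r8; 32-bit (compat) guests use ebx/ecx/edx/esi/edi,
 * matching the two tables above.
 */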
584 if ( current->arch.shadow.mode->guest_levels == 4 )
585 {
586 pregs->rax = hvm_hypercall64_table[pregs->rax](pregs->rdi,
587 pregs->rsi,
588 pregs->rdx,
589 pregs->r10,
590 pregs->r8);
591 }
592 else
593 {
594 pregs->eax = hvm_hypercall32_table[pregs->eax]((uint32_t)pregs->ebx,
595 (uint32_t)pregs->ecx,
596 (uint32_t)pregs->edx,
597 (uint32_t)pregs->esi,
598 (uint32_t)pregs->edi);
599 }
600 }
602 #endif /* defined(__x86_64__) */
604 void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
605 {
606 v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
607 hvm_funcs.update_guest_cr3(v);
608 }
610 /* Initialise a hypercall transfer page for a VMX domain using
611 paravirtualised drivers. */
612 void hvm_hypercall_page_initialise(struct domain *d,
613 void *hypercall_page)
614 {
615 hvm_funcs.init_hypercall_page(d, hypercall_page);
616 }
619 /*
620 * Only called in HVM domain BSP context.
621 * When booting, vcpuid is always equal to apic_id.
622 */
623 int hvm_bringup_ap(int vcpuid, int trampoline_vector)
624 {
625 struct vcpu *bsp = current, *v;
626 struct domain *d = bsp->domain;
627 struct vcpu_guest_context *ctxt;
628 int rc = 0;
630 BUG_ON(!is_hvm_domain(d));
632 if ( bsp->vcpu_id != 0 )
633 {
634 gdprintk(XENLOG_ERR, "Not calling hvm_bringup_ap from BSP context.\n");
635 domain_crash(bsp->domain);
636 return -EINVAL;
637 }
639 if ( (v = d->vcpu[vcpuid]) == NULL )
640 return -ENOENT;
642 if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
643 {
644 gdprintk(XENLOG_ERR,
645 "Failed to allocate memory in hvm_bringup_ap.\n");
646 return -ENOMEM;
647 }
649 hvm_init_ap_context(ctxt, vcpuid, trampoline_vector);
651 /* Sync AP's TSC with BSP's. */
652 v->arch.hvm_vcpu.cache_tsc_offset =
653 v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
654 hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
656 LOCK_BIGLOCK(d);
657 rc = -EEXIST;
658 if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
659 rc = boot_vcpu(d, vcpuid, ctxt);
660 UNLOCK_BIGLOCK(d);
662 if ( rc != 0 )
663 {
664 gdprintk(XENLOG_ERR,
665 "AP %d bringup failed in boot_vcpu %x.\n", vcpuid, rc);
666 goto out;
667 }
669 if ( test_and_clear_bit(_VCPUF_down, &d->vcpu[vcpuid]->vcpu_flags) )
670 vcpu_wake(d->vcpu[vcpuid]);
671 gdprintk(XENLOG_INFO, "AP %d bringup succeeded.\n", vcpuid);
673 out:
674 xfree(ctxt);
675 return rc;
676 }
678 static int hvmop_set_pci_intx_level(
679 XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t) uop)
680 {
681 struct xen_hvm_set_pci_intx_level op;
682 struct domain *d;
683 int rc;
685 if ( copy_from_guest(&op, uop, 1) )
686 return -EFAULT;
688 if ( !IS_PRIV(current->domain) )
689 return -EPERM;
691 if ( (op.domain > 0) || (op.bus > 0) || (op.device > 31) || (op.intx > 3) )
692 return -EINVAL;
694 d = get_domain_by_id(op.domid);
695 if ( d == NULL )
696 return -ESRCH;
698 rc = -EINVAL;
699 if ( !is_hvm_domain(d) )
700 goto out;
702 rc = 0;
703 switch ( op.level )
704 {
705 case 0:
706 hvm_pci_intx_deassert(d, op.device, op.intx);
707 break;
708 case 1:
709 hvm_pci_intx_assert(d, op.device, op.intx);
710 break;
711 default:
712 rc = -EINVAL;
713 break;
714 }
716 out:
717 put_domain(d);
718 return rc;
719 }
721 static int hvmop_set_isa_irq_level(
722 XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t) uop)
723 {
724 struct xen_hvm_set_isa_irq_level op;
725 struct domain *d;
726 int rc;
728 if ( copy_from_guest(&op, uop, 1) )
729 return -EFAULT;
731 if ( !IS_PRIV(current->domain) )
732 return -EPERM;
734 if ( op.isa_irq > 15 )
735 return -EINVAL;
737 d = get_domain_by_id(op.domid);
738 if ( d == NULL )
739 return -ESRCH;
741 rc = -EINVAL;
742 if ( !is_hvm_domain(d) )
743 goto out;
745 rc = 0;
746 switch ( op.level )
747 {
748 case 0:
749 hvm_isa_irq_deassert(d, op.isa_irq);
750 break;
751 case 1:
752 hvm_isa_irq_assert(d, op.isa_irq);
753 break;
754 default:
755 rc = -EINVAL;
756 break;
757 }
759 out:
760 put_domain(d);
761 return rc;
762 }
764 static int hvmop_set_pci_link_route(
765 XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t) uop)
766 {
767 struct xen_hvm_set_pci_link_route op;
768 struct domain *d;
769 int rc;
771 if ( copy_from_guest(&op, uop, 1) )
772 return -EFAULT;
774 if ( !IS_PRIV(current->domain) )
775 return -EPERM;
777 if ( (op.link > 3) || (op.isa_irq > 15) )
778 return -EINVAL;
780 d = get_domain_by_id(op.domid);
781 if ( d == NULL )
782 return -ESRCH;
784 rc = -EINVAL;
785 if ( !is_hvm_domain(d) )
786 goto out;
788 rc = 0;
789 hvm_set_pci_link_route(d, op.link, op.isa_irq);
791 out:
792 put_domain(d);
793 return rc;
794 }
796 long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
798 {
799 long rc = 0;
801 switch ( op )
802 {
803 case HVMOP_set_param:
804 case HVMOP_get_param:
805 {
806 struct xen_hvm_param a;
807 struct domain *d;
808 struct vcpu *v;
809 unsigned long mfn;
810 void *p;
812 if ( copy_from_guest(&a, arg, 1) )
813 return -EFAULT;
815 if ( a.index >= HVM_NR_PARAMS )
816 return -EINVAL;
818 if ( a.domid == DOMID_SELF )
819 {
820 get_knownalive_domain(current->domain);
821 d = current->domain;
822 }
823 else if ( IS_PRIV(current->domain) )
824 {
825 d = get_domain_by_id(a.domid);
826 if ( d == NULL )
827 return -ESRCH;
828 }
829 else
830 {
831 return -EPERM;
832 }
834 rc = -EINVAL;
835 if ( !is_hvm_domain(d) )
836 goto param_fail;
838 if ( op == HVMOP_set_param )
839 {
840 switch ( a.index )
841 {
842 case HVM_PARAM_IOREQ_PFN:
843 if ( d->arch.hvm_domain.shared_page_va )
844 goto param_fail;
845 mfn = gmfn_to_mfn(d, a.value);
846 if ( mfn == INVALID_MFN )
847 goto param_fail;
848 p = map_domain_page_global(mfn);
849 if ( p == NULL )
850 goto param_fail;
851 d->arch.hvm_domain.shared_page_va = (unsigned long)p;
852 /* Initialise evtchn port info if VCPUs already created. */
853 for_each_vcpu ( d, v )
854 get_vio(d, v->vcpu_id)->vp_eport =
855 v->arch.hvm_vcpu.xen_port;
856 break;
857 case HVM_PARAM_BUFIOREQ_PFN:
858 if ( d->arch.hvm_domain.buffered_io_va )
859 goto param_fail;
860 mfn = gmfn_to_mfn(d, a.value);
861 if ( mfn == INVALID_MFN )
862 goto param_fail;
863 p = map_domain_page_global(mfn);
864 if ( p == NULL )
865 goto param_fail;
866 d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
867 break;
868 case HVM_PARAM_CALLBACK_IRQ:
869 hvm_set_callback_via(d, a.value);
870 break;
871 }
872 d->arch.hvm_domain.params[a.index] = a.value;
873 rc = 0;
874 }
875 else
876 {
877 a.value = d->arch.hvm_domain.params[a.index];
878 rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
879 }
881 param_fail:
882 put_domain(d);
883 break;
884 }
886 case HVMOP_set_pci_intx_level:
887 rc = hvmop_set_pci_intx_level(
888 guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
889 break;
891 case HVMOP_set_isa_irq_level:
892 rc = hvmop_set_isa_irq_level(
893 guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
894 break;
896 case HVMOP_set_pci_link_route:
897 rc = hvmop_set_pci_link_route(
898 guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
899 break;
901 default:
902 {
903 gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
904 rc = -ENOSYS;
905 break;
906 }
907 }
909 return rc;
910 }
912 /*
913 * Local variables:
914 * mode: C
915 * c-set-style: "BSD"
916 * c-basic-offset: 4
917 * tab-width: 4
918 * indent-tabs-mode: nil
919 * End:
920 */