view xen/arch/ia64/vmx/vmx_support.c @ 16392:91575bb23d07

[IA64] vti save-restore: hvm domain io page clean up.

- set_hvm_param hypercall clean up.
- The reference counts of the io pages must be incremented.
- Buffered pio wasn't SMP safe.
- Clean up get_vio() parameter.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author:    Alex Williamson <alex.williamson@hp.com>
date:      Wed Nov 07 10:31:09 2007 -0700 (2007-11-07)
parents:   704151d0e219
children:  809b20f066fb
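
The note above that "the reference counts of the io pages must be incremented"
is about pinning the guest pages backing the shared ioreq area so the guest
cannot free them while Xen still uses them. The sketch below is not part of
this changeset and only illustrates that idea: struct vmx_ioreq_page and the
helpers vmx_set_ioreq_page()/vmx_clear_ioreq_page() are hypothetical names,
while mfn_to_page(), get_page() and put_page() are standard Xen primitives
assumed to behave as usual.

#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/errno.h>

/* Hypothetical holder for one shared I/O page; the hypervisor mapping
 * of the page is elided here. */
struct vmx_ioreq_page {
    struct page_info *page;
};

/* Take a reference so the guest cannot free the page while Xen uses it. */
static int vmx_set_ioreq_page(struct domain *d, struct vmx_ioreq_page *iorp,
                              unsigned long mfn)
{
    struct page_info *page = mfn_to_page(mfn);

    if (!get_page(page, d))
        return -EINVAL;    /* page is not owned by this domain */

    iorp->page = page;
    return 0;
}

/* Drop the reference taken by vmx_set_ioreq_page(). */
static void vmx_clear_ioreq_page(struct vmx_ioreq_page *iorp)
{
    if (iorp->page != NULL) {
        put_page(iorp->page);
        iorp->page = NULL;
    }
}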
line source
/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_support.c: vmx specific support interface.
 * Copyright (c) 2005, Intel Corporation.
 * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#include <xen/config.h>
#include <xen/sched.h>
#include <xen/hypercall.h>
#include <xen/event.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>

/*
 * The only place that calls vmx_io_assist() is mmio/legacy_io emulation.
 * Since I/O emulation is synchronous, it shouldn't be called anywhere
 * else. This is unlike x86, since IA-64 implements a per-vp stack
 * without continuation.
 */
void vmx_io_assist(struct vcpu *v)
{
    vcpu_iodata_t *vio;
    ioreq_t *p;

    /*
     * This shared page contains the I/O request passed between the
     * emulation code and the device model.
     */
    vio = get_vio(v);
    if (!vio)
        panic_domain(vcpu_regs(v), "Corruption: bad shared page: %lx\n",
                     (unsigned long)vio);

    p = &vio->vp_ioreq;

    if (p->state == STATE_IORESP_READY) {
        p->state = STATE_IOREQ_NONE;
    }
    else {
        /* Can't block here, for the same reason as the other places
         * that use vmx_wait_io. Simply returning is safe since
         * vmx_wait_io will try to block again.
         */
        return;
    }
}

void vmx_send_assist_req(struct vcpu *v)
{
    ioreq_t *p;

    p = &get_vio(v)->vp_ioreq;
    if (unlikely(p->state != STATE_IOREQ_NONE)) {
        /* This indicates a bug in the device model. Crash the
           domain. */
        printk("Device model set bad IO state %d.\n", p->state);
        domain_crash(v->domain);
        return;
    }
    wmb();
    p->state = STATE_IOREQ_READY;
    notify_via_xen_event_channel(v->arch.arch_vmx.xen_port);

    for (;;) {
        if (p->state != STATE_IOREQ_READY &&
            p->state != STATE_IOREQ_INPROCESS)
            break;

        set_bit(_VPF_blocked_in_xen, &current->pause_flags);
        mb(); /* set blocked status /then/ re-evaluate condition */
        if (p->state != STATE_IOREQ_READY &&
            p->state != STATE_IOREQ_INPROCESS)
        {
            clear_bit(_VPF_blocked_in_xen, &current->pause_flags);
            break;
        }

        raise_softirq(SCHEDULE_SOFTIRQ);
        do_softirq();
        mb();
    }

    /* The code below this point is the completion phase. */
    vmx_io_assist(v);
}
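
For context, here is a hedged sketch of how an MMIO read emulation path would
hand a request to the device model with vmx_send_assist_req(). It is not code
from this file: the function name vmx_mmio_read_sketch() is made up, while
get_vio(), ioreq_t, IOREQ_READ and IOREQ_TYPE_COPY come from the listing above
and public/hvm/ioreq.h.

static unsigned long vmx_mmio_read_sketch(struct vcpu *v,
                                          unsigned long gpa,
                                          unsigned int size)
{
    ioreq_t *p = &get_vio(v)->vp_ioreq;

    p->addr        = gpa;              /* guest physical address */
    p->size        = size;             /* access width in bytes */
    p->count       = 1;                /* single access, no rep prefix */
    p->dir         = IOREQ_READ;       /* device model supplies the data */
    p->data_is_ptr = 0;                /* result returned inline in p->data */
    p->type        = IOREQ_TYPE_COPY;  /* memory-mapped I/O */
    p->df          = 0;

    /*
     * Synchronous on IA-64: vmx_send_assist_req() returns only after
     * vmx_io_assist() has consumed STATE_IORESP_READY, so p->data
     * already holds the device model's answer here.
     */
    vmx_send_assist_req(v);

    return p->data;
}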