
view xen/arch/ia64/vmx/vmx_support.c @ 10953:d7242c3a2906

[HVM] Remove unused ioreq state IORESP_HOOK.
Signed-off-by: Steven Smith <ssmith@xensource.com>
author kfraser@localhost.localdomain
date Thu Aug 03 14:02:29 2006 +0100 (2006-08-03)
parents 75b23b6a7cb7
children 66c27919578f
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_support.c: vmx specific support interface.
 * Copyright (c) 2005, Intel Corporation.
 * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#include <xen/config.h>
#include <xen/sched.h>
#include <xen/hypercall.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>

/*
 * I/O emulation should be atomic from the domain's point of view. However,
 * when the emulation code blocks while waiting for I/O completion,
 * other events like a DM interrupt, VBD, etc. may arrive and unblock the
 * current execution flow. So we have to be prepared to re-block if unblocked
 * by a non-I/O-completion event. After I/O emulation is done, re-enable the
 * pending indication if other ports are still pending.
 */
void vmx_wait_io(void)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;
    int port = iopacket_port(v);

    for (;;) {
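        /*
         * Consume the notification for our I/O port: clear the vcpu's
         * upcall-pending flag, the pending selector word, and the per-port
         * pending bit. Only when all three were set do we try to pick up
         * the completion via vmx_io_assist.
         */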
        if (test_and_clear_bit(0, &v->vcpu_info->evtchn_upcall_pending) &&
            test_and_clear_bit(port / BITS_PER_LONG,
                               &v->vcpu_info->evtchn_pending_sel) &&
            test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]))
            vmx_io_assist(v);

        if (!test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
            break;
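
        /* Still waiting on the device model: block until the next
         * event channel notification arrives. */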
        do_sched_op_compat(SCHEDOP_block, 0);
    }

    /* Re-enable the indication if other events are pending. */
    if (d->shared_info->evtchn_pending[port / BITS_PER_LONG])
        set_bit(port / BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);

    if (v->vcpu_info->evtchn_pending_sel)
        set_bit(0, &v->vcpu_info->evtchn_upcall_pending);
}

/*
 * The only place vmx_io_assist is called from is mmio/legacy_io emulation.
 * Since I/O emulation is synchronous, it shouldn't be called from
 * other places. This is not like x86, since IA-64 implements a
 * per-vp stack without continuation.
 */
void vmx_io_assist(struct vcpu *v)
{
    vcpu_iodata_t *vio;
    ioreq_t *p;

    /*
     * This shared page contains the I/O request passed between the
     * emulation code and the device model.
     */
    vio = get_vio(v->domain, v->vcpu_id);
    if (!vio)
        panic_domain(vcpu_regs(v), "Corruption: bad shared page: %lx\n", (unsigned long)vio);

    p = &vio->vp_ioreq;
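
    /* If this vcpu is waiting for I/O, only finish the request once the
     * device model has marked it STATE_IORESP_READY; otherwise return and
     * let vmx_wait_io block again until the response arrives. */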
    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) {
        if (p->state != STATE_IORESP_READY) {
            /* Can't block here, for the same reason as the other callers
             * of vmx_wait_io. Simply returning is safe, since vmx_wait_io
             * will try to block again.
             */
            return;
        } else
            p->state = STATE_INVALID;
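
        /* The response has been consumed: clear the wait flag so the
         * blocked emulation path in vmx_wait_io can stop waiting. */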
        clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags);
    }
}

/*
 * A VMX domainN has two types of interrupt source: the lsapic model within
 * the HV, and the device model within domain 0 (the service OS). There is
 * another pending array in the shared page, manipulated directly by the
 * device model. To conform to the VT-i spec, we have to sync the pending
 * bits in the shared page into the VPD. This has to be done before checking
 * for pending interrupts when resuming to the guest. For domain 0, all the
 * interrupt sources come from the HV, so it does not require this assist.
 */
void vmx_intr_assist(struct vcpu *v)
{
    vcpu_iodata_t *vio;
    struct domain *d = v->domain;
    extern void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu,
                                              unsigned long *pend_irr);
    int port = iopacket_port(v);
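
    /* If an I/O completion notification is pending on our event channel,
     * or this vcpu is still flagged as waiting for I/O, finish that
     * emulation before resuming the guest. */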
    if (test_bit(port, &d->shared_info->evtchn_pending[0]) ||
        test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
        vmx_wait_io();

    /* I/O emulation is atomic, so execution cannot leave vmx_wait_io
     * while the guest is still waiting for a response.
     */
    if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags))
        panic_domain(vcpu_regs(v), "!!!Bad resume to guest before I/O emulation is done.\n");

    /* Even without a pending event, we still need to sync the pending bits
     * between the DM and the vlsapic. The reason is that interrupt delivery
     * shares the same event channel as I/O emulation, so the corresponding
     * indicator may have been cleared by vmx_wait_io().
     */
    vio = get_vio(v->domain, v->vcpu_id);
    if (!vio)
        panic_domain(vcpu_regs(v), "Corruption: bad shared page: %lx\n", (unsigned long)vio);

#ifdef V_IOSAPIC_READY
    /* Confirm virtual interrupt line signals, and set the pending bits in the VPD. */
    if (v->vcpu_id == 0)
        vmx_virq_line_assist(v);
#endif
    return;
}