debuggers.hg

view xen/arch/x86/hvm/intercept.c @ 16381:c0bdfda5183d

hvm: Clean up buf_ioreq handling.
Also, disable stdvga caching on hvm save/restore, as the shadow vga
state is not preserved.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Thu Nov 08 14:50:01 2007 +0000 (2007-11-08)
parents adefbadab27c
children 381781af1d5a
/*
 * intercept.c: Handle performance critical I/O packets in hypervisor space
 *
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <asm/regs.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/domain.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <io_ports.h>
#include <xen/event.h>
#include <asm/iommu.h>

extern struct hvm_mmio_handler hpet_mmio_handler;
extern struct hvm_mmio_handler vlapic_mmio_handler;
extern struct hvm_mmio_handler vioapic_mmio_handler;

#define HVM_MMIO_HANDLER_NR 3

static struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
{
    &hpet_mmio_handler,
    &vlapic_mmio_handler,
    &vioapic_mmio_handler
};
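
/*
 * Dispatch a decoded MMIO ioreq to the matched handler's read/write
 * callbacks. Simple reads/writes are forwarded directly; string (rep)
 * accesses with data_is_ptr copy each element to/from guest physical
 * memory, and the read-modify-write types (AND/ADD/OR/XOR/XCHG/SUB)
 * are emulated here, returning the old value in p->data.
 */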
static inline void hvm_mmio_access(struct vcpu *v,
                                   ioreq_t *p,
                                   hvm_mmio_read_t read_handler,
                                   hvm_mmio_write_t write_handler)
{
    unsigned int tmp1, tmp2;
    unsigned long data;

    switch ( p->type ) {
    case IOREQ_TYPE_COPY:
    {
        if ( !p->data_is_ptr ) {
            if ( p->dir == IOREQ_READ )
                p->data = read_handler(v, p->addr, p->size);
            else /* p->dir == IOREQ_WRITE */
                write_handler(v, p->addr, p->size, p->data);
        } else { /* p->data_is_ptr */
            int i, sign = (p->df) ? -1 : 1;

            if ( p->dir == IOREQ_READ ) {
                for ( i = 0; i < p->count; i++ ) {
                    data = read_handler(v,
                                        p->addr + (sign * i * p->size),
                                        p->size);
                    (void)hvm_copy_to_guest_phys(
                        p->data + (sign * i * p->size),
                        &data,
                        p->size);
                }
            } else { /* p->dir == IOREQ_WRITE */
                for ( i = 0; i < p->count; i++ ) {
                    (void)hvm_copy_from_guest_phys(
                        &data,
                        p->data + (sign * i * p->size),
                        p->size);
                    write_handler(v,
                                  p->addr + (sign * i * p->size),
                                  p->size, data);
                }
            }
        }
        break;
    }

    case IOREQ_TYPE_AND:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 & (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_ADD:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 + (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_OR:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 | (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_XOR:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 ^ (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_XCHG:
        /*
         * Note that we don't need to be atomic here since the vCPU is
         * accessing its own local APIC.
         */
        tmp1 = read_handler(v, p->addr, p->size);
        write_handler(v, p->addr, p->size, (unsigned long) p->data);
        p->data = tmp1;
        break;

    case IOREQ_TYPE_SUB:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 - (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    default:
        printk("hvm_mmio_access: unexpected ioreq type %x\n", p->type);
        domain_crash_synchronous();
        break;
    }
}
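
/*
 * Queue a small ioreq into the domain's buffered_iopage ring so the device
 * model can consume it asynchronously, avoiding a synchronous round trip per
 * access. Returns 1 if the request was buffered, or 0 if it must instead be
 * sent through the normal synchronous ioreq path.
 */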
int hvm_buffered_io_send(ioreq_t *p)
{
    struct vcpu *v = current;
    struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
    buffered_iopage_t *pg = iorp->va;
    buf_ioreq_t bp;
    /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
    int qw = 0;

    /* Ensure buffered_iopage fits in a page */
    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);

    /*
     * Return 0 for the cases we can't deal with:
     *  - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
     *  - we cannot buffer accesses to guest memory buffers, as the guest
     *    may expect the memory buffer to be synchronously accessed
     *  - the count field is usually used with data_is_ptr, and since we
     *    don't support data_is_ptr we do not waste space on a count field
     */
    if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) )
        return 0;
    bp.type = p->type;
    bp.dir = p->dir;
    bp.df = p->df;
    switch ( p->size )
    {
    case 1:
        bp.size = 0;
        break;
    case 2:
        bp.size = 1;
        break;
    case 4:
        bp.size = 2;
        break;
    case 8:
        bp.size = 3;
        qw = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "unexpected ioreq size:%"PRId64"\n", p->size);
        return 0;
    }

    bp.data = p->data;
    bp.addr = p->addr;

    spin_lock(&iorp->lock);

    if ( (pg->write_pointer - pg->read_pointer) >=
         (IOREQ_BUFFER_SLOT_NUM - qw) )
    {
        /* The queue is full: send the iopacket through the normal path. */
        spin_unlock(&iorp->lock);
        return 0;
    }

    memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
           &bp, sizeof(bp));

    if ( qw )
    {
        bp.data = p->data >> 32;
        memcpy(&pg->buf_ioreq[(pg->write_pointer+1) % IOREQ_BUFFER_SLOT_NUM],
               &bp, sizeof(bp));
    }

    /* Make the ioreq_t visible /before/ write_pointer. */
    wmb();
    pg->write_pointer += qw ? 2 : 1;

    spin_unlock(&iorp->lock);

    return 1;
}
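
/*
 * Scan the fixed table of in-hypervisor MMIO handlers (HPET, vLAPIC,
 * vIOAPIC) and, if one claims the address, emulate the access locally.
 * Returns 1 if handled here, 0 if the request must go to the device model.
 */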
int hvm_mmio_intercept(ioreq_t *p)
{
    struct vcpu *v = current;
    int i;

    for ( i = 0; i < HVM_MMIO_HANDLER_NR; i++ )
    {
        if ( hvm_mmio_handlers[i]->check_handler(v, p->addr) )
        {
            hvm_mmio_access(v, p,
                            hvm_mmio_handlers[i]->read_handler,
                            hvm_mmio_handlers[i]->write_handler);
            return 1;
        }
    }

    return 0;
}

/*
 * Check whether the request is handled inside Xen.
 * Return value: 1 if handled, 0 if not.
 */
int hvm_io_intercept(ioreq_t *p, int type)
{
    struct vcpu *v = current;
    struct hvm_io_handler *handler =
        &(v->domain->arch.hvm_domain.io_handler);
    int i;
    unsigned long addr, size;

    if ( (type == HVM_PORTIO) && (dpci_ioport_intercept(p)) )
        return 1;

    for ( i = 0; i < handler->num_slot; i++ ) {
        if ( type != handler->hdl_list[i].type )
            continue;
        addr = handler->hdl_list[i].addr;
        size = handler->hdl_list[i].size;
        if ( (p->addr >= addr) &&
             ((p->addr + p->size) <= (addr + size)) )
            return handler->hdl_list[i].action(p);
    }
    return 0;
}
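
/*
 * Append a new intercept entry (address range, action callback, and type)
 * to the domain's handler table; the BUG_ON guards against overflowing the
 * fixed MAX_IO_HANDLER-sized array.
 */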
int register_io_handler(
    struct domain *d, unsigned long addr, unsigned long size,
    intercept_action_t action, int type)
{
    struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
    int num = handler->num_slot;

    BUG_ON(num >= MAX_IO_HANDLER);

    handler->hdl_list[num].addr = addr;
    handler->hdl_list[num].size = size;
    handler->hdl_list[num].action = action;
    handler->hdl_list[num].type = type;
    handler->num_slot++;

    return 1;
}
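
/*
 * Usage sketch (illustrative only, not part of this file): an in-hypervisor
 * device model registers a port range with an action callback matching the
 * intercept_action_t signature used above. The callback name and port range
 * below are hypothetical; registration would look roughly like:
 *
 *     static int my_portio_action(ioreq_t *p)   // hypothetical callback
 *     {
 *         // emulate the access described by p
 *         return 1;                             // handled inside Xen
 *     }
 *
 *     // e.g. claim 8 ports at 0x1f0 for domain d during setup
 *     register_io_handler(d, 0x1f0, 8, my_portio_action, HVM_PORTIO);
 */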

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */