debuggers.hg

view xen/arch/ia64/vmx/mmio.c @ 16381:c0bdfda5183d

hvm: Clean up buf_ioreq handling.
Also, disable stdvga caching on hvm save/restore, as the shadow vga
state is not preserved.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Thu Nov 08 14:50:01 2007 +0000 (2007-11-08)
parents cbf8224779c6
children ee935d2b8a63
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 * Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 */

#include <linux/sched.h>
#include <xen/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/bundle.h>
#include <asm/types.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx.h>
#include <public/event_channel.h>
#include <public/xen.h>
#include <linux/event.h>
#include <xen/domain.h>
#include <asm/viosapic.h>
#include <asm/vlsapic.h>
#include <asm/hvm/vacpi.h>

#define HVM_BUFFERED_IO_RANGE_NR 1

struct hvm_buffered_io_range {
    unsigned long start_addr;
    unsigned long length;
};

static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
static struct hvm_buffered_io_range
*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
{
    &buffered_stdvga_range
};
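
/*
 * Try to queue a guest write into the shared buffered-I/O page instead of
 * issuing a full synchronous ioreq to the device model.  Only single-count,
 * non-pointer writes that fall inside one of the registered buffered ranges
 * (currently just the stdvga frame buffer at 0xA0000-0xBFFFF) are eligible.
 * Returns 1 if the request was buffered, 0 if it must take the normal path.
 */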
static int hvm_buffered_io_intercept(ioreq_t *p)
{
    struct vcpu *v = current;
    buffered_iopage_t *pg =
        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
    buf_ioreq_t bp;
    int i, qw = 0;

    /* Ensure buffered_iopage fits in a page */
    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);

    /* Ignore READ ioreq_t and anything buffered I/O can't deal with */
    if (p->dir == IOREQ_READ || p->addr > 0xFFFFFUL ||
        p->data_is_ptr || p->count != 1)
        return 0;

    for (i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++) {
        if (p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
            p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
                                    hvm_buffered_io_ranges[i]->length)
            break;
    }

    if (i == HVM_BUFFERED_IO_RANGE_NR)
        return 0;

    bp.type = p->type;
    bp.dir = p->dir;
    bp.df = p->df;
    switch (p->size) {
    case 1:
        bp.size = 0;
        break;
    case 2:
        bp.size = 1;
        break;
    case 4:
        bp.size = 2;
        break;
    case 8:
        bp.size = 3;
        qw = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "unexpected ioreq size:%"PRId64"\n", p->size);
        return 0;
    }
    bp.data = p->data;
    bp.addr = p->addr;

    spin_lock(&v->domain->arch.hvm_domain.buffered_io_lock);

    if (pg->write_pointer - pg->read_pointer >= IOREQ_BUFFER_SLOT_NUM - qw) {
        /* The queue is full, so send the iopacket through the normal path.
         * NOTE: the arithmetic above handles write_pointer overflow
         * correctly.
         */
        spin_unlock(&v->domain->arch.hvm_domain.buffered_io_lock);
        return 0;
    }

    memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
           &bp, sizeof(bp));

    if (qw) {
        bp.data = p->data >> 32;
        memcpy(&pg->buf_ioreq[(pg->write_pointer + 1) % IOREQ_BUFFER_SLOT_NUM],
               &bp, sizeof(bp));
    }

    /* Make the ioreq_t visible before updating write_pointer */
    wmb();
    pg->write_pointer += qw ? 2 : 1;

    spin_unlock(&v->domain->arch.hvm_domain.buffered_io_lock);

    return 1;
}
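
/*
 * Emulate an access to low MMIO space (including the virtual frame buffer)
 * by building an ioreq and handing it to the device model.  Writes accepted
 * by the buffered-I/O path are completed locally; everything else goes
 * through vmx_send_assist_req(), and reads pick up the returned data.
 */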
static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v->domain, v->vcpu_id);
    if (!vio)
        panic_domain(NULL, "bad shared page");

    p = &vio->vp_ioreq;

    p->addr = pa;
    p->size = s;
    p->count = 1;
    if (dir == IOREQ_WRITE)
        p->data = *val;
    else
        p->data = 0;
    p->data_is_ptr = 0;
    p->dir = dir;
    p->df = 0;
    p->type = 1;

    p->io_count++;

    if (hvm_buffered_io_intercept(p)) {
        p->state = STATE_IORESP_READY;
        vmx_io_assist(v);
        if (dir != IOREQ_READ)
            return;
    }

    vmx_send_assist_req(v);
    if (dir == IOREQ_READ)
        *val = p->data;

    return;
}
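
/*
 * Fast path for IDE PIO data-port transfers (ports 0x1F0 and 0x170): copy
 * the data directly to or from the shared buffered-PIO page instead of
 * trapping to the device model for every 16- or 32-bit access.  Returns 1
 * if the access was satisfied here, 0 to fall back to the normal path.
 */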
static int vmx_ide_pio_intercept(ioreq_t *p, u64 *val)
{
    struct buffered_piopage *pio_page =
        (void *)(current->domain->arch.hvm_domain.buffered_pio_va);
    struct pio_buffer *piobuf;
    uint32_t pointer, page_offset;

    if (p->addr == 0x1F0)
        piobuf = &pio_page->pio[PIO_BUFFER_IDE_PRIMARY];
    else if (p->addr == 0x170)
        piobuf = &pio_page->pio[PIO_BUFFER_IDE_SECONDARY];
    else
        return 0;

    if (p->size != 2 && p->size != 4)
        return 0;

    pointer = piobuf->pointer;
    page_offset = piobuf->page_offset;

    /* sanity check */
    if (page_offset + pointer < offsetof(struct buffered_piopage, buffer))
        return 0;
    if (page_offset + piobuf->data_end > PAGE_SIZE)
        return 0;

    if (pointer + p->size < piobuf->data_end) {
        uint8_t *bufp = (uint8_t *)pio_page + page_offset + pointer;
        if (p->dir == IOREQ_WRITE) {
            if (likely(p->size == 4 && (((long)bufp & 3) == 0)))
                *(uint32_t *)bufp = *val;
            else
                memcpy(bufp, val, p->size);
        } else {
            if (likely(p->size == 4 && (((long)bufp & 3) == 0))) {
                *val = *(uint32_t *)bufp;
            } else {
                *val = 0;
                memcpy(val, bufp, p->size);
            }
        }
        piobuf->pointer += p->size;
        p->state = STATE_IORESP_READY;
        vmx_io_assist(current);
        return 1;
    }
    return 0;
}
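
/*
 * Legacy I/O ports are presented to the guest through an MMIO window in
 * which each 4-byte-aligned group of port addresses occupies its own page;
 * the macro below reconstructs the port number from the page index and the
 * low two bits of the page offset.
 */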
#define TO_LEGACY_IO(pa) (((pa)>>12<<2)|((pa)&0x3))

static const char * const guest_os_name[] = {
    "Unknown",
    "Windows 2003 server",
    "Linux",
};
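
/*
 * Record the guest OS type written to OS_TYPE_PORT.  For Windows guests,
 * additionally request identity mapping of virtual regions 4 (WB) and 5
 * (UC), which Windows assumes are identity mapped.
 */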
static inline void set_os_type(VCPU *v, u64 type)
{
    if (type > OS_BASE && type < OS_END) {
        v->domain->arch.vmx_platform.gos_type = type;
        gdprintk(XENLOG_INFO, "Guest OS : %s\n", guest_os_name[type - OS_BASE]);

        if (GOS_WINDOWS(v)) {
            struct xen_ia64_opt_feature optf;

            /* Windows identity maps regions 4 & 5 */
            optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG4;
            optf.on = XEN_IA64_OPTF_ON;
            optf.pgprot = (_PAGE_P|_PAGE_A|_PAGE_D|_PAGE_MA_WB|_PAGE_AR_RW);
            optf.key = 0;
            domain_opt_feature(&optf);

            optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG5;
            optf.pgprot = (_PAGE_P|_PAGE_A|_PAGE_D|_PAGE_MA_UC|_PAGE_AR_RW);
            domain_opt_feature(&optf);
        }
    }
}
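
/*
 * Emulate an access to the legacy I/O window.  The port number is recovered
 * with TO_LEGACY_IO, then the request is handled by one of the local
 * intercepts (OS-type port, IDE PIO buffer, virtual ACPI) where possible;
 * otherwise it is forwarded to the device model.
 */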
static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v->domain, v->vcpu_id);
    if (!vio)
        panic_domain(NULL, "bad shared page\n");

    p = &vio->vp_ioreq;
    p->addr = TO_LEGACY_IO(pa & 0x3ffffffUL);
    p->size = s;
    p->count = 1;
    p->dir = dir;
    if (dir == IOREQ_WRITE)
        p->data = *val;
    else
        p->data = 0;
    p->data_is_ptr = 0;
    p->type = 0;
    p->df = 0;

    p->io_count++;

    if (dir == IOREQ_WRITE && p->addr == OS_TYPE_PORT) {
        set_os_type(v, *val);
        return;
    }

    if (vmx_ide_pio_intercept(p, val))
        return;

    if (IS_ACPI_ADDR(p->addr) && vacpi_intercept(p, val))
        return;

    vmx_send_assist_req(v);
    if (dir == IOREQ_READ)
        *val = p->data;

#ifdef DEBUG_PCI
    if (dir == IOREQ_WRITE) {
        if (p->addr == 0xcf8UL)
            printk("Write 0xcf8, with val [0x%lx]\n", p->data);
    } else {
        if (p->addr == 0xcfcUL)
            printk("Read 0xcfc, with val [0x%lx]\n", p->data);
    }
#endif //DEBUG_PCI
    return;
}
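
/*
 * Dispatch a guest physical access that faulted as I/O: classify the page
 * with __gpfn_is_io() and route the access to the virtual local SAPIC (PIB),
 * the virtual IOSAPIC, the frame buffer / low MMIO handler, or the legacy
 * I/O window.  Accesses to guest firmware space (GPFN_GFW) are ignored.
 */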
static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
{
    unsigned long iot;
    iot = __gpfn_is_io(vcpu->domain, src_pa >> PAGE_SHIFT);

    perfc_incra(vmx_mmio_access, iot >> 56);
    switch (iot) {
    case GPFN_PIB:
        if (ma != 4)
            panic_domain(NULL, "Access PIB not with UC attribute\n");

        if (!dir)
            vlsapic_write(vcpu, src_pa, s, *dest);
        else
            *dest = vlsapic_read(vcpu, src_pa, s);
        break;
    case GPFN_GFW:
        break;
    case GPFN_IOSAPIC:
        if (!dir)
            viosapic_write(vcpu, src_pa, s, *dest);
        else
            *dest = viosapic_read(vcpu, src_pa, s);
        break;
    case GPFN_FRAME_BUFFER:
    case GPFN_LOW_MMIO:
        low_mmio_access(vcpu, src_pa, dest, s, dir);
        break;
    case GPFN_LEGACY_IO:
        legacy_io_access(vcpu, src_pa, dest, s, dir);
        break;
    default:
        panic_domain(NULL, "Bad I/O access\n");
        break;
    }
    return;
}
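
/*
 * Emulate the memory-access instruction that faulted at cr_iip: fetch the
 * bundle, decode the load/store format to find the direction, access size
 * and registers involved, perform the access through mmio_access(), write
 * back the result for loads, and finally advance the guest IP.
 */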
/* dir: 1 = read, 0 = write */
void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
{
    REGS *regs;
    IA64_BUNDLE bundle;
    int slot, dir = 0;
    enum { SL_INTEGER, SL_FLOATING, SL_FLOATING_FP8 } inst_type;
    size_t size;
    u64 data, data1, temp, update_reg;
    s32 imm;
    INST64 inst;

    regs = vcpu_regs(vcpu);
    if (IA64_RETRY == __vmx_get_domain_bundle(regs->cr_iip, &bundle)) {
        /* If the code fetch fails, return and retry */
        return;
    }
    slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
    if (!slot)
        inst.inst = bundle.slot0;
    else if (slot == 1) {
        u64 slot1b = bundle.slot1b;
        inst.inst = bundle.slot1a + (slot1b << 18);
    }
    else if (slot == 2)
        inst.inst = bundle.slot2;

    // Integer Load/Store
    if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
        inst_type = SL_INTEGER;
        size = (inst.M1.x6 & 0x3);
        if ((inst.M1.x6 >> 2) > 0xb) {
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M4.r2, &data);
        } else if ((inst.M1.x6 >> 2) < 0xb) {
            dir = IOREQ_READ;
        }
    }
    // Integer Load + Reg update
    else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
        inst_type = SL_INTEGER;
        dir = IOREQ_READ;
        size = (inst.M2.x6 & 0x3);
        vcpu_get_gr_nat(vcpu, inst.M2.r3, &temp);
        vcpu_get_gr_nat(vcpu, inst.M2.r2, &update_reg);
        temp += update_reg;
        vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
    }
    // Integer Load/Store + Imm update
    else if (inst.M3.major == 5) {
        inst_type = SL_INTEGER;
        size = (inst.M3.x6 & 0x3);
        if ((inst.M5.x6 >> 2) > 0xb) {
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M5.r2, &data);
            vcpu_get_gr_nat(vcpu, inst.M5.r3, &temp);
            imm = (inst.M5.s << 31) | (inst.M5.i << 30) | (inst.M5.imm7 << 23);
            temp += imm >> 23;
            vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
        } else if ((inst.M3.x6 >> 2) < 0xb) {
            dir = IOREQ_READ;
            vcpu_get_gr_nat(vcpu, inst.M3.r3, &temp);
            imm = (inst.M3.s << 31) | (inst.M3.i << 30) | (inst.M3.imm7 << 23);
            temp += imm >> 23;
            vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
        }
    }
    // Floating-point spill
    else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B &&
             inst.M9.m == 0 && inst.M9.x == 0) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
        data1 = v.u.bits[1] & 0x3ffff;
        data = v.u.bits[0];
        size = 4;
    }
    // Floating-point spill + Imm update
    else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
        vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
        imm = (inst.M10.s << 31) | (inst.M10.i << 30) | (inst.M10.imm7 << 23);
        temp += imm >> 23;
        vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
        data1 = v.u.bits[1] & 0x3ffff;
        data = v.u.bits[0];
        size = 4;
    }
    // Floating-point stf8 + Imm update
    else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        size = 3;
        vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
        data = v.u.bits[0]; /* Significand. */
        vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
        imm = (inst.M10.s << 31) | (inst.M10.i << 30) | (inst.M10.imm7 << 23);
        temp += imm >> 23;
        vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
    }
    // lfetch - do not perform accesses.
    else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c && inst.M15.x6 <= 0x2f) {
        vcpu_get_gr_nat(vcpu, inst.M15.r3, &temp);
        imm = (inst.M15.s << 31) | (inst.M15.i << 30) | (inst.M15.imm7 << 23);
        temp += imm >> 23;
        vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);

        vcpu_increment_iip(vcpu);
        return;
    }
    // Floating-point Load Pair + Imm ldfp8 M12
    else if (inst.M12.major == 6 && inst.M12.m == 1
             && inst.M12.x == 1 && inst.M12.x6 == 1) {
        inst_type = SL_FLOATING_FP8;
        dir = IOREQ_READ;
        size = 4;   // ldfd
        vcpu_set_gr(vcpu, inst.M12.r3, padr + 16, 0);
    }
    else {
        panic_domain
            (NULL, "This memory access instr can't be emulated: %lx pc=%lx\n",
             inst.inst, regs->cr_iip);
    }
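
    /*
     * size == 4 marks a 16-byte access (FP spill or ldfp8): emulate it as
     * two 8-byte MMIO accesses, the high half at padr + 8 first, then the
     * low half at padr.
     */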
    if (size == 4) {
        mmio_access(vcpu, padr + 8, &data1, 1 << 3, ma, dir);
        size = 3;
    }
    mmio_access(vcpu, padr, &data, 1 << size, ma, dir);

    if (dir == IOREQ_READ) {
        if (inst_type == SL_INTEGER) {
            vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
        } else if (inst_type == SL_FLOATING_FP8) {
            struct ia64_fpreg v;

            v.u.bits[0] = data;
            v.u.bits[1] = 0x1003E;
            vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
            v.u.bits[0] = data1;
            v.u.bits[1] = 0x1003E;
            vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
        } else {
            panic_domain(NULL, "Don't support ldfd now!");
        }
    }
    vcpu_increment_iip(vcpu);
}