debuggers.hg

view xen/arch/ia64/vmx/mmio.c @ 16400:ee935d2b8a63

merge with xen-unstable.hg (staging)
author Alex Williamson <alex.williamson@hp.com>
date Thu Nov 08 09:37:06 2007 -0700 (2007-11-08)
parents 91575bb23d07 c0bdfda5183d
children 6fc79cb7934d

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 */

#include <linux/sched.h>
#include <xen/mm.h>
#include <asm/vmx_mm_def.h>
#include <asm/gcc_intrin.h>
#include <linux/interrupt.h>
#include <asm/vmx_vcpu.h>
#include <asm/bundle.h>
#include <asm/types.h>
#include <public/hvm/ioreq.h>
#include <asm/vmx.h>
#include <public/event_channel.h>
#include <public/xen.h>
#include <linux/event.h>
#include <xen/domain.h>
#include <asm/viosapic.h>
#include <asm/vlsapic.h>
#include <asm/hvm/vacpi.h>
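
/*
 * Buffered I/O: only writes that fall inside the single registered range
 * below (the legacy VGA window 0xA0000-0xBFFFF) are batched into the
 * buffered ioreq page instead of being sent synchronously to the device
 * model.
 */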
#define HVM_BUFFERED_IO_RANGE_NR 1

struct hvm_buffered_io_range {
    unsigned long start_addr;
    unsigned long length;
};

static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
static struct hvm_buffered_io_range
*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
{
    &buffered_stdvga_range
};
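
/*
 * Try to queue an I/O request into the per-domain buffered ioreq ring.
 * Returns 1 if the request was buffered (no response from the device model
 * is needed), 0 if it must go through the normal synchronous path.  Reads,
 * data_is_ptr requests, multi-count requests and addresses outside the
 * registered ranges are never buffered.
 */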
static int hvm_buffered_io_intercept(ioreq_t *p)
{
    struct vcpu *v = current;
    buffered_iopage_t *pg =
        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buf_ioreq.va);
    buf_ioreq_t bp;
    int i, qw = 0;

    /* Ensure buffered_iopage fits in a page */
    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);

    /* ignore READ ioreq_t and anything buffered io can't deal with */
    if (p->dir == IOREQ_READ || p->addr > 0xFFFFFUL ||
        p->data_is_ptr || p->count != 1)
        return 0;

    for (i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++) {
        if (p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
            p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
                                    hvm_buffered_io_ranges[i]->length)
            break;
    }

    if (i == HVM_BUFFERED_IO_RANGE_NR)
        return 0;

    bp.type = p->type;
    bp.dir = p->dir;
    bp.df = p->df;
    switch (p->size) {
    case 1:
        bp.size = 0;
        break;
    case 2:
        bp.size = 1;
        break;
    case 4:
        bp.size = 2;
        break;
    case 8:
        bp.size = 3;
        qw = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "unexpected ioreq size:%"PRId64"\n", p->size);
        return 0;
    }
    bp.data = p->data;
    bp.addr = p->addr;

    spin_lock(&v->domain->arch.hvm_domain.buf_ioreq.lock);

    if (pg->write_pointer - pg->read_pointer >= IOREQ_BUFFER_SLOT_NUM - qw) {
        /* The queue is full: send the iopacket through the normal path.
         * NOTE: the pointer arithmetic above handles write_pointer
         * overflow correctly.
         */
        spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
        return 0;
    }

    memcpy(&pg->buf_ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
           &bp, sizeof(bp));

    if (qw) {
        bp.data = p->data >> 32;
        memcpy(&pg->buf_ioreq[(pg->write_pointer + 1) % IOREQ_BUFFER_SLOT_NUM],
               &bp, sizeof(bp));
    }

    /* Make the ioreq_t visible before write_pointer */
    wmb();
    pg->write_pointer += qw ? 2 : 1;

    spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);

    return 1;
}
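
/*
 * Forward a low-MMIO or frame-buffer access to the device model through the
 * shared ioreq page.  Writes that hit a buffered range are completed
 * locally; everything else is handed to the device model synchronously via
 * vmx_send_assist_req().
 */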
static void low_mmio_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v);
    if (!vio)
        panic_domain(NULL, "bad shared page");

    p = &vio->vp_ioreq;

    p->addr = pa;
    p->size = s;
    p->count = 1;
    if (dir == IOREQ_WRITE)
        p->data = *val;
    else
        p->data = 0;
    p->data_is_ptr = 0;
    p->dir = dir;
    p->df = 0;
    p->type = 1;

    p->io_count++;

    if (hvm_buffered_io_intercept(p)) {
        p->state = STATE_IORESP_READY;
        vmx_io_assist(v);
        if (dir != IOREQ_READ)
            return;
    }

    vmx_send_assist_req(v);
    if (dir == IOREQ_READ)
        *val = p->data;

    return;
}
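
/*
 * Fast path for the IDE PIO data ports (0x1F0 primary, 0x170 secondary):
 * copy the data directly to/from the buffered PIO page shared with the
 * device model instead of raising a full ioreq.  Returns 1 when the access
 * was satisfied locally, 0 to fall back to the normal path.
 */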
static int vmx_ide_pio_intercept(ioreq_t *p, u64 *val)
{
    struct buffered_piopage *pio_page =
        (void *)(current->domain->arch.hvm_domain.buf_pioreq.va);
    spinlock_t *pio_lock;
    struct pio_buffer *piobuf;
    uint32_t pointer, page_offset;

    if (p->addr == 0x1F0)
        piobuf = &pio_page->pio[PIO_BUFFER_IDE_PRIMARY];
    else if (p->addr == 0x170)
        piobuf = &pio_page->pio[PIO_BUFFER_IDE_SECONDARY];
    else
        return 0;

    if (p->size != 2 && p->size != 4)
        return 0;

    pio_lock = &current->domain->arch.hvm_domain.buf_pioreq.lock;
    spin_lock(pio_lock);

    pointer = piobuf->pointer;
    page_offset = piobuf->page_offset;

    /* sanity check */
    if (page_offset + pointer < offsetof(struct buffered_piopage, buffer))
        goto unlock_out;
    if (page_offset + piobuf->data_end > PAGE_SIZE)
        goto unlock_out;

    if (pointer + p->size < piobuf->data_end) {
        uint8_t *bufp = (uint8_t *)pio_page + page_offset + pointer;
        if (p->dir == IOREQ_WRITE) {
            if (likely(p->size == 4 && (((long)bufp & 3) == 0)))
                *(uint32_t *)bufp = *val;
            else
                memcpy(bufp, val, p->size);
        } else {
            if (likely(p->size == 4 && (((long)bufp & 3) == 0))) {
                *val = *(uint32_t *)bufp;
            } else {
                *val = 0;
                memcpy(val, bufp, p->size);
            }
        }
        piobuf->pointer += p->size;
        spin_unlock(pio_lock);

        p->state = STATE_IORESP_READY;
        vmx_io_assist(current);
        return 1;
    }

 unlock_out:
    spin_unlock(pio_lock);
    return 0;
}
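
/*
 * The legacy I/O port space is exposed to the guest as a sparse MMIO
 * window: each group of four consecutive ports sits on its own 4K page,
 * with the low two address bits selecting the port within the group (the
 * standard ia64 sparse I/O port encoding).  TO_LEGACY_IO converts an offset
 * into that window back into a port number.
 */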
#define TO_LEGACY_IO(pa) (((pa)>>12<<2)|((pa)&0x3))

static const char * const guest_os_name[] = {
    "Unknown",
    "Windows 2003 server",
    "Linux",
};
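
/*
 * Record the guest OS type written to OS_TYPE_PORT; Windows guests
 * additionally get identity mappings for virtual regions 4 and 5.
 */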
static inline void set_os_type(VCPU *v, u64 type)
{
    if (type > OS_BASE && type < OS_END) {
        v->domain->arch.vmx_platform.gos_type = type;
        gdprintk(XENLOG_INFO, "Guest OS : %s\n", guest_os_name[type - OS_BASE]);

        if (GOS_WINDOWS(v)) {
            struct xen_ia64_opt_feature optf;

            /* Windows identity maps regions 4 & 5 */
            optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG4;
            optf.on = XEN_IA64_OPTF_ON;
            optf.pgprot = (_PAGE_P|_PAGE_A|_PAGE_D|_PAGE_MA_WB|_PAGE_AR_RW);
            optf.key = 0;
            domain_opt_feature(&optf);

            optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG5;
            optf.pgprot = (_PAGE_P|_PAGE_A|_PAGE_D|_PAGE_MA_UC|_PAGE_AR_RW);
            domain_opt_feature(&optf);
        }
    }
}
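
/*
 * Handle an access to the legacy I/O window: translate it to a port ioreq,
 * give the local intercepts (OS type port, IDE PIO buffer, virtual ACPI) a
 * chance to handle it, and otherwise forward it to the device model.
 */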
static void legacy_io_access(VCPU *vcpu, u64 pa, u64 *val, size_t s, int dir)
{
    struct vcpu *v = current;
    vcpu_iodata_t *vio;
    ioreq_t *p;

    vio = get_vio(v);
    if (!vio)
        panic_domain(NULL, "bad shared page\n");

    p = &vio->vp_ioreq;
    p->addr = TO_LEGACY_IO(pa & 0x3ffffffUL);
    p->size = s;
    p->count = 1;
    p->dir = dir;
    if (dir == IOREQ_WRITE)
        p->data = *val;
    else
        p->data = 0;
    p->data_is_ptr = 0;
    p->type = 0;
    p->df = 0;

    p->io_count++;

    if (dir == IOREQ_WRITE && p->addr == OS_TYPE_PORT) {
        set_os_type(v, *val);
        return;
    }

    if (vmx_ide_pio_intercept(p, val))
        return;

    if (IS_ACPI_ADDR(p->addr) && vacpi_intercept(p, val))
        return;

    vmx_send_assist_req(v);
    if (dir == IOREQ_READ) { // read
        *val = p->data;
    }
#ifdef DEBUG_PCI
    if (dir == IOREQ_WRITE) {
        if (p->addr == 0xcf8UL)
            printk("Write 0xcf8, with val [0x%lx]\n", p->data);
    } else {
        if (p->addr == 0xcfcUL)
            printk("Read 0xcfc, with val [0x%lx]\n", p->data);
    }
#endif //DEBUG_PCI
    return;
}
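
/*
 * Dispatch an emulated access according to the kind of guest physical page
 * it hits: virtual PIB (local SAPIC), IOSAPIC, guest firmware, low MMIO /
 * frame buffer, or the legacy I/O window.
 */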
static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
{
    unsigned long iot;
    iot = __gpfn_is_io(vcpu->domain, src_pa >> PAGE_SHIFT);

    perfc_incra(vmx_mmio_access, iot >> 56);
    switch (iot) {
    case GPFN_PIB:
        if (ma != 4)
            panic_domain(NULL, "Access PIB not with UC attribute\n");

        if (!dir)
            vlsapic_write(vcpu, src_pa, s, *dest);
        else
            *dest = vlsapic_read(vcpu, src_pa, s);
        break;
    case GPFN_GFW:
        break;
    case GPFN_IOSAPIC:
        if (!dir)
            viosapic_write(vcpu, src_pa, s, *dest);
        else
            *dest = viosapic_read(vcpu, src_pa, s);
        break;
    case GPFN_FRAME_BUFFER:
    case GPFN_LOW_MMIO:
        low_mmio_access(vcpu, src_pa, dest, s, dir);
        break;
    case GPFN_LEGACY_IO:
        legacy_io_access(vcpu, src_pa, dest, s, dir);
        break;
    default:
        panic_domain(NULL, "Bad I/O access\n");
        break;
    }
    return;
}

/*
 * Emulate the memory access instruction that faulted on an emulated I/O
 * page.  padr is the guest physical address being accessed and ma its
 * memory attribute; the decoded direction (dir) is 1 for a read
 * (IOREQ_READ) and 0 for a write (IOREQ_WRITE).
 */
void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
{
    REGS *regs;
    IA64_BUNDLE bundle;
    int slot, dir = 0;
    enum { SL_INTEGER, SL_FLOATING, SL_FLOATING_FP8 } inst_type;
    size_t size;
    u64 data, data1, temp, update_reg;
    s32 imm;
    INST64 inst;

    regs = vcpu_regs(vcpu);
    if (IA64_RETRY == __vmx_get_domain_bundle(regs->cr_iip, &bundle)) {
        /* if the instruction fetch fails, return and try again */
        return;
    }
    slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
    if (!slot)
        inst.inst = bundle.slot0;
    else if (slot == 1) {
        u64 slot1b = bundle.slot1b;
        inst.inst = bundle.slot1a + (slot1b << 18);
    }
    else if (slot == 2)
        inst.inst = bundle.slot2;

    // Integer Load/Store
    if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
        inst_type = SL_INTEGER;
        size = (inst.M1.x6 & 0x3);
        if ((inst.M1.x6 >> 2) > 0xb) {
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M4.r2, &data);
        } else if ((inst.M1.x6 >> 2) < 0xb) {
            dir = IOREQ_READ;
        }
    }
    // Integer Load + Reg update
    else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
        inst_type = SL_INTEGER;
        dir = IOREQ_READ;
        size = (inst.M2.x6 & 0x3);
        vcpu_get_gr_nat(vcpu, inst.M2.r3, &temp);
        vcpu_get_gr_nat(vcpu, inst.M2.r2, &update_reg);
        temp += update_reg;
        vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
    }
    // Integer Load/Store + Imm update
    else if (inst.M3.major == 5) {
        inst_type = SL_INTEGER;
        size = (inst.M3.x6 & 0x3);
        if ((inst.M5.x6 >> 2) > 0xb) {
            dir = IOREQ_WRITE;
            vcpu_get_gr_nat(vcpu, inst.M5.r2, &data);
            vcpu_get_gr_nat(vcpu, inst.M5.r3, &temp);
            imm = (inst.M5.s << 31) | (inst.M5.i << 30) | (inst.M5.imm7 << 23);
            temp += imm >> 23;
            vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
        } else if ((inst.M3.x6 >> 2) < 0xb) {
            dir = IOREQ_READ;
            vcpu_get_gr_nat(vcpu, inst.M3.r3, &temp);
            imm = (inst.M3.s << 31) | (inst.M3.i << 30) | (inst.M3.imm7 << 23);
            temp += imm >> 23;
            vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
        }
    }
    // Floating-point spill
    else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B &&
             inst.M9.m == 0 && inst.M9.x == 0) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
        data1 = v.u.bits[1] & 0x3ffff;
        data = v.u.bits[0];
        size = 4;
    }
    // Floating-point spill + Imm update
    else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
        vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
        imm = (inst.M10.s << 31) | (inst.M10.i << 30) | (inst.M10.imm7 << 23);
        temp += imm >> 23;
        vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
        data1 = v.u.bits[1] & 0x3ffff;
        data = v.u.bits[0];
        size = 4;
    }
    // Floating-point stf8 + Imm update
    else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
        struct ia64_fpreg v;

        inst_type = SL_FLOATING;
        dir = IOREQ_WRITE;
        size = 3;
        vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
        data = v.u.bits[0]; /* Significand. */
        vcpu_get_gr_nat(vcpu, inst.M10.r3, &temp);
        imm = (inst.M10.s << 31) | (inst.M10.i << 30) | (inst.M10.imm7 << 23);
        temp += imm >> 23;
        vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
    }
    // lfetch - do not perform accesses.
    else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c && inst.M15.x6 <= 0x2f) {
        vcpu_get_gr_nat(vcpu, inst.M15.r3, &temp);
        imm = (inst.M15.s << 31) | (inst.M15.i << 30) | (inst.M15.imm7 << 23);
        temp += imm >> 23;
        vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);

        vcpu_increment_iip(vcpu);
        return;
    }
    // Floating-point Load Pair + Imm ldfp8 M12
    else if (inst.M12.major == 6 && inst.M12.m == 1
             && inst.M12.x == 1 && inst.M12.x6 == 1) {
        inst_type = SL_FLOATING_FP8;
        dir = IOREQ_READ;
        size = 4;   // ldfp8: pair of 8-byte loads
        vcpu_set_gr(vcpu, inst.M12.r3, padr + 16, 0);
    }
    else {
        panic_domain
            (NULL, "This memory access instr can't be emulated: %lx pc=%lx\n",
             inst.inst, regs->cr_iip);
    }

    if (size == 4) {
        mmio_access(vcpu, padr + 8, &data1, 1 << 3, ma, dir);
        size = 3;
    }
    mmio_access(vcpu, padr, &data, 1 << size, ma, dir);

    if (dir == IOREQ_READ) {
        if (inst_type == SL_INTEGER) {
            vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
        } else if (inst_type == SL_FLOATING_FP8) {
            struct ia64_fpreg v;

            v.u.bits[0] = data;
            v.u.bits[1] = 0x1003E;
            vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
            v.u.bits[0] = data1;
            v.u.bits[1] = 0x1003E;
            vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
        } else {
            panic_domain(NULL, "Don't support ldfd now!");
        }
    }
    vcpu_increment_iip(vcpu);
}