debuggers.hg: view of xen/arch/x86/hvm/stdvga.c @ 16381:c0bdfda5183d

hvm: Clean up buf_ioreq handling.

Also, disable stdvga caching on hvm save/restore, as the shadow vga
state is not preserved.

Signed-off-by: Keir Fraser <keir@xensource.com>

author   Keir Fraser <keir@xensource.com>
date     Thu Nov 08 14:50:01 2007 +0000 (2007-11-08)
parents  2a5461071ca4
children d1ac500f77c1
/*
 * Copyright (c) 2003-2007, Virtual Iron Software, Inc.
 *
 * Portions have been modified by Virtual Iron Software, Inc.
 * (c) 2007. This file and the modifications can be redistributed and/or
 * modified under the terms and conditions of the GNU General Public
 * License, version 2.1 and not any later version of the GPL, as published
 * by the Free Software Foundation.
 *
 * This improves the performance of Standard VGA,
 * the mode used during Windows boot and by the Linux
 * splash screen.
 *
 * It does so by buffering all the stdvga programmed output ops
 * and memory mapped ops (both reads and writes) that are sent to QEMU.
 *
 * We maintain locally essential VGA state so we can respond
 * immediately to input and read ops without waiting for
 * QEMU. We snoop output and write ops to keep our state
 * up-to-date.
 *
 * PIO input ops are satisfied from cached state without
 * bothering QEMU.
 *
 * PIO output and mmio ops are passed through to QEMU, including
 * mmio read ops. This is necessary because mmio reads
 * can have side effects.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <asm/hvm/support.h>

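/*
 * The shadow VRAM is an array of 64 pages of 4kB each (256kB in all),
 * enough for a 64kB VGA address range with four bytes, one per plane,
 * behind each address. Each accessor below treats its address argument
 * as an index in units of its own access size: the high bits select the
 * page, the low bits the element within that page.
 */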
#define vram_b(_s, _a) \
    (((uint8_t*) (_s)->vram_ptr[((_a)>>12)&0x3f])[(_a)&0xfff])
#define vram_w(_s, _a) \
    (((uint16_t*)(_s)->vram_ptr[((_a)>>11)&0x3f])[(_a)&0x7ff])
#define vram_l(_s, _a) \
    (((uint32_t*)(_s)->vram_ptr[((_a)>>10)&0x3f])[(_a)&0x3ff])

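/*
 * mask16[m] expands the 4-bit plane mask m into a 32-bit mask holding
 * 0xff for each selected plane and 0x00 for each deselected one,
 * matching the plane-interleaved layout accessed through vram_l().
 */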
#define PAT(x) (x)
static const uint32_t mask16[16] = {
    PAT(0x00000000),
    PAT(0x000000ff),
    PAT(0x0000ff00),
    PAT(0x0000ffff),
    PAT(0x00ff0000),
    PAT(0x00ff00ff),
    PAT(0x00ffff00),
    PAT(0x00ffffff),
    PAT(0xff000000),
    PAT(0xff0000ff),
    PAT(0xff00ff00),
    PAT(0xff00ffff),
    PAT(0xffff0000),
    PAT(0xffff00ff),
    PAT(0xffffff00),
    PAT(0xffffffff),
};

/* force some bits to zero */
const uint8_t sr_mask[8] = {
    (uint8_t)~0xfc,
    (uint8_t)~0xc2,
    (uint8_t)~0xf0,
    (uint8_t)~0xc0,
    (uint8_t)~0xf1,
    (uint8_t)~0xff,
    (uint8_t)~0xff,
    (uint8_t)~0x00,
};

const uint8_t gr_mask[16] = {
    (uint8_t)~0xf0, /* 0x00 */
    (uint8_t)~0xf0, /* 0x01 */
    (uint8_t)~0xf0, /* 0x02 */
    (uint8_t)~0xe0, /* 0x03 */
    (uint8_t)~0xfc, /* 0x04 */
    (uint8_t)~0x84, /* 0x05 */
    (uint8_t)~0xf0, /* 0x06 */
    (uint8_t)~0xf0, /* 0x07 */
    (uint8_t)~0x00, /* 0x08 */
};

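/*
 * The sequencer and graphics controller are each programmed through an
 * index/data port pair: 0x3c4/0x3c5 for the sequencer, 0x3ce/0x3cf for
 * the graphics controller.
 */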
static uint64_t stdvga_inb(uint64_t addr)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    uint8_t val = 0;

    switch ( addr )
    {
    case 0x3c4:                 /* sequencer address register */
        val = s->sr_index;
        break;

    case 0x3c5:                 /* sequencer data register */
        if ( s->sr_index < sizeof(s->sr) )
            val = s->sr[s->sr_index];
        break;

    case 0x3ce:                 /* graphics address register */
        val = s->gr_index;
        break;

    case 0x3cf:                 /* graphics data register */
        val = s->gr[s->gr_index];
        break;

    default:
        gdprintk(XENLOG_WARNING, "unexpected io addr 0x%04x\n", (int)addr);
    }

    return val;
}

static uint64_t stdvga_in(ioreq_t *p)
{
    /* Satisfy reads from sequencer and graphics registers using local values */
    uint64_t data = 0;

    switch ( p->size )
    {
    case 1:
        data = stdvga_inb(p->addr);
        break;

    case 2:
        data = stdvga_inb(p->addr);
        data |= stdvga_inb(p->addr + 1) << 8;
        break;

    case 4:
        data = stdvga_inb(p->addr);
        data |= stdvga_inb(p->addr + 1) << 8;
        data |= stdvga_inb(p->addr + 2) << 16;
        data |= stdvga_inb(p->addr + 3) << 24;
        break;

    case 8:
        data = stdvga_inb(p->addr);
        data |= stdvga_inb(p->addr + 1) << 8;
        data |= stdvga_inb(p->addr + 2) << 16;
        data |= stdvga_inb(p->addr + 3) << 24;
        data |= stdvga_inb(p->addr + 4) << 32;
        data |= stdvga_inb(p->addr + 5) << 40;
        data |= stdvga_inb(p->addr + 6) << 48;
        data |= stdvga_inb(p->addr + 7) << 56;
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", (int)p->size);
    }

    return data;
}

static void stdvga_outb(uint64_t addr, uint8_t val)
{
    /* Bookkeep (via snooping) the sequencer and graphics registers */

    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int prev_stdvga = s->stdvga;

    switch ( addr )
    {
    case 0x3c4:                 /* sequencer address register */
        s->sr_index = val;
        break;

    case 0x3c5:                 /* sequencer data register */
        switch ( s->sr_index )
        {
        case 0x00 ... 0x05:
        case 0x07:
            s->sr[s->sr_index] = val & sr_mask[s->sr_index];
            break;
        case 0x06:
            s->sr[s->sr_index] = ((val & 0x17) == 0x12) ? 0x12 : 0x0f;
            break;
        default:
            if ( s->sr_index < sizeof(s->sr) )
                s->sr[s->sr_index] = val;
            break;
        }
        break;

    case 0x3ce:                 /* graphics address register */
        s->gr_index = val;
        break;

    case 0x3cf:                 /* graphics data register */
        if ( s->gr_index < sizeof(gr_mask) )
        {
            s->gr[s->gr_index] = val & gr_mask[s->gr_index];
        }
        else if ( (s->gr_index == 0xff) && (s->vram_ptr != NULL) )
        {
            uint32_t addr;
            for ( addr = 0xa0000; addr < 0xa4000; addr += 2 )
                vram_w(s, addr) = (val << 8) | s->gr[0xfe];
        }
        else
        {
            s->gr[s->gr_index] = val;
        }
        break;
    }

    /* When in standard vga mode, emulate here all writes to the vram buffer
     * so we can immediately satisfy reads without waiting for qemu. */
    s->stdvga =
        (s->sr[0x07] == 0) &&   /* standard vga mode */
        (s->gr[6] == 0x05);     /* misc graphics register w/ MemoryMapSelect=1
                                 * 0xa0000-0xaffff (64k region), AlphaDis=1 */

    if ( !prev_stdvga && s->stdvga )
    {
        s->cache = 1;           /* (re)start caching video buffer */
        gdprintk(XENLOG_INFO, "entering stdvga and caching modes\n");
    }
    else if ( prev_stdvga && !s->stdvga )
    {
        gdprintk(XENLOG_INFO, "leaving stdvga\n");
    }
}

static void stdvga_outv(uint64_t addr, uint64_t data, uint32_t size)
{
    switch ( size )
    {
    case 1:
        stdvga_outb(addr, data);
        break;

    case 2:
        stdvga_outb(addr+0, data >>  0);
        stdvga_outb(addr+1, data >>  8);
        break;

    case 4:
        stdvga_outb(addr+0, data >>  0);
        stdvga_outb(addr+1, data >>  8);
        stdvga_outb(addr+2, data >> 16);
        stdvga_outb(addr+3, data >> 24);
        break;

    case 8:
        stdvga_outb(addr+0, data >>  0);
        stdvga_outb(addr+1, data >>  8);
        stdvga_outb(addr+2, data >> 16);
        stdvga_outb(addr+3, data >> 24);
        stdvga_outb(addr+4, data >> 32);
        stdvga_outb(addr+5, data >> 40);
        stdvga_outb(addr+6, data >> 48);
        stdvga_outb(addr+7, data >> 56);
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
    }
}

static void stdvga_out(ioreq_t *p)
{
    if ( p->data_is_ptr )
    {
        int i, sign = p->df ? -1 : 1;
        uint64_t addr = p->addr, data = p->data, tmp;
        for ( i = 0; i < p->count; i++ )
        {
            hvm_copy_from_guest_phys(&tmp, data, p->size);
            stdvga_outv(addr, tmp, p->size);
            data += sign * p->size;
            addr += sign * p->size;
        }
    }
    else
    {
        stdvga_outv(p->addr, p->data, p->size);
    }
}

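/*
 * Port I/O intercept. Reads of in-range sequencer/graphics registers are
 * answered from the shadow state; writes are snooped into the shadow and
 * queued on qemu's buffered-ioreq ring. Accesses we decline to handle
 * here (out-of-range register indices, oversized accesses) return 0, so
 * the caller falls back to sending qemu an ordinary, synchronous ioreq.
 */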
int stdvga_intercept_pio(ioreq_t *p)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int buf = 0, rc;

    if ( p->size > 8 )
    {
        gdprintk(XENLOG_WARNING, "stdvga bad access size %d\n", (int)p->size);
        return 0;
    }

    spin_lock(&s->lock);

    if ( p->dir == IOREQ_READ )
    {
        if ( p->size != 1 )
            gdprintk(XENLOG_WARNING, "unexpected io size:%d\n", (int)p->size);
        if ( p->data_is_ptr )
            gdprintk(XENLOG_WARNING, "unexpected data_is_ptr\n");
        if ( !((p->addr == 0x3c5) && (s->sr_index >= sizeof(sr_mask))) &&
             !((p->addr == 0x3cf) && (s->gr_index >= sizeof(gr_mask))) )
        {
            p->data = stdvga_in(p);
            buf = 1;
        }
    }
    else
    {
        stdvga_out(p);
        buf = 1;
    }

    rc = (buf && hvm_buffered_io_send(p));

    spin_unlock(&s->lock);

    return rc;
}

#define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff)

static uint8_t stdvga_mem_readb(uint64_t addr)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int plane;
    uint32_t ret;

    addr &= 0x1ffff;
    if ( addr >= 0x10000 )
        return 0xff;

    if ( s->sr[4] & 0x08 )
    {
        /* chain 4 mode : simplest access */
        ret = vram_b(s, addr);
    }
    else if ( s->gr[5] & 0x10 )
    {
        /* odd/even mode (aka text mode mapping) */
        plane = (s->gr[4] & 2) | (addr & 1);
        ret = vram_b(s, ((addr & ~1) << 1) | plane);
    }
    else
    {
        /* standard VGA latched access */
        s->latch = vram_l(s, addr);

        if ( !(s->gr[5] & 0x08) )
        {
            /* read mode 0 */
            plane = s->gr[4];
            ret = GET_PLANE(s->latch, plane);
        }
        else
        {
            /* read mode 1 */
            ret = (s->latch ^ mask16[s->gr[2]]) & mask16[s->gr[7]];
            ret |= ret >> 16;
            ret |= ret >> 8;
            ret = (~ret) & 0xff;
        }
    }

    return ret;
}

static uint32_t stdvga_mem_read(uint32_t addr, uint32_t size)
{
    uint32_t data = 0;

    switch ( size )
    {
    case 1:
        data = stdvga_mem_readb(addr);
        break;

    case 2:
        data = stdvga_mem_readb(addr);
        data |= stdvga_mem_readb(addr + 1) << 8;
        break;

    case 4:
        data = stdvga_mem_readb(addr);
        data |= stdvga_mem_readb(addr + 1) << 8;
        data |= stdvga_mem_readb(addr + 2) << 16;
        data |= stdvga_mem_readb(addr + 3) << 24;
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
        break;
    }

    return data;
}

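/*
 * Emulate a byte write to planar/chained VGA memory, following the usual
 * VGA write pipeline: rotate plus set/reset substitution (write mode 0),
 * latch copy (mode 1), colour expansion (mode 2), or masked set/reset
 * (mode 3); then the ALU function selected by gr[3] bits 3-4, the bit
 * mask in gr[8] blended against the latches, and finally the per-plane
 * write mask in sr[2].
 */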
static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int plane, write_mode, b, func_select, mask;
    uint32_t write_mask, bit_mask, set_mask;

    addr &= 0x1ffff;
    if ( addr >= 0x10000 )
        return;

    if ( s->sr[4] & 0x08 )
    {
        /* chain 4 mode : simplest access */
        plane = addr & 3;
        mask = (1 << plane);
        if ( s->sr[2] & mask )
            vram_b(s, addr) = val;
    }
    else if ( s->gr[5] & 0x10 )
    {
        /* odd/even mode (aka text mode mapping) */
        plane = (s->gr[4] & 2) | (addr & 1);
        mask = (1 << plane);
        if ( s->sr[2] & mask )
        {
            addr = ((addr & ~1) << 1) | plane;
            vram_b(s, addr) = val;
        }
    }
    else
    {
        write_mode = s->gr[5] & 3;
        switch ( write_mode )
        {
        default:
        case 0:
            /* rotate */
            b = s->gr[3] & 7;
            val = ((val >> b) | (val << (8 - b))) & 0xff;
            val |= val << 8;
            val |= val << 16;

            /* apply set/reset mask */
            set_mask = mask16[s->gr[1]];
            val = (val & ~set_mask) | (mask16[s->gr[0]] & set_mask);
            bit_mask = s->gr[8];
            break;
        case 1:
            val = s->latch;
            goto do_write;
        case 2:
            val = mask16[val & 0x0f];
            bit_mask = s->gr[8];
            break;
        case 3:
            /* rotate */
            b = s->gr[3] & 7;
            val = (val >> b) | (val << (8 - b));

            bit_mask = s->gr[8] & val;
            val = mask16[s->gr[0]];
            break;
        }

        /* apply logical operation */
        func_select = s->gr[3] >> 3;
        switch ( func_select )
        {
        case 0:
        default:
            /* nothing to do */
            break;
        case 1:
            /* and */
            val &= s->latch;
            break;
        case 2:
            /* or */
            val |= s->latch;
            break;
        case 3:
            /* xor */
            val ^= s->latch;
            break;
        }

        /* apply bit mask */
        bit_mask |= bit_mask << 8;
        bit_mask |= bit_mask << 16;
        val = (val & bit_mask) | (s->latch & ~bit_mask);

    do_write:
        /* mask data according to sr[2] */
        mask = s->sr[2];
        write_mask = mask16[mask];
        vram_l(s, addr) =
            (vram_l(s, addr) & ~write_mask) |
            (val & write_mask);
    }
}

static void stdvga_mem_write(uint32_t addr, uint32_t data, uint32_t size)
{
    /* Intercept mmio write */
    switch ( size )
    {
    case 1:
        stdvga_mem_writeb(addr, (data >>  0) & 0xff);
        break;

    case 2:
        stdvga_mem_writeb(addr+0, (data >>  0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >>  8) & 0xff);
        break;

    case 4:
        stdvga_mem_writeb(addr+0, (data >>  0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >>  8) & 0xff);
        stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
        stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
        break;
    }
}

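/* Latest value moved by mmio_move(); recorded but not consumed elsewhere
 * in this file. */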
static uint32_t read_data;
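
/*
 * Emulate an MMIO data move against the shadow VRAM, covering both
 * directions, rep counts, and both the immediate-data and data_is_ptr
 * (guest-memory) forms; df selects descending addresses. Returns 1 so
 * the caller also queues the request on qemu's buffered ring, keeping
 * qemu's own VGA state (including its latches) in step.
 */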
static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
{
    int i;
    int sign = p->df ? -1 : 1;

    if ( p->data_is_ptr )
    {
        if ( p->dir == IOREQ_READ )
        {
            uint32_t addr = p->addr, data = p->data, tmp;
            for ( i = 0; i < p->count; i++ )
            {
                tmp = stdvga_mem_read(addr, p->size);
                hvm_copy_to_guest_phys(data, &tmp, p->size);
                data += sign * p->size;
                addr += sign * p->size;
            }
        }
        else
        {
            uint32_t addr = p->addr, data = p->data, tmp;
            for ( i = 0; i < p->count; i++ )
            {
                hvm_copy_from_guest_phys(&tmp, data, p->size);
                stdvga_mem_write(addr, tmp, p->size);
                data += sign * p->size;
                addr += sign * p->size;
            }
        }
    }
    else
    {
        if ( p->dir == IOREQ_READ )
        {
            uint32_t addr = p->addr;
            for ( i = 0; i < p->count; i++ )
            {
                p->data = stdvga_mem_read(addr, p->size);
                addr += sign * p->size;
            }
        }
        else
        {
            uint32_t addr = p->addr;
            for ( i = 0; i < p->count; i++ )
            {
                stdvga_mem_write(addr, p->data, p->size);
                addr += sign * p->size;
            }
        }
    }

    read_data = p->data;
    return 1;
}

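/*
 * Read-modify-write ioreq types (IOREQ_TYPE_AND/OR/XOR/ADD/SUB) carry an
 * operator to apply at the target address; the table below maps each type
 * to the matching helper so mmio_op() can replay it on the shadow VRAM.
 */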
static uint32_t op_and(uint32_t a, uint32_t b) { return a & b; }
static uint32_t op_or (uint32_t a, uint32_t b) { return a | b; }
static uint32_t op_xor(uint32_t a, uint32_t b) { return a ^ b; }
static uint32_t op_add(uint32_t a, uint32_t b) { return a + b; }
static uint32_t op_sub(uint32_t a, uint32_t b) { return a - b; }
static uint32_t (*op_array[])(uint32_t, uint32_t) = {
    [IOREQ_TYPE_AND] = op_and,
    [IOREQ_TYPE_OR ] = op_or,
    [IOREQ_TYPE_XOR] = op_xor,
    [IOREQ_TYPE_ADD] = op_add,
    [IOREQ_TYPE_SUB] = op_sub
};

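/*
 * Apply a read-modify-write request to the shadow VRAM. The result is
 * never buffered (return 0), so the caller still forwards the request to
 * qemu as an ordinary synchronous ioreq.
 */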
static int mmio_op(struct hvm_hw_stdvga *s, ioreq_t *p)
{
    uint32_t orig, mod = 0;
    orig = stdvga_mem_read(p->addr, p->size);

    if ( p->dir == IOREQ_WRITE )
    {
        mod = (op_array[p->type])(orig, p->data);
        stdvga_mem_write(p->addr, mod, p->size);
    }

    return 0; /* Don't try to buffer these operations */
}

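/*
 * MMIO intercept for the 0xa0000-0xaffff legacy VGA window. While the
 * guest is in standard VGA mode and the cache is valid, moves and
 * read-modify-write ops are emulated against the shadow VRAM; otherwise
 * only plain writes are eligible for the buffered-ioreq fast path. Any
 * unrecognised request type invalidates the cache, since the shadow may
 * no longer match what qemu sees.
 */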
int stdvga_intercept_mmio(ioreq_t *p)
{
    struct domain *d = current->domain;
    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    int buf = 0, rc;

    if ( p->size > 8 )
    {
        gdprintk(XENLOG_WARNING, "invalid mmio size %d\n", (int)p->size);
        return 0;
    }

    spin_lock(&s->lock);

    if ( s->stdvga && s->cache )
    {
        switch ( p->type )
        {
        case IOREQ_TYPE_COPY:
            buf = mmio_move(s, p);
            break;
        case IOREQ_TYPE_AND:
        case IOREQ_TYPE_OR:
        case IOREQ_TYPE_XOR:
        case IOREQ_TYPE_ADD:
        case IOREQ_TYPE_SUB:
            buf = mmio_op(s, p);
            break;
        default:
            gdprintk(XENLOG_WARNING, "unsupported mmio request type:%d "
                     "addr:0x%04x data:0x%04x size:%d count:%d state:%d "
                     "isptr:%d dir:%d df:%d\n",
                     p->type, (int)p->addr, (int)p->data, (int)p->size,
                     (int)p->count, p->state,
                     p->data_is_ptr, p->dir, p->df);
            s->cache = 0;
        }
    }
    else
    {
        buf = (p->dir == IOREQ_WRITE);
    }

    rc = (buf && hvm_buffered_io_send(p));

    spin_unlock(&s->lock);

    return rc;
}

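/*
 * Allocate the 64-page shadow VRAM and register the port I/O and MMIO
 * intercepts. If any page allocation fails, no handlers are registered
 * and the device is simply never intercepted; stdvga_deinit() frees
 * whatever pages were allocated.
 */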
void stdvga_init(struct domain *d)
{
    int i;
    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    memset(s, 0, sizeof(*s));
    spin_lock_init(&s->lock);

    for ( i = 0; i != ARRAY_SIZE(s->vram_ptr); i++ )
    {
        struct page_info *vram_page;
        vram_page = alloc_domheap_page(NULL);
        if ( vram_page == NULL )
            break;
        s->vram_ptr[i] = page_to_virt(vram_page);
        memset(s->vram_ptr[i], 0, PAGE_SIZE);
    }

    if ( i == ARRAY_SIZE(s->vram_ptr) )
    {
        /* Sequencer registers. */
        register_portio_handler(d, 0x3c4, 2, stdvga_intercept_pio);
        /* Graphics registers. */
        register_portio_handler(d, 0x3ce, 2, stdvga_intercept_pio);
        /* MMIO. */
        register_buffered_io_handler(d, 0xa0000, 0x10000,
                                     stdvga_intercept_mmio);
    }
}

void stdvga_deinit(struct domain *d)
{
    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    int i;

    for ( i = 0; i != ARRAY_SIZE(s->vram_ptr); i++ )
    {
        struct page_info *vram_page;
        if ( s->vram_ptr[i] == NULL )
            continue;
        vram_page = virt_to_page(s->vram_ptr[i]);
        free_domheap_page(vram_page);
        s->vram_ptr[i] = NULL;
    }
}