
view xen/arch/x86/hvm/stdvga.c @ 16604:01c9b2b3118a

hvm stdvga: Do not emulate PIO reads inside Xen. They should be rare
(PIO write emulation is really for book-keeping to detect when we
enter/leave stdvga mode, and to work out what to do with mmio
accesses), and we may do the wrong thing depending on emulated SVGA
hardware and current mode.

This simplifies the code and means that 'stdvga=1' once again works
properly and causes the stdvga bios to be loaded by hvmloader.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Sat Dec 08 16:57:13 2007 +0000 (2007-12-08)
parents d1ac500f77c1
children 9862217f3c34
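
The change described above boils down to a one-line policy in the PIO intercept: read requests (and pointer-style requests) are returned to qemu untouched, while write requests are snooped for mode book-keeping and then forwarded through the buffered-io path. Below is a minimal, self-contained sketch of that policy, not the hypervisor code itself; fake_ioreq, snoop_write() and send_buffered() are hypothetical stand-ins for ioreq_t, stdvga_out() and hvm_buffered_io_send().

#include <stdio.h>
#include <stdint.h>

enum { IOREQ_READ, IOREQ_WRITE };

/* Hypothetical stand-in for the fields of ioreq_t that matter here. */
struct fake_ioreq {
    uint64_t addr;
    uint64_t data;
    int dir;            /* IOREQ_READ or IOREQ_WRITE                       */
    int data_is_ptr;    /* data is a guest pointer, not an immediate value */
};

/* Stand-ins for stdvga_out() and hvm_buffered_io_send(). */
static int snoop_write(struct fake_ioreq *p)   { (void)p; return 1; }
static int send_buffered(struct fake_ioreq *p) { (void)p; return 1; }

/* Returns 1 if the access was consumed here, 0 if qemu must handle it. */
static int intercept_pio(struct fake_ioreq *p)
{
    /* Reads (and pointer-style rep ops) are not emulated inside Xen. */
    if ( p->data_is_ptr || (p->dir == IOREQ_READ) )
        return 0;
    /* Writes: snoop for stdvga-mode book-keeping, then buffer for qemu. */
    return snoop_write(p) && send_buffered(p);
}

int main(void)
{
    struct fake_ioreq rd = { .addr = 0x3c5, .dir = IOREQ_READ  };
    struct fake_ioreq wr = { .addr = 0x3c5, .dir = IOREQ_WRITE };

    printf("read  consumed in Xen? %d\n", intercept_pio(&rd)); /* 0 */
    printf("write consumed in Xen? %d\n", intercept_pio(&wr)); /* 1 */
    return 0;
}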
line source
1 /*
2 * Copyright (c) 2003-2007, Virtual Iron Software, Inc.
3 *
4 * Portions have been modified by Virtual Iron Software, Inc.
5 * (c) 2007. This file and the modifications can be redistributed and/or
6 * modified under the terms and conditions of the GNU General Public
7 * License, version 2.1 and not any later version of the GPL, as published
8 * by the Free Software Foundation.
9 *
10 * This improves the performance of Standard VGA,
11 * the mode used during Windows boot and by the Linux
12 * splash screen.
13 *
14 * It does so by buffering all the stdvga programmed output ops
15 * and memory mapped ops (both reads and writes) that are sent to QEMU.
16 *
17 * We maintain locally essential VGA state so we can respond
18 * immediately to input and read ops without waiting for
19 * QEMU. We snoop output and write ops to keep our state
20 * up-to-date.
21 *
22 * PIO input ops are satisfied from cached state without
23 * bothering QEMU.
24 *
25 * PIO output and mmio ops are passed through to QEMU, including
26 * mmio read ops. This is necessary because mmio reads
27 * can have side effects.
28 */
30 #include <xen/config.h>
31 #include <xen/types.h>
32 #include <xen/sched.h>
33 #include <xen/domain_page.h>
34 #include <asm/hvm/support.h>
36 #define PAT(x) (x)
37 static const uint32_t mask16[16] = {
38 PAT(0x00000000),
39 PAT(0x000000ff),
40 PAT(0x0000ff00),
41 PAT(0x0000ffff),
42 PAT(0x00ff0000),
43 PAT(0x00ff00ff),
44 PAT(0x00ffff00),
45 PAT(0x00ffffff),
46 PAT(0xff000000),
47 PAT(0xff0000ff),
48 PAT(0xff00ff00),
49 PAT(0xff00ffff),
50 PAT(0xffff0000),
51 PAT(0xffff00ff),
52 PAT(0xffffff00),
53 PAT(0xffffffff),
54 };
56 /* force some bits to zero */
57 const uint8_t sr_mask[8] = {
58 (uint8_t)~0xfc,
59 (uint8_t)~0xc2,
60 (uint8_t)~0xf0,
61 (uint8_t)~0xc0,
62 (uint8_t)~0xf1,
63 (uint8_t)~0xff,
64 (uint8_t)~0xff,
65 (uint8_t)~0x00,
66 };
68 const uint8_t gr_mask[9] = {
69 (uint8_t)~0xf0, /* 0x00 */
70 (uint8_t)~0xf0, /* 0x01 */
71 (uint8_t)~0xf0, /* 0x02 */
72 (uint8_t)~0xe0, /* 0x03 */
73 (uint8_t)~0xfc, /* 0x04 */
74 (uint8_t)~0x84, /* 0x05 */
75 (uint8_t)~0xf0, /* 0x06 */
76 (uint8_t)~0xf0, /* 0x07 */
77 (uint8_t)~0x00, /* 0x08 */
78 };
80 static uint8_t *vram_getb(struct hvm_hw_stdvga *s, unsigned int a)
81 {
82 struct page_info *pg = s->vram_page[(a >> 12) & 0x3f];
83 uint8_t *p = map_domain_page(page_to_mfn(pg));
84 return &p[a & 0xfff];
85 }
87 static uint32_t *vram_getl(struct hvm_hw_stdvga *s, unsigned int a)
88 {
89 struct page_info *pg = s->vram_page[(a >> 10) & 0x3f];
90 uint32_t *p = map_domain_page(page_to_mfn(pg));
91 return &p[a & 0x3ff];
92 }
94 static void vram_put(struct hvm_hw_stdvga *s, void *p)
95 {
96 unmap_domain_page(p);
97 }
99 static int stdvga_outb(uint64_t addr, uint8_t val)
100 {
101 struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
102 int rc = 1, prev_stdvga = s->stdvga;
104 switch ( addr )
105 {
106 case 0x3c4: /* sequencer address register */
107 s->sr_index = val;
108 break;
110 case 0x3c5: /* sequencer data register */
111 rc = (s->sr_index < sizeof(s->sr));
112 if ( rc )
113 s->sr[s->sr_index] = val & sr_mask[s->sr_index];
114 break;
116 case 0x3ce: /* graphics address register */
117 s->gr_index = val;
118 break;
120 case 0x3cf: /* graphics data register */
121 rc = (s->gr_index < sizeof(s->gr));
122 if ( rc )
123 s->gr[s->gr_index] = val & gr_mask[s->gr_index];
124 break;
126 default:
127 rc = 0;
128 break;
129 }
131 /* When in standard vga mode, emulate here all writes to the vram buffer
132 * so we can immediately satisfy reads without waiting for qemu. */
133 s->stdvga =
134 (s->sr[7] == 0x00) && /* standard vga mode */
135 (s->gr[6] == 0x05); /* misc graphics register w/ MemoryMapSelect=1
136 * 0xa0000-0xaffff (64k region), AlphaDis=1 */
138 if ( !prev_stdvga && s->stdvga )
139 {
140 s->cache = 1; /* (re)start caching video buffer */
141 gdprintk(XENLOG_INFO, "entering stdvga and caching modes\n");
142 }
143 else if ( prev_stdvga && !s->stdvga )
144 {
145 gdprintk(XENLOG_INFO, "leaving stdvga\n");
146 }
148 return rc;
149 }
151 static int stdvga_out(ioreq_t *p)
152 {
153 int rc = 1;
155 switch ( p->size )
156 {
157 case 1:
158 rc &= stdvga_outb(p->addr, p->data);
159 break;
161 case 2:
162 rc &= stdvga_outb(p->addr + 0, p->data >> 0);
163 rc &= stdvga_outb(p->addr + 1, p->data >> 8);
164 break;
166 default:
167 rc = 0;
168 break;
169 }
171 return rc;
172 }
174 int stdvga_intercept_pio(ioreq_t *p)
175 {
176 struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
177 int rc;
179 if ( p->data_is_ptr || (p->dir == IOREQ_READ) )
180 return 0;
182 spin_lock(&s->lock);
183 rc = (stdvga_out(p) && hvm_buffered_io_send(p));
184 spin_unlock(&s->lock);
186 return rc;
187 }
189 #define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff)
191 static uint8_t stdvga_mem_readb(uint64_t addr)
192 {
193 struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
194 int plane;
195 uint32_t ret, *vram_l;
196 uint8_t *vram_b;
198 addr &= 0x1ffff;
199 if ( addr >= 0x10000 )
200 return 0xff;
202 if ( s->sr[4] & 0x08 )
203 {
204 /* chain 4 mode : simplest access */
205 vram_b = vram_getb(s, addr);
206 ret = *vram_b;
207 vram_put(s, vram_b);
208 }
209 else if ( s->gr[5] & 0x10 )
210 {
211 /* odd/even mode (aka text mode mapping) */
212 plane = (s->gr[4] & 2) | (addr & 1);
213 vram_b = vram_getb(s, ((addr & ~1) << 1) | plane);
214 ret = *vram_b;
215 vram_put(s, vram_b);
216 }
217 else
218 {
219 /* standard VGA latched access */
220 vram_l = vram_getl(s, addr);
221 s->latch = *vram_l;
222 vram_put(s, vram_l);
224 if ( !(s->gr[5] & 0x08) )
225 {
226 /* read mode 0 */
227 plane = s->gr[4];
228 ret = GET_PLANE(s->latch, plane);
229 }
230 else
231 {
232 /* read mode 1 */
233 ret = (s->latch ^ mask16[s->gr[2]]) & mask16[s->gr[7]];
234 ret |= ret >> 16;
235 ret |= ret >> 8;
236 ret = (~ret) & 0xff;
237 }
238 }
240 return ret;
241 }
243 static uint32_t stdvga_mem_read(uint32_t addr, uint32_t size)
244 {
245 uint32_t data = 0;
247 switch ( size )
248 {
249 case 1:
250 data = stdvga_mem_readb(addr);
251 break;
253 case 2:
254 data = stdvga_mem_readb(addr);
255 data |= stdvga_mem_readb(addr + 1) << 8;
256 break;
258 case 4:
259 data = stdvga_mem_readb(addr);
260 data |= stdvga_mem_readb(addr + 1) << 8;
261 data |= stdvga_mem_readb(addr + 2) << 16;
262 data |= stdvga_mem_readb(addr + 3) << 24;
263 break;
265 default:
266 gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
267 break;
268 }
270 return data;
271 }
273 static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
274 {
275 struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
276 int plane, write_mode, b, func_select, mask;
277 uint32_t write_mask, bit_mask, set_mask, *vram_l;
278 uint8_t *vram_b;
280 addr &= 0x1ffff;
281 if ( addr >= 0x10000 )
282 return;
284 if ( s->sr[4] & 0x08 )
285 {
286 /* chain 4 mode : simplest access */
287 plane = addr & 3;
288 mask = (1 << plane);
289 if ( s->sr[2] & mask )
290 {
291 vram_b = vram_getb(s, addr);
292 *vram_b = val;
293 vram_put(s, vram_b);
294 }
295 }
296 else if ( s->gr[5] & 0x10 )
297 {
298 /* odd/even mode (aka text mode mapping) */
299 plane = (s->gr[4] & 2) | (addr & 1);
300 mask = (1 << plane);
301 if ( s->sr[2] & mask )
302 {
303 addr = ((addr & ~1) << 1) | plane;
304 vram_b = vram_getb(s, addr);
305 *vram_b = val;
306 vram_put(s, vram_b);
307 }
308 }
309 else
310 {
311 write_mode = s->gr[5] & 3;
312 switch ( write_mode )
313 {
314 default:
315 case 0:
316 /* rotate */
317 b = s->gr[3] & 7;
318 val = ((val >> b) | (val << (8 - b))) & 0xff;
319 val |= val << 8;
320 val |= val << 16;
322 /* apply set/reset mask */
323 set_mask = mask16[s->gr[1]];
324 val = (val & ~set_mask) | (mask16[s->gr[0]] & set_mask);
325 bit_mask = s->gr[8];
326 break;
327 case 1:
328 val = s->latch;
329 goto do_write;
330 case 2:
331 val = mask16[val & 0x0f];
332 bit_mask = s->gr[8];
333 break;
334 case 3:
335 /* rotate */
336 b = s->gr[3] & 7;
337 val = (val >> b) | (val << (8 - b));
339 bit_mask = s->gr[8] & val;
340 val = mask16[s->gr[0]];
341 break;
342 }
344 /* apply logical operation */
345 func_select = s->gr[3] >> 3;
346 switch ( func_select )
347 {
348 case 0:
349 default:
350 /* nothing to do */
351 break;
352 case 1:
353 /* and */
354 val &= s->latch;
355 break;
356 case 2:
357 /* or */
358 val |= s->latch;
359 break;
360 case 3:
361 /* xor */
362 val ^= s->latch;
363 break;
364 }
366 /* apply bit mask */
367 bit_mask |= bit_mask << 8;
368 bit_mask |= bit_mask << 16;
369 val = (val & bit_mask) | (s->latch & ~bit_mask);
371 do_write:
372 /* mask data according to sr[2] */
373 mask = s->sr[2];
374 write_mask = mask16[mask];
375 vram_l = vram_getl(s, addr);
376 *vram_l = (*vram_l & ~write_mask) | (val & write_mask);
377 vram_put(s, vram_l);
378 }
379 }
381 static void stdvga_mem_write(uint32_t addr, uint32_t data, uint32_t size)
382 {
383 /* Intercept mmio write */
384 switch ( size )
385 {
386 case 1:
387 stdvga_mem_writeb(addr, (data >> 0) & 0xff);
388 break;
390 case 2:
391 stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
392 stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
393 break;
395 case 4:
396 stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
397 stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
398 stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
399 stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
400 break;
402 default:
403 gdprintk(XENLOG_WARNING, "invalid io size:%d\n", size);
404 break;
405 }
406 }
408 static uint32_t read_data;
410 static int mmio_move(struct hvm_hw_stdvga *s, ioreq_t *p)
411 {
412 int i;
413 int sign = p->df ? -1 : 1;
415 if ( p->data_is_ptr )
416 {
417 if ( p->dir == IOREQ_READ )
418 {
419 uint32_t addr = p->addr, data = p->data, tmp;
420 for ( i = 0; i < p->count; i++ )
421 {
422 tmp = stdvga_mem_read(addr, p->size);
423 hvm_copy_to_guest_phys(data, &tmp, p->size);
424 data += sign * p->size;
425 addr += sign * p->size;
426 }
427 }
428 else
429 {
430 uint32_t addr = p->addr, data = p->data, tmp;
431 for ( i = 0; i < p->count; i++ )
432 {
433 hvm_copy_from_guest_phys(&tmp, data, p->size);
434 stdvga_mem_write(addr, tmp, p->size);
435 data += sign * p->size;
436 addr += sign * p->size;
437 }
438 }
439 }
440 else
441 {
442 if ( p->dir == IOREQ_READ )
443 {
444 uint32_t addr = p->addr;
445 for ( i = 0; i < p->count; i++ )
446 {
447 p->data = stdvga_mem_read(addr, p->size);
448 addr += sign * p->size;
449 }
450 }
451 else
452 {
453 uint32_t addr = p->addr;
454 for ( i = 0; i < p->count; i++ )
455 {
456 stdvga_mem_write(addr, p->data, p->size);
457 addr += sign * p->size;
458 }
459 }
460 }
462 read_data = p->data;
463 return 1;
464 }
466 static uint32_t op_and(uint32_t a, uint32_t b) { return a & b; }
467 static uint32_t op_or (uint32_t a, uint32_t b) { return a | b; }
468 static uint32_t op_xor(uint32_t a, uint32_t b) { return a ^ b; }
469 static uint32_t op_add(uint32_t a, uint32_t b) { return a + b; }
470 static uint32_t op_sub(uint32_t a, uint32_t b) { return a - b; }
471 static uint32_t (*op_array[])(uint32_t, uint32_t) = {
472 [IOREQ_TYPE_AND] = op_and,
473 [IOREQ_TYPE_OR ] = op_or,
474 [IOREQ_TYPE_XOR] = op_xor,
475 [IOREQ_TYPE_ADD] = op_add,
476 [IOREQ_TYPE_SUB] = op_sub
477 };
479 static int mmio_op(struct hvm_hw_stdvga *s, ioreq_t *p)
480 {
481 uint32_t orig, mod = 0;
482 orig = stdvga_mem_read(p->addr, p->size);
484 if ( p->dir == IOREQ_WRITE )
485 {
486 mod = (op_array[p->type])(orig, p->data);
487 stdvga_mem_write(p->addr, mod, p->size);
488 }
490 return 0; /* Don't try to buffer these operations */
491 }
493 int stdvga_intercept_mmio(ioreq_t *p)
494 {
495 struct domain *d = current->domain;
496 struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
497 int buf = 0, rc;
499 if ( p->size > 8 )
500 {
501 gdprintk(XENLOG_WARNING, "invalid mmio size %d\n", (int)p->size);
502 return 0;
503 }
505 spin_lock(&s->lock);
507 if ( s->stdvga && s->cache )
508 {
509 switch ( p->type )
510 {
511 case IOREQ_TYPE_COPY:
512 buf = mmio_move(s, p);
513 break;
514 case IOREQ_TYPE_AND:
515 case IOREQ_TYPE_OR:
516 case IOREQ_TYPE_XOR:
517 case IOREQ_TYPE_ADD:
518 case IOREQ_TYPE_SUB:
519 buf = mmio_op(s, p);
520 break;
521 default:
522 gdprintk(XENLOG_WARNING, "unsupported mmio request type:%d "
523 "addr:0x%04x data:0x%04x size:%d count:%d state:%d "
524 "isptr:%d dir:%d df:%d\n",
525 p->type, (int)p->addr, (int)p->data, (int)p->size,
526 (int)p->count, p->state,
527 p->data_is_ptr, p->dir, p->df);
528 s->cache = 0;
529 }
530 }
531 else
532 {
533 buf = (p->dir == IOREQ_WRITE);
534 }
536 rc = (buf && hvm_buffered_io_send(p));
538 spin_unlock(&s->lock);
540 return rc;
541 }
543 void stdvga_init(struct domain *d)
544 {
545 struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
546 struct page_info *pg;
547 void *p;
548 int i;
550 memset(s, 0, sizeof(*s));
551 spin_lock_init(&s->lock);
553 for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
554 {
555 if ( (pg = alloc_domheap_page(NULL)) == NULL )
556 break;
557 s->vram_page[i] = pg;
558 p = map_domain_page(page_to_mfn(pg));
559 clear_page(p);
560 unmap_domain_page(p);
561 }
563 if ( i == ARRAY_SIZE(s->vram_page) )
564 {
565 /* Sequencer registers. */
566 register_portio_handler(d, 0x3c4, 2, stdvga_intercept_pio);
567 /* Graphics registers. */
568 register_portio_handler(d, 0x3ce, 2, stdvga_intercept_pio);
569 /* MMIO. */
570 register_buffered_io_handler(
571 d, 0xa0000, 0x10000, stdvga_intercept_mmio);
572 }
573 }
575 void stdvga_deinit(struct domain *d)
576 {
577 struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
578 int i;
580 for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
581 {
582 if ( s->vram_page[i] == NULL )
583 continue;
584 free_domheap_page(s->vram_page[i]);
585 s->vram_page[i] = NULL;
586 }
587 }
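
The densest path in the listing is write mode 0 in stdvga_mem_writeb(): the guest byte is rotated, replicated across the four planes, combined with the set/reset value, run through the data-rotate ALU against the latch, selected bit-by-bit against the latched data, and finally masked by the plane enables in sr[2]. The following is a minimal, self-contained sketch of that data path, not the file above: expand4() is a hypothetical stand-in for the mask16[] lookup, and the register values are passed as plain parameters rather than read from hvm_hw_stdvga.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the mask16[] table: expand the low 4 bits
 * of x so that plane n's byte becomes 0xff when bit n is set. */
static uint32_t expand4(uint8_t x)
{
    uint32_t m = 0;
    int i;
    for ( i = 0; i < 4; i++ )
        if ( x & (1u << i) )
            m |= 0xffu << (i * 8);
    return m;
}

/* One guest byte pushed through the write mode 0 pipeline, returning the
 * 32-bit "one byte per plane" dword that replaces the current vram dword. */
static uint32_t wm0_pipeline(uint8_t val8, uint32_t latch, uint32_t vram,
                             uint8_t gr0, uint8_t gr1, uint8_t gr3,
                             uint8_t gr8, uint8_t sr2)
{
    int b = gr3 & 7;                                    /* rotate count     */
    uint32_t val = ((val8 >> b) | (val8 << (8 - b))) & 0xff;
    val |= val << 8;
    val |= val << 16;                                   /* copy to 4 planes */

    uint32_t set_mask = expand4(gr1);                   /* set/reset enable */
    val = (val & ~set_mask) | (expand4(gr0) & set_mask);

    switch ( (gr3 >> 3) & 3 )                           /* ALU with latch   */
    {
    case 1: val &= latch; break;                        /* and  */
    case 2: val |= latch; break;                        /* or   */
    case 3: val ^= latch; break;                        /* xor  */
    default: break;                                     /* copy */
    }

    uint32_t bit_mask = gr8;                            /* per-bit select   */
    bit_mask |= bit_mask << 8;
    bit_mask |= bit_mask << 16;
    val = (val & bit_mask) | (latch & ~bit_mask);

    uint32_t write_mask = expand4(sr2);                 /* plane enables    */
    return (vram & ~write_mask) | (val & write_mask);
}

int main(void)
{
    /* Write 0x0f with all four planes enabled (sr2=0x0f), all bits
     * selected (gr8=0xff), no rotate, no set/reset, ALU op = copy:
     * every plane byte becomes 0x0f. */
    uint32_t out = wm0_pipeline(0x0f, 0x00000000, 0x00000000,
                                0x00, 0x00, 0x00, 0xff, 0x0f);
    printf("planar dword written: 0x%08x\n", (unsigned int)out);
    return 0;
}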