/root/src/xen/xen/arch/x86/hvm/stdvga.c
/*
 * Copyright (c) 2003-2007, Virtual Iron Software, Inc.
 *
 * Portions have been modified by Virtual Iron Software, Inc.
 * (c) 2007. This file and the modifications can be redistributed and/or
 * modified under the terms and conditions of the GNU General Public
 * License, version 2.1 and not any later version of the GPL, as published
 * by the Free Software Foundation.
 *
 * This improves the performance of Standard VGA, the mode used during
 * Windows boot and by the Linux splash screen.
 *
 * It does so by buffering all the stdvga programmed output ops and memory
 * mapped ops (both reads and writes) that are sent to QEMU.
 *
 * We maintain the essential VGA state locally so that we can respond to
 * input and read ops immediately, without waiting for QEMU, and we snoop
 * output and write ops to keep that state up-to-date.
 *
 * PIO input ops are satisfied from cached state without bothering QEMU.
 *
 * PIO output and mmio ops are passed through to QEMU, including mmio read
 * ops. This is necessary because mmio reads can have side effects.
 */

#include <xen/types.h>
#include <xen/sched.h>
#include <xen/domain_page.h>
#include <asm/hvm/ioreq.h>
#include <asm/hvm/support.h>
#include <xen/numa.h>
#include <xen/paging.h>

#define VGA_MEM_BASE 0xa0000
#define VGA_MEM_SIZE 0x20000

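/*
 * mask16[] expands a 4-bit plane-select value into a 32-bit mask holding
 * 0xff in byte n for each set bit n of the index, so that all four VGA
 * planes can be operated on with a single 32-bit access. For example,
 * mask16[0x5] == 0x00ff00ff selects planes 0 and 2.
 */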
#define PAT(x) (x)
static const uint32_t mask16[16] = {
    PAT(0x00000000),
    PAT(0x000000ff),
    PAT(0x0000ff00),
    PAT(0x0000ffff),
    PAT(0x00ff0000),
    PAT(0x00ff00ff),
    PAT(0x00ffff00),
    PAT(0x00ffffff),
    PAT(0xff000000),
    PAT(0xff0000ff),
    PAT(0xff00ff00),
    PAT(0xff00ffff),
    PAT(0xffff0000),
    PAT(0xffff00ff),
    PAT(0xffffff00),
    PAT(0xffffffff),
};

/*
 * Writable-bit masks for the sequencer and graphics controller registers:
 * values written to the data registers are ANDed with these masks, forcing
 * the bits that are reserved on real hardware to zero.
 */
static const uint8_t sr_mask[8] = {
    (uint8_t)~0xfc,
    (uint8_t)~0xc2,
    (uint8_t)~0xf0,
    (uint8_t)~0xc0,
    (uint8_t)~0xf1,
    (uint8_t)~0xff,
    (uint8_t)~0xff,
    (uint8_t)~0x00,
};

static const uint8_t gr_mask[9] = {
    (uint8_t)~0xf0, /* 0x00 */
    (uint8_t)~0xf0, /* 0x01 */
    (uint8_t)~0xf0, /* 0x02 */
    (uint8_t)~0xe0, /* 0x03 */
    (uint8_t)~0xfc, /* 0x04 */
    (uint8_t)~0x84, /* 0x05 */
    (uint8_t)~0xf0, /* 0x06 */
    (uint8_t)~0xf0, /* 0x07 */
    (uint8_t)~0x00, /* 0x08 */
};

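/*
 * The emulated VRAM is kept as an array of discrete domheap pages
 * (s->vram_page[]) rather than one contiguous buffer, so each access maps
 * the page it needs and unmaps it again via vram_put(). vram_getb()
 * indexes by byte (4k bytes per page, page = a >> 12), while vram_getl()
 * indexes by 32-bit longword spanning all four planes (1k longwords per
 * page, page = a >> 10).
 */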
static uint8_t *vram_getb(struct hvm_hw_stdvga *s, unsigned int a)
{
    struct page_info *pg = s->vram_page[(a >> 12) & 0x3f];
    uint8_t *p = __map_domain_page(pg);
    return &p[a & 0xfff];
}

static uint32_t *vram_getl(struct hvm_hw_stdvga *s, unsigned int a)
{
    struct page_info *pg = s->vram_page[(a >> 10) & 0x3f];
    uint32_t *p = __map_domain_page(pg);
    return &p[a & 0x3ff];
}

static void vram_put(struct hvm_hw_stdvga *s, void *p)
{
    unmap_domain_page(p);
}

static void stdvga_try_cache_enable(struct hvm_hw_stdvga *s)
{
    /*
     * Caching mode can only be enabled if the cache has never been used
     * before. As soon as it is disabled, it becomes out-of-sync with the
     * VGA device model and, since no mechanism exists to acquire current
     * VRAM state from the device model, re-enabling it would lead to
     * stale data being seen by the guest.
     */
    if ( s->cache != STDVGA_CACHE_UNINITIALIZED )
        return;

    gdprintk(XENLOG_INFO, "entering caching mode\n");
    s->cache = STDVGA_CACHE_ENABLED;
}

static void stdvga_cache_disable(struct hvm_hw_stdvga *s)
{
    if ( s->cache != STDVGA_CACHE_ENABLED )
        return;

    gdprintk(XENLOG_INFO, "leaving caching mode\n");
    s->cache = STDVGA_CACHE_DISABLED;
}

static bool_t stdvga_cache_is_enabled(const struct hvm_hw_stdvga *s)
{
    return s->cache == STDVGA_CACHE_ENABLED;
}

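/*
 * Snoop guest writes to the sequencer and graphics controller, which use
 * the standard VGA index/data register pairs: the register index is
 * written to 0x3c4/0x3ce and the value to 0x3c5/0x3cf. A 16-bit OUT to
 * the index port programs both at once (e.g. outw(0x3c4, 0x0f02) sets the
 * map mask register SR2 to 0x0f); stdvga_out() below splits such an
 * access into the two byte writes handled here.
 */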
static int stdvga_outb(uint64_t addr, uint8_t val)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int rc = 1, prev_stdvga = s->stdvga;

    switch ( addr )
    {
    case 0x3c4: /* sequencer address register */
        s->sr_index = val;
        break;

    case 0x3c5: /* sequencer data register */
        rc = (s->sr_index < sizeof(s->sr));
        if ( rc )
            s->sr[s->sr_index] = val & sr_mask[s->sr_index];
        break;

    case 0x3ce: /* graphics address register */
        s->gr_index = val;
        break;

    case 0x3cf: /* graphics data register */
        rc = (s->gr_index < sizeof(s->gr));
        if ( rc )
            s->gr[s->gr_index] = val & gr_mask[s->gr_index];
        break;

    default:
        rc = 0;
        break;
    }

    /* When in standard vga mode, emulate here all writes to the vram buffer
     * so we can immediately satisfy reads without waiting for qemu. */
    s->stdvga = (s->sr[7] == 0x00);

    if ( !prev_stdvga && s->stdvga )
    {
        gdprintk(XENLOG_INFO, "entering stdvga mode\n");
        stdvga_try_cache_enable(s);
    }
    else if ( prev_stdvga && !s->stdvga )
    {
        gdprintk(XENLOG_INFO, "leaving stdvga mode\n");
    }

    return rc;
}

static void stdvga_out(uint32_t port, uint32_t bytes, uint32_t val)
{
    switch ( bytes )
    {
    case 1:
        stdvga_outb(port, val);
        break;

    case 2:
        stdvga_outb(port + 0, val >> 0);
        stdvga_outb(port + 1, val >> 8);
        break;

    default:
        break;
    }
}

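/*
 * Port I/O intercept: writes are snooped into the local state above, but
 * the access is always returned as unhandleable so that it also reaches
 * the device model, which remains the authoritative VGA implementation.
 */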
static int stdvga_intercept_pio(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;

    if ( dir == IOREQ_WRITE )
    {
        spin_lock(&s->lock);
        stdvga_out(port, bytes, *val);
        spin_unlock(&s->lock);
    }

    return X86EMUL_UNHANDLEABLE; /* propagate to external ioemu */
}

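/*
 * Translate an address in the 0xa0000-0xbffff window into an offset into
 * the emulated VRAM, honouring the GR6 memory map select field:
 *   0: 0xa0000-0xbffff (128k)
 *   1: 0xa0000-0xaffff (64k)
 *   2: 0xb0000-0xb7fff (32k)
 *   3: 0xb8000-0xbffff (32k)
 * Returns ~0u if the address falls outside the currently mapped range.
 */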
static unsigned int stdvga_mem_offset(
    struct hvm_hw_stdvga *s, unsigned int mmio_addr)
{
    unsigned int memory_map_mode = (s->gr[6] >> 2) & 3;
    unsigned int offset = mmio_addr & 0x1ffff;

    switch ( memory_map_mode )
    {
    case 0:
        break;
    case 1:
        if ( offset >= 0x10000 )
            goto fail;
        offset += 0; /* assume bank_offset == 0; */
        break;
    case 2:
        offset -= 0x10000;
        if ( offset >= 0x8000 )
            goto fail;
        break;
    default:
    case 3:
        offset -= 0x18000;
        if ( offset >= 0x8000 )
            goto fail;
        break;
    }

    return offset;

 fail:
    return ~0u;
}

#define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff)

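/*
 * Emulate a one-byte VRAM read. Three addressing schemes apply, selected
 * by the snooped registers: chain-4 (SR4 bit 3, packed-pixel modes such
 * as mode 13h) reads the byte directly; odd/even (GR5 bit 4, text modes)
 * sends even addresses to plane 0/2 and odd addresses to plane 1/3;
 * otherwise all four planes are latched in one go and either the plane
 * selected by GR4 is returned (read mode 0) or the planes are compared
 * against the colour in GR2 under the GR7 "don't care" mask (read mode 1).
 */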
static uint8_t stdvga_mem_readb(uint64_t addr)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int plane;
    uint32_t ret, *vram_l;
    uint8_t *vram_b;

    addr = stdvga_mem_offset(s, addr);
    if ( addr == ~0u )
        return 0xff;

    if ( s->sr[4] & 0x08 )
    {
        /* chain 4 mode : simplest access */
        vram_b = vram_getb(s, addr);
        ret = *vram_b;
        vram_put(s, vram_b);
    }
    else if ( s->gr[5] & 0x10 )
    {
        /* odd/even mode (aka text mode mapping) */
        plane = (s->gr[4] & 2) | (addr & 1);
        vram_b = vram_getb(s, ((addr & ~1) << 1) | plane);
        ret = *vram_b;
        vram_put(s, vram_b);
    }
    else
    {
        /* standard VGA latched access */
        vram_l = vram_getl(s, addr);
        s->latch = *vram_l;
        vram_put(s, vram_l);

        if ( !(s->gr[5] & 0x08) )
        {
            /* read mode 0 */
            plane = s->gr[4];
            ret = GET_PLANE(s->latch, plane);
        }
        else
        {
            /* read mode 1 */
            ret = (s->latch ^ mask16[s->gr[2]]) & mask16[s->gr[7]];
            ret |= ret >> 16;
            ret |= ret >> 8;
            ret = (~ret) & 0xff;
        }
    }

    return ret;
}

static int stdvga_mem_read(const struct hvm_io_handler *handler,
                           uint64_t addr, uint32_t size, uint64_t *p_data)
{
    uint64_t data = ~0ul;

    switch ( size )
    {
    case 1:
        data = stdvga_mem_readb(addr);
        break;

    case 2:
        data = stdvga_mem_readb(addr);
        data |= stdvga_mem_readb(addr + 1) << 8;
        break;

    case 4:
        data = stdvga_mem_readb(addr);
        data |= stdvga_mem_readb(addr + 1) << 8;
        data |= stdvga_mem_readb(addr + 2) << 16;
        data |= stdvga_mem_readb(addr + 3) << 24;
        break;

    case 8:
        data = (uint64_t)(stdvga_mem_readb(addr));
        data |= (uint64_t)(stdvga_mem_readb(addr + 1)) << 8;
        data |= (uint64_t)(stdvga_mem_readb(addr + 2)) << 16;
        data |= (uint64_t)(stdvga_mem_readb(addr + 3)) << 24;
        data |= (uint64_t)(stdvga_mem_readb(addr + 4)) << 32;
        data |= (uint64_t)(stdvga_mem_readb(addr + 5)) << 40;
        data |= (uint64_t)(stdvga_mem_readb(addr + 6)) << 48;
        data |= (uint64_t)(stdvga_mem_readb(addr + 7)) << 56;
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size: %u\n", size);
        break;
    }

    *p_data = data;
    return X86EMUL_OKAY;
}

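/*
 * Emulate a one-byte VRAM write. Chain-4 and odd/even writes store the
 * byte directly in the addressed plane, gated by the SR2 map mask.
 * Planar writes follow the standard VGA write pipeline, in outline:
 *   mode 0: rotate the byte by GR3[2:0], replicate it to all four planes,
 *           then substitute the set/reset value (GR0) for any plane
 *           enabled in GR1;
 *   mode 1: write back the latches (used for plane-to-plane copies);
 *   mode 2: expand the low nibble into one colour bit per plane;
 *   mode 3: the rotated byte ANDed with GR8 forms the bit mask, with the
 *           data taken from set/reset (GR0).
 * For modes 0, 2 and 3 the result is then combined with the latches by
 * the ALU (GR3[4:3]: move/AND/OR/XOR) and merged under the GR8 bit mask;
 * finally, every write is restricted to the planes enabled in SR2.
 */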
static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    int plane, write_mode, b, func_select, mask;
    uint32_t write_mask, bit_mask, set_mask, *vram_l;
    uint8_t *vram_b;

    addr = stdvga_mem_offset(s, addr);
    if ( addr == ~0u )
        return;

    if ( s->sr[4] & 0x08 )
    {
        /* chain 4 mode : simplest access */
        plane = addr & 3;
        mask = (1 << plane);
        if ( s->sr[2] & mask )
        {
            vram_b = vram_getb(s, addr);
            *vram_b = val;
            vram_put(s, vram_b);
        }
    }
    else if ( s->gr[5] & 0x10 )
    {
        /* odd/even mode (aka text mode mapping) */
        plane = (s->gr[4] & 2) | (addr & 1);
        mask = (1 << plane);
        if ( s->sr[2] & mask )
        {
            addr = ((addr & ~1) << 1) | plane;
            vram_b = vram_getb(s, addr);
            *vram_b = val;
            vram_put(s, vram_b);
        }
    }
    else
    {
        write_mode = s->gr[5] & 3;
        switch ( write_mode )
        {
        default:
        case 0:
            /* rotate */
            b = s->gr[3] & 7;
            val = ((val >> b) | (val << (8 - b))) & 0xff;
            val |= val << 8;
            val |= val << 16;

            /* apply set/reset mask */
            set_mask = mask16[s->gr[1]];
            val = (val & ~set_mask) | (mask16[s->gr[0]] & set_mask);
            bit_mask = s->gr[8];
            break;
        case 1:
            val = s->latch;
            goto do_write;
        case 2:
            val = mask16[val & 0x0f];
            bit_mask = s->gr[8];
            break;
        case 3:
            /* rotate */
            b = s->gr[3] & 7;
            val = (val >> b) | (val << (8 - b));

            bit_mask = s->gr[8] & val;
            val = mask16[s->gr[0]];
            break;
        }

        /* apply logical operation */
        func_select = s->gr[3] >> 3;
        switch ( func_select )
        {
        case 0:
        default:
            /* nothing to do */
            break;
        case 1:
            /* and */
            val &= s->latch;
            break;
        case 2:
            /* or */
            val |= s->latch;
            break;
        case 3:
            /* xor */
            val ^= s->latch;
            break;
        }

        /* apply bit mask */
        bit_mask |= bit_mask << 8;
        bit_mask |= bit_mask << 16;
        val = (val & bit_mask) | (s->latch & ~bit_mask);

    do_write:
        /* mask data according to sr[2] */
        mask = s->sr[2];
        write_mask = mask16[mask];
        vram_l = vram_getl(s, addr);
        *vram_l = (*vram_l & ~write_mask) | (val & write_mask);
        vram_put(s, vram_l);
    }
}

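/*
 * MMIO write handler: apply the write to the local VRAM cache (when the
 * cache is active and we are in stdvga mode), then forward it to the
 * device model regardless, since the device model still renders the
 * frame buffer that the guest actually sees.
 */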
static int stdvga_mem_write(const struct hvm_io_handler *handler,
                            uint64_t addr, uint32_t size,
                            uint64_t data)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
    ioreq_t p = {
        .type = IOREQ_TYPE_COPY,
        .addr = addr,
        .size = size,
        .count = 1,
        .dir = IOREQ_WRITE,
        .data = data,
    };
    struct hvm_ioreq_server *srv;

    if ( !stdvga_cache_is_enabled(s) || !s->stdvga )
        goto done;

    /* Intercept mmio write */
    switch ( size )
    {
    case 1:
        stdvga_mem_writeb(addr, (data >> 0) & 0xff);
        break;

    case 2:
        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
        break;

    case 4:
        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
        stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
        stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
        break;

    case 8:
        stdvga_mem_writeb(addr+0, (data >> 0) & 0xff);
        stdvga_mem_writeb(addr+1, (data >> 8) & 0xff);
        stdvga_mem_writeb(addr+2, (data >> 16) & 0xff);
        stdvga_mem_writeb(addr+3, (data >> 24) & 0xff);
        stdvga_mem_writeb(addr+4, (data >> 32) & 0xff);
        stdvga_mem_writeb(addr+5, (data >> 40) & 0xff);
        stdvga_mem_writeb(addr+6, (data >> 48) & 0xff);
        stdvga_mem_writeb(addr+7, (data >> 56) & 0xff);
        break;

    default:
        gdprintk(XENLOG_WARNING, "invalid io size: %u\n", size);
        break;
    }

 done:
    srv = hvm_select_ioreq_server(current->domain, &p);
    if ( !srv )
        return X86EMUL_UNHANDLEABLE;

    return hvm_send_ioreq(srv, &p, 1);
}

static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
                                const ioreq_t *p)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;

    /*
     * The range check must be done without taking the lock, to avoid
     * deadlock when hvm_mmio_internal() is called from
     * hvm_copy_to/from_guest_phys() in hvm_process_io_intercept().
     */
    if ( (hvm_mmio_first_byte(p) < VGA_MEM_BASE) ||
         (hvm_mmio_last_byte(p) >= (VGA_MEM_BASE + VGA_MEM_SIZE)) )
        return 0;

    spin_lock(&s->lock);

    if ( p->dir == IOREQ_WRITE && p->count > 1 )
    {
        /*
         * We cannot return X86EMUL_UNHANDLEABLE on anything other than
         * the first cycle of an I/O. So, since we cannot guarantee to
         * always be able to send buffered writes, we have to reject any
         * multi-cycle I/O and, since we are rejecting an I/O, we must
         * invalidate the cache.
         * Single-cycle write transactions are accepted even if the cache
         * is not active since we can assert, when in stdvga mode, that
         * writes to VRAM have no side effect and thus we can try to
         * buffer them.
         */
        stdvga_cache_disable(s);

        goto reject;
    }
    else if ( p->dir == IOREQ_READ &&
              (!stdvga_cache_is_enabled(s) || !s->stdvga) )
        goto reject;

    /* s->lock intentionally held */
    return 1;

 reject:
    spin_unlock(&s->lock);
    return 0;
}

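/* Completion handler: drop the lock left held by stdvga_mem_accept(). */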
static void stdvga_mem_complete(const struct hvm_io_handler *handler)
{
    struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;

    spin_unlock(&s->lock);
}

static const struct hvm_io_ops stdvga_mem_ops = {
    .accept = stdvga_mem_accept,
    .read = stdvga_mem_read,
    .write = stdvga_mem_write,
    .complete = stdvga_mem_complete
};

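/*
 * Note that if any VRAM page allocation below fails, the PIO and MMIO
 * intercepts are simply never registered and every VGA access goes to
 * the device model: the optimisation degrades gracefully rather than
 * failing the domain build. The partially allocated pages are released
 * by stdvga_deinit().
 */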
void stdvga_init(struct domain *d)
{
    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    struct page_info *pg;
    unsigned int i;

    if ( !has_vvga(d) )
        return;

    memset(s, 0, sizeof(*s));
    spin_lock_init(&s->lock);

    for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
    {
        pg = alloc_domheap_page(d, MEMF_no_owner);
        if ( pg == NULL )
            break;
        s->vram_page[i] = pg;
        clear_domain_page(_mfn(page_to_mfn(pg)));
    }

    if ( i == ARRAY_SIZE(s->vram_page) )
    {
        struct hvm_io_handler *handler;

        /* Sequencer registers. */
        register_portio_handler(d, 0x3c4, 2, stdvga_intercept_pio);
        /* Graphics registers. */
        register_portio_handler(d, 0x3ce, 2, stdvga_intercept_pio);

        /* VGA memory */
        handler = hvm_next_io_handler(d);

        if ( handler == NULL )
            return;

        handler->type = IOREQ_TYPE_COPY;
        handler->ops = &stdvga_mem_ops;
    }
}

void stdvga_deinit(struct domain *d)
{
    struct hvm_hw_stdvga *s = &d->arch.hvm_domain.stdvga;
    int i;

    if ( !has_vvga(d) )
        return;

    for ( i = 0; i != ARRAY_SIZE(s->vram_page); i++ )
    {
        if ( s->vram_page[i] == NULL )
            continue;
        free_domheap_page(s->vram_page[i]);
        s->vram_page[i] = NULL;
    }
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */