debuggers.hg: view xen/arch/x86/hvm/vmx/realmode.c @ 16960:ed2ca78286a8

vmx realmode: Multiple I/O reads to qemu within a single instruction are not
allowed, but we do allow, for example, a read followed by a write (e.g., MOVS
within video RAM).

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

author    Keir Fraser <keir.fraser@citrix.com>
date      Mon Jan 28 11:28:55 2008 +0000 (2008-01-28)
parents   db620f1c9d30
children  0d70e01c0012
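
The rule described in the changeset message is enforced in the read paths below:
the first read that has to go to the device model sends an ioreq and makes the
emulator return RETRY, the instruction is re-emulated once the data arrives, and
a second read that would need another round trip within the same instruction is
refused. Writes are posted without waiting for completion, which is why a read
followed by a write is fine. A condensed sketch of the read-side pattern, using
the flags and return codes from the file below (not a verbatim excerpt):

    /* Condensed from realmode_read() / realmode_emulate_one() below. */
    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;   /* a second read in the same insn */
    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes, 0, IOREQ_READ, 0, 0);
        return X86EMUL_RETRY;          /* re-emulate when the data arrives */
    }
    *val = curr->arch.hvm_vmx.real_mode_io_data;   /* completed read */
    curr->arch.hvm_vmx.real_mode_io_completed = 0;
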
/******************************************************************************
 * arch/x86/hvm/vmx/realmode.c
 *
 * Real-mode emulation for VMX.
 *
 * Copyright (c) 2007 Citrix Systems, Inc.
 *
 * Authors:
 *    Keir Fraser <keir.fraser@citrix.com>
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/event.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/hvm/vmx/cpu.h>
#include <asm/x86_emulate.h>
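
/*
 * Per-instruction emulation state. Wraps the generic x86_emulate context
 * with a 16-byte instruction prefetch cache, a cached copy of the guest
 * segment registers, flags recording HLT/MOV-SS/STI side effects, any
 * exception latched by the emulator, and the current interrupt shadow.
 */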
struct realmode_emulate_ctxt {
    struct x86_emulate_ctxt ctxt;

    /* Cache of 16 bytes of instruction. */
    uint8_t insn_buf[16];
    unsigned long insn_buf_eip;

    struct segment_register seg_reg[10];

    union {
        struct {
            unsigned int hlt:1;
            unsigned int mov_ss:1;
            unsigned int sti:1;
        } flags;
        unsigned int flag_word;
    };

    uint8_t exn_vector;
    uint8_t exn_insn_len;

    uint32_t intr_shadow;
};
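
/*
 * Deliver an exception, hardware interrupt or software interrupt with
 * real-mode semantics: push FLAGS, CS and the return IP onto the stack,
 * load CS:IP from the IVT entry addressed via IDTR, and clear TF/IF/RF.
 * A vector beyond the IDT limit escalates to #GP, then to double fault,
 * and finally to a triple fault.
 */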
static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    struct segment_register *idtr = &rm_ctxt->seg_reg[x86_seg_idtr];
    struct segment_register *csr = &rm_ctxt->seg_reg[x86_seg_cs];
    struct cpu_user_regs *regs = rm_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

 again:
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte )
    {
        /* Software interrupt? */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = TRAP_gp_fault;
            goto again;
        }

        /* Exception or hardware interrupt. */
        switch ( vector )
        {
        case TRAP_double_fault:
            hvm_triple_fault();
            return;
        case TRAP_gp_fault:
            vector = TRAP_double_fault;
            goto again;
        default:
            vector = TRAP_gp_fault;
            goto again;
        }
    }

    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);

    frame[0] = regs->eip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->eflags & ~X86_EFLAGS_RF;

    if ( rm_ctxt->ctxt.addr_size == 32 )
    {
        regs->esp -= 6;
        pstk = regs->esp;
    }
    else
    {
        pstk = (uint16_t)(regs->esp - 6);
        regs->esp &= ~0xffff;
        regs->esp |= pstk;
    }

    pstk += rm_ctxt->seg_reg[x86_seg_ss].base;
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));

    csr->sel  = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4;
    regs->eip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);
}
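
/*
 * Read from a guest-physical address. Ordinary RAM is copied directly;
 * if the copy fails the access is treated as MMIO and forwarded to the
 * device model. Only one ioreq may be outstanding per instruction: the
 * first miss sends the request and returns RETRY until the completion
 * data arrives; a second read needing the device model is unhandleable.
 */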
static int
realmode_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    enum hvm_access_type access_type,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;

    *val = 0;

    if ( hvm_copy_from_guest_phys(val, addr, bytes) )
    {
        struct vcpu *curr = current;

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        {
            curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
            send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                          0, IOREQ_READ, 0, 0);
        }

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
            return X86EMUL_RETRY;

        *val = curr->arch.hvm_vmx.real_mode_io_data;
        curr->arch.hvm_vmx.real_mode_io_completed = 0;
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    return realmode_read(
        seg, offset, val, bytes, hvm_access_read,
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt));
}

static int
realmode_emulate_insn_fetch(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    unsigned int insn_off = offset - rm_ctxt->insn_buf_eip;

    /* Fall back if requested bytes are not in the prefetch cache. */
    if ( unlikely((insn_off + bytes) > sizeof(rm_ctxt->insn_buf)) )
        return realmode_read(
            seg, offset, val, bytes,
            hvm_access_insn_fetch, rm_ctxt);

    /* Hit the cache. Simple memcpy. */
    *val = 0;
    memcpy(val, &rm_ctxt->insn_buf[insn_off], bytes);
    return X86EMUL_OKAY;
}
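
/*
 * Write to a guest-physical address. MMIO writes are forwarded to the
 * device model without waiting for completion, which is what allows a
 * read followed by a write (e.g. MOVS into video RAM) within a single
 * emulated instruction.
 */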
static int
realmode_emulate_write(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;

    if ( hvm_copy_to_guest_phys(addr, &val, bytes) )
    {
        struct vcpu *curr = current;

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                      val, IOREQ_WRITE, 0, 0);
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_cmpxchg(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long old,
    unsigned long new,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    /* Fix this in case the guest is really relying on r-m-w atomicity. */
    return realmode_emulate_write(seg, offset, new, bytes, ctxt);
}
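
/*
 * REP INS/OUTS are forwarded to the device model as a single PIO request
 * carrying the repeat count, the guest-physical buffer address and the
 * copy direction taken from EFLAGS.DF.
 */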
static int
realmode_rep_ins(
    uint16_t src_port,
    enum x86_segment dst_seg,
    unsigned long dst_offset,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;
    uint32_t paddr = rm_ctxt->seg_reg[dst_seg].base + dst_offset;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(src_port, *reps, bytes_per_rep,
                     paddr, IOREQ_READ,
                     !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);
    }

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        return X86EMUL_RETRY;

    curr->arch.hvm_vmx.real_mode_io_completed = 0;

    return X86EMUL_OKAY;
}

static int
realmode_rep_outs(
    enum x86_segment src_seg,
    unsigned long src_offset,
    uint16_t dst_port,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;
    uint32_t paddr = rm_ctxt->seg_reg[src_seg].base + src_offset;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
    send_pio_req(dst_port, *reps, bytes_per_rep,
                 paddr, IOREQ_WRITE,
                 !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);

    return X86EMUL_OKAY;
}
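
/*
 * Segment registers are read from and written to the cached seg_reg[]
 * array; a write to SS additionally records a MOV-SS event so the
 * interrupt shadow can be updated after the instruction retires.
 */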
static int
realmode_read_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    memcpy(reg, &rm_ctxt->seg_reg[seg], sizeof(struct segment_register));
    return X86EMUL_OKAY;
}

static int
realmode_write_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register));
    if ( seg == x86_seg_ss )
        rm_ctxt->flags.mov_ss = 1;
    return X86EMUL_OKAY;
}
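
/*
 * Port I/O. Reads follow the same send-and-retry protocol as MMIO reads;
 * writes are sent to the device model without waiting, except that port
 * 0xe9 is handled directly as the debug console.
 */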
static int
realmode_read_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(port, 1, bytes, 0, IOREQ_READ, 0, 0);
    }

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        return X86EMUL_RETRY;

    *val = curr->arch.hvm_vmx.real_mode_io_data;
    curr->arch.hvm_vmx.real_mode_io_completed = 0;

    return X86EMUL_OKAY;
}

static int realmode_write_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( port == 0xe9 )
    {
        hvm_print_line(curr, val);
        return X86EMUL_OKAY;
    }

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
    send_pio_req(port, 1, bytes, val, IOREQ_WRITE, 0, 0);

    return X86EMUL_OKAY;
}
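
/*
 * Control registers. Reads return the cached guest_cr[] values for
 * CR0/CR2/CR3/CR4; writes to CR0/CR3/CR4 go through the common
 * hvm_set_cr* handlers, while CR2 is stored directly. Any other
 * control register is unhandleable.
 */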
static int
realmode_read_cr(
    unsigned int reg,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
    case 2:
    case 3:
    case 4:
        *val = current->arch.hvm_vcpu.guest_cr[reg];
        break;
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;
}

static int
realmode_write_cr(
    unsigned int reg,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
        if ( !hvm_set_cr0(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    case 2:
        current->arch.hvm_vcpu.guest_cr[2] = val;
        break;
    case 3:
        if ( !hvm_set_cr3(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    case 4:
        if ( !hvm_set_cr4(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;
}
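
/*
 * Remaining callbacks: an EFLAGS write that sets IF records an STI event
 * for the interrupt shadow, WBINVD and CPUID are passed to the VMX
 * intercept handlers, HLT is latched as a flag, injected exceptions and
 * software interrupts are recorded for delivery once emulation of the
 * current instruction completes, and the lazy FPU load is triggered if
 * the FPU has not been dirtied.
 */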
static int realmode_write_rflags(
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    if ( (val & X86_EFLAGS_IF) && !(ctxt->regs->eflags & X86_EFLAGS_IF) )
        rm_ctxt->flags.sti = 1;
    return X86EMUL_OKAY;
}

static int realmode_wbinvd(
    struct x86_emulate_ctxt *ctxt)
{
    vmx_wbinvd_intercept();
    return X86EMUL_OKAY;
}

static int realmode_cpuid(
    unsigned int *eax,
    unsigned int *ebx,
    unsigned int *ecx,
    unsigned int *edx,
    struct x86_emulate_ctxt *ctxt)
{
    vmx_cpuid_intercept(eax, ebx, ecx, edx);
    return X86EMUL_OKAY;
}

static int realmode_hlt(
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    rm_ctxt->flags.hlt = 1;
    return X86EMUL_OKAY;
}

static int realmode_inject_hw_exception(
    uint8_t vector,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);

    rm_ctxt->exn_vector = vector;
    rm_ctxt->exn_insn_len = 0;

    return X86EMUL_OKAY;
}

static int realmode_inject_sw_interrupt(
    uint8_t vector,
    uint8_t insn_len,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);

    rm_ctxt->exn_vector = vector;
    rm_ctxt->exn_insn_len = insn_len;

    return X86EMUL_OKAY;
}

static void realmode_load_fpu_ctxt(
    struct x86_emulate_ctxt *ctxt)
{
    if ( !current->fpu_dirtied )
        vmx_do_no_device_fault();
}

static struct x86_emulate_ops realmode_emulator_ops = {
    .read          = realmode_emulate_read,
    .insn_fetch    = realmode_emulate_insn_fetch,
    .write         = realmode_emulate_write,
    .cmpxchg       = realmode_emulate_cmpxchg,
    .rep_ins       = realmode_rep_ins,
    .rep_outs      = realmode_rep_outs,
    .read_segment  = realmode_read_segment,
    .write_segment = realmode_write_segment,
    .read_io       = realmode_read_io,
    .write_io      = realmode_write_io,
    .read_cr       = realmode_read_cr,
    .write_cr      = realmode_write_cr,
    .write_rflags  = realmode_write_rflags,
    .wbinvd        = realmode_wbinvd,
    .cpuid         = realmode_cpuid,
    .hlt           = realmode_hlt,
    .inject_hw_exception = realmode_inject_hw_exception,
    .inject_sw_interrupt = realmode_inject_sw_interrupt,
    .load_fpu_ctxt = realmode_load_fpu_ctxt
};
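
/*
 * Emulate a single instruction: refill the 16-byte prefetch cache from
 * CS:IP, run x86_emulate() against the callbacks above, then apply the
 * side effects (MOV-SS/STI interrupt shadow, pending exception delivery,
 * HLT). At most one I/O read may be sent to the device model per
 * instruction; a RETRY after the first completion, or a read still in
 * flight when the instruction retires, is treated as a failure and
 * crashes the domain.
 */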
static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
{
    struct cpu_user_regs *regs = rm_ctxt->ctxt.regs;
    struct vcpu *curr = current;
    u32 new_intr_shadow;
    int rc, io_completed;

    rm_ctxt->insn_buf_eip = regs->eip;
    (void)hvm_copy_from_guest_phys(
        rm_ctxt->insn_buf,
        (uint32_t)(rm_ctxt->seg_reg[x86_seg_cs].base + regs->eip),
        sizeof(rm_ctxt->insn_buf));

    rm_ctxt->flag_word = 0;

    io_completed = curr->arch.hvm_vmx.real_mode_io_completed;
    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
    {
        gdprintk(XENLOG_ERR, "I/O in progress before insn is emulated.\n");
        goto fail;
    }

    rc = x86_emulate(&rm_ctxt->ctxt, &realmode_emulator_ops);

    if ( curr->arch.hvm_vmx.real_mode_io_completed )
    {
        gdprintk(XENLOG_ERR, "I/O completion after insn is emulated.\n");
        goto fail;
    }

    if ( rc == X86EMUL_UNHANDLEABLE )
    {
        gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
        goto fail;
    }

    if ( rc == X86EMUL_RETRY )
    {
        BUG_ON(!curr->arch.hvm_vmx.real_mode_io_in_progress);
        if ( !io_completed )
            return;
        gdprintk(XENLOG_ERR, "Multiple I/O reads in a single insn.\n");
        goto fail;
    }

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress &&
         (get_ioreq(curr)->vp_ioreq.dir == IOREQ_READ) )
    {
        gdprintk(XENLOG_ERR, "I/O read in progress but insn is retired.\n");
        goto fail;
    }

    new_intr_shadow = rm_ctxt->intr_shadow;

    /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
    if ( rm_ctxt->flags.mov_ss )
        new_intr_shadow ^= VMX_INTR_SHADOW_MOV_SS;
    else
        new_intr_shadow &= ~VMX_INTR_SHADOW_MOV_SS;

    /* STI instruction toggles STI shadow, else we just clear it. */
    if ( rm_ctxt->flags.sti )
        new_intr_shadow ^= VMX_INTR_SHADOW_STI;
    else
        new_intr_shadow &= ~VMX_INTR_SHADOW_STI;

    /* Update interrupt shadow information in VMCS only if it changes. */
    if ( rm_ctxt->intr_shadow != new_intr_shadow )
    {
        rm_ctxt->intr_shadow = new_intr_shadow;
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
    }

    if ( rc == X86EMUL_EXCEPTION )
    {
        realmode_deliver_exception(
            rm_ctxt->exn_vector, rm_ctxt->exn_insn_len, rm_ctxt);
    }
    else if ( rm_ctxt->flags.hlt && !hvm_local_events_need_delivery(curr) )
    {
        hvm_hlt(regs->eflags);
    }

    return;

 fail:
    gdprintk(XENLOG_ERR,
             "Real-mode emulation failed @ %04x:%08lx: "
             "%02x %02x %02x %02x %02x %02x\n",
             rm_ctxt->seg_reg[x86_seg_cs].sel, rm_ctxt->insn_buf_eip,
             rm_ctxt->insn_buf[0], rm_ctxt->insn_buf[1],
             rm_ctxt->insn_buf[2], rm_ctxt->insn_buf[3],
             rm_ctxt->insn_buf[4], rm_ctxt->insn_buf[5]);
    domain_crash_synchronous();
}
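
/*
 * Main real-mode emulation loop, entered while CR0.PE is clear. Guest
 * segment state is snapshotted into the context, any pending VM-entry
 * event is redirected through realmode_deliver_exception(), and
 * instructions are emulated until the guest enables protected mode, a
 * softirq or local event needs servicing, or an I/O request is
 * outstanding. The segment registers are then written back with RPL/DPL
 * fixed up so that the state is valid for a protected-mode VM entry.
 */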
void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct realmode_emulate_ctxt rm_ctxt;
    unsigned long intr_info;
    int i;

    rm_ctxt.ctxt.regs = regs;

    for ( i = 0; i < 10; i++ )
        hvm_get_segment_register(curr, i, &rm_ctxt.seg_reg[i]);

    rm_ctxt.ctxt.addr_size =
        rm_ctxt.seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
    rm_ctxt.ctxt.sp_size =
        rm_ctxt.seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;

    rm_ctxt.intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress ||
         curr->arch.hvm_vmx.real_mode_io_completed )
        realmode_emulate_one(&rm_ctxt);

    intr_info = __vmread(VM_ENTRY_INTR_INFO);
    if ( intr_info & INTR_INFO_VALID_MASK )
    {
        realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
    }

    while ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
            !softirq_pending(smp_processor_id()) &&
            !hvm_local_events_need_delivery(curr) &&
            !curr->arch.hvm_vmx.real_mode_io_in_progress )
        realmode_emulate_one(&rm_ctxt);

    /*
     * Cannot enter protected mode with bogus selector RPLs and DPLs. Hence we
     * fix up as best we can, even though this deviates from native execution
     */
    if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
    {
        /* CS.RPL == SS.RPL == SS.DPL == 0. */
        rm_ctxt.seg_reg[x86_seg_cs].sel &= ~3;
        rm_ctxt.seg_reg[x86_seg_ss].sel &= ~3;
        /* DS,ES,FS,GS: The most uninvasive trick is to set DPL == RPL. */
        rm_ctxt.seg_reg[x86_seg_ds].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_ds].sel & 3;
        rm_ctxt.seg_reg[x86_seg_es].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_es].sel & 3;
        rm_ctxt.seg_reg[x86_seg_fs].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_fs].sel & 3;
        rm_ctxt.seg_reg[x86_seg_gs].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_gs].sel & 3;
    }

    for ( i = 0; i < 10; i++ )
        hvm_set_segment_register(curr, i, &rm_ctxt.seg_reg[i]);
}
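
/*
 * Called when the device model completes an I/O request issued by the
 * real-mode emulator: clears the in-progress flag and, for reads,
 * latches the returned data for the next emulation attempt.
 */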
int vmx_realmode_io_complete(void)
{
    struct vcpu *curr = current;
    ioreq_t *p = &get_ioreq(curr)->vp_ioreq;

    if ( !curr->arch.hvm_vmx.real_mode_io_in_progress )
        return 0;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 0;
    if ( p->dir == IOREQ_READ )
    {
        curr->arch.hvm_vmx.real_mode_io_completed = 1;
        curr->arch.hvm_vmx.real_mode_io_data = p->data;
    }

    return 1;
}