xen/arch/x86/hvm/vmx/realmode.c @ 16993:af5d189df051

vmx realmode: Exception delivery clears interrupt shadow.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

author:   Keir Fraser <keir.fraser@citrix.com>
date:     Thu Jan 31 13:01:08 2008 +0000 (2008-01-31)
parents:  938446025b5b
children: 92734271810a
/******************************************************************************
 * arch/x86/hvm/vmx/realmode.c
 *
 * Real-mode emulation for VMX.
 *
 * Copyright (c) 2007 Citrix Systems, Inc.
 *
 * Authors:
 *    Keir Fraser <keir.fraser@citrix.com>
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/event.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/hvm/vmx/cpu.h>
#include <asm/x86_emulate.h>

struct realmode_emulate_ctxt {
    struct x86_emulate_ctxt ctxt;

    /* Cache of 16 bytes of instruction. */
    uint8_t insn_buf[16];
    unsigned long insn_buf_eip;

    struct segment_register seg_reg[10];

    union {
        struct {
            unsigned int hlt:1;
            unsigned int mov_ss:1;
            unsigned int sti:1;
        } flags;
        unsigned int flag_word;
    };

    uint8_t exn_vector;
    uint8_t exn_insn_len;

    uint32_t intr_shadow;
};

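/*
 * Emulate real-mode interrupt/exception delivery: fetch the handler CS:IP
 * from the IVT at idtr->base, push FLAGS, CS and IP on the guest stack, load
 * the new CS:IP, and clear TF/IF/RF along with any STI/MOV-SS interrupt
 * shadow. Vectors beyond the IDT limit escalate to #GP, then to double
 * fault, and finally to a triple fault.
 */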
static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    struct segment_register *idtr = &rm_ctxt->seg_reg[x86_seg_idtr];
    struct segment_register *csr = &rm_ctxt->seg_reg[x86_seg_cs];
    struct cpu_user_regs *regs = rm_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

 again:
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte )
    {
        /* Software interrupt? */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = TRAP_gp_fault;
            goto again;
        }

        /* Exception or hardware interrupt. */
        switch ( vector )
        {
        case TRAP_double_fault:
            hvm_triple_fault();
            return;
        case TRAP_gp_fault:
            vector = TRAP_double_fault;
            goto again;
        default:
            vector = TRAP_gp_fault;
            goto again;
        }
    }

    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);

    frame[0] = regs->eip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->eflags & ~X86_EFLAGS_RF;

    if ( rm_ctxt->ctxt.addr_size == 32 )
    {
        regs->esp -= 6;
        pstk = regs->esp;
    }
    else
    {
        pstk = (uint16_t)(regs->esp - 6);
        regs->esp &= ~0xffff;
        regs->esp |= pstk;
    }

    pstk += rm_ctxt->seg_reg[x86_seg_ss].base;
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));

    csr->sel  = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4;
    regs->eip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);

    /* Exception delivery clears STI and MOV-SS blocking. */
    if ( rm_ctxt->intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    {
        rm_ctxt->intr_shadow &= ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
    }
}

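/*
 * Read from a guest-physical address computed as segment base + offset. If
 * the address is not plain RAM, the access is forwarded to the device model
 * as an MMIO read and the emulator is asked to retry until the ioreq
 * completes.
 */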
static int
realmode_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    enum hvm_access_type access_type,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;

    *val = 0;

    if ( hvm_copy_from_guest_phys(val, addr, bytes) )
    {
        struct vcpu *curr = current;

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        {
            curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
            send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                          0, IOREQ_READ, 0, 0);
        }

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
            return X86EMUL_RETRY;

        *val = curr->arch.hvm_vmx.real_mode_io_data;
        curr->arch.hvm_vmx.real_mode_io_completed = 0;
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    return realmode_read(
        seg, offset, val, bytes, hvm_access_read,
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt));
}

static int
realmode_emulate_insn_fetch(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    unsigned int insn_off = offset - rm_ctxt->insn_buf_eip;

    /* Fall back if requested bytes are not in the prefetch cache. */
    if ( unlikely((insn_off + bytes) > sizeof(rm_ctxt->insn_buf)) )
        return realmode_read(
            seg, offset, val, bytes,
            hvm_access_insn_fetch, rm_ctxt);

    /* Hit the cache. Simple memcpy. */
    *val = 0;
    memcpy(val, &rm_ctxt->insn_buf[insn_off], bytes);
    return X86EMUL_OKAY;
}

static int
realmode_emulate_write(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;

    if ( hvm_copy_to_guest_phys(addr, &val, bytes) )
    {
        struct vcpu *curr = current;

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                      val, IOREQ_WRITE, 0, 0);
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_cmpxchg(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long old,
    unsigned long new,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    /* Fix this in case the guest is really relying on r-m-w atomicity. */
    return realmode_emulate_write(seg, offset, new, bytes, ctxt);
}

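/*
 * REP INS/OUTS are forwarded to the device model as a single multi-rep PIO
 * request. The read direction must wait (via X86EMUL_RETRY) for the ioreq to
 * complete before the instruction can be retired; writes complete
 * asynchronously.
 */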
static int
realmode_rep_ins(
    uint16_t src_port,
    enum x86_segment dst_seg,
    unsigned long dst_offset,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;
    uint32_t paddr = rm_ctxt->seg_reg[dst_seg].base + dst_offset;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(src_port, *reps, bytes_per_rep,
                     paddr, IOREQ_READ,
                     !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);
    }

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        return X86EMUL_RETRY;

    curr->arch.hvm_vmx.real_mode_io_completed = 0;

    return X86EMUL_OKAY;
}

static int
realmode_rep_outs(
    enum x86_segment src_seg,
    unsigned long src_offset,
    uint16_t dst_port,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;
    uint32_t paddr = rm_ctxt->seg_reg[src_seg].base + src_offset;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
    send_pio_req(dst_port, *reps, bytes_per_rep,
                 paddr, IOREQ_WRITE,
                 !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);

    return X86EMUL_OKAY;
}

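/*
 * Segment registers are cached in rm_ctxt->seg_reg[] for the duration of the
 * emulation. Writing SS also records the MOV-SS flag so the interrupt shadow
 * can be updated after the instruction retires.
 */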
static int
realmode_read_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    memcpy(reg, &rm_ctxt->seg_reg[seg], sizeof(struct segment_register));
    return X86EMUL_OKAY;
}

static int
realmode_write_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register));
    if ( seg == x86_seg_ss )
        rm_ctxt->flags.mov_ss = 1;
    return X86EMUL_OKAY;
}

static int
realmode_read_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(port, 1, bytes, 0, IOREQ_READ, 0, 0);
    }

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        return X86EMUL_RETRY;

    *val = curr->arch.hvm_vmx.real_mode_io_data;
    curr->arch.hvm_vmx.real_mode_io_completed = 0;

    return X86EMUL_OKAY;
}

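/*
 * Port writes go to the device model, except port 0xe9 (the conventional
 * guest debug-output port), which is logged directly via hvm_print_line().
 */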
static int realmode_write_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( port == 0xe9 )
    {
        hvm_print_line(curr, val);
        return X86EMUL_OKAY;
    }

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
    send_pio_req(port, 1, bytes, val, IOREQ_WRITE, 0, 0);

    return X86EMUL_OKAY;
}

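/*
 * Only CR0, CR2, CR3 and CR4 are handled; accesses to any other control
 * register are rejected as unhandleable.
 */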
static int
realmode_read_cr(
    unsigned int reg,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
    case 2:
    case 3:
    case 4:
        *val = current->arch.hvm_vcpu.guest_cr[reg];
        break;
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;
}

static int
realmode_write_cr(
    unsigned int reg,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
        if ( !hvm_set_cr0(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    case 2:
        current->arch.hvm_vcpu.guest_cr[2] = val;
        break;
    case 3:
        if ( !hvm_set_cr3(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    case 4:
        if ( !hvm_set_cr4(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;
}

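/*
 * MSR accesses are bounced through the usual VMX intercept handlers. On
 * failure the intercept has queued a fault in VM_ENTRY_INTR_INFO; the vector
 * is pulled out of that field and reported to the emulator as an exception,
 * so it is delivered via real-mode exception delivery instead.
 */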
static int
realmode_read_msr(
    unsigned long reg,
    uint64_t *val,
    struct x86_emulate_ctxt *ctxt)
{
    struct cpu_user_regs _regs;

    _regs.ecx = (uint32_t)reg;

    if ( !vmx_msr_read_intercept(&_regs) )
    {
        struct realmode_emulate_ctxt *rm_ctxt =
            container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
        rm_ctxt->exn_vector = (uint8_t)__vmread(VM_ENTRY_INTR_INFO);
        rm_ctxt->exn_insn_len = 0;
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
        return X86EMUL_EXCEPTION;
    }

    *val = ((uint64_t)(uint32_t)_regs.edx << 32) | (uint32_t)_regs.eax;
    return X86EMUL_OKAY;
}

static int
realmode_write_msr(
    unsigned long reg,
    uint64_t val,
    struct x86_emulate_ctxt *ctxt)
{
    struct cpu_user_regs _regs;

    _regs.edx = (uint32_t)(val >> 32);
    _regs.eax = (uint32_t)val;
    _regs.ecx = (uint32_t)reg;

    if ( !vmx_msr_write_intercept(&_regs) )
    {
        struct realmode_emulate_ctxt *rm_ctxt =
            container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
        rm_ctxt->exn_vector = (uint8_t)__vmread(VM_ENTRY_INTR_INFO);
        rm_ctxt->exn_insn_len = 0;
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
        return X86EMUL_EXCEPTION;
    }

    return X86EMUL_OKAY;
}

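/*
 * An instruction that sets EFLAGS.IF while it was previously clear (e.g. STI
 * or POPF) is recorded so that the STI interrupt shadow can be updated once
 * the instruction retires.
 */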
static int realmode_write_rflags(
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    if ( (val & X86_EFLAGS_IF) && !(ctxt->regs->eflags & X86_EFLAGS_IF) )
        rm_ctxt->flags.sti = 1;
    return X86EMUL_OKAY;
}

static int realmode_wbinvd(
    struct x86_emulate_ctxt *ctxt)
{
    vmx_wbinvd_intercept();
    return X86EMUL_OKAY;
}

static int realmode_cpuid(
    unsigned int *eax,
    unsigned int *ebx,
    unsigned int *ecx,
    unsigned int *edx,
    struct x86_emulate_ctxt *ctxt)
{
    vmx_cpuid_intercept(eax, ebx, ecx, edx);
    return X86EMUL_OKAY;
}

static int realmode_hlt(
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    rm_ctxt->flags.hlt = 1;
    return X86EMUL_OKAY;
}

static int realmode_inject_hw_exception(
    uint8_t vector,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);

    rm_ctxt->exn_vector = vector;
    rm_ctxt->exn_insn_len = 0;

    return X86EMUL_OKAY;
}

static int realmode_inject_sw_interrupt(
    uint8_t vector,
    uint8_t insn_len,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);

    rm_ctxt->exn_vector = vector;
    rm_ctxt->exn_insn_len = insn_len;

    return X86EMUL_OKAY;
}

static void realmode_load_fpu_ctxt(
    struct x86_emulate_ctxt *ctxt)
{
    if ( !current->fpu_dirtied )
        vmx_do_no_device_fault();
}

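/* Callback table handed to x86_emulate() for real-mode instructions. */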
static struct x86_emulate_ops realmode_emulator_ops = {
    .read = realmode_emulate_read,
    .insn_fetch = realmode_emulate_insn_fetch,
    .write = realmode_emulate_write,
    .cmpxchg = realmode_emulate_cmpxchg,
    .rep_ins = realmode_rep_ins,
    .rep_outs = realmode_rep_outs,
    .read_segment = realmode_read_segment,
    .write_segment = realmode_write_segment,
    .read_io = realmode_read_io,
    .write_io = realmode_write_io,
    .read_cr = realmode_read_cr,
    .write_cr = realmode_write_cr,
    .read_msr = realmode_read_msr,
    .write_msr = realmode_write_msr,
    .write_rflags = realmode_write_rflags,
    .wbinvd = realmode_wbinvd,
    .cpuid = realmode_cpuid,
    .hlt = realmode_hlt,
    .inject_hw_exception = realmode_inject_hw_exception,
    .inject_sw_interrupt = realmode_inject_sw_interrupt,
    .load_fpu_ctxt = realmode_load_fpu_ctxt
};

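/*
 * Emulate a single instruction: fetch up to 16 bytes at CS:IP into the
 * prefetch cache, run x86_emulate() once, propagate any STI/MOV-SS shadow
 * change to the VMCS, then deliver a pending exception or handle HLT.
 * Inconsistent I/O state or an unhandleable instruction crashes the domain.
 */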
static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
{
    struct cpu_user_regs *regs = rm_ctxt->ctxt.regs;
    struct vcpu *curr = current;
    u32 new_intr_shadow;
    int rc, io_completed;

    rm_ctxt->insn_buf_eip = regs->eip;
    (void)hvm_copy_from_guest_phys(
        rm_ctxt->insn_buf,
        (uint32_t)(rm_ctxt->seg_reg[x86_seg_cs].base + regs->eip),
        sizeof(rm_ctxt->insn_buf));

    rm_ctxt->flag_word = 0;

    io_completed = curr->arch.hvm_vmx.real_mode_io_completed;
    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
    {
        gdprintk(XENLOG_ERR, "I/O in progress before insn is emulated.\n");
        goto fail;
    }

    rc = x86_emulate(&rm_ctxt->ctxt, &realmode_emulator_ops);

    if ( curr->arch.hvm_vmx.real_mode_io_completed )
    {
        gdprintk(XENLOG_ERR, "I/O completion after insn is emulated.\n");
        goto fail;
    }

    if ( rc == X86EMUL_UNHANDLEABLE )
    {
        gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
        goto fail;
    }

    if ( rc == X86EMUL_RETRY )
    {
        BUG_ON(!curr->arch.hvm_vmx.real_mode_io_in_progress);
        if ( !io_completed )
            return;
        gdprintk(XENLOG_ERR, "Multiple I/O reads in a single insn.\n");
        goto fail;
    }

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress &&
         (get_ioreq(curr)->vp_ioreq.dir == IOREQ_READ) )
    {
        gdprintk(XENLOG_ERR, "I/O read in progress but insn is retired.\n");
        goto fail;
    }

    new_intr_shadow = rm_ctxt->intr_shadow;

    /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
    if ( rm_ctxt->flags.mov_ss )
        new_intr_shadow ^= VMX_INTR_SHADOW_MOV_SS;
    else
        new_intr_shadow &= ~VMX_INTR_SHADOW_MOV_SS;

    /* STI instruction toggles STI shadow, else we just clear it. */
    if ( rm_ctxt->flags.sti )
        new_intr_shadow ^= VMX_INTR_SHADOW_STI;
    else
        new_intr_shadow &= ~VMX_INTR_SHADOW_STI;

    /* Update interrupt shadow information in VMCS only if it changes. */
    if ( rm_ctxt->intr_shadow != new_intr_shadow )
    {
        rm_ctxt->intr_shadow = new_intr_shadow;
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
    }

    if ( rc == X86EMUL_EXCEPTION )
    {
        realmode_deliver_exception(
            rm_ctxt->exn_vector, rm_ctxt->exn_insn_len, rm_ctxt);
    }
    else if ( rm_ctxt->flags.hlt && !hvm_local_events_need_delivery(curr) )
    {
        hvm_hlt(regs->eflags);
    }

    return;

 fail:
    gdprintk(XENLOG_ERR,
             "Real-mode emulation failed @ %04x:%08lx: "
             "%02x %02x %02x %02x %02x %02x\n",
             rm_ctxt->seg_reg[x86_seg_cs].sel, rm_ctxt->insn_buf_eip,
             rm_ctxt->insn_buf[0], rm_ctxt->insn_buf[1],
             rm_ctxt->insn_buf[2], rm_ctxt->insn_buf[3],
             rm_ctxt->insn_buf[4], rm_ctxt->insn_buf[5]);
    domain_crash_synchronous();
}

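/*
 * Main real-mode entry point from the VMX exit handler: emulate instructions
 * while the guest remains in real mode (CR0.PE clear), stopping when a
 * softirq, a pending event, or outstanding I/O requires a return to the
 * caller. If the guest has set CR0.PE, segment selector RPLs and DPLs are
 * sanitised so VMX can enter protected mode with the current values.
 */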
void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct realmode_emulate_ctxt rm_ctxt;
    unsigned long intr_info = __vmread(VM_ENTRY_INTR_INFO);
    int i;

    rm_ctxt.ctxt.regs = regs;

    for ( i = 0; i < 10; i++ )
        hvm_get_segment_register(curr, i, &rm_ctxt.seg_reg[i]);

    rm_ctxt.ctxt.addr_size =
        rm_ctxt.seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
    rm_ctxt.ctxt.sp_size =
        rm_ctxt.seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;

    rm_ctxt.intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress ||
         curr->arch.hvm_vmx.real_mode_io_completed )
        realmode_emulate_one(&rm_ctxt);

    if ( intr_info & INTR_INFO_VALID_MASK )
    {
        realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
    }

    while ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
            !softirq_pending(smp_processor_id()) &&
            !hvm_local_events_need_delivery(curr) &&
            !curr->arch.hvm_vmx.real_mode_io_in_progress )
        realmode_emulate_one(&rm_ctxt);

    /*
     * Cannot enter protected mode with bogus selector RPLs and DPLs. Hence we
     * fix up as best we can, even though this deviates from native execution.
     */
    if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
    {
        /* CS.RPL == SS.RPL == SS.DPL == 0. */
        rm_ctxt.seg_reg[x86_seg_cs].sel &= ~3;
        rm_ctxt.seg_reg[x86_seg_ss].sel &= ~3;
        /* DS,ES,FS,GS: The least invasive trick is to set DPL == RPL. */
        rm_ctxt.seg_reg[x86_seg_ds].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_ds].sel & 3;
        rm_ctxt.seg_reg[x86_seg_es].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_es].sel & 3;
        rm_ctxt.seg_reg[x86_seg_fs].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_fs].sel & 3;
        rm_ctxt.seg_reg[x86_seg_gs].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_gs].sel & 3;
    }

    for ( i = 0; i < 10; i++ )
        hvm_set_segment_register(curr, i, &rm_ctxt.seg_reg[i]);
}

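/*
 * Called when the device model completes an ioreq for this vcpu while it is
 * in real-mode emulation. Returns nonzero if the completion was consumed
 * here; data from a read is stashed for the next emulation pass.
 */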
int vmx_realmode_io_complete(void)
{
    struct vcpu *curr = current;
    ioreq_t *p = &get_ioreq(curr)->vp_ioreq;

    if ( !curr->arch.hvm_vmx.real_mode_io_in_progress )
        return 0;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 0;
    if ( p->dir == IOREQ_READ )
    {
        curr->arch.hvm_vmx.real_mode_io_completed = 1;
        curr->arch.hvm_vmx.real_mode_io_data = p->data;
    }

    return 1;
}