xen/arch/x86/hvm/vmx/realmode.c @ 16987:0d70e01c0012

vmx realmode: Emulate MSR accesses.
Signed-off-by: Nitin A Kamble <nitin.a.kamble@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jan 31 09:33:26 2008 +0000 (2008-01-31)
parents ed2ca78286a8
children 938446025b5b

/******************************************************************************
 * arch/x86/hvm/vmx/realmode.c
 *
 * Real-mode emulation for VMX.
 *
 * Copyright (c) 2007 Citrix Systems, Inc.
 *
 * Authors:
 *    Keir Fraser <keir.fraser@citrix.com>
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/event.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/hvm/vmx/cpu.h>
#include <asm/x86_emulate.h>

struct realmode_emulate_ctxt {
    struct x86_emulate_ctxt ctxt;

    /* Cache of 16 bytes of instruction. */
    uint8_t insn_buf[16];
    unsigned long insn_buf_eip;

    struct segment_register seg_reg[10];

    union {
        struct {
            unsigned int hlt:1;
            unsigned int mov_ss:1;
            unsigned int sti:1;
        } flags;
        unsigned int flag_word;
    };

    uint8_t exn_vector;
    uint8_t exn_insn_len;

    uint32_t intr_shadow;
};
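
/*
 * Deliver a real-mode interrupt/exception: look up the handler in the IVT at
 * IDTR.base, push FLAGS, CS and the return IP onto the guest stack, then load
 * CS:IP from the vector and clear TF/IF/RF. Vectors whose IVT entry lies
 * beyond IDTR.limit are escalated: a software interrupt becomes #GP, #GP
 * becomes a double fault, and a double fault becomes a triple fault.
 */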

static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    struct segment_register *idtr = &rm_ctxt->seg_reg[x86_seg_idtr];
    struct segment_register *csr = &rm_ctxt->seg_reg[x86_seg_cs];
    struct cpu_user_regs *regs = rm_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

 again:
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte )
    {
        /* Software interrupt? */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = TRAP_gp_fault;
            goto again;
        }

        /* Exception or hardware interrupt. */
        switch ( vector )
        {
        case TRAP_double_fault:
            hvm_triple_fault();
            return;
        case TRAP_gp_fault:
            vector = TRAP_double_fault;
            goto again;
        default:
            vector = TRAP_gp_fault;
            goto again;
        }
    }

    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);

    frame[0] = regs->eip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->eflags & ~X86_EFLAGS_RF;

    if ( rm_ctxt->ctxt.addr_size == 32 )
    {
        regs->esp -= 6;
        pstk = regs->esp;
    }
    else
    {
        pstk = (uint16_t)(regs->esp - 6);
        regs->esp &= ~0xffff;
        regs->esp |= pstk;
    }

    pstk += rm_ctxt->seg_reg[x86_seg_ss].base;
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));

    csr->sel = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4;
    regs->eip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);
}
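
/*
 * Read from guest-physical memory. If the address is not ordinary RAM
 * (hvm_copy_from_guest_phys() fails), the access is forwarded to the device
 * model as an MMIO read; the emulator then returns RETRY until the I/O
 * completion path supplies the data via real_mode_io_data.
 */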

static int
realmode_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    enum hvm_access_type access_type,
    struct realmode_emulate_ctxt *rm_ctxt)
{
    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;

    *val = 0;

    if ( hvm_copy_from_guest_phys(val, addr, bytes) )
    {
        struct vcpu *curr = current;

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        {
            curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
            send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                          0, IOREQ_READ, 0, 0);
        }

        if ( !curr->arch.hvm_vmx.real_mode_io_completed )
            return X86EMUL_RETRY;

        *val = curr->arch.hvm_vmx.real_mode_io_data;
        curr->arch.hvm_vmx.real_mode_io_completed = 0;
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_read(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    return realmode_read(
        seg, offset, val, bytes, hvm_access_read,
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt));
}

static int
realmode_emulate_insn_fetch(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long *val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    unsigned int insn_off = offset - rm_ctxt->insn_buf_eip;

    /* Fall back if requested bytes are not in the prefetch cache. */
    if ( unlikely((insn_off + bytes) > sizeof(rm_ctxt->insn_buf)) )
        return realmode_read(
            seg, offset, val, bytes,
            hvm_access_insn_fetch, rm_ctxt);

    /* Hit the cache. Simple memcpy. */
    *val = 0;
    memcpy(val, &rm_ctxt->insn_buf[insn_off], bytes);
    return X86EMUL_OKAY;
}
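
/*
 * Write to guest-physical memory. MMIO writes are posted to the device model
 * and, unlike reads, are not waited on: the emulator retires the instruction
 * while the write completes asynchronously.
 */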

static int
realmode_emulate_write(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long val,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    uint32_t addr = rm_ctxt->seg_reg[seg].base + offset;

    if ( hvm_copy_to_guest_phys(addr, &val, bytes) )
    {
        struct vcpu *curr = current;

        if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
            return X86EMUL_UNHANDLEABLE;

        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_mmio_req(IOREQ_TYPE_COPY, addr, 1, bytes,
                      val, IOREQ_WRITE, 0, 0);
    }

    return X86EMUL_OKAY;
}

static int
realmode_emulate_cmpxchg(
    enum x86_segment seg,
    unsigned long offset,
    unsigned long old,
    unsigned long new,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt)
{
    /* Fix this in case the guest is really relying on r-m-w atomicity. */
    return realmode_emulate_write(seg, offset, new, bytes, ctxt);
}
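
/*
 * REP INS/OUTS are handed to the device model as a single multi-rep PIO
 * request rather than being emulated one repetition at a time. Only one
 * real-mode I/O request may be outstanding per vcpu.
 */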

static int
realmode_rep_ins(
    uint16_t src_port,
    enum x86_segment dst_seg,
    unsigned long dst_offset,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;
    uint32_t paddr = rm_ctxt->seg_reg[dst_seg].base + dst_offset;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(src_port, *reps, bytes_per_rep,
                     paddr, IOREQ_READ,
                     !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);
    }

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        return X86EMUL_RETRY;

    curr->arch.hvm_vmx.real_mode_io_completed = 0;

    return X86EMUL_OKAY;
}

static int
realmode_rep_outs(
    enum x86_segment src_seg,
    unsigned long src_offset,
    uint16_t dst_port,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    struct vcpu *curr = current;
    uint32_t paddr = rm_ctxt->seg_reg[src_seg].base + src_offset;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
    send_pio_req(dst_port, *reps, bytes_per_rep,
                 paddr, IOREQ_WRITE,
                 !!(ctxt->regs->eflags & X86_EFLAGS_DF), 1);

    return X86EMUL_OKAY;
}
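
/*
 * Segment registers are read from and written to the local seg_reg[] cache;
 * vmx_realmode() fills the cache on entry and writes it back with
 * hvm_set_segment_register() before returning. A write to SS additionally
 * flags a MOV-SS interrupt shadow.
 */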

static int
realmode_read_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    memcpy(reg, &rm_ctxt->seg_reg[seg], sizeof(struct segment_register));
    return X86EMUL_OKAY;
}

static int
realmode_write_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    memcpy(&rm_ctxt->seg_reg[seg], reg, sizeof(struct segment_register));
    if ( seg == x86_seg_ss )
        rm_ctxt->flags.mov_ss = 1;
    return X86EMUL_OKAY;
}

static int
realmode_read_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
    {
        curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
        send_pio_req(port, 1, bytes, 0, IOREQ_READ, 0, 0);
    }

    if ( !curr->arch.hvm_vmx.real_mode_io_completed )
        return X86EMUL_RETRY;

    *val = curr->arch.hvm_vmx.real_mode_io_data;
    curr->arch.hvm_vmx.real_mode_io_completed = 0;

    return X86EMUL_OKAY;
}
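
/*
 * Port I/O writes. Port 0xe9 is treated as the guest debug output port and
 * routed straight to hvm_print_line(); everything else is forwarded to the
 * device model.
 */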

static int realmode_write_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    if ( port == 0xe9 )
    {
        hvm_print_line(curr, val);
        return X86EMUL_OKAY;
    }

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
        return X86EMUL_UNHANDLEABLE;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 1;
    send_pio_req(port, 1, bytes, val, IOREQ_WRITE, 0, 0);

    return X86EMUL_OKAY;
}

static int
realmode_read_cr(
    unsigned int reg,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
    case 2:
    case 3:
    case 4:
        *val = current->arch.hvm_vcpu.guest_cr[reg];
        break;
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;
}

static int
realmode_write_cr(
    unsigned int reg,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
        if ( !hvm_set_cr0(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    case 2:
        current->arch.hvm_vcpu.guest_cr[2] = val;
        break;
    case 3:
        if ( !hvm_set_cr3(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    case 4:
        if ( !hvm_set_cr4(val) )
            return X86EMUL_UNHANDLEABLE;
        break;
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;
}
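
/*
 * MSR accesses are emulated by reusing the regular VMX RDMSR/WRMSR intercept
 * handlers on a temporary register block. If the intercept fails it has
 * already queued an exception (typically #GP) in VM_ENTRY_INTR_INFO; that
 * injection is cancelled here and re-raised through the emulator instead.
 */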

static int
realmode_read_msr(
    unsigned long reg,
    uint64_t *val,
    struct x86_emulate_ctxt *ctxt)
{
    struct cpu_user_regs _regs = { .ecx = (uint32_t)reg };

    if ( !vmx_msr_read_intercept(&_regs) )
    {
        struct realmode_emulate_ctxt *rm_ctxt =
            container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
        rm_ctxt->exn_vector = (uint8_t)__vmread(VM_ENTRY_INTR_INFO);
        rm_ctxt->exn_insn_len = 0;
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
        return X86EMUL_EXCEPTION;
    }

    *val = ((uint64_t)(uint32_t)_regs.edx << 32) | (uint32_t)_regs.eax;
    return X86EMUL_OKAY;
}

static int
realmode_write_msr(
    unsigned long reg,
    uint64_t val,
    struct x86_emulate_ctxt *ctxt)
{
    struct cpu_user_regs _regs = {
        .edx = (uint32_t)(val >> 32),
        .eax = (uint32_t)val,
        .ecx = (uint32_t)reg };

    if ( !vmx_msr_write_intercept(&_regs) )
    {
        struct realmode_emulate_ctxt *rm_ctxt =
            container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
        rm_ctxt->exn_vector = (uint8_t)__vmread(VM_ENTRY_INTR_INFO);
        rm_ctxt->exn_insn_len = 0;
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
        return X86EMUL_EXCEPTION;
    }

    return X86EMUL_OKAY;
}
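
/*
 * A write to RFLAGS that sets IF while it was previously clear is an STI;
 * record it so the STI interrupt shadow can be applied after the instruction
 * retires.
 */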

static int realmode_write_rflags(
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    if ( (val & X86_EFLAGS_IF) && !(ctxt->regs->eflags & X86_EFLAGS_IF) )
        rm_ctxt->flags.sti = 1;
    return X86EMUL_OKAY;
}

static int realmode_wbinvd(
    struct x86_emulate_ctxt *ctxt)
{
    vmx_wbinvd_intercept();
    return X86EMUL_OKAY;
}

static int realmode_cpuid(
    unsigned int *eax,
    unsigned int *ebx,
    unsigned int *ecx,
    unsigned int *edx,
    struct x86_emulate_ctxt *ctxt)
{
    vmx_cpuid_intercept(eax, ebx, ecx, edx);
    return X86EMUL_OKAY;
}

static int realmode_hlt(
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);
    rm_ctxt->flags.hlt = 1;
    return X86EMUL_OKAY;
}

static int realmode_inject_hw_exception(
    uint8_t vector,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);

    rm_ctxt->exn_vector = vector;
    rm_ctxt->exn_insn_len = 0;

    return X86EMUL_OKAY;
}

static int realmode_inject_sw_interrupt(
    uint8_t vector,
    uint8_t insn_len,
    struct x86_emulate_ctxt *ctxt)
{
    struct realmode_emulate_ctxt *rm_ctxt =
        container_of(ctxt, struct realmode_emulate_ctxt, ctxt);

    rm_ctxt->exn_vector = vector;
    rm_ctxt->exn_insn_len = insn_len;

    return X86EMUL_OKAY;
}

static void realmode_load_fpu_ctxt(
    struct x86_emulate_ctxt *ctxt)
{
    if ( !current->fpu_dirtied )
        vmx_do_no_device_fault();
}

static struct x86_emulate_ops realmode_emulator_ops = {
    .read = realmode_emulate_read,
    .insn_fetch = realmode_emulate_insn_fetch,
    .write = realmode_emulate_write,
    .cmpxchg = realmode_emulate_cmpxchg,
    .rep_ins = realmode_rep_ins,
    .rep_outs = realmode_rep_outs,
    .read_segment = realmode_read_segment,
    .write_segment = realmode_write_segment,
    .read_io = realmode_read_io,
    .write_io = realmode_write_io,
    .read_cr = realmode_read_cr,
    .write_cr = realmode_write_cr,
    .read_msr = realmode_read_msr,
    .write_msr = realmode_write_msr,
    .write_rflags = realmode_write_rflags,
    .wbinvd = realmode_wbinvd,
    .cpuid = realmode_cpuid,
    .hlt = realmode_hlt,
    .inject_hw_exception = realmode_inject_hw_exception,
    .inject_sw_interrupt = realmode_inject_sw_interrupt,
    .load_fpu_ctxt = realmode_load_fpu_ctxt
};
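
/*
 * Emulate a single instruction: prefetch up to 16 bytes at CS:IP, run
 * x86_emulate() with the real-mode callbacks above, then fold any MOV-SS/STI
 * effects into the interrupt shadow and deliver a pending exception or handle
 * HLT. A RETRY result with I/O outstanding simply returns so the instruction
 * can be re-executed once the device model has completed the request.
 */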

static void realmode_emulate_one(struct realmode_emulate_ctxt *rm_ctxt)
{
    struct cpu_user_regs *regs = rm_ctxt->ctxt.regs;
    struct vcpu *curr = current;
    u32 new_intr_shadow;
    int rc, io_completed;

    rm_ctxt->insn_buf_eip = regs->eip;
    (void)hvm_copy_from_guest_phys(
        rm_ctxt->insn_buf,
        (uint32_t)(rm_ctxt->seg_reg[x86_seg_cs].base + regs->eip),
        sizeof(rm_ctxt->insn_buf));

    rm_ctxt->flag_word = 0;

    io_completed = curr->arch.hvm_vmx.real_mode_io_completed;
    if ( curr->arch.hvm_vmx.real_mode_io_in_progress )
    {
        gdprintk(XENLOG_ERR, "I/O in progress before insn is emulated.\n");
        goto fail;
    }

    rc = x86_emulate(&rm_ctxt->ctxt, &realmode_emulator_ops);

    if ( curr->arch.hvm_vmx.real_mode_io_completed )
    {
        gdprintk(XENLOG_ERR, "I/O completion after insn is emulated.\n");
        goto fail;
    }

    if ( rc == X86EMUL_UNHANDLEABLE )
    {
        gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
        goto fail;
    }

    if ( rc == X86EMUL_RETRY )
    {
        BUG_ON(!curr->arch.hvm_vmx.real_mode_io_in_progress);
        if ( !io_completed )
            return;
        gdprintk(XENLOG_ERR, "Multiple I/O reads in a single insn.\n");
        goto fail;
    }

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress &&
         (get_ioreq(curr)->vp_ioreq.dir == IOREQ_READ) )
    {
        gdprintk(XENLOG_ERR, "I/O read in progress but insn is retired.\n");
        goto fail;
    }

    new_intr_shadow = rm_ctxt->intr_shadow;

    /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
    if ( rm_ctxt->flags.mov_ss )
        new_intr_shadow ^= VMX_INTR_SHADOW_MOV_SS;
    else
        new_intr_shadow &= ~VMX_INTR_SHADOW_MOV_SS;

    /* STI instruction toggles STI shadow, else we just clear it. */
    if ( rm_ctxt->flags.sti )
        new_intr_shadow ^= VMX_INTR_SHADOW_STI;
    else
        new_intr_shadow &= ~VMX_INTR_SHADOW_STI;

    /* Update interrupt shadow information in VMCS only if it changes. */
    if ( rm_ctxt->intr_shadow != new_intr_shadow )
    {
        rm_ctxt->intr_shadow = new_intr_shadow;
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, rm_ctxt->intr_shadow);
    }

    if ( rc == X86EMUL_EXCEPTION )
    {
        realmode_deliver_exception(
            rm_ctxt->exn_vector, rm_ctxt->exn_insn_len, rm_ctxt);
    }
    else if ( rm_ctxt->flags.hlt && !hvm_local_events_need_delivery(curr) )
    {
        hvm_hlt(regs->eflags);
    }

    return;

 fail:
    gdprintk(XENLOG_ERR,
             "Real-mode emulation failed @ %04x:%08lx: "
             "%02x %02x %02x %02x %02x %02x\n",
             rm_ctxt->seg_reg[x86_seg_cs].sel, rm_ctxt->insn_buf_eip,
             rm_ctxt->insn_buf[0], rm_ctxt->insn_buf[1],
             rm_ctxt->insn_buf[2], rm_ctxt->insn_buf[3],
             rm_ctxt->insn_buf[4], rm_ctxt->insn_buf[5]);
    domain_crash_synchronous();
}
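
/*
 * Main entry point: emulate instructions while the guest remains in real
 * mode (CR0.PE clear), stopping when a softirq is pending, an event needs
 * delivering, or an I/O request is outstanding. If the guest has switched to
 * protected mode, the cached selectors are fixed up so their RPLs/DPLs are
 * acceptable to VMX before being written back to the VMCS.
 */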

void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct realmode_emulate_ctxt rm_ctxt;
    unsigned long intr_info;
    int i;

    rm_ctxt.ctxt.regs = regs;

    for ( i = 0; i < 10; i++ )
        hvm_get_segment_register(curr, i, &rm_ctxt.seg_reg[i]);

    rm_ctxt.ctxt.addr_size =
        rm_ctxt.seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
    rm_ctxt.ctxt.sp_size =
        rm_ctxt.seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;

    rm_ctxt.intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);

    if ( curr->arch.hvm_vmx.real_mode_io_in_progress ||
         curr->arch.hvm_vmx.real_mode_io_completed )
        realmode_emulate_one(&rm_ctxt);

    intr_info = __vmread(VM_ENTRY_INTR_INFO);
    if ( intr_info & INTR_INFO_VALID_MASK )
    {
        realmode_deliver_exception((uint8_t)intr_info, 0, &rm_ctxt);
        __vmwrite(VM_ENTRY_INTR_INFO, 0);
    }

    while ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
            !softirq_pending(smp_processor_id()) &&
            !hvm_local_events_need_delivery(curr) &&
            !curr->arch.hvm_vmx.real_mode_io_in_progress )
        realmode_emulate_one(&rm_ctxt);

    /*
     * Cannot enter protected mode with bogus selector RPLs and DPLs. Hence we
     * fix up as best we can, even though this deviates from native execution.
     */
    if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
    {
        /* CS.RPL == SS.RPL == SS.DPL == 0. */
        rm_ctxt.seg_reg[x86_seg_cs].sel &= ~3;
        rm_ctxt.seg_reg[x86_seg_ss].sel &= ~3;
        /* DS,ES,FS,GS: The most uninvasive trick is to set DPL == RPL. */
        rm_ctxt.seg_reg[x86_seg_ds].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_ds].sel & 3;
        rm_ctxt.seg_reg[x86_seg_es].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_es].sel & 3;
        rm_ctxt.seg_reg[x86_seg_fs].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_fs].sel & 3;
        rm_ctxt.seg_reg[x86_seg_gs].attr.fields.dpl =
            rm_ctxt.seg_reg[x86_seg_gs].sel & 3;
    }

    for ( i = 0; i < 10; i++ )
        hvm_set_segment_register(curr, i, &rm_ctxt.seg_reg[i]);
}
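
/*
 * Called when the device model completes an I/O request issued during
 * real-mode emulation: clears the in-progress flag and, for reads, latches
 * the returned data for the next emulation pass. Returns 0 if no real-mode
 * I/O was outstanding.
 */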

int vmx_realmode_io_complete(void)
{
    struct vcpu *curr = current;
    ioreq_t *p = &get_ioreq(curr)->vp_ioreq;

    if ( !curr->arch.hvm_vmx.real_mode_io_in_progress )
        return 0;

    curr->arch.hvm_vmx.real_mode_io_in_progress = 0;
    if ( p->dir == IOREQ_READ )
    {
        curr->arch.hvm_vmx.real_mode_io_completed = 1;
        curr->arch.hvm_vmx.real_mode_io_data = p->data;
    }

    return 1;
}