/root/src/xen/xen/arch/x86/hvm/vmx/realmode.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * arch/x86/hvm/vmx/realmode.c |
3 | | * |
4 | | * Real-mode emulation for VMX. |
5 | | * |
6 | | * Copyright (c) 2007-2008 Citrix Systems, Inc. |
7 | | * |
8 | | * Authors: |
9 | | * Keir Fraser <keir@xen.org> |
10 | | */ |
11 | | |
12 | | #include <xen/init.h> |
13 | | #include <xen/lib.h> |
14 | | #include <xen/sched.h> |
15 | | #include <xen/paging.h> |
16 | | #include <xen/softirq.h> |
17 | | #include <asm/event.h> |
18 | | #include <asm/hvm/emulate.h> |
19 | | #include <asm/hvm/hvm.h> |
20 | | #include <asm/hvm/support.h> |
21 | | #include <asm/hvm/vmx/vmx.h> |
22 | | #include <asm/hvm/vmx/vmcs.h> |
23 | | |
/*
 * Deliver an exception, software interrupt or external interrupt into the
 * guest while it is being emulated in real mode: read the 4-byte IVT entry,
 * push FLAGS/CS/IP on the guest stack, and vector to the handler.
 *
 * @vector:       interrupt/exception vector to deliver.
 * @insn_len:     length of the faulting/trapping instruction; non-zero only
 *                for software interrupts (INTn style), where the saved IP
 *                must point past the instruction.
 * @hvmemul_ctxt: emulation context holding cached segment state and regs.
 *
 * On failure to read the IVT entry the vector is escalated
 * (anything -> #GP -> #DF -> triple fault) by re-entering at 'again:'.
 */
static void realmode_deliver_exception(
    unsigned int vector,
    unsigned int insn_len,
    struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct segment_register *idtr, *csr;
    struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
    uint32_t cs_eip, pstk;
    uint16_t frame[3];
    unsigned int last_byte;

    idtr = hvmemul_get_seg_reg(x86_seg_idtr, hvmemul_ctxt);
    csr = hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
    /* We rewrite CS below; make sure the writeback path flushes it. */
    __set_bit(x86_seg_cs, &hvmemul_ctxt->seg_reg_dirty);

 again:
    /* Real-mode IVT entry: 4 bytes (IP:CS) at idtr.base + vector*4. */
    last_byte = (vector * 4) + 3;
    if ( idtr->limit < last_byte ||
         hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
         HVMTRANS_okay )
    {
        /* Software interrupt? Escalate to #GP, now as an exception. */
        if ( insn_len != 0 )
        {
            insn_len = 0;
            vector = TRAP_gp_fault;
            goto again;
        }

        /* Exception or hardware interrupt: #GP -> #DF -> triple fault. */
        switch ( vector )
        {
        case TRAP_double_fault:
            hvm_triple_fault();
            return;
        case TRAP_gp_fault:
            vector = TRAP_double_fault;
            goto again;
        default:
            vector = TRAP_gp_fault;
            goto again;
        }
    }

    /* Real-mode interrupt frame: return IP, CS, then FLAGS (RF cleared). */
    frame[0] = regs->ip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->flags & ~X86_EFLAGS_RF;

    /* We can't test hvmemul_ctxt->ctxt.sp_size: it may not be initialised. */
    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].db )
        pstk = regs->esp -= 6;      /* 32-bit stack pointer (SS.DB set) */
    else
        pstk = regs->sp -= 6;       /* 16-bit stack pointer */

    /* Best-effort push: a write failure here is deliberately ignored. */
    pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
    (void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame), current);

    /* Vector through the IVT entry: high word is CS, low word is IP. */
    csr->sel = cs_eip >> 16;
    csr->base = (uint32_t)csr->sel << 4; /* real-mode base = sel << 4 */
    regs->ip = (uint16_t)cs_eip;
    regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);

    /* Exception delivery clears STI and MOV-SS blocking. */
    if ( hvmemul_ctxt->intr_shadow &
         (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS) )
    {
        hvmemul_ctxt->intr_shadow &=
            ~(VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS);
        __vmwrite(GUEST_INTERRUPTIBILITY_INFO, hvmemul_ctxt->intr_shadow);
    }
}
95 | | |
96 | | void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt) |
97 | 0 | { |
98 | 0 | struct vcpu *curr = current; |
99 | 0 | struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io; |
100 | 0 | int rc; |
101 | 0 |
|
102 | 0 | perfc_incr(realmode_emulations); |
103 | 0 |
|
104 | 0 | rc = hvm_emulate_one(hvmemul_ctxt); |
105 | 0 |
|
106 | 0 | if ( hvm_vcpu_io_need_completion(vio) || vio->mmio_retry ) |
107 | 0 | vio->io_completion = HVMIO_realmode_completion; |
108 | 0 |
|
109 | 0 | if ( rc == X86EMUL_UNHANDLEABLE ) |
110 | 0 | { |
111 | 0 | gdprintk(XENLOG_ERR, "Failed to emulate insn.\n"); |
112 | 0 | goto fail; |
113 | 0 | } |
114 | 0 |
|
115 | 0 | if ( rc == X86EMUL_UNRECOGNIZED ) |
116 | 0 | { |
117 | 0 | gdprintk(XENLOG_ERR, "Unrecognized insn.\n"); |
118 | 0 | if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE ) |
119 | 0 | goto fail; |
120 | 0 |
|
121 | 0 | realmode_deliver_exception(TRAP_invalid_op, 0, hvmemul_ctxt); |
122 | 0 | } |
123 | 0 |
|
124 | 0 | if ( rc == X86EMUL_EXCEPTION ) |
125 | 0 | { |
126 | 0 | if ( unlikely(curr->domain->debugger_attached) && |
127 | 0 | ((hvmemul_ctxt->ctxt.event.vector == TRAP_debug) || |
128 | 0 | (hvmemul_ctxt->ctxt.event.vector == TRAP_int3)) ) |
129 | 0 | { |
130 | 0 | domain_pause_for_debugger(); |
131 | 0 | } |
132 | 0 | else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE ) |
133 | 0 | { |
134 | 0 | gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n", |
135 | 0 | hvmemul_ctxt->ctxt.event.vector); |
136 | 0 | goto fail; |
137 | 0 | } |
138 | 0 | else |
139 | 0 | { |
140 | 0 | realmode_deliver_exception( |
141 | 0 | hvmemul_ctxt->ctxt.event.vector, |
142 | 0 | hvmemul_ctxt->ctxt.event.insn_len, |
143 | 0 | hvmemul_ctxt); |
144 | 0 | } |
145 | 0 | } |
146 | 0 |
|
147 | 0 | return; |
148 | 0 |
|
149 | 0 | fail: |
150 | 0 | hvm_dump_emulation_state(XENLOG_G_ERR, "Real-mode", hvmemul_ctxt, rc); |
151 | 0 | domain_crash(curr->domain); |
152 | 0 | } |
153 | | |
/*
 * Main real-mode emulation loop, entered from the VMX exit path while the
 * guest cannot execute natively (real mode without "unrestricted guest",
 * or vm86 with unsafe segment state). Emulates instructions until native
 * execution is safe again, a softirq is pending, or I/O must complete.
 */
void vmx_realmode(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct hvm_emulate_ctxt hvmemul_ctxt;
    struct segment_register *sreg;
    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
    unsigned long intr_info;
    unsigned int emulations = 0;

    /* Get-and-clear VM_ENTRY_INTR_INFO. */
    __vmread(VM_ENTRY_INTR_INFO, &intr_info);
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, 0);

    hvm_emulate_init_once(&hvmemul_ctxt, NULL, regs);

    /* Only deliver interrupts into emulated real mode. */
    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) &&
         (intr_info & INTR_INFO_VALID_MASK) )
    {
        /* Low byte of VM_ENTRY_INTR_INFO is the vector. */
        realmode_deliver_exception((uint8_t)intr_info, 0, &hvmemul_ctxt);
        intr_info = 0; /* discharged: don't re-instate below */
    }

    curr->arch.hvm_vmx.vmx_emulate = 1;
    while ( curr->arch.hvm_vmx.vmx_emulate &&
            !softirq_pending(smp_processor_id()) )
    {
        /*
         * Check for pending interrupts only every 16 instructions, because
         * hvm_local_events_need_delivery() is moderately expensive, and only
         * in real mode, because we don't emulate protected-mode IDT vectoring.
         */
        if ( unlikely(!(++emulations & 15)) &&
             curr->arch.hvm_vmx.vmx_realmode &&
             hvm_local_events_need_delivery(curr) )
            break;

        vmx_realmode_emulate_one(&hvmemul_ctxt);

        /* An I/O request must go out to the device model first. */
        if ( vio->io_req.state != STATE_IOREQ_NONE || vio->mmio_retry )
            break;

        /*
         * Keep emulating only while native execution remains unsafe: in
         * real mode that is while any vm86 segment is unusable; otherwise
         * (protected mode) while CS or SS still carries a real-mode RPL.
         */
        if ( curr->arch.hvm_vmx.vmx_realmode )
            curr->arch.hvm_vmx.vmx_emulate =
                (curr->arch.hvm_vmx.vm86_segment_mask != 0);
        else
            curr->arch.hvm_vmx.vmx_emulate =
                 ((hvmemul_ctxt.seg_reg[x86_seg_cs].sel & 3)
                  || (hvmemul_ctxt.seg_reg[x86_seg_ss].sel & 3));
    }

    /* Need to emulate next time if we've started an IO operation */
    if ( vio->io_req.state != STATE_IOREQ_NONE )
        curr->arch.hvm_vmx.vmx_emulate = 1;

    if ( !curr->arch.hvm_vmx.vmx_emulate && !curr->arch.hvm_vmx.vmx_realmode )
    {
        /*
         * Cannot enter protected mode with bogus selector RPLs and DPLs.
         * At this point CS.RPL == SS.RPL == CS.DPL == SS.DPL == 0. For
         * DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
         */
        sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
        sreg->dpl = sreg->sel & 3;
        /* Mark the patched segments so writeback pushes them to the VMCS. */
        hvmemul_ctxt.seg_reg_dirty |=
            (1ul << x86_seg_ds) | (1ul << x86_seg_es) |
            (1ul << x86_seg_fs) | (1ul << x86_seg_gs);
    }

    hvm_emulate_writeback(&hvmemul_ctxt);

    /* Re-instate VM_ENTRY_INTR_INFO if we did not discharge it. */
    if ( intr_info & INTR_INFO_VALID_MASK )
        __vmwrite(VM_ENTRY_INTR_INFO, intr_info);
}
237 | | |
238 | | /* |
239 | | * Local variables: |
240 | | * mode: C |
241 | | * c-file-style: "BSD" |
242 | | * c-basic-offset: 4 |
243 | | * tab-width: 4 |
244 | | * indent-tabs-mode: nil |
245 | | * End: |
246 | | */ |