debuggers.hg

view xen/arch/x86/vmx_io.c @ 3607:cd26f113b1b1

bitkeeper revision 1.1159.231.12 (41f97ef6r1c2TDcgR-o8jFV1IWm5dA)

Lean decoder for MMIO instructions.

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Chengyuan Li <chengyuan.li@intel.com>
Signed-off-by: ian.pratt@cl.cam.ac.uk
author iap10@labyrinth.cl.cam.ac.uk
date Thu Jan 27 23:53:26 2005 +0000 (2005-01-27)
parents 002034af24e6
children bc0fbb38cb25
line source
/*
 * vmx_io.c: handling I/O, interrupts related VMX entry/exit
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>

#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/vmx_vmcs.h>
#include <xen/event.h>
#include <public/io/ioreq.h>
#include <asm/vmx_platform.h>

extern long do_block();

#if defined (__i386__)
static void load_xen_regs(struct xen_regs *regs)
{
    /*
     * Write the guest register value into VMCS
     */
    __vmwrite(GUEST_SS_SELECTOR, regs->ss);
    __vmwrite(GUEST_ESP, regs->esp);
    __vmwrite(GUEST_EFLAGS, regs->eflags);
    __vmwrite(GUEST_CS_SELECTOR, regs->cs);
    __vmwrite(GUEST_EIP, regs->eip);
}

static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value)
{
    switch (size) {
    case BYTE:
        switch (index) {
        case 0:
            regs->eax &= 0xFFFFFF00;
            regs->eax |= (value & 0xFF);
            break;
        case 1:
            regs->ecx &= 0xFFFFFF00;
            regs->ecx |= (value & 0xFF);
            break;
        case 2:
            regs->edx &= 0xFFFFFF00;
            regs->edx |= (value & 0xFF);
            break;
        case 3:
            regs->ebx &= 0xFFFFFF00;
            regs->ebx |= (value & 0xFF);
            break;
        case 4:
            regs->eax &= 0xFFFF00FF;
            regs->eax |= ((value & 0xFF) << 8);
            break;
        case 5:
            regs->ecx &= 0xFFFF00FF;
            regs->ecx |= ((value & 0xFF) << 8);
            break;
        case 6:
            regs->edx &= 0xFFFF00FF;
            regs->edx |= ((value & 0xFF) << 8);
            break;
        case 7:
            regs->ebx &= 0xFFFF00FF;
            regs->ebx |= ((value & 0xFF) << 8);
            break;
        default:
            printk("size:%x, index:%x are invalid!\n", size, index);
            break;
        }
        break;
    case WORD:
        switch (index) {
        case 0:
            regs->eax &= 0xFFFF0000;
            regs->eax |= (value & 0xFFFF);
            break;
        case 1:
            regs->ecx &= 0xFFFF0000;
            regs->ecx |= (value & 0xFFFF);
            break;
        case 2:
            regs->edx &= 0xFFFF0000;
            regs->edx |= (value & 0xFFFF);
            break;
        case 3:
            regs->ebx &= 0xFFFF0000;
            regs->ebx |= (value & 0xFFFF);
            break;
        case 4:
            regs->esp &= 0xFFFF0000;
            regs->esp |= (value & 0xFFFF);
            break;
        case 5:
            regs->ebp &= 0xFFFF0000;
            regs->ebp |= (value & 0xFFFF);
            break;
        case 6:
            regs->esi &= 0xFFFF0000;
            regs->esi |= (value & 0xFFFF);
            break;
        case 7:
            regs->edi &= 0xFFFF0000;
            regs->edi |= (value & 0xFFFF);
            break;
        default:
            printk("size:%x, index:%x are invalid!\n", size, index);
            break;
        }
        break;
    case LONG:
        switch (index) {
        case 0:
            regs->eax = value;
            break;
        case 1:
            regs->ecx = value;
            break;
        case 2:
            regs->edx = value;
            break;
        case 3:
            regs->ebx = value;
            break;
        case 4:
            regs->esp = value;
            break;
        case 5:
            regs->ebp = value;
            break;
        case 6:
            regs->esi = value;
            break;
        case 7:
            regs->edi = value;
            break;
        default:
            printk("size:%x, index:%x are invalid!\n", size, index);
            break;
        }
        break;
    default:
        printk("size:%x, index:%x are invalid!\n", size, index);
        break;
    }
}
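/*
 * Illustrative note (not part of the original changeset): the index
 * argument of set_reg_value() follows the x86 ModRM "reg" encoding.
 * For WORD and LONG accesses, 0..7 select eAX, eCX, eDX, eBX, eSP,
 * eBP, eSI, eDI; for BYTE accesses, 0..3 select AL, CL, DL, BL and
 * 4..7 select the high-byte registers AH, CH, DH, BH.  For example,
 * set_reg_value(BYTE, 4, 0, regs, 0x12) leaves regs->eax intact
 * except for bits 15:8, which become 0x12 (i.e. AH = 0x12).
 */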
#endif

void vmx_io_assist(struct exec_domain *ed)
{
    vcpu_iodata_t *vio;
    ioreq_t *p;
    struct domain *d = ed->domain;
    execution_context_t *ec = get_execution_context();
    unsigned long old_eax;
    int sign;
    struct mi_per_cpu_info *mpci_p;
    struct xen_regs *inst_decoder_regs;

    mpci_p = &ed->thread.arch_vmx.vmx_platform.mpci;
    inst_decoder_regs = mpci_p->inst_decoder_regs;

    /* clear the pending event */
    ed->vcpu_info->evtchn_upcall_pending = 0;
    /* clear the pending bit for port 2 */
    clear_bit(IOPACKET_PORT>>5, &ed->vcpu_info->evtchn_pending_sel);
    clear_bit(IOPACKET_PORT, &d->shared_info->evtchn_pending[0]);

    vio = (vcpu_iodata_t *) ed->thread.arch_vmx.vmx_platform.shared_page_va;
    if (vio == 0) {
        VMX_DBG_LOG(DBG_LEVEL_1,
                    "bad shared page: %lx\n", (unsigned long) vio);
        domain_crash();
    }
    p = &vio->vp_ioreq;
    /* clear IO wait VMX flag */
    if (test_bit(ARCH_VMX_IO_WAIT, &ed->thread.arch_vmx.flags)) {
        if (p->state != STATE_IORESP_READY) {
            printk("got a false I/O response\n");
            do_block();
        } else {
            p->state = STATE_INVALID;
        }
        clear_bit(ARCH_VMX_IO_WAIT, &ed->thread.arch_vmx.flags);
    } else {
        return;
    }

    sign = (p->df) ? -1 : 1;
    if (p->port_mm) {
        if (p->pdata_valid) {
            ec->esi += sign * p->count * p->size;
            ec->edi += sign * p->count * p->size;
        } else {
            int size = -1, index = -1;

            if (p->dir == IOREQ_WRITE) {
                return;
            }

            size = operand_size(ed->thread.arch_vmx.vmx_platform.mpci.mmio_target);
            index = operand_index(ed->thread.arch_vmx.vmx_platform.mpci.mmio_target);

            if (ed->thread.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) {
                p->u.data = p->u.data & 0xffff;
            }
            set_reg_value(size, index, 0, (struct xen_regs *)ec, p->u.data);
        }
        load_xen_regs((struct xen_regs *)ec);
        return;
    }

    if (p->dir == IOREQ_WRITE) {
        if (p->pdata_valid) {
            ec->esi += sign * p->count * p->size;
            ec->ecx -= p->count;
        }
        return;
    } else {
        if (p->pdata_valid) {
            ec->edi += sign * p->count * p->size;
            ec->ecx -= p->count;
            return;
        }
    }

    old_eax = ec->eax;

    switch(p->size) {
    case 1:
        ec->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff);
        break;
    case 2:
        ec->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff);
        break;
    case 4:
        ec->eax = (p->u.data & 0xffffffff);
        break;
    default:
        BUG();
    }
}
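/*
 * Illustrative sketch (an assumption, not part of the original changeset):
 * for a non-string MMIO read that loads a memory-mapped register into
 * %ebx, the lean decoder (presumably declared via asm/vmx_platform.h)
 * records the faulting instruction in mpci.mmio_target, and the MMIO
 * branch of vmx_io_assist() above recovers where the device model's
 * data belongs:
 *
 *     size  = operand_size(mmio_target);    // LONG for a 32-bit load
 *     index = operand_index(mmio_target);   // 3 selects EBX
 *     set_reg_value(size, index, 0, regs, p->u.data);
 *
 * WZEROEXTEND appears to mark a zero-extending 16-bit load (movzwl-style),
 * which is why only the low 16 bits of p->u.data are kept in that case.
 */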
static inline int __fls(unsigned long word)
{
    int bit;

    __asm__("bsrl %1,%0"
            :"=r" (bit)
            :"rm" (word));
    return word ? bit : -1;
}
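/*
 * Illustrative note (not part of the original changeset): __fls() uses
 * BSR to return the index of the most significant set bit, or -1 for a
 * zero word, e.g. __fls(0x1) == 0, __fls(0x80000000) == 31, __fls(0) == -1.
 */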
/* Simple minded Local APIC priority implementation. Fix later */
static __inline__ int find_highest_irq(unsigned long *pintr)
{
    if (pintr[7])
        return __fls(pintr[7]) + (256-32*1);
    if (pintr[6])
        return __fls(pintr[6]) + (256-32*2);
    if (pintr[5])
        return __fls(pintr[5]) + (256-32*3);
    if (pintr[4])
        return __fls(pintr[4]) + (256-32*4);
    if (pintr[3])
        return __fls(pintr[3]) + (256-32*5);
    if (pintr[2])
        return __fls(pintr[2]) + (256-32*6);
    if (pintr[1])
        return __fls(pintr[1]) + (256-32*7);
    return __fls(pintr[0]);
}
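/*
 * Illustrative worked example (not part of the original changeset):
 * pintr[] is treated as a 256-bit pending-vector bitmap, 32 vectors per
 * word, so pintr[7] covers vectors 224-255.  If only bit 5 of pintr[7]
 * is set, find_highest_irq() returns __fls(pintr[7]) + (256 - 32*1)
 * = 5 + 224 = 229; scanning from pintr[7] downwards means the highest
 * pending vector always wins, which approximates local APIC priority.
 */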
/*
 * Return 0-255 for pending irq.
 * -1 when none is pending.
 */
static inline int find_highest_pending_irq(struct exec_domain *d)
{
    vcpu_iodata_t *vio;

    vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va;
    if (vio == 0) {
        VMX_DBG_LOG(DBG_LEVEL_1,
                    "bad shared page: %lx\n", (unsigned long) vio);
        domain_crash();
    }

    return find_highest_irq(&vio->vp_intr[0]);
}

static inline void clear_highest_bit(struct exec_domain *d, int vector)
{
    vcpu_iodata_t *vio;

    vio = (vcpu_iodata_t *) d->thread.arch_vmx.vmx_platform.shared_page_va;
    if (vio == 0) {
        VMX_DBG_LOG(DBG_LEVEL_1,
                    "bad shared page: %lx\n", (unsigned long) vio);
        domain_crash();
    }

    clear_bit(vector, &vio->vp_intr[0]);
}

static inline int irq_masked(unsigned long eflags)
{
    return ((eflags & X86_EFLAGS_IF) == 0);
}
void vmx_intr_assist(struct exec_domain *d)
{
    int highest_vector = find_highest_pending_irq(d);
    unsigned long intr_fields, eflags;

    if (highest_vector == -1)
        return;

    __vmread(VM_ENTRY_INTR_INFO_FIELD, &intr_fields);
    if (intr_fields & INTR_INFO_VALID_MASK) {
        VMX_DBG_LOG(DBG_LEVEL_1, "vmx_intr_assist: intr_fields: %lx\n",
                    intr_fields);
        return;
    }

    __vmread(GUEST_EFLAGS, &eflags);
    if (irq_masked(eflags)) {
        VMX_DBG_LOG(DBG_LEVEL_1, "guest pending: %x, eflags: %lx\n",
                    highest_vector, eflags);
        return;
    }

    clear_highest_bit(d, highest_vector);
    intr_fields = (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | highest_vector);
    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);

    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);

    return;
}
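/*
 * Illustrative note (an assumption based on the VMX specification, not
 * part of the original changeset): the value vmx_intr_assist() writes to
 * VM_ENTRY_INTR_INFO_FIELD follows the interruption-information layout:
 * bits 7:0 carry the vector, bits 10:8 the type (external interrupt),
 * and bit 31 the valid flag, so injecting vector 0x30 would write
 * 0x80000030.  Clearing GUEST_INTERRUPTIBILITY_INFO drops any STI/MOV-SS
 * blocking so the event can be delivered at the next VM entry.
 */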
void vmx_do_resume(struct exec_domain *d)
{
    __vmwrite(HOST_CR3, pagetable_val(d->mm.monitor_table));
    __vmwrite(GUEST_CR3, pagetable_val(d->mm.shadow_table));
    __vmwrite(HOST_ESP, (unsigned long) get_stack_top());

    if (event_pending(d)) {
        if (test_bit(IOPACKET_PORT, &d->domain->shared_info->evtchn_pending[0]))
            vmx_io_assist(d);

        else if (test_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags)) {
            printk("got an event while blocked on I/O\n");
            do_block();
        }

        /* Assumption: device model will not inject an interrupt
         * while an ioreq_t is pending i.e. the response and
         * interrupt can come together. But an interrupt without
         * a response to ioreq_t is not ok.
         */
    }
    if (!test_bit(ARCH_VMX_IO_WAIT, &d->thread.arch_vmx.flags))
        vmx_intr_assist(d);
}