debuggers.hg

view xen/arch/x86/vmx_platform.c @ 6677:d4d69c509371

merge?
author cl349@firebug.cl.cam.ac.uk
date Tue Sep 06 17:00:25 2005 +0000 (2005-09-06)
parents b6c98fe62e1a ef1cd7729676
children a8f01a0a9559 4d899a738d59 e7c7196fa329 b2f4823b6ff0
line source
1 /*
2 * vmx_platform.c: handling x86 platform related MMIO instructions
3 * Copyright (c) 2004, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 *
18 */
20 #include <xen/config.h>
21 #include <xen/types.h>
22 #include <xen/mm.h>
23 #include <asm/shadow.h>
24 #include <xen/domain_page.h>
25 #include <asm/page.h>
26 #include <xen/event.h>
27 #include <xen/trace.h>
28 #include <asm/vmx.h>
29 #include <asm/vmx_platform.h>
30 #include <public/io/ioreq.h>
32 #include <xen/lib.h>
33 #include <xen/sched.h>
34 #include <asm/current.h>
35 #if CONFIG_PAGING_LEVELS >= 3
36 #include <asm/shadow_64.h>
37 #endif
38 #ifdef CONFIG_VMX
40 #define DECODE_success 1
41 #define DECODE_failure 0
43 #if defined (__x86_64__)
/*
 * Refresh the segment-selector, stack, flags and instruction-pointer
 * fields of *regs from the active VMCS.  (Despite the name, this reads
 * guest state out of hardware into *regs; the general-purpose registers
 * in *regs are maintained elsewhere on vmexit.)
 */
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmread(GUEST_SS_SELECTOR, &regs->ss);
    __vmread(GUEST_RSP, &regs->rsp);
    __vmread(GUEST_RFLAGS, &regs->rflags);
    __vmread(GUEST_CS_SELECTOR, &regs->cs);
    __vmread(GUEST_DS_SELECTOR, &regs->ds);
    __vmread(GUEST_ES_SELECTOR, &regs->es);
    __vmread(GUEST_RIP, &regs->rip);
}
55 static inline long __get_reg_value(unsigned long reg, int size)
56 {
57 switch(size) {
58 case BYTE_64:
59 return (char)(reg & 0xFF);
60 case WORD:
61 return (short)(reg & 0xFFFF);
62 case LONG:
63 return (int)(reg & 0xFFFFFFFF);
64 case QUAD:
65 return (long)(reg);
66 default:
67 printf("Error: (__get_reg_value) Invalid reg size\n");
68 domain_crash_synchronous();
69 }
70 }
/*
 * Fetch general-purpose register 'index' (x86_64 encoding, 0 = %rax ...
 * 15 = %r15) from the saved register file, sign-extended from 'size'
 * bytes.  For size == BYTE, indexes 0-7 select the legacy byte registers
 * %al,%cl,%dl,%bl,%ah,%ch,%dh,%bh (REX-prefixed byte accesses go through
 * the BYTE_64 path in __get_reg_value instead).  'seg' is accepted but
 * unused here.  An out-of-range index crashes the domain.
 */
long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
{
    if (size == BYTE) {
        switch (index) {
        case 0: /* %al */
            return (char)(regs->rax & 0xFF);
        case 1: /* %cl */
            return (char)(regs->rcx & 0xFF);
        case 2: /* %dl */
            return (char)(regs->rdx & 0xFF);
        case 3: /* %bl */
            return (char)(regs->rbx & 0xFF);
        case 4: /* %ah */
            return (char)((regs->rax & 0xFF00) >> 8);
        case 5: /* %ch */
            return (char)((regs->rcx & 0xFF00) >> 8);
        case 6: /* %dh */
            return (char)((regs->rdx & 0xFF00) >> 8);
        case 7: /* %bh */
            return (char)((regs->rbx & 0xFF00) >> 8);
        default:
            printf("Error: (get_reg_value) Invalid index value\n");
            domain_crash_synchronous();
        }
    }

    /* Word and wider: whole-register lookup, extended by __get_reg_value. */
    switch (index) {
    case 0: return __get_reg_value(regs->rax, size);
    case 1: return __get_reg_value(regs->rcx, size);
    case 2: return __get_reg_value(regs->rdx, size);
    case 3: return __get_reg_value(regs->rbx, size);
    case 4: return __get_reg_value(regs->rsp, size);
    case 5: return __get_reg_value(regs->rbp, size);
    case 6: return __get_reg_value(regs->rsi, size);
    case 7: return __get_reg_value(regs->rdi, size);
    case 8: return __get_reg_value(regs->r8, size);
    case 9: return __get_reg_value(regs->r9, size);
    case 10: return __get_reg_value(regs->r10, size);
    case 11: return __get_reg_value(regs->r11, size);
    case 12: return __get_reg_value(regs->r12, size);
    case 13: return __get_reg_value(regs->r13, size);
    case 14: return __get_reg_value(regs->r14, size);
    case 15: return __get_reg_value(regs->r15, size);
    default:
        printf("Error: (get_reg_value) Invalid index value\n");
        domain_crash_synchronous();
    }
}
120 #elif defined (__i386__)
/*
 * i386 variant: refresh the segment-selector, stack, flags and
 * instruction-pointer fields of *regs from the active VMCS.  The
 * general-purpose registers in *regs are maintained elsewhere on vmexit.
 */
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
    __vmread(GUEST_SS_SELECTOR, &regs->ss);
    __vmread(GUEST_RSP, &regs->esp);
    __vmread(GUEST_RFLAGS, &regs->eflags);
    __vmread(GUEST_CS_SELECTOR, &regs->cs);
    __vmread(GUEST_DS_SELECTOR, &regs->ds);
    __vmread(GUEST_ES_SELECTOR, &regs->es);
    __vmread(GUEST_RIP, &regs->eip);
}
132 static inline long __get_reg_value(unsigned long reg, int size)
133 {
134 switch(size) {
135 case WORD:
136 return (short)(reg & 0xFFFF);
137 case LONG:
138 return (int)(reg & 0xFFFFFFFF);
139 default:
140 printf("Error: (__get_reg_value) Invalid reg size\n");
141 domain_crash_synchronous();
142 }
143 }
145 long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
146 {
147 if (size == BYTE) {
148 switch (index) {
149 case 0: /* %al */
150 return (char)(regs->eax & 0xFF);
151 case 1: /* %cl */
152 return (char)(regs->ecx & 0xFF);
153 case 2: /* %dl */
154 return (char)(regs->edx & 0xFF);
155 case 3: /* %bl */
156 return (char)(regs->ebx & 0xFF);
157 case 4: /* %ah */
158 return (char)((regs->eax & 0xFF00) >> 8);
159 case 5: /* %ch */
160 return (char)((regs->ecx & 0xFF00) >> 8);
161 case 6: /* %dh */
162 return (char)((regs->edx & 0xFF00) >> 8);
163 case 7: /* %bh */
164 return (char)((regs->ebx & 0xFF00) >> 8);
165 default:
166 printf("Error: (get_reg_value) Invalid index value\n");
167 domain_crash_synchronous();
168 }
169 }
171 switch (index) {
172 case 0: return __get_reg_value(regs->eax, size);
173 case 1: return __get_reg_value(regs->ecx, size);
174 case 2: return __get_reg_value(regs->edx, size);
175 case 3: return __get_reg_value(regs->ebx, size);
176 case 4: return __get_reg_value(regs->esp, size);
177 case 5: return __get_reg_value(regs->ebp, size);
178 case 6: return __get_reg_value(regs->esi, size);
179 case 7: return __get_reg_value(regs->edi, size);
180 default:
181 printf("Error: (get_reg_value) Invalid index value\n");
182 domain_crash_synchronous();
183 }
184 }
185 #endif
187 static inline unsigned char *check_prefix(unsigned char *inst,
188 struct instruction *thread_inst, unsigned char *rex_p)
189 {
190 while (1) {
191 switch (*inst) {
192 /* rex prefix for em64t instructions */
193 case 0x40 ... 0x4e:
194 *rex_p = *inst;
195 break;
196 case 0xf3: /* REPZ */
197 thread_inst->flags = REPZ;
198 break;
199 case 0xf2: /* REPNZ */
200 thread_inst->flags = REPNZ;
201 break;
202 case 0xf0: /* LOCK */
203 break;
204 case 0x2e: /* CS */
205 case 0x36: /* SS */
206 case 0x3e: /* DS */
207 case 0x26: /* ES */
208 case 0x64: /* FS */
209 case 0x65: /* GS */
210 thread_inst->seg_sel = *inst;
211 break;
212 case 0x66: /* 32bit->16bit */
213 thread_inst->op_size = WORD;
214 break;
215 case 0x67:
216 printf("Error: Not handling 0x67 (yet)\n");
217 domain_crash_synchronous();
218 break;
219 default:
220 return inst;
221 }
222 inst++;
223 }
224 }
226 static inline unsigned long get_immediate(int op16,const unsigned char *inst, int op_size)
227 {
228 int mod, reg, rm;
229 unsigned long val = 0;
230 int i;
232 mod = (*inst >> 6) & 3;
233 reg = (*inst >> 3) & 7;
234 rm = *inst & 7;
236 inst++; //skip ModR/M byte
237 if (mod != 3 && rm == 4) {
238 inst++; //skip SIB byte
239 }
241 switch(mod) {
242 case 0:
243 if (rm == 5 || rm == 4) {
244 if (op16)
245 inst = inst + 2; //disp16, skip 2 bytes
246 else
247 inst = inst + 4; //disp32, skip 4 bytes
248 }
249 break;
250 case 1:
251 inst++; //disp8, skip 1 byte
252 break;
253 case 2:
254 if (op16)
255 inst = inst + 2; //disp16, skip 2 bytes
256 else
257 inst = inst + 4; //disp32, skip 4 bytes
258 break;
259 }
261 if (op_size == QUAD)
262 op_size = LONG;
264 for (i = 0; i < op_size; i++) {
265 val |= (*inst++ & 0xff) << (8 * i);
266 }
268 return val;
269 }
/*
 * Return the register number encoded by the ModR/M byte at *inst,
 * widened to 0-15 by the relevant REX extension bit.  With mod == 3 the
 * (single) register operand lives in r/m and is extended by REX.B;
 * otherwise it lives in reg and is extended by REX.R.
 */
static inline int get_index(const unsigned char *inst, unsigned char rex)
{
    int modrm = *inst;
    int rex_r = (rex >> 2) & 1;
    int rex_b = rex & 1;

    if (((modrm >> 6) & 3) == 3)
        return (modrm & 7) | (rex_b << 3);

    return ((modrm >> 3) & 7) | (rex_r << 3);
}
292 static void init_instruction(struct instruction *mmio_inst)
293 {
294 mmio_inst->instr = 0;
295 mmio_inst->op_size = 0;
296 mmio_inst->immediate = 0;
297 mmio_inst->seg_sel = 0;
299 mmio_inst->operand[0] = 0;
300 mmio_inst->operand[1] = 0;
302 mmio_inst->flags = 0;
303 }
/*
 * Operand size for a byte-width opcode: any REX prefix marks the access
 * as BYTE_64 (handled by the 64-bit sign-extension path), otherwise
 * plain BYTE.  Relies on a local variable named 'rex' being in scope at
 * the expansion site.
 */
#define GET_OP_SIZE_FOR_BYTE(op_size) \
    do { \
        if (rex) \
            op_size = BYTE_64; \
        else \
            op_size = BYTE; \
    } while(0)

/*
 * Operand size for a non-byte opcode: REX.W (bit 3) forces QUAD; a
 * previously recorded WORD (from a 0x66 prefix) is kept; everything
 * else defaults to LONG.  Also relies on the caller's 'rex' local.
 */
#define GET_OP_SIZE_FOR_NONEBYTE(op_size) \
    do { \
        if (rex & 0x8) \
            op_size = QUAD; \
        else if (op_size != WORD) \
            op_size = LONG; \
    } while(0)
322 /*
323 * Decode mem,accumulator operands (as in <opcode> m8/m16/m32, al,ax,eax)
324 */
325 static int mem_acc(unsigned char size, struct instruction *instr)
326 {
327 instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
328 instr->operand[1] = mk_operand(size, 0, 0, REGISTER);
329 return DECODE_success;
330 }
332 /*
333 * Decode accumulator,mem operands (as in <opcode> al,ax,eax, m8/m16/m32)
334 */
335 static int acc_mem(unsigned char size, struct instruction *instr)
336 {
337 instr->operand[0] = mk_operand(size, 0, 0, REGISTER);
338 instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
339 return DECODE_success;
340 }
342 /*
343 * Decode mem,reg operands (as in <opcode> r32/16, m32/16)
344 */
345 static int mem_reg(unsigned char size, unsigned char *opcode,
346 struct instruction *instr, unsigned char rex)
347 {
348 int index = get_index(opcode + 1, rex);
350 instr->operand[0] = mk_operand(size, 0, 0, MEMORY);
351 instr->operand[1] = mk_operand(size, index, 0, REGISTER);
352 return DECODE_success;
353 }
355 /*
356 * Decode reg,mem operands (as in <opcode> m32/16, r32/16)
357 */
358 static int reg_mem(unsigned char size, unsigned char *opcode,
359 struct instruction *instr, unsigned char rex)
360 {
361 int index = get_index(opcode + 1, rex);
363 instr->operand[0] = mk_operand(size, index, 0, REGISTER);
364 instr->operand[1] = mk_operand(size, 0, 0, MEMORY);
365 return DECODE_success;
366 }
368 static int vmx_decode(unsigned char *opcode, struct instruction *instr)
369 {
370 unsigned long eflags;
371 int index, vm86 = 0;
372 unsigned char rex = 0;
373 unsigned char tmp_size = 0;
375 init_instruction(instr);
377 opcode = check_prefix(opcode, instr, &rex);
379 __vmread(GUEST_RFLAGS, &eflags);
380 if (eflags & X86_EFLAGS_VM)
381 vm86 = 1;
383 if (vm86) { /* meaning is reversed */
384 if (instr->op_size == WORD)
385 instr->op_size = LONG;
386 else if (instr->op_size == LONG)
387 instr->op_size = WORD;
388 else if (instr->op_size == 0)
389 instr->op_size = WORD;
390 }
392 switch (*opcode) {
393 case 0x0B: /* or m32/16, r32/16 */
394 instr->instr = INSTR_OR;
395 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
396 return mem_reg(instr->op_size, opcode, instr, rex);
398 case 0x20: /* and r8, m8 */
399 instr->instr = INSTR_AND;
400 GET_OP_SIZE_FOR_BYTE(instr->op_size);
401 return reg_mem(instr->op_size, opcode, instr, rex);
403 case 0x21: /* and r32/16, m32/16 */
404 instr->instr = INSTR_AND;
405 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
406 return reg_mem(instr->op_size, opcode, instr, rex);
408 case 0x23: /* and m32/16, r32/16 */
409 instr->instr = INSTR_AND;
410 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
411 return mem_reg(instr->op_size, opcode, instr, rex);
413 case 0x30: /* xor r8, m8 */
414 instr->instr = INSTR_XOR;
415 GET_OP_SIZE_FOR_BYTE(instr->op_size);
416 return reg_mem(instr->op_size, opcode, instr, rex);
418 case 0x31: /* xor r32/16, m32/16 */
419 instr->instr = INSTR_XOR;
420 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
421 return reg_mem(instr->op_size, opcode, instr, rex);
423 case 0x39: /* cmp r32/16, m32/16 */
424 instr->instr = INSTR_CMP;
425 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
426 return reg_mem(instr->op_size, opcode, instr, rex);
428 case 0x81:
429 if (((opcode[1] >> 3) & 7) == 7) { /* cmp $imm, m32/16 */
430 instr->instr = INSTR_CMP;
431 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
433 instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
434 instr->immediate = get_immediate(vm86, opcode+1, BYTE);
435 instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
437 return DECODE_success;
438 } else
439 return DECODE_failure;
441 case 0x84: /* test m8, r8 */
442 instr->instr = INSTR_TEST;
443 instr->op_size = BYTE;
444 GET_OP_SIZE_FOR_BYTE(tmp_size);
445 return mem_reg(tmp_size, opcode, instr, rex);
447 case 0x88: /* mov r8, m8 */
448 instr->instr = INSTR_MOV;
449 instr->op_size = BYTE;
450 GET_OP_SIZE_FOR_BYTE(tmp_size);
451 return reg_mem(tmp_size, opcode, instr, rex);
453 case 0x89: /* mov r32/16, m32/16 */
454 instr->instr = INSTR_MOV;
455 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
456 return reg_mem(instr->op_size, opcode, instr, rex);
458 case 0x8A: /* mov m8, r8 */
459 instr->instr = INSTR_MOV;
460 instr->op_size = BYTE;
461 GET_OP_SIZE_FOR_BYTE(tmp_size);
462 return mem_reg(tmp_size, opcode, instr, rex);
464 case 0x8B: /* mov m32/16, r32/16 */
465 instr->instr = INSTR_MOV;
466 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
467 return mem_reg(instr->op_size, opcode, instr, rex);
469 case 0xA0: /* mov <addr>, al */
470 instr->instr = INSTR_MOV;
471 instr->op_size = BYTE;
472 GET_OP_SIZE_FOR_BYTE(tmp_size);
473 return mem_acc(tmp_size, instr);
475 case 0xA1: /* mov <addr>, ax/eax */
476 instr->instr = INSTR_MOV;
477 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
478 return mem_acc(instr->op_size, instr);
480 case 0xA2: /* mov al, <addr> */
481 instr->instr = INSTR_MOV;
482 instr->op_size = BYTE;
483 GET_OP_SIZE_FOR_BYTE(tmp_size);
484 return acc_mem(tmp_size, instr);
486 case 0xA3: /* mov ax/eax, <addr> */
487 instr->instr = INSTR_MOV;
488 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
489 return acc_mem(instr->op_size, instr);
491 case 0xA4: /* movsb */
492 instr->instr = INSTR_MOVS;
493 instr->op_size = BYTE;
494 return DECODE_success;
496 case 0xA5: /* movsw/movsl */
497 instr->instr = INSTR_MOVS;
498 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
499 return DECODE_success;
501 case 0xAA: /* stosb */
502 instr->instr = INSTR_STOS;
503 instr->op_size = BYTE;
504 return DECODE_success;
506 case 0xAB: /* stosw/stosl */
507 instr->instr = INSTR_STOS;
508 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
509 return DECODE_success;
511 case 0xC6:
512 if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */
513 instr->instr = INSTR_MOV;
514 instr->op_size = BYTE;
516 instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
517 instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
518 instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
520 return DECODE_success;
521 } else
522 return DECODE_failure;
524 case 0xC7:
525 if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */
526 instr->instr = INSTR_MOV;
527 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
529 instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
530 instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
531 instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
533 return DECODE_success;
534 } else
535 return DECODE_failure;
537 case 0xF6:
538 if (((opcode[1] >> 3) & 7) == 0) { /* testb $imm8, m8 */
539 instr->instr = INSTR_TEST;
540 instr->op_size = BYTE;
542 instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE);
543 instr->immediate = get_immediate(vm86, opcode+1, instr->op_size);
544 instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY);
546 return DECODE_success;
547 } else
548 return DECODE_failure;
550 case 0x0F:
551 break;
553 default:
554 printf("%x, This opcode isn't handled yet!\n", *opcode);
555 return DECODE_failure;
556 }
558 switch (*++opcode) {
559 case 0xB6: /* movz m8, r16/r32 */
560 instr->instr = INSTR_MOVZ;
561 GET_OP_SIZE_FOR_NONEBYTE(instr->op_size);
562 index = get_index(opcode + 1, rex);
563 instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY);
564 instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER);
565 return DECODE_success;
567 case 0xB7: /* movz m16, r32 */
568 instr->instr = INSTR_MOVZ;
569 index = get_index(opcode + 1, rex);
570 if (rex & 0x8) {
571 instr->op_size = LONG;
572 instr->operand[1] = mk_operand(QUAD, index, 0, REGISTER);
573 } else {
574 instr->op_size = WORD;
575 instr->operand[1] = mk_operand(LONG, index, 0, REGISTER);
576 }
577 instr->operand[0] = mk_operand(instr->op_size, 0, 0, MEMORY);
578 return DECODE_success;
580 default:
581 printf("0f %x, This opcode isn't handled yet\n", *opcode);
582 return DECODE_failure;
583 }
584 }
586 int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip, int inst_len)
587 {
588 if (inst_len > MAX_INST_LEN || inst_len <= 0)
589 return 0;
590 if (!vmx_copy(buf, guest_eip, inst_len, VMX_COPY_IN))
591 return 0;
592 return inst_len;
593 }
595 void send_mmio_req(unsigned char type, unsigned long gpa,
596 unsigned long count, int size, long value, int dir, int pvalid)
597 {
598 struct vcpu *d = current;
599 vcpu_iodata_t *vio;
600 ioreq_t *p;
601 int vm86;
602 struct cpu_user_regs *regs;
603 extern long evtchn_send(int lport);
605 regs = current->domain->arch.vmx_platform.mpci.inst_decoder_regs;
607 vio = get_vio(d->domain, d->vcpu_id);
608 if (vio == NULL) {
609 printf("bad shared page\n");
610 domain_crash_synchronous();
611 }
613 p = &vio->vp_ioreq;
615 vm86 = regs->eflags & X86_EFLAGS_VM;
617 if (test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)) {
618 printf("VMX I/O has not yet completed\n");
619 domain_crash_synchronous();
620 }
622 set_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags);
623 p->dir = dir;
624 p->pdata_valid = pvalid;
626 p->type = type;
627 p->size = size;
628 p->addr = gpa;
629 p->count = count;
630 p->df = regs->eflags & EF_DF ? 1 : 0;
632 if (pvalid) {
633 if (vmx_paging_enabled(current))
634 p->u.pdata = (void *) gva_to_gpa(value);
635 else
636 p->u.pdata = (void *) value; /* guest VA == guest PA */
637 } else
638 p->u.data = value;
640 p->state = STATE_IOREQ_READY;
642 if (vmx_mmio_intercept(p)){
643 p->state = STATE_IORESP_READY;
644 vmx_io_assist(d);
645 return;
646 }
648 evtchn_send(iopacket_port(d->domain));
649 vmx_wait_io();
650 }
/*
 * Issue the MMIO request for a decoded two-operand instruction.
 * operand[0] is the source and operand[1] the destination; one side is
 * the MMIO location 'gpa'.  A register or immediate source means an
 * MMIO write, so the value is sent along; a memory source means an MMIO
 * read, so the request is sent and the result is applied later by the
 * I/O-completion path using the state stashed in 'mpcip'.
 */
static void mmio_operands(int type, unsigned long gpa, struct instruction *inst,
          struct mi_per_cpu_info *mpcip, struct cpu_user_regs *regs)
{
    unsigned long value = 0;
    int index, size;

    size = operand_size(inst->operand[0]);

    /* Save decode state for the completion handler. */
    mpcip->flags = inst->flags;
    mpcip->instr = inst->instr;
    mpcip->operand[0] = inst->operand[0]; /* source */
    mpcip->operand[1] = inst->operand[1]; /* destination */

    if (inst->operand[0] & REGISTER) { /* dest is memory */
        index = operand_index(inst->operand[0]);
        value = get_reg_value(size, index, 0, regs);
        send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0);
    } else if (inst->operand[0] & IMMEDIATE) { /* dest is memory */
        value = inst->immediate;
        send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0);
    } else if (inst->operand[0] & MEMORY) { /* dest is register */
        /* send the request and wait for the value */
        send_mmio_req(type, gpa, 1, size, 0, IOREQ_READ, 0);
    } else {
        printf("mmio_operands: invalid operand\n");
        domain_crash_synchronous();
    }
}
/*
 * Repeat count for string instructions: with a REP prefix use %ecx
 * (only its low 16 bits in vm86 mode), otherwise 1.  Expands the names
 * 'mmio_inst', 'vm86' and 'regs' from the handle_mmio() scope.
 */
#define GET_REPEAT_COUNT() \
     (mmio_inst.flags & REPZ ? (vm86 ? regs->ecx & 0xFFFF : regs->ecx) : 1)
/*
 * Handle an MMIO-faulting guest instruction.  'va' is the faulting
 * guest virtual address and 'gpa' the corresponding guest physical
 * (MMIO) address.  Fetches and decodes the instruction at the guest's
 * instruction pointer, advances %eip past it, and issues the matching
 * I/O request(s); undecodable or unhandled instructions crash the
 * domain.
 */
void handle_mmio(unsigned long va, unsigned long gpa)
{
    unsigned long eip, eflags, cs;
    unsigned long inst_len, inst_addr;
    struct mi_per_cpu_info *mpcip;
    struct cpu_user_regs *regs;
    struct instruction mmio_inst;
    unsigned char inst[MAX_INST_LEN];
    int i, vm86, ret;

    mpcip = &current->domain->arch.vmx_platform.mpci;
    regs = mpcip->inst_decoder_regs;

    __vmread(GUEST_RIP, &eip);
    __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len);
    __vmread(GUEST_RFLAGS, &eflags);
    vm86 = eflags & X86_EFLAGS_VM;

    /* In vm86 mode the fetch address is real-mode style CS:IP. */
    if (vm86) {
        __vmread(GUEST_CS_SELECTOR, &cs);
        inst_addr = (cs << 4) + eip;
    } else
        inst_addr = eip;

    memset(inst, 0, MAX_INST_LEN);
    ret = inst_copy_from_guest(inst, inst_addr, inst_len);
    if (ret != inst_len) {
        printf("handle_mmio - EXIT: get guest instruction fault\n");
        domain_crash_synchronous();
    }

    init_instruction(&mmio_inst);

    if (vmx_decode(inst, &mmio_inst) == DECODE_failure) {
        /* Dump the undecodable bytes before killing the domain. */
        printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %ld:",
               va, gpa, inst_len);
        for (i = 0; i < inst_len; i++)
            printf(" %02x", inst[i] & 0xFF);
        printf("\n");
        domain_crash_synchronous();
    }

    store_cpu_user_regs(regs);
    regs->eip += inst_len; /* advance %eip */

    switch (mmio_inst.instr) {
    case INSTR_MOV:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_MOVS:
    {
        unsigned long count = GET_REPEAT_COUNT();
        unsigned long size = mmio_inst.op_size;
        int sign = regs->eflags & EF_DF ? -1 : 1;
        unsigned long addr = 0;
        int dir;

        /* determine non-MMIO address */
        if (vm86) {
            unsigned long seg;

            /* %es:%di is the store side of movs; if it matches the
             * faulting va this is a write to MMIO and the non-MMIO
             * side is the load at %ds:%si, and vice versa. */
            __vmread(GUEST_ES_SELECTOR, &seg);
            if (((seg << 4) + (regs->edi & 0xFFFF)) == va) {
                dir = IOREQ_WRITE;
                __vmread(GUEST_DS_SELECTOR, &seg);
                addr = (seg << 4) + (regs->esi & 0xFFFF);
            } else {
                dir = IOREQ_READ;
                addr = (seg << 4) + (regs->edi & 0xFFFF);
            }
        } else {
            if (va == regs->edi) {
                dir = IOREQ_WRITE;
                addr = regs->esi;
            } else {
                dir = IOREQ_READ;
                addr = regs->edi;
            }
        }

        /* Save decode state for the completion handler. */
        mpcip->flags = mmio_inst.flags;
        mpcip->instr = mmio_inst.instr;

        /*
         * In case of a movs spanning multiple pages, we break the accesses
         * up into multiple pages (the device model works with non-continguous
         * physical guest pages). To copy just one page, we adjust %ecx and
         * do not advance %eip so that the next "rep movs" copies the next page.
         * Unaligned accesses, for example movsl starting at PGSZ-2, are
         * turned into a single copy where we handle the overlapping memory
         * copy ourself. After this copy succeeds, "rep movs" is executed
         * again.
         */
        if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
            unsigned long value = 0;

            mpcip->flags |= OVERLAP;

            regs->eip -= inst_len; /* do not advance %eip */

            if (dir == IOREQ_WRITE)
                vmx_copy(&value, addr, size, VMX_COPY_IN);
            send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, 0);
        } else {
            if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) {
                regs->eip -= inst_len; /* do not advance %eip */

                /* Clip the repeat count at the current page boundary,
                 * honouring the copy direction (DF). */
                if (sign > 0)
                    count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size;
                else
                    count = (addr & ~PAGE_MASK) / size;
            }

            send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, 1);
        }
        break;
    }

    case INSTR_MOVZ:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_STOS:
        /*
         * Since the destination is always in (contiguous) mmio space we don't
         * need to break it up into pages.
         */
        mpcip->flags = mmio_inst.flags;
        mpcip->instr = mmio_inst.instr;
        send_mmio_req(IOREQ_TYPE_COPY, gpa,
            GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0);
        break;

    case INSTR_OR:
        mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_AND:
        mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_XOR:
        mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_CMP:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
        break;

    case INSTR_TEST:
        mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs);
        break;

    default:
        printf("Unhandled MMIO instruction\n");
        domain_crash_synchronous();
    }
}
844 #endif /* CONFIG_VMX */
846 /*
847 * Local variables:
848 * mode: C
849 * c-set-style: "BSD"
850 * c-basic-offset: 4
851 * tab-width: 4
852 * indent-tabs-mode: nil
853 * End:
854 */