debuggers.hg

view xen/arch/x86/hvm/intercept.c @ 13707:21d6135f522f

Merge
author Tim Deegan <Tim.Deegan@xensource.com>
date Mon Jan 29 16:50:22 2007 +0000 (2007-01-29)
parents 99d36a153024 bef7fbe25a9f
children ffcd586dbaae

/*
 * intercept.c: Handle performance critical I/O packets in hypervisor space
 *
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <asm/regs.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/domain.h>
#include <xen/lib.h>
#include <asm/current.h>
#include <io_ports.h>
#include <xen/event.h>
#include <xen/compile.h>
#include <public/version.h>

extern struct hvm_mmio_handler hpet_mmio_handler;
extern struct hvm_mmio_handler vlapic_mmio_handler;
extern struct hvm_mmio_handler vioapic_mmio_handler;

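/* Table of MMIO intercept handlers for the platform devices Xen emulates
 * itself (HPET, local APIC, IO-APIC); scanned by hvm_mmio_intercept(). */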
#define HVM_MMIO_HANDLER_NR 3

static struct hvm_mmio_handler *hvm_mmio_handlers[HVM_MMIO_HANDLER_NR] =
{
    &hpet_mmio_handler,
    &vlapic_mmio_handler,
    &vioapic_mmio_handler
};

struct hvm_buffered_io_range {
    unsigned long start_addr;
    unsigned long length;
};

#define HVM_BUFFERED_IO_RANGE_NR 1

static struct hvm_buffered_io_range buffered_stdvga_range = {0xA0000, 0x20000};
static struct hvm_buffered_io_range
*hvm_buffered_io_ranges[HVM_BUFFERED_IO_RANGE_NR] =
{
    &buffered_stdvga_range
};

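/*
 * Dispatch one MMIO ioreq to the given per-device read/write handlers.
 * IOREQ_TYPE_COPY moves the data directly, or via guest physical memory
 * when data_is_ptr is set (count elements, with df selecting ascending or
 * descending addresses).  The AND/ADD/OR/XOR/XCHG types perform a
 * read-modify-write on writes and always return the old value in p->data.
 */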
static inline void hvm_mmio_access(struct vcpu *v,
                                   ioreq_t *p,
                                   hvm_mmio_read_t read_handler,
                                   hvm_mmio_write_t write_handler)
{
    unsigned int tmp1, tmp2;
    unsigned long data;

    switch ( p->type ) {
    case IOREQ_TYPE_COPY:
    {
        if ( !p->data_is_ptr ) {
            if ( p->dir == IOREQ_READ )
                p->data = read_handler(v, p->addr, p->size);
            else /* p->dir == IOREQ_WRITE */
                write_handler(v, p->addr, p->size, p->data);
        } else { /* p->data_is_ptr */
            int i, sign = (p->df) ? -1 : 1;

            if ( p->dir == IOREQ_READ ) {
                for ( i = 0; i < p->count; i++ ) {
                    data = read_handler(v,
                                        p->addr + (sign * i * p->size),
                                        p->size);
                    (void)hvm_copy_to_guest_phys(
                        p->data + (sign * i * p->size),
                        &data,
                        p->size);
                }
            } else { /* p->dir == IOREQ_WRITE */
                for ( i = 0; i < p->count; i++ ) {
                    (void)hvm_copy_from_guest_phys(
                        &data,
                        p->data + (sign * i * p->size),
                        p->size);
                    write_handler(v,
                                  p->addr + (sign * i * p->size),
                                  p->size, data);
                }
            }
        }
        break;
    }

    case IOREQ_TYPE_AND:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 & (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_ADD:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 + (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_OR:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 | (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_XOR:
        tmp1 = read_handler(v, p->addr, p->size);
        if ( p->dir == IOREQ_WRITE ) {
            tmp2 = tmp1 ^ (unsigned long) p->data;
            write_handler(v, p->addr, p->size, tmp2);
        }
        p->data = tmp1;
        break;

    case IOREQ_TYPE_XCHG:
        /*
         * Note that we don't need to be atomic here since the VCPU is
         * accessing its own local APIC.
         */
        tmp1 = read_handler(v, p->addr, p->size);
        write_handler(v, p->addr, p->size, (unsigned long) p->data);
        p->data = tmp1;
        break;

    default:
        printk("hvm_mmio_access: unsupported ioreq type %x\n", p->type);
        domain_crash_synchronous();
        break;
    }
}

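/*
 * Register a save/restore entry for this domain.  hvm_save() emits one
 * record per registered entry via save_state(); on restore the record is
 * matched back to its load_state() handler by (idstr, instance_id), with
 * version_id available to reject incompatible record layouts.
 */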
int hvm_register_savevm(struct domain *d,
                        const char *idstr,
                        int instance_id,
                        int version_id,
                        SaveStateHandler *save_state,
                        LoadStateHandler *load_state,
                        void *opaque)
{
    HVMStateEntry *se, **pse;

    if ( (se = xmalloc(struct HVMStateEntry)) == NULL ) {
        printk("failed to allocate hvmstate entry.\n");
        return -1;
    }

    safe_strcpy(se->idstr, idstr);

    se->instance_id = instance_id;
    se->version_id = version_id;
    se->save_state = save_state;
    se->load_state = load_state;
    se->opaque = opaque;
    se->next = NULL;

    /* Add at the end of the list. */
    pse = &d->arch.hvm_domain.first_se;
    while ( *pse != NULL )
        pse = &(*pse)->next;
    *pse = se;

    return 0;
}

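/*
 * Serialise the domain's HVM state into *h.  The layout, as written by
 * the code below:
 *
 *   struct hvm_save_header          (magic, version, CPUID leaf 1 EAX)
 *   u8 len; char changeset[len]     (Xen changeset identifier)
 *   then, for each registered HVMStateEntry:
 *     u8  len; char idstr[len]
 *     u32 instance_id
 *     u32 version_id
 *     u32 rec_len                   (size of the record body that follows)
 *     ... rec_len bytes written by se->save_state() ...
 *
 * rec_len is back-patched after the save handler runs, by seeking back
 * to len_pos.
 */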
int hvm_save(struct domain *d, hvm_domain_context_t *h)
{
    uint32_t len, len_pos, cur_pos;
    uint32_t eax, ebx, ecx, edx;
    HVMStateEntry *se;
    char *chgset;
    struct hvm_save_header hdr;

    hdr.magic = HVM_FILE_MAGIC;
    hdr.version = HVM_FILE_VERSION;
    cpuid(1, &eax, &ebx, &ecx, &edx);
    hdr.cpuid = eax;
    hvm_put_struct(h, &hdr);

    /* Save the Xen changeset. */
    chgset = strrchr(XEN_CHANGESET, ' ');
    if ( chgset )
        chgset++;
    else
        chgset = XEN_CHANGESET;

    len = strlen(chgset);
    hvm_put_8u(h, len);
    hvm_put_buffer(h, chgset, len);

    for ( se = d->arch.hvm_domain.first_se; se != NULL; se = se->next ) {
        /* ID string */
        len = strnlen(se->idstr, sizeof(se->idstr));
        hvm_put_8u(h, len);
        hvm_put_buffer(h, se->idstr, len);

        hvm_put_32u(h, se->instance_id);
        hvm_put_32u(h, se->version_id);

        /* Record size: write a placeholder, back-patch it below. */
        len_pos = hvm_ctxt_tell(h);
        hvm_put_32u(h, 0);

        se->save_state(h, se->opaque);

        cur_pos = hvm_ctxt_tell(h);
        len = cur_pos - len_pos - 4;
        hvm_ctxt_seek(h, len_pos);
        hvm_put_32u(h, len);
        hvm_ctxt_seek(h, cur_pos);
    }

    h->size = hvm_ctxt_tell(h);
    hvm_ctxt_seek(h, 0);

    if ( h->size >= HVM_CTXT_SIZE ) {
        printk("hvm_domain_context overflow in hvm_save: need %"PRIu32
               " bytes.\n", h->size);
        return -1;
    }

    return 0;
}

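/* Look up a registered save/restore entry by ID string and instance. */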
static HVMStateEntry *find_se(struct domain *d, const char *idstr,
                              int instance_id)
{
    HVMStateEntry *se;

    for ( se = d->arch.hvm_domain.first_se; se != NULL; se = se->next ) {
        if ( !strncmp(se->idstr, idstr, sizeof(se->idstr)) &&
             instance_id == se->instance_id ) {
            return se;
        }
    }
    return NULL;
}

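/*
 * Restore HVM state from *h: validate the header, warn on CPU or
 * changeset mismatches, bring all VCPUs down, then walk the records,
 * handing each to the load handler registered under its
 * (idstr, instance_id) pair.
 */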
int hvm_load(struct domain *d, hvm_domain_context_t *h)
{
    uint32_t len, rec_len, rec_pos, instance_id, version_id;
    uint32_t eax, ebx, ecx, edx;
    HVMStateEntry *se;
    char idstr[HVM_SE_IDSTR_LEN];
    xen_changeset_info_t chgset;
    char *cur_chgset;
    int ret;
    struct hvm_save_header hdr;
    struct vcpu *v;

    if ( h->size >= HVM_CTXT_SIZE ) {
        printk("hvm_load failed: hvm_domain_context overflowed during "
               "hvm_save; need %"PRIu32" bytes.\n", h->size);
        return -1;
    }

    hvm_ctxt_seek(h, 0);

    hvm_get_struct(h, &hdr);

    if ( hdr.magic != HVM_FILE_MAGIC ) {
        printk("HVM restore: magic mismatch!\n");
        return -1;
    }

    if ( hdr.version != HVM_FILE_VERSION ) {
        printk("HVM restore: version mismatch!\n");
        return -1;
    }

    /* Check cpuid. */
    cpuid(1, &eax, &ebx, &ecx, &edx);
    /* TODO: define how big a difference is acceptable. */
    if ( hdr.cpuid != eax )
        printk("warning: trying to restore an HVM guest (0x%"PRIx32") "
               "on a different processor type (0x%"PRIx32").\n",
               hdr.cpuid, eax);

    /* Check the Xen changeset. */
    cur_chgset = strrchr(XEN_CHANGESET, ' ');
    if ( cur_chgset )
        cur_chgset++;
    else
        cur_chgset = XEN_CHANGESET;

    len = hvm_get_8u(h);
    if ( len > 20 ) { /* typical length is 18 -- "revision number:changeset id" */
        printk("wrong changeset length %d in hvm restore!\n", len);
        return -1;
    }

    hvm_get_buffer(h, chgset, len);
    chgset[len] = '\0';
    if ( strncmp(cur_chgset, chgset, len + 1) )
        printk("warning: trying to restore an HVM guest (%s) on a different "
               "changeset %s.\n", chgset, cur_chgset);

    if ( !strcmp(cur_chgset, "unavailable") )
        printk("warning: trying to restore an HVM guest when the changeset "
               "is unavailable.\n");

    /* Down all the vcpus: we only re-enable the ones that had state saved. */
    for_each_vcpu(d, v)
        if ( test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
            vcpu_sleep_nosync(v);

    while ( 1 ) {
        if ( hvm_ctxt_end(h) )
            break;

        /* ID string */
        len = hvm_get_8u(h);
        if ( len >= HVM_SE_IDSTR_LEN ) {
            printk("wrong HVM save entry idstr length %d!\n", len);
            return -1;
        }

        hvm_get_buffer(h, idstr, len);
        idstr[len] = '\0';

        instance_id = hvm_get_32u(h);
        version_id = hvm_get_32u(h);

        printk("HVM S/R Loading \"%s\" instance %#x\n", idstr, instance_id);

        rec_len = hvm_get_32u(h);
        rec_pos = hvm_ctxt_tell(h);

        se = find_se(d, idstr, instance_id);
        if ( se == NULL ) {
            printk("warning: hvm_load cannot find device %s instance %d!\n",
                   idstr, instance_id);
        } else {
            ret = se->load_state(h, se->opaque, version_id);
            if ( ret < 0 )
                printk("warning: failed to load state for device %s "
                       "instance %d!\n", idstr, instance_id);
        }

        /* Make sure we jump to the end of the record. */
        if ( hvm_ctxt_tell(h) - rec_pos != rec_len )
            printk("wrong hvm record size; possible mismatch between save "
                   "and restore handlers!\n");

        hvm_ctxt_seek(h, rec_pos + rec_len);
    }

    return 0;
}

#ifdef HVM_DEBUG_SUSPEND
static void shpage_info(shared_iopage_t *sh)
{
    vcpu_iodata_t *p = &sh->vcpu_iodata[0];
    ioreq_t *req = &p->vp_ioreq;
    printk("*****sharepage_info******!\n");
    printk("vp_eport=%d\n", p->vp_eport);
    printk("io packet: "
           "state:%x, pvalid: %x, dir:%x, port: %"PRIx64", "
           "data: %"PRIx64", count: %"PRIx64", size: %"PRIx64"\n",
           req->state, req->data_is_ptr, req->dir, req->addr,
           req->data, req->count, req->size);
}
#else
static void shpage_info(shared_iopage_t *sh)
{
}
#endif

static void shpage_save(hvm_domain_context_t *h, void *opaque)
{
    /* XXX: no action required for shpage save/restore, since it lives in
     * guest memory; kept for debugging purposes only. */
#if 0
    struct shared_iopage *s = opaque;
    /* XXX: smp */
    struct ioreq *req = &s->vcpu_iodata[0].vp_ioreq;

    shpage_info(s);

    hvm_put_buffer(h, (char *)req, sizeof(struct ioreq));
#endif
}

static int shpage_load(hvm_domain_context_t *h, void *opaque, int version_id)
{
    struct shared_iopage *s = opaque;
#if 0
    /* XXX: smp */
    struct ioreq *req = &s->vcpu_iodata[0].vp_ioreq;

    if ( version_id != 1 )
        return -EINVAL;

    hvm_get_buffer(h, (char *)req, sizeof(struct ioreq));
#endif
    shpage_info(s);
    return 0;
}

void shpage_init(struct domain *d, shared_iopage_t *sp)
{
    hvm_register_savevm(d, "xen_hvm_shpage", 0x10, 1,
                        shpage_save, shpage_load, sp);
}

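/*
 * Try to queue a write on the buffered-I/O ring shared with the device
 * model.  Only writes within a registered range (currently just standard
 * VGA memory) qualify; reads cannot be buffered since the caller needs
 * the result.  Returns 1 if the request was queued, 0 if it must take
 * the normal synchronous path.
 */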
int hvm_buffered_io_intercept(ioreq_t *p)
{
    struct vcpu *v = current;
    spinlock_t *buffered_io_lock;
    buffered_iopage_t *buffered_iopage =
        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
    unsigned long tmp_write_pointer = 0;
    int i;

    /* Ignore READ ioreq_t: reads cannot be buffered. */
    if ( p->dir == IOREQ_READ )
        return 0;

    for ( i = 0; i < HVM_BUFFERED_IO_RANGE_NR; i++ ) {
        if ( p->addr >= hvm_buffered_io_ranges[i]->start_addr &&
             p->addr + p->size - 1 < hvm_buffered_io_ranges[i]->start_addr +
                                     hvm_buffered_io_ranges[i]->length )
            break;
    }

    if ( i == HVM_BUFFERED_IO_RANGE_NR )
        return 0;

    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
    spin_lock(buffered_io_lock);

    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
         (unsigned int)IOREQ_BUFFER_SLOT_NUM ) {
        /* The queue is full: send the packet through the normal path.
         * NOTE: the unsigned arithmetic above handles write_pointer
         * wraparound correctly.
         */
        spin_unlock(buffered_io_lock);
        return 0;
    }

    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;

    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));

    /* Make the ioreq_t visible before updating write_pointer. */
    wmb();
    buffered_iopage->write_pointer++;

    spin_unlock(buffered_io_lock);

    return 1;
}

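/*
 * Handle an MMIO request inside Xen if its address belongs to one of the
 * platform devices emulated here (HPET, local APIC, IO-APIC).  Returns 1
 * if a handler claimed the address, 0 otherwise.
 */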
int hvm_mmio_intercept(ioreq_t *p)
{
    struct vcpu *v = current;
    int i;

    for ( i = 0; i < HVM_MMIO_HANDLER_NR; i++ )
    {
        if ( hvm_mmio_handlers[i]->check_handler(v, p->addr) )
        {
            hvm_mmio_access(v, p,
                            hvm_mmio_handlers[i]->read_handler,
                            hvm_mmio_handlers[i]->write_handler);
            return 1;
        }
    }

    return 0;
}

/*
 * Check whether the request is handled inside Xen.
 * Return value: 0 -- not handled; 1 -- handled.
 */
int hvm_io_intercept(ioreq_t *p, int type)
{
    struct vcpu *v = current;
    struct hvm_io_handler *handler =
        &(v->domain->arch.hvm_domain.io_handler);
    int i;
    unsigned long addr, size;

    for ( i = 0; i < handler->num_slot; i++ ) {
        if ( type != handler->hdl_list[i].type )
            continue;
        addr = handler->hdl_list[i].addr;
        size = handler->hdl_list[i].size;
        if ( p->addr >= addr &&
             p->addr < addr + size )
            return handler->hdl_list[i].action(p);
    }
    return 0;
}

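/*
 * Add an intercept for [addr, addr + size) of the given type to the
 * domain's fixed-size handler table, which hvm_io_intercept() scans on
 * every I/O request.  There is no unregister path, hence the BUG_ON()
 * when the table is full.
 */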
int register_io_handler(
    struct domain *d, unsigned long addr, unsigned long size,
    intercept_action_t action, int type)
{
    struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
    int num = handler->num_slot;

    BUG_ON(num >= MAX_IO_HANDLER);

    handler->hdl_list[num].addr = addr;
    handler->hdl_list[num].size = size;
    handler->hdl_list[num].action = action;
    handler->hdl_list[num].type = type;
    handler->num_slot++;

    return 1;
}

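/*
 * Illustrative sketch (not part of this changeset): how a device emulator
 * inside Xen might hook a port range through register_io_handler() and
 * have requests routed back to it by hvm_io_intercept().  The handler
 * name, port range, and the HVM_PORTIO type constant are assumptions for
 * the example, not definitions made in this file.
 */
#if 0
/* Hypothetical action: consume writes to ports 0x510-0x51f inside Xen. */
static int example_portio_action(ioreq_t *p)
{
    if ( p->dir == IOREQ_WRITE )
        printk("example device: port %"PRIx64" <- %"PRIx64"\n",
               p->addr, p->data);
    return 1; /* handled: no round trip to the device model */
}

static void example_device_init(struct domain *d)
{
    /* After this call, hvm_io_intercept(p, HVM_PORTIO) routes matching
     * port accesses to example_portio_action(). */
    register_io_handler(d, 0x510, 0x10, example_portio_action, HVM_PORTIO);
}
#endif
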
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */