debuggers.hg
changeset 16747:9862217f3c34
hvm: Improve in-Xen PIO emulation to better handle string PIO
instructions.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Sat Jan 12 11:13:57 2008 +0000 (2008-01-12) |
parents | a30aabe3c84a |
children | 533a8e6cebd0 |
files | xen/arch/x86/hvm/i8254.c xen/arch/x86/hvm/intercept.c xen/arch/x86/hvm/pmtimer.c xen/arch/x86/hvm/rtc.c xen/arch/x86/hvm/stdvga.c xen/arch/x86/hvm/vpic.c xen/include/asm-x86/hvm/io.h |
line diff
--- a/xen/arch/x86/hvm/i8254.c	Sat Jan 12 09:29:38 2008 +0000
+++ b/xen/arch/x86/hvm/i8254.c	Sat Jan 12 11:13:57 2008 +0000
@@ -48,8 +48,10 @@
 #define RW_STATE_WORD0 3
 #define RW_STATE_WORD1 4
 
-static int handle_pit_io(ioreq_t *p);
-static int handle_speaker_io(ioreq_t *p);
+static int handle_pit_io(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val);
+static int handle_speaker_io(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val);
 
 /* Compute with 96 bit intermediate result: (a*b)/c */
 static uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
@@ -525,24 +527,25 @@ void pit_deinit(struct domain *d)
 }
 
 /* the intercept action for PIT DM retval:0--not handled; 1--handled */
-static int handle_pit_io(ioreq_t *p)
+static int handle_pit_io(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
 {
     struct PITState *vpit = vcpu_vpit(current);
 
-    if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
+    if ( bytes != 1 )
     {
         gdprintk(XENLOG_WARNING, "PIT bad access\n");
         return 1;
     }
 
-    if ( p->dir == IOREQ_WRITE )
+    if ( dir == IOREQ_WRITE )
     {
-        pit_ioport_write(vpit, p->addr, p->data);
+        pit_ioport_write(vpit, port, *val);
     }
     else
     {
-        if ( (p->addr & 3) != 3 )
-            p->data = pit_ioport_read(vpit, p->addr);
+        if ( (port & 3) != 3 )
+            *val = pit_ioport_read(vpit, port);
         else
             gdprintk(XENLOG_WARNING, "PIT: read A1:A0=3!\n");
     }
@@ -566,11 +569,12 @@ static uint32_t speaker_ioport_read(
             (pit_get_out(pit, 2) << 5) | (refresh_clock << 4));
 }
 
-static int handle_speaker_io(ioreq_t *p)
+static int handle_speaker_io(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
 {
     struct PITState *vpit = vcpu_vpit(current);
 
-    if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
+    if ( bytes != 1 )
     {
         gdprintk(XENLOG_WARNING, "PIT_SPEAKER bad access\n");
         return 1;
@@ -578,10 +582,10 @@ static int handle_speaker_io(ioreq_t *p)
 
     spin_lock(&vpit->lock);
 
-    if ( p->dir == IOREQ_WRITE )
-        speaker_ioport_write(vpit, p->addr, p->data);
+    if ( dir == IOREQ_WRITE )
+        speaker_ioport_write(vpit, port, *val);
     else
-        p->data = speaker_ioport_read(vpit, p->addr);
+        *val = speaker_ioport_read(vpit, port);
 
     spin_unlock(&vpit->lock);
 
@@ -597,13 +601,14 @@ int pv_pit_handler(int port, int data, i
         .dir = write ? IOREQ_WRITE : IOREQ_READ,
         .data = data
     };
+    uint32_t val = data;
 
     if ( (current->domain->domain_id == 0) && dom0_pit_access(&ioreq) )
         /* nothing to do */;
     else if ( port == 0x61 )
-        handle_speaker_io(&ioreq);
+        handle_speaker_io(ioreq.dir, port, 1, &val);
     else
-        handle_pit_io(&ioreq);
+        handle_pit_io(ioreq.dir, port, 1, &val);
 
     return !write ? ioreq.data : 0;
 }
--- a/xen/arch/x86/hvm/intercept.c	Sat Jan 12 09:29:38 2008 +0000
+++ b/xen/arch/x86/hvm/intercept.c	Sat Jan 12 11:13:57 2008 +0000
@@ -247,6 +247,50 @@ int hvm_mmio_intercept(ioreq_t *p)
     return 0;
 }
 
+static int process_portio_intercept(portio_action_t action, ioreq_t *p)
+{
+    int rc = 1, i, sign = p->df ? -1 : 1;
+    uint32_t data;
+
+    if ( p->dir == IOREQ_READ )
+    {
+        if ( !p->data_is_ptr )
+        {
+            rc = action(IOREQ_READ, p->addr, p->size, &data);
+            p->data = data;
+        }
+        else
+        {
+            for ( i = 0; i < p->count; i++ )
+            {
+                rc = action(IOREQ_READ, p->addr, p->size, &data);
+                (void)hvm_copy_to_guest_phys(p->data + sign*i*p->size,
+                                             &data, p->size);
+            }
+        }
+    }
+    else /* p->dir == IOREQ_WRITE */
+    {
+        if ( !p->data_is_ptr )
+        {
+            data = p->data;
+            rc = action(IOREQ_WRITE, p->addr, p->size, &data);
+        }
+        else
+        {
+            for ( i = 0; i < p->count; i++ )
+            {
+                data = 0;
+                (void)hvm_copy_from_guest_phys(&data, p->data + sign*i*p->size,
+                                               p->size);
+                rc = action(IOREQ_WRITE, p->addr, p->size, &data);
+            }
+        }
+    }
+
+    return rc;
+}
+
 /*
  * Check if the request is handled inside xen
  * return value: 0 --not handled; 1 --handled
@@ -255,28 +299,35 @@ int hvm_io_intercept(ioreq_t *p, int typ
 {
     struct vcpu *v = current;
     struct hvm_io_handler *handler =
-        &(v->domain->arch.hvm_domain.io_handler);
+        &v->domain->arch.hvm_domain.io_handler;
     int i;
     unsigned long addr, size;
 
     if ( (type == HVM_PORTIO) && (dpci_ioport_intercept(p)) )
         return 1;
 
-    for (i = 0; i < handler->num_slot; i++) {
-        if( type != handler->hdl_list[i].type)
+    for ( i = 0; i < handler->num_slot; i++ )
+    {
+        if ( type != handler->hdl_list[i].type )
             continue;
         addr = handler->hdl_list[i].addr;
         size = handler->hdl_list[i].size;
-        if (p->addr >= addr &&
-            p->addr + p->size <= addr + size)
-            return handler->hdl_list[i].action(p);
+        if ( (p->addr >= addr) &&
+             ((p->addr + p->size) <= (addr + size)) )
+        {
+            if ( type == HVM_PORTIO )
+                return process_portio_intercept(
+                    handler->hdl_list[i].action.portio, p);
+            return handler->hdl_list[i].action.mmio(p);
+        }
     }
+
     return 0;
 }
 
 int register_io_handler(
     struct domain *d, unsigned long addr, unsigned long size,
-    intercept_action_t action, int type)
+    void *action, int type)
 {
     struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
     int num = handler->num_slot;
@@ -285,8 +336,10 @@ int register_io_handler(
 
     handler->hdl_list[num].addr = addr;
     handler->hdl_list[num].size = size;
-    handler->hdl_list[num].action = action;
-    handler->hdl_list[num].type = type;
+    if ( (handler->hdl_list[num].type = type) == HVM_PORTIO )
+        handler->hdl_list[num].action.portio = action;
+    else
+        handler->hdl_list[num].action.mmio = action;
    handler->num_slot++;
 
     return 1;
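The new process_portio_intercept() above is what lets string (rep) PIO complete inside Xen: when the ioreq carries a guest-physical buffer (data_is_ptr), it invokes the per-port action once per element and copies each element to or from guest memory, stepping the buffer address forwards or backwards according to the direction flag (df). A minimal standalone sketch of that address arithmetic, with illustrative names that are not part of the changeset:

```c
#include <stdint.h>

/* Guest-physical address of element i of a rep ins/outs buffer, assuming
 * 'data' is the buffer start from the ioreq, 'size' is the element width in
 * bytes, and 'df' mirrors EFLAGS.DF: DF=1 walks the buffer downwards. */
static uint64_t element_gpa(uint64_t data, uint32_t size, int df, uint32_t i)
{
    int sign = df ? -1 : 1;
    return data + (int64_t)sign * i * size;
}
```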
--- a/xen/arch/x86/hvm/pmtimer.c	Sat Jan 12 09:29:38 2008 +0000
+++ b/xen/arch/x86/hvm/pmtimer.c	Sat Jan 12 11:13:57 2008 +0000
@@ -114,7 +114,8 @@ static void pmt_timer_callback(void *opa
 }
 
 /* Handle port I/O to the PM1a_STS and PM1a_EN registers */
-static int handle_evt_io(ioreq_t *p)
+static int handle_evt_io(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
 {
     struct vcpu *v = current;
     PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
@@ -123,10 +124,10 @@ static int handle_evt_io(ioreq_t *p)
 
     spin_lock(&s->lock);
 
-    if ( p->dir == IOREQ_WRITE )
+    if ( dir == IOREQ_WRITE )
     {
         /* Handle this I/O one byte at a time */
-        for ( i = p->size, addr = p->addr, data = p->data;
+        for ( i = bytes, addr = port, data = *val;
               i > 0;
               i--, addr++, data >>= 8 )
         {
@@ -150,9 +151,8 @@ static int handle_evt_io(ioreq_t *p)
 
             default:
                 gdprintk(XENLOG_WARNING,
-                         "Bad ACPI PM register write: %"PRIu64
-                         " bytes (%#"PRIx64") at %"PRIx64"\n",
-                         p->size, p->data, p->addr);
+                         "Bad ACPI PM register write: %x bytes (%x) at %x\n",
+                         bytes, *val, port);
             }
         }
         /* Fix up the SCI state to match the new register state */
@@ -161,10 +161,10 @@ static int handle_evt_io(ioreq_t *p)
     else /* p->dir == IOREQ_READ */
     {
         data = s->pm.pm1a_sts | (((uint32_t) s->pm.pm1a_en) << 16);
-        data >>= 8 * (p->addr - PM1a_STS_ADDR);
-        if ( p->size == 1 ) data &= 0xff;
-        else if ( p->size == 2 ) data &= 0xffff;
-        p->data = data;
+        data >>= 8 * (port - PM1a_STS_ADDR);
+        if ( bytes == 1 ) data &= 0xff;
+        else if ( bytes == 2 ) data &= 0xffff;
+        *val = data;
     }
 
     spin_unlock(&s->lock);
@@ -174,22 +174,23 @@ static int handle_evt_io(ioreq_t *p)
 
 
 /* Handle port I/O to the TMR_VAL register */
-static int handle_pmt_io(ioreq_t *p)
+static int handle_pmt_io(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
 {
     struct vcpu *v = current;
     PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
 
-    if ( (p->size != 4) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
+    if ( bytes != 4 )
     {
         gdprintk(XENLOG_WARNING, "HVM_PMT bad access\n");
         return 1;
     }
 
-    if ( p->dir == IOREQ_READ )
+    if ( dir == IOREQ_READ )
     {
         spin_lock(&s->lock);
         pmt_update_time(s);
-        p->data = s->pm.tmr_val;
+        *val = s->pm.tmr_val;
         spin_unlock(&s->lock);
         return 1;
     }
--- a/xen/arch/x86/hvm/rtc.c	Sat Jan 12 09:29:38 2008 +0000
+++ b/xen/arch/x86/hvm/rtc.c	Sat Jan 12 11:13:57 2008 +0000
@@ -395,24 +395,25 @@ static uint32_t rtc_ioport_read(RTCState
     return ret;
 }
 
-static int handle_rtc_io(ioreq_t *p)
+static int handle_rtc_io(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
 {
     struct RTCState *vrtc = vcpu_vrtc(current);
 
-    if ( (p->size != 1) || p->data_is_ptr || (p->type != IOREQ_TYPE_PIO) )
+    if ( bytes != 1 )
     {
         gdprintk(XENLOG_WARNING, "HVM_RTC bas access\n");
         return 1;
     }
 
-    if ( p->dir == IOREQ_WRITE )
+    if ( dir == IOREQ_WRITE )
     {
-        if ( rtc_ioport_write(vrtc, p->addr, p->data & 0xFF) )
+        if ( rtc_ioport_write(vrtc, port, (uint8_t)*val) )
             return 1;
     }
     else if ( vrtc->hw.cmos_index < RTC_CMOS_SIZE )
    {
-        p->data = rtc_ioport_read(vrtc, p->addr);
+        *val = rtc_ioport_read(vrtc, port);
         return 1;
     }
 
--- a/xen/arch/x86/hvm/stdvga.c	Sat Jan 12 09:29:38 2008 +0000
+++ b/xen/arch/x86/hvm/stdvga.c	Sat Jan 12 11:13:57 2008 +0000
@@ -148,42 +148,37 @@ static int stdvga_outb(uint64_t addr, ui
     return rc;
 }
 
-static int stdvga_out(ioreq_t *p)
+static void stdvga_out(uint32_t port, uint32_t bytes, uint32_t val)
 {
-    int rc = 1;
-
-    switch ( p->size )
+    switch ( bytes )
     {
     case 1:
-        rc &= stdvga_outb(p->addr, p->data);
+        stdvga_outb(port, val);
         break;
 
     case 2:
-        rc &= stdvga_outb(p->addr + 0, p->data >> 0);
-        rc &= stdvga_outb(p->addr + 1, p->data >> 8);
+        stdvga_outb(port + 0, val >> 0);
+        stdvga_outb(port + 1, val >> 8);
         break;
 
     default:
-        rc = 0;
         break;
     }
-
-    return rc;
 }
 
-int stdvga_intercept_pio(ioreq_t *p)
+int stdvga_intercept_pio(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
 {
     struct hvm_hw_stdvga *s = &current->domain->arch.hvm_domain.stdvga;
-    int rc;
 
-    if ( p->data_is_ptr || (p->dir == IOREQ_READ) )
+    if ( dir == IOREQ_READ )
         return 0;
 
     spin_lock(&s->lock);
-    rc = (stdvga_out(p) && hvm_buffered_io_send(p));
+    stdvga_out(port, bytes, *val);
     spin_unlock(&s->lock);
 
-    return rc;
+    return 0; /* propagate to external ioemu */
 }
 
 #define GET_PLANE(data, p) (((data) >> ((p) * 8)) & 0xff)
--- a/xen/arch/x86/hvm/vpic.c	Sat Jan 12 09:29:38 2008 +0000
+++ b/xen/arch/x86/hvm/vpic.c	Sat Jan 12 11:13:57 2008 +0000
@@ -316,61 +316,45 @@ static uint32_t vpic_ioport_read(struct
     return vpic->imr;
 }
 
-static int vpic_intercept_pic_io(ioreq_t *p)
+static int vpic_intercept_pic_io(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
+{
+    struct hvm_hw_vpic *vpic;
+
+    if ( bytes != 1 )
+    {
+        gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", bytes);
+        return 1;
+    }
+
+    vpic = &current->domain->arch.hvm_domain.vpic[port >> 7];
+
+    if ( dir == IOREQ_WRITE )
+        vpic_ioport_write(vpic, port, (uint8_t)*val);
+    else
+        *val = (uint8_t)vpic_ioport_read(vpic, port);
+
+    return 1;
+}
+
+static int vpic_intercept_elcr_io(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
 {
     struct hvm_hw_vpic *vpic;
     uint32_t data;
 
-    if ( (p->size != 1) || (p->count != 1) )
+    if ( bytes != 1 )
     {
-        gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", (int)p->size);
+        gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", bytes);
         return 1;
     }
 
-    vpic = &current->domain->arch.hvm_domain.vpic[p->addr >> 7];
-
-    if ( p->dir == IOREQ_WRITE )
-    {
-        if ( p->data_is_ptr )
-            (void)hvm_copy_from_guest_phys(&data, p->data, p->size);
-        else
-            data = p->data;
-        vpic_ioport_write(vpic, (uint32_t)p->addr, (uint8_t)data);
-    }
-    else
-    {
-        data = vpic_ioport_read(vpic, (uint32_t)p->addr);
-        if ( p->data_is_ptr )
-            (void)hvm_copy_to_guest_phys(p->data, &data, p->size);
-        else
-            p->data = (u64)data;
-    }
-
-    return 1;
-}
+    vpic = &current->domain->arch.hvm_domain.vpic[port & 1];
 
-static int vpic_intercept_elcr_io(ioreq_t *p)
-{
-    struct hvm_hw_vpic *vpic;
-    uint32_t data;
-
-    if ( (p->size != 1) || (p->count != 1) )
+    if ( dir == IOREQ_WRITE )
     {
-        gdprintk(XENLOG_WARNING, "PIC_IO bad access size %d\n", (int)p->size);
-        return 1;
-    }
-
-    vpic = &current->domain->arch.hvm_domain.vpic[p->addr & 1];
-
-    if ( p->dir == IOREQ_WRITE )
-    {
-        if ( p->data_is_ptr )
-            (void)hvm_copy_from_guest_phys(&data, p->data, p->size);
-        else
-            data = p->data;
-
         /* Some IRs are always edge trig. Slave IR is always level trig. */
-        data &= vpic_elcr_mask(vpic);
+        data = *val & vpic_elcr_mask(vpic);
         if ( vpic->is_master )
             data |= 1 << 2;
         vpic->elcr = data;
@@ -378,12 +362,7 @@ static int vpic_intercept_elcr_io(ioreq_
     else
     {
         /* Reader should not see hardcoded level-triggered slave IR. */
-        data = vpic->elcr & vpic_elcr_mask(vpic);
-
-        if ( p->data_is_ptr )
-            (void)hvm_copy_to_guest_phys(p->data, &data, p->size);
-        else
-            p->data = data;
+        *val = vpic->elcr & vpic_elcr_mask(vpic);
     }
 
     return 1;
--- a/xen/include/asm-x86/hvm/io.h	Sat Jan 12 09:29:38 2008 +0000
+++ b/xen/include/asm-x86/hvm/io.h	Sat Jan 12 11:13:57 2008 +0000
@@ -86,23 +86,26 @@ struct hvm_io_op {
 #define HVM_MMIO 1
 #define HVM_BUFFERED_IO 2
 
-typedef int (*intercept_action_t)(ioreq_t *);
 typedef unsigned long (*hvm_mmio_read_t)(struct vcpu *v,
                                          unsigned long addr,
                                          unsigned long length);
-
 typedef void (*hvm_mmio_write_t)(struct vcpu *v,
                                  unsigned long addr,
                                  unsigned long length,
                                  unsigned long val);
-
 typedef int (*hvm_mmio_check_t)(struct vcpu *v, unsigned long addr);
 
+typedef int (*portio_action_t)(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val);
+typedef int (*mmio_action_t)(ioreq_t *);
 struct io_handler {
     int type;
     unsigned long addr;
     unsigned long size;
-    intercept_action_t action;
+    union {
+        portio_action_t portio;
+        mmio_action_t mmio;
+    } action;
 };
 
 struct hvm_io_handler {
@@ -120,7 +123,7 @@ struct hvm_mmio_handler {
 extern int hvm_io_intercept(ioreq_t *p, int type);
 extern int register_io_handler(
     struct domain *d, unsigned long addr, unsigned long size,
-    intercept_action_t action, int type);
+    void *action, int type);
 
 static inline int hvm_portio_intercept(ioreq_t *p)
 {
@@ -137,14 +140,14 @@ extern int hvm_buffered_io_send(ioreq_t
 
 static inline int register_portio_handler(
     struct domain *d, unsigned long addr,
-    unsigned long size, intercept_action_t action)
+    unsigned long size, portio_action_t action)
 {
     return register_io_handler(d, addr, size, action, HVM_PORTIO);
 }
 
 static inline int register_buffered_io_handler(
     struct domain *d, unsigned long addr,
-    unsigned long size, intercept_action_t action)
+    unsigned long size, mmio_action_t action)
 {
     return register_io_handler(d, addr, size, action, HVM_BUFFERED_IO);
 }
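With this interface change, an in-Xen port handler no longer parses the raw ioreq_t itself: it implements portio_action_t, is registered through register_portio_handler(), and sees exactly one element per call even when the guest executes rep ins/outs, since process_portio_intercept() does the unrolling. A minimal sketch of a handler written against the new signature follows; the handler name, the backing register and the port number are hypothetical, chosen only for illustration:

```c
/* Hypothetical 4-byte register at a made-up port, handled entirely in Xen. */
static uint32_t demo_reg;

static int handle_demo_io(
    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
{
    if ( bytes != 4 )
    {
        gdprintk(XENLOG_WARNING, "demo port: bad access\n");
        return 1;                 /* consume the malformed access */
    }

    if ( dir == IOREQ_WRITE )
        demo_reg = *val;
    else
        *val = demo_reg;

    return 1;                     /* 1 = handled inside Xen */
}

/* Registered once per domain, e.g. during HVM domain initialisation:
 *     register_portio_handler(d, 0x510, 4, handle_demo_io);
 * (0x510 is an arbitrary example port.)
 */
```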