debuggers.hg
changeset 16392:91575bb23d07
[IA64] vti save-restore: hvm domain io page clean up.
- set_hvm_param hypercall clean up.
- The reference counts of the io pages must be incremented.
- Buffered pio wasn't SMP safe.
- Clean up get_vio() parameter.
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
| author | Alex Williamson <alex.williamson@hp.com> |
|---|---|
| date | Wed Nov 07 10:31:09 2007 -0700 (2007-11-07) |
| parents | 74b40a9f4c0a |
| children | df5b49037c77 |
| files | xen/arch/ia64/vmx/mmio.c xen/arch/ia64/vmx/vmx_hypercall.c xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/vmx/vmx_support.c xen/include/asm-ia64/vmx.h xen/include/asm-ia64/vmx_platform.h |
line diff
```diff
--- a/xen/arch/ia64/vmx/mmio.c	Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/arch/ia64/vmx/mmio.c	Wed Nov 07 10:31:09 2007 -0700
@@ -56,7 +56,7 @@ static int hvm_buffered_io_intercept(ior
 {
     struct vcpu *v = current;
     buffered_iopage_t *pg =
-        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
+        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buf_ioreq.va);
     buf_ioreq_t bp;
     int i, qw = 0;
 
@@ -101,7 +101,7 @@ static int hvm_buffered_io_intercept(ior
     bp.data = p->data;
     bp.addr = p->addr;
 
-    spin_lock(&v->domain->arch.hvm_domain.buffered_io_lock);
+    spin_lock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
 
     if (pg->write_pointer - pg->read_pointer >= IOREQ_BUFFER_SLOT_NUM - qw) {
         /* the queue is full.
@@ -109,7 +109,7 @@ static int hvm_buffered_io_intercept(ior
          * NOTE: The arithimetic operation could handle the situation for
          * write_pointer overflow.
          */
-        spin_unlock(&v->domain->arch.hvm_domain.buffered_io_lock);
+        spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
         return 0;
     }
 
@@ -126,7 +126,7 @@ static int hvm_buffered_io_intercept(ior
     wmb();
     pg->write_pointer += qw ? 2 : 1;
 
-    spin_unlock(&v->domain->arch.hvm_domain.buffered_io_lock);
+    spin_unlock(&v->domain->arch.hvm_domain.buf_ioreq.lock);
 
     return 1;
 }
@@ -137,7 +137,7 @@ static void low_mmio_access(VCPU *vcpu,
     vcpu_iodata_t *vio;
     ioreq_t *p;
 
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_vio(v);
     if (!vio)
         panic_domain(NULL, "bad shared page");
 
@@ -174,7 +174,8 @@ static void low_mmio_access(VCPU *vcpu,
 static int vmx_ide_pio_intercept(ioreq_t *p, u64 *val)
 {
     struct buffered_piopage *pio_page =
-        (void *)(current->domain->arch.hvm_domain.buffered_pio_va);
+        (void *)(current->domain->arch.hvm_domain.buf_pioreq.va);
+    spinlock_t *pio_lock;
     struct pio_buffer *piobuf;
     uint32_t pointer, page_offset;
 
@@ -188,14 +189,17 @@ static int vmx_ide_pio_intercept(ioreq_t
     if (p->size != 2 && p->size != 4)
         return 0;
 
+    pio_lock = &current->domain->arch.hvm_domain.buf_pioreq.lock;
+    spin_lock(pio_lock);
+
     pointer = piobuf->pointer;
     page_offset = piobuf->page_offset;
 
     /* sanity check */
     if (page_offset + pointer < offsetof(struct buffered_piopage, buffer))
-        return 0;
+        goto unlock_out;
     if (page_offset + piobuf->data_end > PAGE_SIZE)
-        return 0;
+        goto unlock_out;
 
     if (pointer + p->size < piobuf->data_end) {
         uint8_t *bufp = (uint8_t *)pio_page + page_offset + pointer;
@@ -213,10 +217,15 @@ static int vmx_ide_pio_intercept(ioreq_t
             }
         }
         piobuf->pointer += p->size;
+        spin_unlock(pio_lock);
+
         p->state = STATE_IORESP_READY;
         vmx_io_assist(current);
         return 1;
     }
+
+ unlock_out:
+    spin_unlock(pio_lock);
     return 0;
 }
 
@@ -258,7 +267,7 @@ static void legacy_io_access(VCPU *vcpu,
     vcpu_iodata_t *vio;
     ioreq_t *p;
 
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_vio(v);
     if (!vio)
         panic_domain(NULL, "bad shared page\n");
```
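The mmio.c hunks above are the "buffered pio wasn't SMP safe" fix: vmx_ide_pio_intercept() used to read and advance piobuf->pointer without holding any lock, so two vcpus hitting the buffered-PIO page concurrently could corrupt it. Below is a minimal, self-contained sketch of the same pattern (take the per-page lock before touching the ring, and funnel every early exit through a single unlock label). It uses pthreads and invented stand-in names (pio_ring, pio_append) rather than Xen's spinlock_t and buffered_piopage, so it is an illustration of the idea, not the patch itself.

```c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 256

/* Stand-in for the shared buffered-PIO page (hypothetical types/names). */
struct pio_ring {
    pthread_mutex_t lock;      /* plays the role of buf_pioreq.lock       */
    uint32_t pointer;          /* current fill position, shared by vcpus  */
    uint32_t data_end;         /* end of usable space in the buffer       */
    uint8_t  buffer[RING_SIZE];
};

/* Append 'size' bytes under the lock; every failure path leaves through
 * the single unlock_out label, mirroring the patch's exit structure. */
static int pio_append(struct pio_ring *ring, const void *val, uint32_t size)
{
    int ok = 0;

    pthread_mutex_lock(&ring->lock);

    /* sanity checks are done only once the lock is held */
    if (size != 2 && size != 4)
        goto unlock_out;
    if (ring->pointer + size > ring->data_end)
        goto unlock_out;

    memcpy(ring->buffer + ring->pointer, val, size);
    ring->pointer += size;     /* the update the unlocked code left racy */
    ok = 1;

unlock_out:
    pthread_mutex_unlock(&ring->lock);
    return ok;
}

int main(void)
{
    static struct pio_ring ring = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .pointer = 0,
        .data_end = RING_SIZE,
    };
    uint32_t v = 0xdeadbeef;

    printf("append: %d, pointer now %u\n",
           pio_append(&ring, &v, 4), (unsigned)ring.pointer);
    return 0;
}
```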
```diff
--- a/xen/arch/ia64/vmx/vmx_hypercall.c	Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c	Wed Nov 07 10:31:09 2007 -0700
@@ -133,8 +133,34 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
             return -EPERM;
 
         if (op == HVMOP_set_param) {
-            d->arch.hvm_domain.params[a.index] = a.value;
-            rc = 0;
+            struct vmx_ioreq_page *iorp;
+            struct vcpu *v;
+
+            switch (a.index) {
+            case HVM_PARAM_IOREQ_PFN:
+                iorp = &d->arch.hvm_domain.ioreq;
+                rc = vmx_set_ioreq_page(d, iorp, a.value);
+                spin_lock(&iorp->lock);
+                if (rc == 0 && iorp->va != NULL)
+                    /* Initialise evtchn port info if VCPUs already created. */
+                    for_each_vcpu(d, v)
+                        get_vio(v)->vp_eport = v->arch.arch_vmx.xen_port;
+                spin_unlock(&iorp->lock);
+                break;
+            case HVM_PARAM_BUFIOREQ_PFN:
+                iorp = &d->arch.hvm_domain.buf_ioreq;
+                rc = vmx_set_ioreq_page(d, iorp, a.value);
+                break;
+            case HVM_PARAM_BUFPIOREQ_PFN:
+                iorp = &d->arch.hvm_domain.buf_pioreq;
+                rc = vmx_set_ioreq_page(d, iorp, a.value);
+                break;
+            default:
+                /* nothing */
+                break;
+            }
+            if (rc == 0)
+                d->arch.hvm_domain.params[a.index] = a.value;
         }
         else {
             a.value = d->arch.hvm_domain.params[a.index];
```
```diff
--- a/xen/arch/ia64/vmx/vmx_init.c	Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_init.c	Wed Nov 07 10:31:09 2007 -0700
@@ -267,22 +267,44 @@ vmx_load_state(struct vcpu *v)
 	 * anchored in vcpu */
 }
 
-static void vmx_create_event_channels(struct vcpu *v)
+static int
+vmx_vcpu_initialise(struct vcpu *v)
 {
-    vcpu_iodata_t *p;
+    struct vmx_ioreq_page *iorp = &v->domain->arch.hvm_domain.ioreq;
+
+    int rc = alloc_unbound_xen_event_channel(v, 0);
+    if (rc < 0)
+        return rc;
+    v->arch.arch_vmx.xen_port = rc;
+
+    spin_lock(&iorp->lock);
+    if (v->domain->arch.vmx_platform.ioreq.va != 0) {
+        vcpu_iodata_t *p = get_vio(v);
+        p->vp_eport = v->arch.arch_vmx.xen_port;
+    }
+    spin_unlock(&iorp->lock);
+
+    gdprintk(XENLOG_INFO, "Allocated port %ld for hvm %d vcpu %d.\n",
+             v->arch.arch_vmx.xen_port, v->domain->domain_id, v->vcpu_id);
+
+    return 0;
+}
+
+static int vmx_create_event_channels(struct vcpu *v)
+{
     struct vcpu *o;
 
     if (v->vcpu_id == 0) {
         /* Ugly: create event channels for every vcpu when vcpu 0
            starts, so that they're available for ioemu to bind to. */
         for_each_vcpu(v->domain, o) {
-            p = get_vio(v->domain, o->vcpu_id);
-            o->arch.arch_vmx.xen_port = p->vp_eport =
-                    alloc_unbound_xen_event_channel(o, 0);
-            gdprintk(XENLOG_INFO, "Allocated port %ld for hvm.\n",
-                     o->arch.arch_vmx.xen_port);
+            int rc = vmx_vcpu_initialise(o);
+            if (rc < 0) //XXX error recovery
+                return rc;
         }
     }
+
+    return 0;
 }
 
 /*
@@ -294,6 +316,67 @@ static void vmx_release_assist_channel(s
     return;
 }
 
+/* following three functions are based from hvm_xxx_ioreq_page()
+ * in xen/arch/x86/hvm/hvm.c */
+static void vmx_init_ioreq_page(
+    struct domain *d, struct vmx_ioreq_page *iorp)
+{
+    memset(iorp, 0, sizeof(*iorp));
+    spin_lock_init(&iorp->lock);
+    domain_pause(d);
+}
+
+static void vmx_destroy_ioreq_page(
+    struct domain *d, struct vmx_ioreq_page *iorp)
+{
+    spin_lock(&iorp->lock);
+
+    ASSERT(d->is_dying);
+
+    if (iorp->va != NULL) {
+        put_page(iorp->page);
+        iorp->page = NULL;
+        iorp->va = NULL;
+    }
+
+    spin_unlock(&iorp->lock);
+}
+
+int vmx_set_ioreq_page(
+    struct domain *d, struct vmx_ioreq_page *iorp, unsigned long gpfn)
+{
+    struct page_info *page;
+    unsigned long mfn;
+    pte_t pte;
+
+    pte = *lookup_noalloc_domain_pte(d, gpfn << PAGE_SHIFT);
+    if (!pte_present(pte) || !pte_mem(pte))
+        return -EINVAL;
+    mfn = (pte_val(pte) & _PFN_MASK) >> PAGE_SHIFT;
+    ASSERT(mfn_valid(mfn));
+
+    page = mfn_to_page(mfn);
+    if (get_page(page, d) == 0)
+        return -EINVAL;
+
+    spin_lock(&iorp->lock);
+
+    if ((iorp->va != NULL) || d->is_dying) {
+        spin_unlock(&iorp->lock);
+        put_page(page);
+        return -EINVAL;
+    }
+
+    iorp->va = mfn_to_virt(mfn);
+    iorp->page = page;
+
+    spin_unlock(&iorp->lock);
+
+    domain_unpause(d);
+
+    return 0;
+}
+
 /*
  * Initialize VMX envirenment for guest. Only the 1st vp/vcpu
  * is registered here.
@@ -320,7 +403,10 @@ vmx_final_setup_guest(struct vcpu *v)
     rc = init_domain_tlb(v);
     if (rc)
         return rc;
-    vmx_create_event_channels(v);
+
+    rc = vmx_create_event_channels(v);
+    if (rc)
+        return rc;
 
     /* v->arch.schedule_tail = arch_vmx_do_launch; */
     vmx_create_vp(v);
@@ -352,6 +438,10 @@ vmx_relinquish_guest_resources(struct do
         vmx_release_assist_channel(v);
 
     vacpi_relinquish_resources(d);
+
+    vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.ioreq);
+    vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
+    vmx_destroy_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
 }
 
 void
@@ -397,26 +487,14 @@ static void vmx_build_io_physmap_table(s
 
 int vmx_setup_platform(struct domain *d)
 {
-    unsigned long mpa;
     ASSERT(d != dom0); /* only for non-privileged vti domain */
 
     vmx_build_io_physmap_table(d);
 
-    mpa = __gpa_to_mpa(d, IO_PAGE_START);
-    if (mpa == 0)
-        return -EINVAL;
-    d->arch.vmx_platform.shared_page_va = (unsigned long)__va(mpa);
-    /* For buffered IO requests. */
-    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
+    vmx_init_ioreq_page(d, &d->arch.vmx_platform.ioreq);
+    vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_ioreq);
+    vmx_init_ioreq_page(d, &d->arch.vmx_platform.buf_pioreq);
 
-    mpa = __gpa_to_mpa(d, BUFFER_IO_PAGE_START);
-    if (mpa == 0)
-        return -EINVAL;
-    d->arch.hvm_domain.buffered_io_va = (unsigned long)__va(mpa);
-    mpa = __gpa_to_mpa(d, BUFFER_PIO_PAGE_START);
-    if (mpa == 0)
-        return -EINVAL;
-    d->arch.hvm_domain.buffered_pio_va = (unsigned long)__va(mpa);
     /* TEMP */
     d->arch.vmx_platform.pib_base = 0xfee00000UL;
 
@@ -445,7 +523,7 @@ void vmx_do_resume(struct vcpu *v)
 
     /* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
-    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+    p = &get_vio(v)->vp_ioreq;
     while (p->state != STATE_IOREQ_NONE) {
         switch (p->state) {
         case STATE_IORESP_READY: /* IORESP_READY -> NONE */
```
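The new vmx_set_ioreq_page()/vmx_destroy_ioreq_page() pair above is what implements "the reference counts of the io pages must be incremented": a reference is taken with get_page() before the page is published in the vmx_ioreq_page under its lock, dropped again if publication fails (page already set or domain dying), and released at teardown. The following is a rough, self-contained sketch of that ordering with hypothetical stand-ins (struct page, set_ioreq_page, destroy_ioreq_page) in place of Xen's page_info, lookup, and domain state; it shows the get/put pairing, not the real hypervisor code.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for page_info / get_page / put_page. */
struct page { atomic_int refcnt; };
static int  get_page(struct page *p) { atomic_fetch_add(&p->refcnt, 1); return 1; }
static void put_page(struct page *p) { atomic_fetch_sub(&p->refcnt, 1); }

struct ioreq_page {
    pthread_mutex_t lock;
    struct page *page;   /* holds a reference while mapped */
    void *va;
};

/* Mirror of the patch's ordering: take the reference first, then try to
 * publish under the lock; if someone else got there first, undo it. */
static int set_ioreq_page(struct ioreq_page *iorp, struct page *pg, void *va)
{
    if (!get_page(pg))
        return -1;

    pthread_mutex_lock(&iorp->lock);
    if (iorp->va != NULL) {
        pthread_mutex_unlock(&iorp->lock);
        put_page(pg);            /* lost the race: drop the reference */
        return -1;
    }
    iorp->page = pg;
    iorp->va = va;
    pthread_mutex_unlock(&iorp->lock);
    return 0;
}

/* Teardown drops the reference taken in set_ioreq_page(). */
static void destroy_ioreq_page(struct ioreq_page *iorp)
{
    pthread_mutex_lock(&iorp->lock);
    if (iorp->va != NULL) {
        put_page(iorp->page);
        iorp->page = NULL;
        iorp->va = NULL;
    }
    pthread_mutex_unlock(&iorp->lock);
}

int main(void)
{
    static char backing[64];
    static struct page pg = { .refcnt = 1 };
    static struct ioreq_page iorp = { .lock = PTHREAD_MUTEX_INITIALIZER };

    printf("set: %d (refcnt %d)\n",
           set_ioreq_page(&iorp, &pg, backing), atomic_load(&pg.refcnt));
    destroy_ioreq_page(&iorp);
    printf("destroyed (refcnt %d)\n", atomic_load(&pg.refcnt));
    return 0;
}
```

Taking the reference before entering the critical section keeps the locked region short; the unlock-then-put on the failure path mirrors the patch.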
```diff
--- a/xen/arch/ia64/vmx/vmx_support.c	Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/arch/ia64/vmx/vmx_support.c	Wed Nov 07 10:31:09 2007 -0700
@@ -42,7 +42,7 @@ void vmx_io_assist(struct vcpu *v)
      * This shared page contains I/O request between emulation code
      * and device model.
      */
-    vio = get_vio(v->domain, v->vcpu_id);
+    vio = get_vio(v);
     if (!vio)
         panic_domain(vcpu_regs(v),"Corruption: bad shared page: %lx\n",
                      (unsigned long)vio);
@@ -65,7 +65,7 @@ void vmx_send_assist_req(struct vcpu *v)
 {
     ioreq_t *p;
 
-    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
+    p = &get_vio(v)->vp_ioreq;
     if (unlikely(p->state != STATE_IOREQ_NONE)) {
         /* This indicates a bug in the device model. Crash the
            domain. */
```
```diff
--- a/xen/include/asm-ia64/vmx.h	Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/include/asm-ia64/vmx.h	Wed Nov 07 10:31:09 2007 -0700
@@ -57,8 +57,12 @@ extern void vmx_send_assist_req(struct v
 extern void deliver_pal_init(struct vcpu *vcpu);
 extern void vmx_pend_pal_init(struct domain *d);
 
-static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
+static inline vcpu_iodata_t *get_vio(struct vcpu *v)
 {
-    return &((shared_iopage_t *)d->arch.vmx_platform.shared_page_va)->vcpu_iodata[cpu];
+    struct domain *d = v->domain;
+    shared_iopage_t *p = (shared_iopage_t *)d->arch.vmx_platform.ioreq.va;
+    ASSERT((v == current) || spin_is_locked(&d->arch.vmx_platform.ioreq.lock));
+    ASSERT(d->arch.vmx_platform.ioreq.va != NULL);
+    return &p->vcpu_iodata[v->vcpu_id];
 }
 #endif /* _ASM_IA64_VT_H */
```
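The reworked get_vio() encodes its locking contract in assertions: the caller must either be the vcpu whose slot is accessed ("current") or hold the ioreq page lock, and the shared page must already be mapped. A small illustrative sketch of that contract, using a hypothetical get_slot() accessor and pthread primitives instead of Xen's spin_is_locked(), is shown below.

```c
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for the domain-wide shared ioreq page. */
struct ioreq_state {
    pthread_mutex_t lock;
    pthread_t holder;          /* who currently holds 'lock' (illustrative) */
    void *va;                  /* NULL until the shared page is mapped      */
    int vcpu_iodata[4];
};

/* Accessor in the spirit of the new get_vio(struct vcpu *v): callers must
 * either be the vcpu in question or hold the ioreq lock, and the page
 * must already be mapped. */
static int *get_slot(struct ioreq_state *s, int vcpu_id, int current_id)
{
    assert(vcpu_id == current_id ||
           pthread_equal(s->holder, pthread_self()));
    assert(s->va != NULL);
    return &s->vcpu_iodata[vcpu_id];
}

int main(void)
{
    static char page[64];
    static struct ioreq_state s = { .lock = PTHREAD_MUTEX_INITIALIZER };

    s.va = page;
    *get_slot(&s, 0, 0) = 42;                        /* own slot: no lock needed */

    pthread_mutex_lock(&s.lock);
    s.holder = pthread_self();
    printf("vcpu0 slot = %d\n", *get_slot(&s, 0, 1)); /* remote slot under lock */
    pthread_mutex_unlock(&s.lock);
    return 0;
}
```

Centralising the check in the accessor is what lets the HVMOP_set_param path above walk every vcpu's slot safely while holding iorp->lock.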
```diff
--- a/xen/include/asm-ia64/vmx_platform.h	Wed Nov 07 10:19:21 2007 -0700
+++ b/xen/include/asm-ia64/vmx_platform.h	Wed Nov 07 10:31:09 2007 -0700
@@ -43,17 +43,24 @@
  * it is not used on ia64 */
 #define OS_TYPE_PORT    0xB2
 
+struct vmx_ioreq_page {
+    spinlock_t          lock;
+    struct page_info    *page;
+    void                *va;
+};
+int vmx_set_ioreq_page(struct domain *d,
+                       struct vmx_ioreq_page *iorp, unsigned long gmfn);
+
 typedef struct virtual_platform_def {
-    unsigned long       gos_type;
-    unsigned long       buffered_io_va;
-    spinlock_t          buffered_io_lock;
-    unsigned long       buffered_pio_va;
-    unsigned long       shared_page_va;
-    unsigned long       pib_base;
-    unsigned long       params[HVM_NR_PARAMS];
+    unsigned long               gos_type;
+    struct vmx_ioreq_page       ioreq;
+    struct vmx_ioreq_page       buf_ioreq;
+    struct vmx_ioreq_page       buf_pioreq;
+    unsigned long               pib_base;
+    unsigned long               params[HVM_NR_PARAMS];
     /* One IOSAPIC now... */
-    struct viosapic     viosapic;
-    struct vacpi        vacpi;
+    struct viosapic             viosapic;
+    struct vacpi                vacpi;
 } vir_plat_t;
 
 static inline int __fls(uint32_t word)
```