debuggers.hg
changeset 16474:81e63d66a64d
vt-d: Fix ISA IRQ alias issue
When assigning multiple devices to a guest which uses the PIC, an ISA IRQ
alias may occur. This patch splits the ISA IRQ and GSI eoi functions. The ISA
IRQ eoi function searches all assigned mirqs and performs eoi for the
corresponding mirqs which match the eoi'd ISA IRQ, thereby fixing the ISA IRQ
alias issue.
Signed-off-by: Weidong Han <weidong.han@intel.com>
When assigning multiple devices to a guest which uses the PIC, an ISA IRQ
alias may occur. This patch splits the ISA IRQ and GSI eoi functions. The ISA
IRQ eoi function searches all assigned mirqs and performs eoi for the
corresponding mirqs which match the eoi'd ISA IRQ, thereby fixing the ISA IRQ
alias issue.
Signed-off-by: Weidong Han <weidong.han@intel.com>
author | Keir Fraser <keir.fraser@citrix.com> |
---|---|
date | Fri Nov 23 16:39:45 2007 +0000 (2007-11-23) |
parents | e40015e20548 |
children | c9c476a22036 |
files | xen/arch/x86/hvm/irq.c xen/arch/x86/hvm/vmx/intr.c xen/arch/x86/hvm/vmx/vtd/io.c xen/include/asm-x86/hvm/irq.h |
line diff
1.1 --- a/xen/arch/x86/hvm/irq.c Fri Nov 23 16:25:59 2007 +0000 1.2 +++ b/xen/arch/x86/hvm/irq.c Fri Nov 23 16:39:45 2007 +0000 1.3 @@ -181,6 +181,7 @@ void hvm_set_pci_link_route(struct domai 1.4 { 1.5 struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 1.6 u8 old_isa_irq; 1.7 + int i; 1.8 1.9 ASSERT((link <= 3) && (isa_irq <= 15)); 1.10 1.11 @@ -192,12 +193,16 @@ void hvm_set_pci_link_route(struct domai 1.12 hvm_irq->pci_link.route[link] = isa_irq; 1.13 1.14 /* PCI pass-through fixup. */ 1.15 - if ( hvm_irq->dpci && hvm_irq->dpci->link[link].valid ) 1.16 + if ( hvm_irq->dpci ) 1.17 { 1.18 - hvm_irq->dpci->girq[isa_irq] = hvm_irq->dpci->link[link]; 1.19 - if ( hvm_irq->dpci->girq[old_isa_irq].device == 1.20 - hvm_irq->dpci->link[link].device ) 1.21 - hvm_irq->dpci->girq[old_isa_irq].valid = 0; 1.22 + if ( old_isa_irq ) 1.23 + clear_bit(old_isa_irq, &hvm_irq->dpci->isairq_map); 1.24 + 1.25 + for ( i = 0; i < NR_LINK; i++ ) 1.26 + if ( test_bit(i, &hvm_irq->dpci->link_map) && 1.27 + hvm_irq->pci_link.route[i] ) 1.28 + set_bit(hvm_irq->pci_link.route[i], 1.29 + &hvm_irq->dpci->isairq_map); 1.30 } 1.31 1.32 if ( hvm_irq->pci_link_assert_count[link] == 0 )
2.1 --- a/xen/arch/x86/hvm/vmx/intr.c Fri Nov 23 16:25:59 2007 +0000 2.2 +++ b/xen/arch/x86/hvm/vmx/intr.c Fri Nov 23 16:39:45 2007 +0000 2.3 @@ -113,7 +113,7 @@ static void vmx_dirq_assist(struct vcpu 2.4 uint32_t device, intx; 2.5 struct domain *d = v->domain; 2.6 struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci; 2.7 - struct dev_intx_gsi *dig; 2.8 + struct dev_intx_gsi_link *digl; 2.9 2.10 if ( !vtd_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) ) 2.11 return; 2.12 @@ -125,10 +125,10 @@ static void vmx_dirq_assist(struct vcpu 2.13 stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]); 2.14 clear_bit(irq, &hvm_irq_dpci->dirq_mask); 2.15 2.16 - list_for_each_entry ( dig, &hvm_irq_dpci->mirq[irq].dig_list, list ) 2.17 + list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list ) 2.18 { 2.19 - device = dig->device; 2.20 - intx = dig->intx; 2.21 + device = digl->device; 2.22 + intx = digl->intx; 2.23 hvm_pci_intx_assert(d, device, intx); 2.24 spin_lock(&hvm_irq_dpci->dirq_lock); 2.25 hvm_irq_dpci->mirq[irq].pending++;
3.1 --- a/xen/arch/x86/hvm/vmx/vtd/io.c Fri Nov 23 16:25:59 2007 +0000 3.2 +++ b/xen/arch/x86/hvm/vmx/vtd/io.c Fri Nov 23 16:39:45 2007 +0000 3.3 @@ -50,15 +50,15 @@ static void pt_irq_time_out(void *data) 3.4 struct hvm_mirq_dpci_mapping *irq_map = data; 3.5 unsigned int guest_gsi, machine_gsi = 0; 3.6 struct hvm_irq_dpci *dpci = irq_map->dom->arch.hvm_domain.irq.dpci; 3.7 - struct dev_intx_gsi *dig; 3.8 + struct dev_intx_gsi_link *digl; 3.9 uint32_t device, intx; 3.10 3.11 - list_for_each_entry ( dig, &irq_map->dig_list, list ) 3.12 + list_for_each_entry ( digl, &irq_map->digl_list, list ) 3.13 { 3.14 - guest_gsi = dig->gsi; 3.15 + guest_gsi = digl->gsi; 3.16 machine_gsi = dpci->girq[guest_gsi].machine_gsi; 3.17 - device = dig->device; 3.18 - intx = dig->intx; 3.19 + device = digl->device; 3.20 + intx = digl->intx; 3.21 hvm_pci_intx_deassert(irq_map->dom, device, intx); 3.22 } 3.23 3.24 @@ -76,7 +76,7 @@ int pt_irq_create_bind_vtd( 3.25 struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci; 3.26 uint32_t machine_gsi, guest_gsi; 3.27 uint32_t device, intx, link; 3.28 - struct dev_intx_gsi *dig; 3.29 + struct dev_intx_gsi_link *digl; 3.30 3.31 if ( hvm_irq_dpci == NULL ) 3.32 { 3.33 @@ -87,7 +87,7 @@ int pt_irq_create_bind_vtd( 3.34 memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci)); 3.35 spin_lock_init(&hvm_irq_dpci->dirq_lock); 3.36 for ( int i = 0; i < NR_IRQS; i++ ) 3.37 - INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].dig_list); 3.38 + INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].digl_list); 3.39 3.40 if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci, 3.41 0, (unsigned long)hvm_irq_dpci) != 0 ) 3.42 @@ -101,27 +101,24 @@ int pt_irq_create_bind_vtd( 3.43 intx = pt_irq_bind->u.pci.intx; 3.44 guest_gsi = hvm_pci_intx_gsi(device, intx); 3.45 link = hvm_pci_intx_link(device, intx); 3.46 + set_bit(link, hvm_irq_dpci->link_map); 3.47 3.48 - dig = xmalloc(struct dev_intx_gsi); 3.49 - if ( !dig ) 3.50 + digl = xmalloc(struct dev_intx_gsi_link); 3.51 + if ( !digl ) 3.52 
return -ENOMEM; 3.53 3.54 - dig->device = device; 3.55 - dig->intx = intx; 3.56 - dig->gsi = guest_gsi; 3.57 - list_add_tail(&dig->list, 3.58 - &hvm_irq_dpci->mirq[machine_gsi].dig_list); 3.59 - 3.60 + digl->device = device; 3.61 + digl->intx = intx; 3.62 + digl->gsi = guest_gsi; 3.63 + digl->link = link; 3.64 + list_add_tail(&digl->list, 3.65 + &hvm_irq_dpci->mirq[machine_gsi].digl_list); 3.66 + 3.67 hvm_irq_dpci->girq[guest_gsi].valid = 1; 3.68 hvm_irq_dpci->girq[guest_gsi].device = device; 3.69 hvm_irq_dpci->girq[guest_gsi].intx = intx; 3.70 hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi; 3.71 3.72 - hvm_irq_dpci->link[link].valid = 1; 3.73 - hvm_irq_dpci->link[link].device = device; 3.74 - hvm_irq_dpci->link[link].intx = intx; 3.75 - hvm_irq_dpci->link[link].machine_gsi = machine_gsi; 3.76 - 3.77 /* Bind the same mirq once in the same domain */ 3.78 if ( !hvm_irq_dpci->mirq[machine_gsi].valid ) 3.79 { 3.80 @@ -162,6 +159,46 @@ int hvm_do_IRQ_dpci(struct domain *d, un 3.81 return 1; 3.82 } 3.83 3.84 +static void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq) 3.85 +{ 3.86 + struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 3.87 + struct hvm_irq_dpci *dpci = hvm_irq->dpci; 3.88 + struct dev_intx_gsi_link *digl, *tmp; 3.89 + int i; 3.90 + 3.91 + ASSERT(isairq < NR_ISAIRQS); 3.92 + if ( !vtd_enabled || !dpci || 3.93 + !test_bit(isairq, dpci->isairq_map) ) 3.94 + return; 3.95 + 3.96 + /* Multiple mirq may be mapped to one isa irq */ 3.97 + for ( i = 0; i < NR_IRQS; i++ ) 3.98 + { 3.99 + if ( !dpci->mirq[i].valid ) 3.100 + continue; 3.101 + 3.102 + list_for_each_entry_safe ( digl, tmp, 3.103 + &dpci->mirq[i].digl_list, list ) 3.104 + { 3.105 + if ( hvm_irq->pci_link.route[digl->link] == isairq ) 3.106 + { 3.107 + hvm_pci_intx_deassert(d, digl->device, digl->intx); 3.108 + spin_lock(&dpci->dirq_lock); 3.109 + if ( --dpci->mirq[i].pending == 0 ) 3.110 + { 3.111 + spin_unlock(&dpci->dirq_lock); 3.112 + gdprintk(XENLOG_INFO, 3.113 + 
"hvm_dpci_isairq_eoi:: mirq = %x\n", i); 3.114 + stop_timer(&dpci->hvm_timer[irq_to_vector(i)]); 3.115 + pirq_guest_eoi(d, i); 3.116 + } 3.117 + else 3.118 + spin_unlock(&dpci->dirq_lock); 3.119 + } 3.120 + } 3.121 + } 3.122 +} 3.123 + 3.124 void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi, 3.125 union vioapic_redir_entry *ent) 3.126 { 3.127 @@ -169,14 +206,21 @@ void hvm_dpci_eoi(struct domain *d, unsi 3.128 uint32_t device, intx, machine_gsi; 3.129 3.130 if ( !vtd_enabled || (hvm_irq_dpci == NULL) || 3.131 - !hvm_irq_dpci->girq[guest_gsi].valid ) 3.132 + (guest_gsi >= NR_ISAIRQS && 3.133 + !hvm_irq_dpci->girq[guest_gsi].valid) ) 3.134 return; 3.135 3.136 + if ( guest_gsi < NR_ISAIRQS ) 3.137 + { 3.138 + hvm_dpci_isairq_eoi(d, guest_gsi); 3.139 + return; 3.140 + } 3.141 + 3.142 machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi; 3.143 device = hvm_irq_dpci->girq[guest_gsi].device; 3.144 intx = hvm_irq_dpci->girq[guest_gsi].intx; 3.145 hvm_pci_intx_deassert(d, device, intx); 3.146 - 3.147 + 3.148 spin_lock(&hvm_irq_dpci->dirq_lock); 3.149 if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 ) 3.150 { 3.151 @@ -196,9 +240,9 @@ void iommu_domain_destroy(struct domain 3.152 struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci; 3.153 uint32_t i; 3.154 struct hvm_iommu *hd = domain_hvm_iommu(d); 3.155 - struct list_head *ioport_list, *dig_list, *tmp; 3.156 + struct list_head *ioport_list, *digl_list, *tmp; 3.157 struct g2m_ioport *ioport; 3.158 - struct dev_intx_gsi *dig; 3.159 + struct dev_intx_gsi_link *digl; 3.160 3.161 if ( !vtd_enabled ) 3.162 return; 3.163 @@ -211,12 +255,13 @@ void iommu_domain_destroy(struct domain 3.164 pirq_guest_unbind(d, i); 3.165 kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]); 3.166 3.167 - list_for_each_safe ( dig_list, tmp, 3.168 - &hvm_irq_dpci->mirq[i].dig_list ) 3.169 + list_for_each_safe ( digl_list, tmp, 3.170 + &hvm_irq_dpci->mirq[i].digl_list ) 3.171 { 3.172 - dig = list_entry(dig_list, struct 
dev_intx_gsi, list); 3.173 - list_del(&dig->list); 3.174 - xfree(dig); 3.175 + digl = list_entry(digl_list, 3.176 + struct dev_intx_gsi_link, list); 3.177 + list_del(&digl->list); 3.178 + xfree(digl); 3.179 } 3.180 } 3.181
4.1 --- a/xen/include/asm-x86/hvm/irq.h Fri Nov 23 16:25:59 2007 +0000 4.2 +++ b/xen/include/asm-x86/hvm/irq.h Fri Nov 23 16:39:45 2007 +0000 4.3 @@ -30,17 +30,18 @@ 4.4 #include <asm/hvm/vioapic.h> 4.5 #include <public/hvm/save.h> 4.6 4.7 -struct dev_intx_gsi { 4.8 +struct dev_intx_gsi_link { 4.9 struct list_head list; 4.10 uint8_t device; 4.11 uint8_t intx; 4.12 uint8_t gsi; 4.13 + uint8_t link; 4.14 }; 4.15 4.16 struct hvm_mirq_dpci_mapping { 4.17 uint8_t valid; 4.18 int pending; 4.19 - struct list_head dig_list; 4.20 + struct list_head digl_list; 4.21 struct domain *dom; 4.22 }; 4.23 4.24 @@ -51,15 +52,19 @@ struct hvm_girq_dpci_mapping { 4.25 uint8_t machine_gsi; 4.26 }; 4.27 4.28 +#define NR_ISAIRQS 16 4.29 +#define NR_LINK 4 4.30 struct hvm_irq_dpci { 4.31 spinlock_t dirq_lock; 4.32 /* Machine IRQ to guest device/intx mapping. */ 4.33 struct hvm_mirq_dpci_mapping mirq[NR_IRQS]; 4.34 /* Guest IRQ to guest device/intx mapping. */ 4.35 struct hvm_girq_dpci_mapping girq[NR_IRQS]; 4.36 - /* Link to guest device/intx mapping. */ 4.37 - struct hvm_girq_dpci_mapping link[4]; 4.38 DECLARE_BITMAP(dirq_mask, NR_IRQS); 4.39 + /* Record of mapped ISA IRQs */ 4.40 + DECLARE_BITMAP(isairq_map, NR_ISAIRQS); 4.41 + /* Record of mapped Links */ 4.42 + DECLARE_BITMAP(link_map, NR_LINK); 4.43 struct timer hvm_timer[NR_IRQS]; 4.44 }; 4.45