debuggers.hg
changeset 16569:32237d8517b1
vt-d: Use bitmap to solve domain-id limitation issue.
The Capability register reports the domain-id width supported by
hardware. For implementations supporting fewer than 16-bit domain-ids,
the unused bits of the domain identifier field (87:72) in the context
entry are treated as reserved by hardware. For example, an
implementation supporting 4-bit domain-ids treats bits 87:76 of this
field as reserved. Sixteen usable ids is a small number, so overflow
happens easily. What's more, context entries programmed with the same
domain identifier must always reference the same address translation
structure (through the ASR field). Under the old scheme, which
programmed did = domain_id + 1, Dom16's identifier (17) is
indistinguishable from Dom0's (1) within 4 bits, so Dom16 conflicts
with Dom0 and device assignment fails.
This patch implements a domain-id bitmap to solve the above issue.
Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
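
The scheme the patch introduces is easiest to see in isolation. Below is a
minimal standalone sketch of a bitmap-based id allocator in the same spirit,
assuming a 4-bit implementation (16 ids); NDOMS, alloc_domid and free_domid
are illustrative names, whereas the patch itself uses
find_first_zero_bit()/set_bit() on domid_bitmap under domid_bitmap_lock:

/* Minimal standalone sketch of the bitmap-based id allocator the patch
 * introduces.  NDOMS, alloc_domid and free_domid are illustrative names;
 * the patch itself uses find_first_zero_bit()/set_bit() on domid_bitmap
 * under domid_bitmap_lock. */
#include <stdint.h>
#include <stdio.h>

#define NDOMS 16  /* e.g. a 4-bit implementation: cap_ndoms() == 16 */

static uint32_t domid_bitmap[(NDOMS + 31) / 32] = { 1 };  /* bit 0 reserved */

static int alloc_domid(void)
{
    for ( int id = 1; id < NDOMS; id++ )
        if ( !(domid_bitmap[id / 32] & (1u << (id % 32))) )
        {
            domid_bitmap[id / 32] |= 1u << (id % 32);
            return id;                 /* first free id, never 0 */
        }
    return -1;                         /* all hardware domain-ids in use */
}

static void free_domid(int id)
{
    if ( id > 0 && id < NDOMS )
        domid_bitmap[id / 32] &= ~(1u << (id % 32));
}

int main(void)
{
    int a = alloc_domid(), b = alloc_domid();
    printf("allocated %d and %d\n", a, b);   /* 1 and 2 */
    free_domid(a);
    printf("reused %d\n", alloc_domid());    /* 1 again */
    return 0;
}

Reserving bit 0 mirrors the hardware rule, noted in the patch itself, that
domain id 0 is not valid on Intel's IOMMU; the old code met that rule by
adding 1 to every domain_id, which is exactly what overflowed.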
| author | Keir Fraser <keir.fraser@citrix.com> |
|---|---|
| date | Wed Dec 05 10:53:47 2007 +0000 (2007-12-05) |
| parents | fda41d46a6a3 |
| children | b47849b774f1 |
| files | xen/arch/x86/hvm/vmx/vtd/intel-iommu.c xen/include/asm-x86/hvm/iommu.h xen/include/asm-x86/hvm/vmx/intel-iommu.h |
line diff
--- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Wed Dec 05 10:42:45 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Wed Dec 05 10:53:47 2007 +0000
@@ -35,11 +35,49 @@
 #include "pci_regs.h"
 #include "msi.h"
 
+#define domain_iommu_domid(d) ((d)->arch.hvm_domain.hvm_iommu.iommu_domid)
+
 #define VTDPREFIX
 extern void print_iommu_regs(struct acpi_drhd_unit *drhd);
 extern void print_vtd_entries(struct domain *d, int bus, int devfn,
                               unsigned long gmfn);
 
+static spinlock_t domid_bitmap_lock;    /* protect domain id bitmap */
+static int domid_bitmap_size;           /* domain id bitmap size in bit */
+static void *domid_bitmap;              /* iommu domain id bitmap */
+
+#define DID_FIELD_WIDTH 16
+#define DID_HIGH_OFFSET 8
+static void context_set_domain_id(struct context_entry *context,
+                                  struct domain *d)
+{
+    unsigned long flags;
+    domid_t iommu_domid = domain_iommu_domid(d);
+
+    if ( iommu_domid == 0 )
+    {
+        spin_lock_irqsave(&domid_bitmap_lock, flags);
+        iommu_domid = find_first_zero_bit(domid_bitmap, domid_bitmap_size);
+        set_bit(iommu_domid, domid_bitmap);
+        spin_unlock_irqrestore(&domid_bitmap_lock, flags);
+        d->arch.hvm_domain.hvm_iommu.iommu_domid = iommu_domid;
+    }
+
+    context->hi &= (1 << DID_HIGH_OFFSET) - 1;
+    context->hi |= iommu_domid << DID_HIGH_OFFSET;
+}
+
+static void iommu_domid_release(struct domain *d)
+{
+    domid_t iommu_domid = domain_iommu_domid(d);
+
+    if ( iommu_domid != 0 )
+    {
+        d->arch.hvm_domain.hvm_iommu.iommu_domid = 0;
+        clear_bit(iommu_domid, domid_bitmap);
+    }
+}
+
 unsigned int x86_clflush_size;
 void clflush_cache_range(void *adr, int size)
 {
@@ -276,9 +314,6 @@ static int __iommu_flush_context(
     unsigned long flag;
     unsigned long start_time;
 
-    /* Domain id in context is 1 based */
-    did++;
-
     /*
      * In the non-present entry flush case, if hardware doesn't cache
      * non-present entry we do nothing and if hardware cache non-present
@@ -363,9 +398,6 @@ static int __iommu_flush_iotlb(struct io
     unsigned long flag;
     unsigned long start_time;
 
-    /* Domain id in context is 1 based */
-    did++;
-
     /*
      * In the non-present entry flush case, if hardware doesn't cache
      * non-present entry we do nothing and if hardware cache non-present
@@ -534,7 +566,8 @@ static void dma_pte_clear_one(struct dom
     {
         iommu = drhd->iommu;
         if ( cap_caching_mode(iommu->cap) )
-            iommu_flush_iotlb_psi(iommu, domain->domain_id, addr, 1, 0);
+            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
+                                  addr, 1, 0);
         else if (cap_rwbf(iommu->cap))
             iommu_flush_write_buffer(iommu);
     }
@@ -1036,7 +1069,7 @@ static int domain_context_mapping_one(
      * domain_id 0 is not valid on Intel's IOMMU, force domain_id to
      * be 1 based as required by intel's iommu hw.
      */
-    context_set_domain_id(*context, domain->domain_id);
+    context_set_domain_id(context, domain);
     context_set_address_width(*context, hd->agaw);
 
     if ( ecap_pass_thru(iommu->ecap) )
@@ -1069,12 +1102,12 @@ static int domain_context_mapping_one(
              bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
              context->hi, context->lo, hd->pgd);
 
-    if ( iommu_flush_context_device(iommu, domain->domain_id,
+    if ( iommu_flush_context_device(iommu, domain_iommu_domid(domain),
                                     (((u16)bus) << 8) | devfn,
                                     DMA_CCMD_MASK_NOBIT, 1) )
         iommu_flush_write_buffer(iommu);
     else
-        iommu_flush_iotlb_dsi(iommu, domain->domain_id, 0);
+        iommu_flush_iotlb_dsi(iommu, domain_iommu_domid(domain), 0);
     spin_unlock_irqrestore(&iommu->lock, flags);
     return ret;
 }
@@ -1414,6 +1447,8 @@ void iommu_domain_teardown(struct domain
     if ( list_empty(&acpi_drhd_units) )
         return;
 
+    iommu_domid_release(d);
+
 #if CONFIG_PAGING_LEVELS == 3
     {
         struct hvm_iommu *hd = domain_hvm_iommu(d);
@@ -1492,7 +1527,7 @@ int iommu_map_page(struct domain *d, pad
     {
         iommu = drhd->iommu;
         if ( cap_caching_mode(iommu->cap) )
-            iommu_flush_iotlb_psi(iommu, d->domain_id,
+            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                   gfn << PAGE_SHIFT_4K, 1, 0);
         else if ( cap_rwbf(iommu->cap) )
             iommu_flush_write_buffer(iommu);
@@ -1556,7 +1591,8 @@ int iommu_page_mapping(struct domain *do
     {
         iommu = drhd->iommu;
         if ( cap_caching_mode(iommu->cap) )
-            iommu_flush_iotlb_psi(iommu, domain->domain_id, iova, index, 0);
+            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
+                                  iova, index, 0);
         else if ( cap_rwbf(iommu->cap) )
             iommu_flush_write_buffer(iommu);
     }
@@ -1581,7 +1617,7 @@ void iommu_flush(struct domain *d, dma_a
     {
         iommu = drhd->iommu;
         if ( cap_caching_mode(iommu->cap) )
-            iommu_flush_iotlb_psi(iommu, d->domain_id,
+            iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d),
                                   gfn << PAGE_SHIFT_4K, 1, 0);
         else if ( cap_rwbf(iommu->cap) )
             iommu_flush_write_buffer(iommu);
@@ -1760,6 +1796,7 @@ int iommu_setup(void)
     if ( !vtd_enabled )
         return 0;
 
+    spin_lock_init(&domid_bitmap_lock);
     INIT_LIST_HEAD(&hd->pdev_list);
 
     /* start from scratch */
@@ -1768,12 +1805,18 @@ int iommu_setup(void)
     /* setup clflush size */
    x86_clflush_size = ((cpuid_ebx(1) >> 8) & 0xff) * 8;
 
-    /*
-     * allocate IO page directory page for the domain.
-     */
+    /* Allocate IO page directory page for the domain. */
     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
     iommu = drhd->iommu;
 
+    /* Allocate domain id bitmap, and set bit 0 as reserved */
+    domid_bitmap_size = cap_ndoms(iommu->cap);
+    domid_bitmap = xmalloc_bytes(domid_bitmap_size / 8);
+    if ( domid_bitmap == NULL )
+        goto error;
+    memset(domid_bitmap, 0, domid_bitmap_size / 8);
+    set_bit(0, domid_bitmap);
+
     /* setup 1:1 page table for dom0 */
     for ( i = 0; i < max_page; i++ )
         iommu_map_page(dom0, i, i);
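
The two bit operations in context_set_domain_id() above place the allocated
id into the DID field, which occupies bits 87:72 of the 128-bit context
entry, i.e. bits 23:8 of the high 64-bit word, hence DID_HIGH_OFFSET of 8.
A small standalone check of the packing (the sample values are made up; only
DID_HIGH_OFFSET comes from the patch):

#include <assert.h>
#include <stdint.h>

#define DID_HIGH_OFFSET 8   /* from the patch: DID starts at bit 8 of hi */

int main(void)
{
    uint64_t hi = 0x2;            /* pretend an address width in bits 2:0 */
    uint64_t iommu_domid = 5;     /* id handed out by the bitmap allocator */

    /* The same two operations as context_set_domain_id(): keep bits 7:0,
     * clear the old DID and everything above it, then OR in the new id. */
    hi &= (1 << DID_HIGH_OFFSET) - 1;
    hi |= iommu_domid << DID_HIGH_OFFSET;

    assert(hi == ((5ULL << DID_HIGH_OFFSET) | 0x2));
    return 0;
}

Note that the mask (1 << DID_HIGH_OFFSET) - 1 also clears the bits above the
DID field; those bits of the context entry's high word are reserved, so
clearing them is harmless.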
--- a/xen/include/asm-x86/hvm/iommu.h    Wed Dec 05 10:42:45 2007 +0000
+++ b/xen/include/asm-x86/hvm/iommu.h    Wed Dec 05 10:53:47 2007 +0000
@@ -42,6 +42,7 @@ struct hvm_iommu {
     spinlock_t mapping_lock;            /* io page table lock */
     int agaw;     /* adjusted guest address width, 0 is level 2 30-bit */
     struct list_head g2m_ioport_list;   /* guest to machine ioport mapping */
+    domid_t iommu_domid;                /* domain id stored in iommu */
 
     /* amd iommu support */
     int domain_id;
--- a/xen/include/asm-x86/hvm/vmx/intel-iommu.h    Wed Dec 05 10:42:45 2007 +0000
+++ b/xen/include/asm-x86/hvm/vmx/intel-iommu.h    Wed Dec 05 10:53:47 2007 +0000
@@ -227,8 +227,6 @@ struct context_entry {
     do {(c).lo &= 0xfff; (c).lo |= (val) & PAGE_MASK_4K ;} while(0)
 #define context_set_address_width(c, val) \
     do {(c).hi &= 0xfffffff8; (c).hi |= (val) & 7;} while(0)
-#define context_set_domain_id(c, val) \
-    do {(c).hi &= 0xff; (c).hi |= ((val + 1) & ((1 << 16) - 1)) << 8;} while(0)
 #define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while(0)
 
 /* page table handling */
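
The macro removed above is where the limitation originated: it mapped each
Xen domain_id to domain_id + 1 under a 16-bit mask, so nothing prevented two
domains from ending up with indistinguishable ids on hardware implementing
fewer domain-id bits. A sketch of the collision the commit message describes,
assuming the 4-bit example (OLD_DID and HW_DID_BITS are illustrative names;
whether hardware truncates the reserved bits or faults on them, assignment
fails either way):

#include <stdio.h>

#define OLD_DID(domain_id) (((domain_id) + 1) & ((1 << 16) - 1))
#define HW_DID_BITS 4   /* the 4-bit example from the commit message */

int main(void)
{
    unsigned hw_mask = (1u << HW_DID_BITS) - 1;
    printf("Dom0  -> did %u\n", OLD_DID(0) & hw_mask);   /* 1 */
    printf("Dom16 -> did %u\n", OLD_DID(16) & hw_mask);  /* 1: collision */
    return 0;
}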