changeset 17088:72f52dd2dba8
x86 iommu: Define vendor-neutral interface for access to IOMMU.
Signed-off-by: Wei Wang <wei.wang2@amd.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
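The changeset replaces direct calls into the VT-d code with a vendor-neutral interface: generic wrappers in the new xen/arch/x86/hvm/iommu.c dispatch through a per-domain platform_ops pointer, which iommu_domain_init() aims at either intel_iommu_ops or amd_iommu_ops based on boot_cpu_data.x86_vendor. The sketch below is not part of the changeset; it is a minimal, self-contained C illustration of that ops-table dispatch pattern. The struct mirrors the iommu_ops added to xen/include/asm-x86/iommu.h, while the fake_* backend and main() are made up for the example.

/*
 * Minimal, standalone sketch (not Xen code) of the vendor-neutral
 * dispatch pattern introduced by this changeset: callers use one
 * generic entry point, and a per-vendor ops table supplies the
 * implementation behind it.
 */
#include <stdio.h>

struct domain { int domain_id; };

struct iommu_ops {
    int (*init)(struct domain *d);
    int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
    int (*unmap_page)(struct domain *d, unsigned long gfn);
};

/* Stand-ins for the real intel_iommu_ops / amd_iommu_ops backends. */
static int fake_vtd_init(struct domain *d) { return 0; }
static int fake_vtd_map(struct domain *d, unsigned long gfn, unsigned long mfn)
{ printf("VT-d: dom%d map gfn %lx -> mfn %lx\n", d->domain_id, gfn, mfn); return 0; }
static int fake_vtd_unmap(struct domain *d, unsigned long gfn)
{ printf("VT-d: dom%d unmap gfn %lx\n", d->domain_id, gfn); return 0; }

static const struct iommu_ops fake_intel_ops = {
    .init       = fake_vtd_init,
    .map_page   = fake_vtd_map,
    .unmap_page = fake_vtd_unmap,
};

/* In Xen the pointer lives in struct hvm_iommu; here it is a global. */
static const struct iommu_ops *platform_ops = &fake_intel_ops;

/* Generic wrapper: the only function vendor-independent code calls. */
static int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    if ( !platform_ops )     /* no IOMMU present/enabled: quietly no-op */
        return 0;
    return platform_ops->map_page(d, gfn, mfn);
}

int main(void)
{
    struct domain dom0 = { 0 };
    return iommu_map_page(&dom0, 0x1000, 0x2000);
}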
--- a/xen/arch/x86/domctl.c  Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/domctl.c  Thu Feb 14 11:14:17 2008 +0000
@@ -530,7 +530,7 @@ long arch_do_domctl(
         u8 bus, devfn;
 
         ret = -EINVAL;
-        if ( !vtd_enabled )
+        if ( !iommu_enabled )
            break;
 
        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
@@ -553,7 +553,7 @@ long arch_do_domctl(
         u8 bus, devfn;
 
         ret = -EINVAL;
-        if ( !vtd_enabled )
+        if ( !iommu_enabled )
            break;
 
        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
@@ -589,9 +589,9 @@ long arch_do_domctl(
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;
        bind = &(domctl->u.bind_pt_irq);
-        if (vtd_enabled)
+        if ( iommu_enabled )
            ret = pt_irq_create_bind_vtd(d, bind);
-        if (ret < 0)
+        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
        rcu_unlock_domain(d);
    }
--- a/xen/arch/x86/hvm/Makefile  Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/Makefile  Thu Feb 14 11:14:17 2008 +0000
@@ -6,6 +6,7 @@ obj-y += i8254.o
 obj-y += instrlen.o
 obj-y += intercept.o
 obj-y += io.o
+obj-y += iommu.o
 obj-y += irq.o
 obj-y += mtrr.o
 obj-y += platform.o
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/iommu.c  Thu Feb 14 11:14:17 2008 +0000
@@ -0,0 +1,135 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <xen/init.h>
+#include <xen/irq.h>
+#include <xen/spinlock.h>
+#include <xen/sched.h>
+#include <xen/xmalloc.h>
+#include <xen/domain_page.h>
+#include <asm/delay.h>
+#include <asm/string.h>
+#include <asm/mm.h>
+#include <asm/iommu.h>
+#include <asm/hvm/vmx/intel-iommu.h>
+
+extern struct iommu_ops intel_iommu_ops;
+extern struct iommu_ops amd_iommu_ops;
+
+int iommu_domain_init(struct domain *domain)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(domain);
+
+    spin_lock_init(&hd->mapping_lock);
+    spin_lock_init(&hd->iommu_list_lock);
+    INIT_LIST_HEAD(&hd->pdev_list);
+    INIT_LIST_HEAD(&hd->g2m_ioport_list);
+
+    if ( !iommu_enabled )
+        return 0;
+
+    switch ( boot_cpu_data.x86_vendor )
+    {
+    case X86_VENDOR_INTEL:
+        hd->platform_ops = &intel_iommu_ops;
+        break;
+    case X86_VENDOR_AMD:
+        hd->platform_ops = &amd_iommu_ops;
+        break;
+    default:
+        BUG();
+    }
+
+    return hd->platform_ops->init(domain);
+}
+
+int assign_device(struct domain *d, u8 bus, u8 devfn)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops)
+        return 0;
+
+    return hd->platform_ops->assign_device(d, bus, devfn);
+}
+
+void iommu_domain_destroy(struct domain *d)
+{
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    uint32_t i;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+    struct list_head *ioport_list, *digl_list, *tmp;
+    struct g2m_ioport *ioport;
+    struct dev_intx_gsi_link *digl;
+
+    if ( !iommu_enabled || !hd->platform_ops)
+        return;
+
+    if ( hvm_irq_dpci != NULL )
+    {
+        for ( i = 0; i < NR_IRQS; i++ )
+        {
+            if ( !hvm_irq_dpci->mirq[i].valid )
+                continue;
+
+            pirq_guest_unbind(d, i);
+            kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
+
+            list_for_each_safe ( digl_list, tmp,
+                                 &hvm_irq_dpci->mirq[i].digl_list )
+            {
+                digl = list_entry(digl_list,
+                                  struct dev_intx_gsi_link, list);
+                list_del(&digl->list);
+                xfree(digl);
+            }
+        }
+
+        d->arch.hvm_domain.irq.dpci = NULL;
+        xfree(hvm_irq_dpci);
+    }
+
+    if ( hd )
+    {
+        list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
+        {
+            ioport = list_entry(ioport_list, struct g2m_ioport, list);
+            list_del(&ioport->list);
+            xfree(ioport);
+        }
+    }
+
+    return hd->platform_ops->teardown(d);
+}
+
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops)
+        return 0;
+
+    return hd->platform_ops->map_page(d, gfn, mfn);
+}
+
+int iommu_unmap_page(struct domain *d, unsigned long gfn)
+{
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    if ( !iommu_enabled || !hd->platform_ops)
+        return 0;
+
+    return hd->platform_ops->unmap_page(d, gfn);
+}
--- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c  Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c  Thu Feb 14 11:14:17 2008 +0000
@@ -89,12 +89,14 @@ int __init get_iommu_capabilities(u8 bus
     u32 cap_header, cap_range;
     u64 mmio_bar;
 
+#if HACK_BIOS_SETTINGS
     /* remove it when BIOS available */
     write_pci_config(bus, dev, func,
         cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET, 0x00000000);
     write_pci_config(bus, dev, func,
         cap_ptr + PCI_CAP_MMIO_BAR_LOW_OFFSET, 0x40000001);
     /* remove it when BIOS available */
+#endif
 
     mmio_bar = (u64)read_pci_config(bus, dev, func,
         cap_ptr + PCI_CAP_MMIO_BAR_HIGH_OFFSET) << 32;
5.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c Thu Feb 14 10:36:47 2008 +0000 5.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-map.c Thu Feb 14 11:14:17 2008 +0000 5.3 @@ -30,22 +30,20 @@ static int queue_iommu_command(struct am 5.4 u32 tail, head, *cmd_buffer; 5.5 int i; 5.6 5.7 - BUG_ON( !iommu || !cmd ); 5.8 - 5.9 tail = iommu->cmd_buffer_tail; 5.10 - if ( ++tail == iommu->cmd_buffer.entries ) { 5.11 + if ( ++tail == iommu->cmd_buffer.entries ) 5.12 tail = 0; 5.13 - } 5.14 head = get_field_from_reg_u32( 5.15 - readl(iommu->mmio_base+IOMMU_CMD_BUFFER_HEAD_OFFSET), 5.16 - IOMMU_CMD_BUFFER_HEAD_MASK, 5.17 - IOMMU_CMD_BUFFER_HEAD_SHIFT); 5.18 - if ( head != tail ) { 5.19 + readl(iommu->mmio_base+IOMMU_CMD_BUFFER_HEAD_OFFSET), 5.20 + IOMMU_CMD_BUFFER_HEAD_MASK, 5.21 + IOMMU_CMD_BUFFER_HEAD_SHIFT); 5.22 + if ( head != tail ) 5.23 + { 5.24 cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer + 5.25 - (iommu->cmd_buffer_tail * IOMMU_CMD_BUFFER_ENTRY_SIZE)); 5.26 - for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; ++i ) { 5.27 + (iommu->cmd_buffer_tail * 5.28 + IOMMU_CMD_BUFFER_ENTRY_SIZE)); 5.29 + for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; i++ ) 5.30 cmd_buffer[i] = cmd[i]; 5.31 - } 5.32 5.33 iommu->cmd_buffer_tail = tail; 5.34 return 1; 5.35 @@ -58,27 +56,25 @@ static void commit_iommu_command_buffer( 5.36 { 5.37 u32 tail; 5.38 5.39 - BUG_ON( !iommu ); 5.40 - 5.41 set_field_in_reg_u32(iommu->cmd_buffer_tail, 0, 5.42 - IOMMU_CMD_BUFFER_TAIL_MASK, 5.43 - IOMMU_CMD_BUFFER_TAIL_SHIFT, &tail); 5.44 + IOMMU_CMD_BUFFER_TAIL_MASK, 5.45 + IOMMU_CMD_BUFFER_TAIL_SHIFT, &tail); 5.46 writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET); 5.47 } 5.48 5.49 int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]) 5.50 { 5.51 - BUG_ON( !iommu || !cmd ); 5.52 - 5.53 - if ( queue_iommu_command(iommu, cmd) ) { 5.54 + if ( queue_iommu_command(iommu, cmd) ) 5.55 + { 5.56 commit_iommu_command_buffer(iommu); 5.57 return 1; 5.58 } 5.59 + 5.60 return 0; 5.61 } 5.62 5.63 static void invalidate_iommu_page(struct amd_iommu *iommu, 5.64 - u64 io_addr, u16 domain_id) 5.65 + u64 io_addr, u16 domain_id) 5.66 { 5.67 u64 addr_lo, addr_hi; 5.68 u32 cmd[4], entry; 5.69 @@ -87,51 +83,52 @@ static void invalidate_iommu_page(struct 5.70 addr_hi = io_addr >> 32; 5.71 5.72 set_field_in_reg_u32(domain_id, 0, 5.73 - IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK, 5.74 - IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry); 5.75 + IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK, 5.76 + IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry); 5.77 set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry, 5.78 - IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, &entry); 5.79 + IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, 5.80 + &entry); 5.81 cmd[1] = entry; 5.82 5.83 set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, 0, 5.84 - IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK, 5.85 - IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry); 5.86 + IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK, 5.87 + IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry); 5.88 set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry, 5.89 - IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK, 5.90 - IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry); 5.91 + IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK, 5.92 + IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry); 5.93 set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry, 5.94 - IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK, 5.95 - IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry); 5.96 + IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK, 5.97 + IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry); 5.98 cmd[2] = entry; 5.99 5.100 
set_field_in_reg_u32((u32)addr_hi, 0, 5.101 - IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK, 5.102 - IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry); 5.103 + IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK, 5.104 + IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry); 5.105 cmd[3] = entry; 5.106 5.107 cmd[0] = 0; 5.108 send_iommu_command(iommu, cmd); 5.109 } 5.110 5.111 -static void flush_command_buffer(struct amd_iommu *iommu) 5.112 +void flush_command_buffer(struct amd_iommu *iommu) 5.113 { 5.114 u32 cmd[4], status; 5.115 int loop_count, comp_wait; 5.116 5.117 /* clear 'ComWaitInt' in status register (WIC) */ 5.118 set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0, 5.119 - IOMMU_STATUS_COMP_WAIT_INT_MASK, 5.120 - IOMMU_STATUS_COMP_WAIT_INT_SHIFT, &status); 5.121 + IOMMU_STATUS_COMP_WAIT_INT_MASK, 5.122 + IOMMU_STATUS_COMP_WAIT_INT_SHIFT, &status); 5.123 writel(status, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); 5.124 5.125 /* send an empty COMPLETION_WAIT command to flush command buffer */ 5.126 cmd[3] = cmd[2] = 0; 5.127 set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0, 5.128 - IOMMU_CMD_OPCODE_MASK, 5.129 - IOMMU_CMD_OPCODE_SHIFT, &cmd[1]); 5.130 + IOMMU_CMD_OPCODE_MASK, 5.131 + IOMMU_CMD_OPCODE_SHIFT, &cmd[1]); 5.132 set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0, 5.133 - IOMMU_COMP_WAIT_I_FLAG_MASK, 5.134 - IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]); 5.135 + IOMMU_COMP_WAIT_I_FLAG_MASK, 5.136 + IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]); 5.137 send_iommu_command(iommu, cmd); 5.138 5.139 /* wait for 'ComWaitInt' to signal comp#endifletion? */ 5.140 @@ -139,34 +136,36 @@ static void flush_command_buffer(struct 5.141 loop_count = amd_iommu_poll_comp_wait; 5.142 do { 5.143 status = readl(iommu->mmio_base + 5.144 - IOMMU_STATUS_MMIO_OFFSET); 5.145 - comp_wait = get_field_from_reg_u32(status, 5.146 - IOMMU_STATUS_COMP_WAIT_INT_MASK, 5.147 - IOMMU_STATUS_COMP_WAIT_INT_SHIFT); 5.148 + IOMMU_STATUS_MMIO_OFFSET); 5.149 + comp_wait = get_field_from_reg_u32( 5.150 + status, 5.151 + IOMMU_STATUS_COMP_WAIT_INT_MASK, 5.152 + IOMMU_STATUS_COMP_WAIT_INT_SHIFT); 5.153 --loop_count; 5.154 } while ( loop_count && !comp_wait ); 5.155 5.156 - if ( comp_wait ) { 5.157 + if ( comp_wait ) 5.158 + { 5.159 /* clear 'ComWaitInt' in status register (WIC) */ 5.160 status &= IOMMU_STATUS_COMP_WAIT_INT_MASK; 5.161 writel(status, iommu->mmio_base + 5.162 - IOMMU_STATUS_MMIO_OFFSET); 5.163 - } else 5.164 - dprintk(XENLOG_WARNING, "AMD IOMMU: %s(): Warning:" 5.165 - " ComWaitInt bit did not assert!\n", 5.166 - __FUNCTION__); 5.167 + IOMMU_STATUS_MMIO_OFFSET); 5.168 + } 5.169 + else 5.170 + dprintk(XENLOG_WARNING, "AMD IOMMU: Warning:" 5.171 + " ComWaitInt bit did not assert!\n"); 5.172 } 5.173 } 5.174 5.175 static void clear_page_table_entry_present(u32 *pte) 5.176 { 5.177 set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, pte[0], 5.178 - IOMMU_PTE_PRESENT_MASK, 5.179 - IOMMU_PTE_PRESENT_SHIFT, &pte[0]); 5.180 + IOMMU_PTE_PRESENT_MASK, 5.181 + IOMMU_PTE_PRESENT_SHIFT, &pte[0]); 5.182 } 5.183 5.184 static void set_page_table_entry_present(u32 *pte, u64 page_addr, 5.185 - int iw, int ir) 5.186 + int iw, int ir) 5.187 { 5.188 u64 addr_lo, addr_hi; 5.189 u32 entry; 5.190 @@ -175,33 +174,33 @@ static void set_page_table_entry_present 5.191 addr_hi = page_addr >> 32; 5.192 5.193 set_field_in_reg_u32((u32)addr_hi, 0, 5.194 - IOMMU_PTE_ADDR_HIGH_MASK, 5.195 - IOMMU_PTE_ADDR_HIGH_SHIFT, &entry); 5.196 + IOMMU_PTE_ADDR_HIGH_MASK, 5.197 + IOMMU_PTE_ADDR_HIGH_SHIFT, &entry); 5.198 set_field_in_reg_u32(iw ? 
IOMMU_CONTROL_ENABLED : 5.199 - IOMMU_CONTROL_DISABLED, entry, 5.200 - IOMMU_PTE_IO_WRITE_PERMISSION_MASK, 5.201 - IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT, &entry); 5.202 + IOMMU_CONTROL_DISABLED, entry, 5.203 + IOMMU_PTE_IO_WRITE_PERMISSION_MASK, 5.204 + IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT, &entry); 5.205 set_field_in_reg_u32(ir ? IOMMU_CONTROL_ENABLED : 5.206 - IOMMU_CONTROL_DISABLED, entry, 5.207 - IOMMU_PTE_IO_READ_PERMISSION_MASK, 5.208 - IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry); 5.209 + IOMMU_CONTROL_DISABLED, entry, 5.210 + IOMMU_PTE_IO_READ_PERMISSION_MASK, 5.211 + IOMMU_PTE_IO_READ_PERMISSION_SHIFT, &entry); 5.212 pte[1] = entry; 5.213 5.214 set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0, 5.215 - IOMMU_PTE_ADDR_LOW_MASK, 5.216 - IOMMU_PTE_ADDR_LOW_SHIFT, &entry); 5.217 + IOMMU_PTE_ADDR_LOW_MASK, 5.218 + IOMMU_PTE_ADDR_LOW_SHIFT, &entry); 5.219 set_field_in_reg_u32(IOMMU_PAGING_MODE_LEVEL_0, entry, 5.220 - IOMMU_PTE_NEXT_LEVEL_MASK, 5.221 - IOMMU_PTE_NEXT_LEVEL_SHIFT, &entry); 5.222 + IOMMU_PTE_NEXT_LEVEL_MASK, 5.223 + IOMMU_PTE_NEXT_LEVEL_SHIFT, &entry); 5.224 set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, 5.225 - IOMMU_PTE_PRESENT_MASK, 5.226 - IOMMU_PTE_PRESENT_SHIFT, &entry); 5.227 + IOMMU_PTE_PRESENT_MASK, 5.228 + IOMMU_PTE_PRESENT_SHIFT, &entry); 5.229 pte[0] = entry; 5.230 } 5.231 5.232 5.233 static void amd_iommu_set_page_directory_entry(u32 *pde, 5.234 - u64 next_ptr, u8 next_level) 5.235 + u64 next_ptr, u8 next_level) 5.236 { 5.237 u64 addr_lo, addr_hi; 5.238 u32 entry; 5.239 @@ -211,29 +210,31 @@ static void amd_iommu_set_page_directory 5.240 5.241 /* enable read/write permissions,which will be enforced at the PTE */ 5.242 set_field_in_reg_u32((u32)addr_hi, 0, 5.243 - IOMMU_PDE_ADDR_HIGH_MASK, IOMMU_PDE_ADDR_HIGH_SHIFT, &entry); 5.244 + IOMMU_PDE_ADDR_HIGH_MASK, 5.245 + IOMMU_PDE_ADDR_HIGH_SHIFT, &entry); 5.246 set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, 5.247 - IOMMU_PDE_IO_WRITE_PERMISSION_MASK, 5.248 - IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT, &entry); 5.249 + IOMMU_PDE_IO_WRITE_PERMISSION_MASK, 5.250 + IOMMU_PDE_IO_WRITE_PERMISSION_SHIFT, &entry); 5.251 set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, 5.252 - IOMMU_PDE_IO_READ_PERMISSION_MASK, 5.253 - IOMMU_PDE_IO_READ_PERMISSION_SHIFT, &entry); 5.254 + IOMMU_PDE_IO_READ_PERMISSION_MASK, 5.255 + IOMMU_PDE_IO_READ_PERMISSION_SHIFT, &entry); 5.256 pde[1] = entry; 5.257 5.258 /* mark next level as 'present' */ 5.259 set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0, 5.260 - IOMMU_PDE_ADDR_LOW_MASK, IOMMU_PDE_ADDR_LOW_SHIFT, &entry); 5.261 + IOMMU_PDE_ADDR_LOW_MASK, 5.262 + IOMMU_PDE_ADDR_LOW_SHIFT, &entry); 5.263 set_field_in_reg_u32(next_level, entry, 5.264 - IOMMU_PDE_NEXT_LEVEL_MASK, 5.265 - IOMMU_PDE_NEXT_LEVEL_SHIFT, &entry); 5.266 + IOMMU_PDE_NEXT_LEVEL_MASK, 5.267 + IOMMU_PDE_NEXT_LEVEL_SHIFT, &entry); 5.268 set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, 5.269 - IOMMU_PDE_PRESENT_MASK, 5.270 - IOMMU_PDE_PRESENT_SHIFT, &entry); 5.271 + IOMMU_PDE_PRESENT_MASK, 5.272 + IOMMU_PDE_PRESENT_SHIFT, &entry); 5.273 pde[0] = entry; 5.274 } 5.275 5.276 void amd_iommu_set_dev_table_entry(u32 *dte, u64 root_ptr, u16 domain_id, 5.277 - u8 paging_mode) 5.278 + u8 paging_mode) 5.279 { 5.280 u64 addr_hi, addr_lo; 5.281 u32 entry; 5.282 @@ -241,54 +242,56 @@ void amd_iommu_set_dev_table_entry(u32 * 5.283 dte[6] = dte[5] = dte[4] = 0; 5.284 5.285 set_field_in_reg_u32(IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED, 0, 5.286 - IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK, 5.287 - IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry); 
5.288 + IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_MASK, 5.289 + IOMMU_DEV_TABLE_SYS_MGT_MSG_ENABLE_SHIFT, &entry); 5.290 dte[3] = entry; 5.291 5.292 set_field_in_reg_u32(domain_id, 0, 5.293 - IOMMU_DEV_TABLE_DOMAIN_ID_MASK, 5.294 - IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry); 5.295 + IOMMU_DEV_TABLE_DOMAIN_ID_MASK, 5.296 + IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT, &entry); 5.297 dte[2] = entry; 5.298 5.299 addr_lo = root_ptr & DMA_32BIT_MASK; 5.300 addr_hi = root_ptr >> 32; 5.301 set_field_in_reg_u32((u32)addr_hi, 0, 5.302 - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK, 5.303 - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry); 5.304 + IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK, 5.305 + IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT, &entry); 5.306 set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, 5.307 - IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK, 5.308 - IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT, &entry); 5.309 + IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_MASK, 5.310 + IOMMU_DEV_TABLE_IO_WRITE_PERMISSION_SHIFT, &entry); 5.311 set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, 5.312 - IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK, 5.313 - IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry); 5.314 + IOMMU_DEV_TABLE_IO_READ_PERMISSION_MASK, 5.315 + IOMMU_DEV_TABLE_IO_READ_PERMISSION_SHIFT, &entry); 5.316 dte[1] = entry; 5.317 5.318 set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0, 5.319 - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK, 5.320 - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry); 5.321 + IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK, 5.322 + IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT, &entry); 5.323 set_field_in_reg_u32(paging_mode, entry, 5.324 - IOMMU_DEV_TABLE_PAGING_MODE_MASK, 5.325 - IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry); 5.326 + IOMMU_DEV_TABLE_PAGING_MODE_MASK, 5.327 + IOMMU_DEV_TABLE_PAGING_MODE_SHIFT, &entry); 5.328 set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, 5.329 - IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK, 5.330 - IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry); 5.331 + IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK, 5.332 + IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT, &entry); 5.333 set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry, 5.334 - IOMMU_DEV_TABLE_VALID_MASK, 5.335 - IOMMU_DEV_TABLE_VALID_SHIFT, &entry); 5.336 + IOMMU_DEV_TABLE_VALID_MASK, 5.337 + IOMMU_DEV_TABLE_VALID_SHIFT, &entry); 5.338 dte[0] = entry; 5.339 } 5.340 5.341 -static void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry) 5.342 +void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry) 5.343 { 5.344 u64 addr_lo, addr_hi, ptr; 5.345 5.346 - addr_lo = get_field_from_reg_u32(entry[0], 5.347 - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK, 5.348 - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT); 5.349 + addr_lo = get_field_from_reg_u32( 5.350 + entry[0], 5.351 + IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_MASK, 5.352 + IOMMU_DEV_TABLE_PAGE_TABLE_PTR_LOW_SHIFT); 5.353 5.354 - addr_hi = get_field_from_reg_u32(entry[1], 5.355 - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK, 5.356 - IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT); 5.357 + addr_hi = get_field_from_reg_u32( 5.358 + entry[1], 5.359 + IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_MASK, 5.360 + IOMMU_DEV_TABLE_PAGE_TABLE_PTR_HIGH_SHIFT); 5.361 5.362 ptr = (addr_hi << 32) | (addr_lo << PAGE_SHIFT); 5.363 return ptr ? 
maddr_to_virt((unsigned long)ptr) : NULL; 5.364 @@ -297,42 +300,74 @@ static void *amd_iommu_get_vptr_from_pag 5.365 static int amd_iommu_is_pte_present(u32 *entry) 5.366 { 5.367 return (get_field_from_reg_u32(entry[0], 5.368 - IOMMU_PDE_PRESENT_MASK, 5.369 - IOMMU_PDE_PRESENT_SHIFT)); 5.370 + IOMMU_PDE_PRESENT_MASK, 5.371 + IOMMU_PDE_PRESENT_SHIFT)); 5.372 +} 5.373 + 5.374 +void invalidate_dev_table_entry(struct amd_iommu *iommu, 5.375 + u16 device_id) 5.376 +{ 5.377 + u32 cmd[4], entry; 5.378 + 5.379 + cmd[3] = cmd[2] = 0; 5.380 + set_field_in_reg_u32(device_id, 0, 5.381 + IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK, 5.382 + IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT, &entry); 5.383 + cmd[0] = entry; 5.384 + 5.385 + set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, 0, 5.386 + IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, 5.387 + &entry); 5.388 + cmd[1] = entry; 5.389 + 5.390 + send_iommu_command(iommu, cmd); 5.391 +} 5.392 + 5.393 +int amd_iommu_is_dte_page_translation_valid(u32 *entry) 5.394 +{ 5.395 + return (get_field_from_reg_u32(entry[0], 5.396 + IOMMU_DEV_TABLE_VALID_MASK, 5.397 + IOMMU_DEV_TABLE_VALID_SHIFT) && 5.398 + get_field_from_reg_u32(entry[0], 5.399 + IOMMU_DEV_TABLE_TRANSLATION_VALID_MASK, 5.400 + IOMMU_DEV_TABLE_TRANSLATION_VALID_SHIFT)); 5.401 } 5.402 5.403 static void *get_pte_from_page_tables(void *table, int level, 5.404 - unsigned long io_pfn) 5.405 + unsigned long io_pfn) 5.406 { 5.407 unsigned long offset; 5.408 - void *pde = 0; 5.409 + void *pde = NULL; 5.410 5.411 - BUG_ON( !table ); 5.412 + BUG_ON(table == NULL); 5.413 5.414 while ( level > 0 ) 5.415 { 5.416 - void *next_table = 0; 5.417 - unsigned long next_ptr; 5.418 offset = io_pfn >> ((PTE_PER_TABLE_SHIFT * 5.419 - (level - IOMMU_PAGING_MODE_LEVEL_1))); 5.420 + (level - IOMMU_PAGING_MODE_LEVEL_1))); 5.421 offset &= ~PTE_PER_TABLE_MASK; 5.422 pde = table + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE); 5.423 5.424 if ( level == 1 ) 5.425 break; 5.426 if ( !pde ) 5.427 - return NULL; 5.428 - if ( !amd_iommu_is_pte_present(pde) ) { 5.429 - next_table = alloc_xenheap_page(); 5.430 + return NULL; 5.431 + if ( !amd_iommu_is_pte_present(pde) ) 5.432 + { 5.433 + void *next_table = alloc_xenheap_page(); 5.434 if ( next_table == NULL ) 5.435 return NULL; 5.436 memset(next_table, 0, PAGE_SIZE); 5.437 - if ( *(u64*)(pde) == 0 ) { 5.438 - next_ptr = (u64)virt_to_maddr(next_table); 5.439 - amd_iommu_set_page_directory_entry((u32 *)pde, 5.440 - next_ptr, level - 1); 5.441 - } else 5.442 + if ( *(u64 *)pde == 0 ) 5.443 + { 5.444 + unsigned long next_ptr = (u64)virt_to_maddr(next_table); 5.445 + amd_iommu_set_page_directory_entry( 5.446 + (u32 *)pde, next_ptr, level - 1); 5.447 + } 5.448 + else 5.449 + { 5.450 free_xenheap_page(next_table); 5.451 + } 5.452 } 5.453 table = amd_iommu_get_vptr_from_page_table_entry(pde); 5.454 level--; 5.455 @@ -341,8 +376,7 @@ static void *get_pte_from_page_tables(vo 5.456 return pde; 5.457 } 5.458 5.459 -int amd_iommu_map_page(struct domain *d, unsigned long gfn, 5.460 - unsigned long mfn) 5.461 +int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn) 5.462 { 5.463 void *pte; 5.464 unsigned long flags; 5.465 @@ -352,7 +386,7 @@ int amd_iommu_map_page(struct domain *d, 5.466 5.467 BUG_ON( !hd->root_table ); 5.468 5.469 - maddr = (u64)(mfn << PAGE_SHIFT); 5.470 + maddr = (u64)mfn << PAGE_SHIFT; 5.471 5.472 iw = IOMMU_IO_WRITE_ENABLED; 5.473 ir = IOMMU_IO_READ_ENABLED; 5.474 @@ -360,18 +394,18 @@ int amd_iommu_map_page(struct domain *d, 5.475 spin_lock_irqsave(&hd->mapping_lock, 
flags); 5.476 5.477 pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn); 5.478 - 5.479 - if ( pte != 0 ) { 5.480 - set_page_table_entry_present((u32 *)pte, maddr, iw, ir); 5.481 - spin_unlock_irqrestore(&hd->mapping_lock, flags); 5.482 - return 0; 5.483 - } else { 5.484 + if ( pte == 0 ) 5.485 + { 5.486 dprintk(XENLOG_ERR, 5.487 - "%s() AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", 5.488 - __FUNCTION__, gfn); 5.489 + "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn); 5.490 spin_unlock_irqrestore(&hd->mapping_lock, flags); 5.491 return -EIO; 5.492 } 5.493 + 5.494 + set_page_table_entry_present((u32 *)pte, maddr, iw, ir); 5.495 + 5.496 + spin_unlock_irqrestore(&hd->mapping_lock, flags); 5.497 + return 0; 5.498 } 5.499 5.500 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn) 5.501 @@ -386,34 +420,31 @@ int amd_iommu_unmap_page(struct domain * 5.502 BUG_ON( !hd->root_table ); 5.503 5.504 requestor_id = hd->domain_id; 5.505 - io_addr = (u64)(gfn << PAGE_SHIFT); 5.506 + io_addr = (u64)gfn << PAGE_SHIFT; 5.507 5.508 spin_lock_irqsave(&hd->mapping_lock, flags); 5.509 5.510 pte = get_pte_from_page_tables(hd->root_table, hd->paging_mode, gfn); 5.511 - 5.512 - if ( pte != 0 ) { 5.513 - /* mark PTE as 'page not present' */ 5.514 - clear_page_table_entry_present((u32 *)pte); 5.515 - spin_unlock_irqrestore(&hd->mapping_lock, flags); 5.516 - 5.517 - /* send INVALIDATE_IOMMU_PAGES command */ 5.518 - for_each_amd_iommu(iommu) { 5.519 - 5.520 - spin_lock_irqsave(&iommu->lock, flags); 5.521 - 5.522 - invalidate_iommu_page(iommu, io_addr, requestor_id); 5.523 - flush_command_buffer(iommu); 5.524 - 5.525 - spin_unlock_irqrestore(&iommu->lock, flags); 5.526 - } 5.527 - 5.528 - return 0; 5.529 - } else { 5.530 + if ( pte == 0 ) 5.531 + { 5.532 dprintk(XENLOG_ERR, 5.533 - "%s() AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", 5.534 - __FUNCTION__, gfn); 5.535 + "AMD IOMMU: Invalid IO pagetable entry gfn = %lx\n", gfn); 5.536 spin_unlock_irqrestore(&hd->mapping_lock, flags); 5.537 return -EIO; 5.538 } 5.539 + 5.540 + /* mark PTE as 'page not present' */ 5.541 + clear_page_table_entry_present((u32 *)pte); 5.542 + spin_unlock_irqrestore(&hd->mapping_lock, flags); 5.543 + 5.544 + /* send INVALIDATE_IOMMU_PAGES command */ 5.545 + for_each_amd_iommu(iommu) 5.546 + { 5.547 + spin_lock_irqsave(&iommu->lock, flags); 5.548 + invalidate_iommu_page(iommu, io_addr, requestor_id); 5.549 + flush_command_buffer(iommu); 5.550 + spin_unlock_irqrestore(&iommu->lock, flags); 5.551 + } 5.552 + 5.553 + return 0; 5.554 }
6.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c Thu Feb 14 10:36:47 2008 +0000 6.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/pci-amd-iommu.c Thu Feb 14 11:14:17 2008 +0000 6.3 @@ -51,19 +51,17 @@ static void __init init_cleanup(void) 6.4 { 6.5 struct amd_iommu *iommu; 6.6 6.7 - dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__); 6.8 - 6.9 - for_each_amd_iommu(iommu) { 6.10 + for_each_amd_iommu ( iommu ) 6.11 unmap_iommu_mmio_region(iommu); 6.12 - } 6.13 } 6.14 6.15 static void __init deallocate_iommu_table_struct( 6.16 - struct table_struct *table) 6.17 + struct table_struct *table) 6.18 { 6.19 - if (table->buffer) { 6.20 + if ( table->buffer ) 6.21 + { 6.22 free_xenheap_pages(table->buffer, 6.23 - get_order_from_bytes(table->alloc_size)); 6.24 + get_order_from_bytes(table->alloc_size)); 6.25 table->buffer = NULL; 6.26 } 6.27 } 6.28 @@ -76,11 +74,10 @@ static void __init deallocate_iommu_reso 6.29 6.30 static void __init detect_cleanup(void) 6.31 { 6.32 - struct amd_iommu *iommu; 6.33 + struct amd_iommu *iommu, *next; 6.34 6.35 - dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__); 6.36 - 6.37 - for_each_amd_iommu(iommu) { 6.38 + list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list ) 6.39 + { 6.40 list_del(&iommu->list); 6.41 deallocate_iommu_resources(iommu); 6.42 xfree(iommu); 6.43 @@ -91,19 +88,21 @@ static int requestor_id_from_bdf(int bdf 6.44 { 6.45 /* HACK - HACK */ 6.46 /* account for possible 'aliasing' by parent device */ 6.47 - return bdf; 6.48 + return bdf; 6.49 } 6.50 6.51 static int __init allocate_iommu_table_struct(struct table_struct *table, 6.52 - const char *name) 6.53 + const char *name) 6.54 { 6.55 table->buffer = (void *) alloc_xenheap_pages( 6.56 get_order_from_bytes(table->alloc_size)); 6.57 6.58 - if ( !table->buffer ) { 6.59 + if ( !table->buffer ) 6.60 + { 6.61 dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating %s\n", name); 6.62 return -ENOMEM; 6.63 } 6.64 + 6.65 memset(table->buffer, 0, table->alloc_size); 6.66 6.67 return 0; 6.68 @@ -114,32 +113,32 @@ static int __init allocate_iommu_resourc 6.69 /* allocate 'device table' on a 4K boundary */ 6.70 iommu->dev_table.alloc_size = 6.71 PAGE_ALIGN(((iommu->last_downstream_bus + 1) * 6.72 - IOMMU_DEV_TABLE_ENTRIES_PER_BUS) * 6.73 - IOMMU_DEV_TABLE_ENTRY_SIZE); 6.74 + IOMMU_DEV_TABLE_ENTRIES_PER_BUS) * 6.75 + IOMMU_DEV_TABLE_ENTRY_SIZE); 6.76 iommu->dev_table.entries = 6.77 iommu->dev_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE; 6.78 6.79 - if (allocate_iommu_table_struct(&iommu->dev_table, 6.80 - "Device Table") != 0) 6.81 + if ( allocate_iommu_table_struct(&iommu->dev_table, 6.82 + "Device Table") != 0 ) 6.83 goto error_out; 6.84 6.85 /* allocate 'command buffer' in power of 2 increments of 4K */ 6.86 iommu->cmd_buffer_tail = 0; 6.87 iommu->cmd_buffer.alloc_size = 6.88 PAGE_SIZE << get_order_from_bytes( 6.89 - PAGE_ALIGN(amd_iommu_cmd_buffer_entries * 6.90 - IOMMU_CMD_BUFFER_ENTRY_SIZE)); 6.91 + PAGE_ALIGN(amd_iommu_cmd_buffer_entries * 6.92 + IOMMU_CMD_BUFFER_ENTRY_SIZE)); 6.93 6.94 - iommu->cmd_buffer.entries = 6.95 + iommu->cmd_buffer.entries = 6.96 iommu->cmd_buffer.alloc_size / IOMMU_CMD_BUFFER_ENTRY_SIZE; 6.97 6.98 if ( allocate_iommu_table_struct(&iommu->cmd_buffer, 6.99 - "Command Buffer") != 0 ) 6.100 + "Command Buffer") != 0 ) 6.101 goto error_out; 6.102 6.103 return 0; 6.104 6.105 -error_out: 6.106 + error_out: 6.107 deallocate_iommu_resources(iommu); 6.108 return -ENOMEM; 6.109 } 6.110 @@ -149,7 +148,8 @@ int iommu_detect_callback(u8 bus, u8 dev 6.111 struct amd_iommu *iommu; 6.112 
6.113 iommu = (struct amd_iommu *) xmalloc(struct amd_iommu); 6.114 - if ( !iommu ) { 6.115 + if ( !iommu ) 6.116 + { 6.117 dprintk(XENLOG_ERR, "AMD IOMMU: Error allocating amd_iommu\n"); 6.118 return -ENOMEM; 6.119 } 6.120 @@ -170,7 +170,7 @@ int iommu_detect_callback(u8 bus, u8 dev 6.121 6.122 return 0; 6.123 6.124 -error_out: 6.125 + error_out: 6.126 xfree(iommu); 6.127 return -ENODEV; 6.128 } 6.129 @@ -180,11 +180,12 @@ static int __init amd_iommu_init(void) 6.130 struct amd_iommu *iommu; 6.131 unsigned long flags; 6.132 6.133 - for_each_amd_iommu(iommu) { 6.134 + for_each_amd_iommu ( iommu ) 6.135 + { 6.136 spin_lock_irqsave(&iommu->lock, flags); 6.137 6.138 /* register IOMMU data strucures in MMIO space */ 6.139 - if (map_iommu_mmio_region(iommu) != 0) 6.140 + if ( map_iommu_mmio_region(iommu) != 0 ) 6.141 goto error_out; 6.142 register_iommu_dev_table_in_mmio_space(iommu); 6.143 register_iommu_cmd_buffer_in_mmio_space(iommu); 6.144 @@ -200,7 +201,7 @@ static int __init amd_iommu_init(void) 6.145 6.146 return 0; 6.147 6.148 -error_out: 6.149 + error_out: 6.150 init_cleanup(); 6.151 return -ENODEV; 6.152 } 6.153 @@ -209,13 +210,16 @@ struct amd_iommu *find_iommu_for_device( 6.154 { 6.155 struct amd_iommu *iommu; 6.156 6.157 - for_each_amd_iommu(iommu) { 6.158 - if ( bus == iommu->root_bus ) { 6.159 - if ( devfn >= iommu->first_devfn && 6.160 - devfn <= iommu->last_devfn ) 6.161 + for_each_amd_iommu ( iommu ) 6.162 + { 6.163 + if ( bus == iommu->root_bus ) 6.164 + { 6.165 + if ( (devfn >= iommu->first_devfn) && 6.166 + (devfn <= iommu->last_devfn) ) 6.167 return iommu; 6.168 } 6.169 - else if ( bus <= iommu->last_downstream_bus ) { 6.170 + else if ( bus <= iommu->last_downstream_bus ) 6.171 + { 6.172 if ( iommu->downstream_bus_present[bus] ) 6.173 return iommu; 6.174 } 6.175 @@ -238,16 +242,21 @@ void amd_iommu_setup_domain_device( 6.176 dte = iommu->dev_table.buffer + 6.177 (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE); 6.178 6.179 - spin_lock_irqsave(&iommu->lock, flags); 6.180 - 6.181 - amd_iommu_set_dev_table_entry((u32 *)dte, 6.182 - root_ptr, hd->domain_id, hd->paging_mode); 6.183 + if ( !amd_iommu_is_dte_page_translation_valid((u32 *)dte) ) 6.184 + { 6.185 + spin_lock_irqsave(&iommu->lock, flags); 6.186 6.187 - dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, " 6.188 - "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n", 6.189 - requestor_id, root_ptr, hd->domain_id, hd->paging_mode); 6.190 + amd_iommu_set_dev_table_entry( 6.191 + (u32 *)dte, 6.192 + root_ptr, hd->domain_id, hd->paging_mode); 6.193 + invalidate_dev_table_entry(iommu, requestor_id); 6.194 + flush_command_buffer(iommu); 6.195 + dprintk(XENLOG_INFO, "AMD IOMMU: Set DTE req_id:%x, " 6.196 + "root_ptr:%"PRIx64", domain_id:%d, paging_mode:%d\n", 6.197 + requestor_id, root_ptr, hd->domain_id, hd->paging_mode); 6.198 6.199 - spin_unlock_irqrestore(&iommu->lock, flags); 6.200 + spin_unlock_irqrestore(&iommu->lock, flags); 6.201 + } 6.202 } 6.203 6.204 void __init amd_iommu_setup_dom0_devices(void) 6.205 @@ -259,13 +268,16 @@ void __init amd_iommu_setup_dom0_devices 6.206 u32 l; 6.207 int req_id, bdf; 6.208 6.209 - for ( bus = 0; bus < 256; bus++ ) { 6.210 - for ( dev = 0; dev < 32; dev++ ) { 6.211 - for ( func = 0; func < 8; func++ ) { 6.212 + for ( bus = 0; bus < 256; bus++ ) 6.213 + { 6.214 + for ( dev = 0; dev < 32; dev++ ) 6.215 + { 6.216 + for ( func = 0; func < 8; func++ ) 6.217 + { 6.218 l = read_pci_config(bus, dev, func, PCI_VENDOR_ID); 6.219 /* some broken boards return 0 or ~0 if a slot is empty: */ 6.220 if 
( l == 0xffffffff || l == 0x00000000 || 6.221 - l == 0x0000ffff || l == 0xffff0000 ) 6.222 + l == 0x0000ffff || l == 0xffff0000 ) 6.223 continue; 6.224 6.225 pdev = xmalloc(struct pci_dev); 6.226 @@ -288,29 +300,33 @@ int amd_iommu_detect(void) 6.227 { 6.228 unsigned long i; 6.229 6.230 - if ( !enable_amd_iommu ) { 6.231 + if ( !enable_amd_iommu ) 6.232 + { 6.233 printk("AMD IOMMU: Disabled\n"); 6.234 return 0; 6.235 } 6.236 6.237 INIT_LIST_HEAD(&amd_iommu_head); 6.238 6.239 - if ( scan_for_iommu(iommu_detect_callback) != 0 ) { 6.240 + if ( scan_for_iommu(iommu_detect_callback) != 0 ) 6.241 + { 6.242 dprintk(XENLOG_ERR, "AMD IOMMU: Error detection\n"); 6.243 goto error_out; 6.244 } 6.245 6.246 - if ( !iommu_found() ) { 6.247 + if ( !iommu_found() ) 6.248 + { 6.249 printk("AMD IOMMU: Not found!\n"); 6.250 return 0; 6.251 } 6.252 6.253 - if ( amd_iommu_init() != 0 ) { 6.254 + if ( amd_iommu_init() != 0 ) 6.255 + { 6.256 dprintk(XENLOG_ERR, "AMD IOMMU: Error initialization\n"); 6.257 goto error_out; 6.258 } 6.259 6.260 - if ( amd_iommu_domain_init(dom0) != 0 ) 6.261 + if ( iommu_domain_init(dom0) != 0 ) 6.262 goto error_out; 6.263 6.264 /* setup 1:1 page table for dom0 */ 6.265 @@ -320,21 +336,31 @@ int amd_iommu_detect(void) 6.266 amd_iommu_setup_dom0_devices(); 6.267 return 0; 6.268 6.269 -error_out: 6.270 - detect_cleanup(); 6.271 - return -ENODEV; 6.272 + error_out: 6.273 + detect_cleanup(); 6.274 + return -ENODEV; 6.275 6.276 } 6.277 6.278 static int allocate_domain_resources(struct hvm_iommu *hd) 6.279 { 6.280 /* allocate root table */ 6.281 - hd->root_table = (void *)alloc_xenheap_page(); 6.282 + unsigned long flags; 6.283 + 6.284 + spin_lock_irqsave(&hd->mapping_lock, flags); 6.285 if ( !hd->root_table ) 6.286 - return -ENOMEM; 6.287 - memset((u8*)hd->root_table, 0, PAGE_SIZE); 6.288 + { 6.289 + hd->root_table = (void *)alloc_xenheap_page(); 6.290 + if ( !hd->root_table ) 6.291 + goto error_out; 6.292 + memset((u8*)hd->root_table, 0, PAGE_SIZE); 6.293 + } 6.294 + spin_unlock_irqrestore(&hd->mapping_lock, flags); 6.295 6.296 return 0; 6.297 + error_out: 6.298 + spin_unlock_irqrestore(&hd->mapping_lock, flags); 6.299 + return -ENOMEM; 6.300 } 6.301 6.302 static int get_paging_mode(unsigned long entries) 6.303 @@ -346,7 +372,8 @@ static int get_paging_mode(unsigned long 6.304 if ( entries > max_page ) 6.305 entries = max_page; 6.306 6.307 - while ( entries > PTE_PER_TABLE_SIZE ) { 6.308 + while ( entries > PTE_PER_TABLE_SIZE ) 6.309 + { 6.310 entries = PTE_PER_TABLE_ALIGN(entries) >> PTE_PER_TABLE_SHIFT; 6.311 ++level; 6.312 if ( level > 6 ) 6.313 @@ -362,14 +389,11 @@ int amd_iommu_domain_init(struct domain 6.314 { 6.315 struct hvm_iommu *hd = domain_hvm_iommu(domain); 6.316 6.317 - spin_lock_init(&hd->mapping_lock); 6.318 - spin_lock_init(&hd->iommu_list_lock); 6.319 - INIT_LIST_HEAD(&hd->pdev_list); 6.320 - 6.321 /* allocate page directroy */ 6.322 - if ( allocate_domain_resources(hd) != 0 ) { 6.323 - dprintk(XENLOG_ERR, "AMD IOMMU: %s()\n", __FUNCTION__); 6.324 - goto error_out; 6.325 + if ( allocate_domain_resources(hd) != 0 ) 6.326 + { 6.327 + deallocate_domain_resources(hd); 6.328 + return -ENOMEM; 6.329 } 6.330 6.331 if ( is_hvm_domain(domain) ) 6.332 @@ -380,10 +404,168 @@ int amd_iommu_domain_init(struct domain 6.333 hd->domain_id = domain->domain_id; 6.334 6.335 return 0; 6.336 +} 6.337 6.338 -error_out: 6.339 - deallocate_domain_resources(hd); 6.340 - return -ENOMEM; 6.341 +static void amd_iommu_disable_domain_device( 6.342 + struct domain *domain, struct amd_iommu *iommu, 
u16 requestor_id) 6.343 +{ 6.344 + void *dte; 6.345 + unsigned long flags; 6.346 + 6.347 + dte = iommu->dev_table.buffer + 6.348 + (requestor_id * IOMMU_DEV_TABLE_ENTRY_SIZE); 6.349 + 6.350 + if ( amd_iommu_is_dte_page_translation_valid((u32 *)dte) ) 6.351 + { 6.352 + spin_lock_irqsave(&iommu->lock, flags); 6.353 + memset (dte, 0, IOMMU_DEV_TABLE_ENTRY_SIZE); 6.354 + invalidate_dev_table_entry(iommu, requestor_id); 6.355 + flush_command_buffer(iommu); 6.356 + dprintk(XENLOG_INFO , "AMD IOMMU: disable DTE 0x%x," 6.357 + " domain_id:%d, paging_mode:%d\n", 6.358 + requestor_id, domain_hvm_iommu(domain)->domain_id, 6.359 + domain_hvm_iommu(domain)->paging_mode); 6.360 + spin_unlock_irqrestore(&iommu->lock, flags); 6.361 + } 6.362 +} 6.363 + 6.364 +extern void pdev_flr(u8 bus, u8 devfn); 6.365 + 6.366 +static int reassign_device( struct domain *source, struct domain *target, 6.367 + u8 bus, u8 devfn) 6.368 +{ 6.369 + struct hvm_iommu *source_hd = domain_hvm_iommu(source); 6.370 + struct hvm_iommu *target_hd = domain_hvm_iommu(target); 6.371 + struct pci_dev *pdev; 6.372 + struct amd_iommu *iommu; 6.373 + int req_id, bdf; 6.374 + unsigned long flags; 6.375 + 6.376 + for_each_pdev( source, pdev ) 6.377 + { 6.378 + if ( (pdev->bus != bus) || (pdev->devfn != devfn) ) 6.379 + continue; 6.380 + 6.381 + pdev->bus = bus; 6.382 + pdev->devfn = devfn; 6.383 + 6.384 + bdf = (bus << 8) | devfn; 6.385 + req_id = requestor_id_from_bdf(bdf); 6.386 + iommu = find_iommu_for_device(bus, devfn); 6.387 + 6.388 + if ( iommu ) 6.389 + { 6.390 + amd_iommu_disable_domain_device(source, iommu, req_id); 6.391 + /* Move pci device from the source domain to target domain. */ 6.392 + spin_lock_irqsave(&source_hd->iommu_list_lock, flags); 6.393 + spin_lock_irqsave(&target_hd->iommu_list_lock, flags); 6.394 + list_move(&pdev->list, &target_hd->pdev_list); 6.395 + spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags); 6.396 + spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags); 6.397 + 6.398 + amd_iommu_setup_domain_device(target, iommu, req_id); 6.399 + gdprintk(XENLOG_INFO , 6.400 + "AMD IOMMU: reassign %x:%x.%x domain %d -> domain %d\n", 6.401 + bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 6.402 + source->domain_id, target->domain_id); 6.403 + } 6.404 + else 6.405 + { 6.406 + gdprintk(XENLOG_ERR , "AMD IOMMU: fail to find iommu." 
6.407 + " %x:%x.%x cannot be assigned to domain %d\n", 6.408 + bus, PCI_SLOT(devfn), PCI_FUNC(devfn), target->domain_id); 6.409 + return -ENODEV; 6.410 + } 6.411 + 6.412 + break; 6.413 + } 6.414 + return 0; 6.415 } 6.416 6.417 +int amd_iommu_assign_device(struct domain *d, u8 bus, u8 devfn) 6.418 +{ 6.419 + pdev_flr(bus, devfn); 6.420 + return reassign_device(dom0, d, bus, devfn); 6.421 +} 6.422 6.423 +static void release_domain_devices(struct domain *d) 6.424 +{ 6.425 + struct hvm_iommu *hd = domain_hvm_iommu(d); 6.426 + struct pci_dev *pdev; 6.427 + 6.428 + while ( !list_empty(&hd->pdev_list) ) 6.429 + { 6.430 + pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list); 6.431 + pdev_flr(pdev->bus, pdev->devfn); 6.432 + gdprintk(XENLOG_INFO , 6.433 + "AMD IOMMU: release devices %x:%x.%x\n", 6.434 + pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 6.435 + reassign_device(d, dom0, pdev->bus, pdev->devfn); 6.436 + } 6.437 +} 6.438 + 6.439 +static void deallocate_next_page_table(void *table, unsigned long index, 6.440 + int level) 6.441 +{ 6.442 + unsigned long next_index; 6.443 + void *next_table, *pde; 6.444 + int next_level; 6.445 + 6.446 + pde = table + (index * IOMMU_PAGE_TABLE_ENTRY_SIZE); 6.447 + next_table = amd_iommu_get_vptr_from_page_table_entry((u32 *)pde); 6.448 + 6.449 + if ( next_table ) 6.450 + { 6.451 + next_level = level - 1; 6.452 + if ( next_level > 1 ) 6.453 + { 6.454 + next_index = 0; 6.455 + do 6.456 + { 6.457 + deallocate_next_page_table(next_table, 6.458 + next_index, next_level); 6.459 + ++next_index; 6.460 + } while (next_index < PTE_PER_TABLE_SIZE); 6.461 + } 6.462 + 6.463 + free_xenheap_page(next_table); 6.464 + } 6.465 +} 6.466 + 6.467 +static void deallocate_iommu_page_tables(struct domain *d) 6.468 +{ 6.469 + unsigned long index; 6.470 + struct hvm_iommu *hd = domain_hvm_iommu(d); 6.471 + 6.472 + if ( hd ->root_table ) 6.473 + { 6.474 + index = 0; 6.475 + do 6.476 + { 6.477 + deallocate_next_page_table(hd->root_table, 6.478 + index, hd->paging_mode); 6.479 + ++index; 6.480 + } while ( index < PTE_PER_TABLE_SIZE ); 6.481 + 6.482 + free_xenheap_page(hd ->root_table); 6.483 + } 6.484 + 6.485 + hd ->root_table = NULL; 6.486 +} 6.487 + 6.488 +void amd_iommu_domain_destroy(struct domain *d) 6.489 +{ 6.490 + if ( !amd_iommu_enabled ) 6.491 + return; 6.492 + 6.493 + deallocate_iommu_page_tables(d); 6.494 + release_domain_devices(d); 6.495 +} 6.496 + 6.497 +struct iommu_ops amd_iommu_ops = { 6.498 + .init = amd_iommu_domain_init, 6.499 + .assign_device = amd_iommu_assign_device, 6.500 + .teardown = amd_iommu_domain_destroy, 6.501 + .map_page = amd_iommu_map_page, 6.502 + .unmap_page = amd_iommu_unmap_page, 6.503 +};
--- a/xen/arch/x86/hvm/svm/intr.c  Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/svm/intr.c  Thu Feb 14 11:14:17 2008 +0000
@@ -94,6 +94,46 @@ static void enable_intr_window(struct vc
     vmcb->general1_intercepts |= GENERAL1_INTERCEPT_VINTR;
 }
 
+static void svm_dirq_assist(struct vcpu *v)
+{
+    unsigned int irq;
+    uint32_t device, intx;
+    struct domain *d = v->domain;
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    struct dev_intx_gsi_link *digl;
+
+    if ( !amd_iommu_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
+        return;
+
+    for ( irq = find_first_bit(hvm_irq_dpci->dirq_mask, NR_IRQS);
+          irq < NR_IRQS;
+          irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
+    {
+        stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]);
+        clear_bit(irq, &hvm_irq_dpci->dirq_mask);
+
+        list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
+        {
+            device = digl->device;
+            intx = digl->intx;
+            hvm_pci_intx_assert(d, device, intx);
+            spin_lock(&hvm_irq_dpci->dirq_lock);
+            hvm_irq_dpci->mirq[irq].pending++;
+            spin_unlock(&hvm_irq_dpci->dirq_lock);
+        }
+
+        /*
+         * Set a timer to see if the guest can finish the interrupt or not. For
+         * example, the guest OS may unmask the PIC during boot, before the
+         * guest driver is loaded. hvm_pci_intx_assert() may succeed, but the
+         * guest will never deal with the irq, then the physical interrupt line
+         * will never be deasserted.
+         */
+        set_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)],
+                  NOW() + PT_IRQ_TIME_OUT);
+    }
+}
+
 asmlinkage void svm_intr_assist(void)
 {
     struct vcpu *v = current;
@@ -102,6 +142,7 @@ asmlinkage void svm_intr_assist(void)
 
     /* Crank the handle on interrupt state. */
     pt_update_irq(v);
+    svm_dirq_assist(v);
 
     do {
         intack = hvm_vcpu_has_pending_irq(v);
--- a/xen/arch/x86/hvm/vioapic.c  Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/hvm/vioapic.c  Thu Feb 14 11:14:17 2008 +0000
@@ -458,7 +458,7 @@ void vioapic_update_EOI(struct domain *d
 
     ent->fields.remote_irr = 0;
 
-    if ( vtd_enabled )
+    if ( iommu_enabled )
     {
         spin_unlock(&d->arch.hvm_domain.irq_lock);
         hvm_dpci_eoi(current->domain, gsi, ent);
9.1 --- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c Thu Feb 14 10:36:47 2008 +0000 9.2 +++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c Thu Feb 14 11:14:17 2008 +0000 9.3 @@ -1047,7 +1047,7 @@ static void free_iommu(struct iommu *iom 9.4 agaw = 64; \ 9.5 agaw; }) 9.6 9.7 -int iommu_domain_init(struct domain *domain) 9.8 +int intel_iommu_domain_init(struct domain *domain) 9.9 { 9.10 struct hvm_iommu *hd = domain_hvm_iommu(domain); 9.11 struct iommu *iommu = NULL; 9.12 @@ -1056,11 +1056,6 @@ int iommu_domain_init(struct domain *dom 9.13 unsigned long sagaw; 9.14 struct acpi_drhd_unit *drhd; 9.15 9.16 - spin_lock_init(&hd->mapping_lock); 9.17 - spin_lock_init(&hd->iommu_list_lock); 9.18 - INIT_LIST_HEAD(&hd->pdev_list); 9.19 - INIT_LIST_HEAD(&hd->g2m_ioport_list); 9.20 - 9.21 if ( !vtd_enabled || list_empty(&acpi_drhd_units) ) 9.22 return 0; 9.23 9.24 @@ -1550,7 +1545,8 @@ static int domain_context_mapped(struct 9.25 return 0; 9.26 } 9.27 9.28 -int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn) 9.29 +int intel_iommu_map_page( 9.30 + struct domain *d, unsigned long gfn, unsigned long mfn) 9.31 { 9.32 struct acpi_drhd_unit *drhd; 9.33 struct iommu *iommu; 9.34 @@ -1566,12 +1562,12 @@ int iommu_map_page(struct domain *d, pad 9.35 return 0; 9.36 #endif 9.37 9.38 - pg = addr_to_dma_page(d, gfn << PAGE_SHIFT_4K); 9.39 + pg = addr_to_dma_page(d, (paddr_t)gfn << PAGE_SHIFT_4K); 9.40 if ( !pg ) 9.41 return -ENOMEM; 9.42 pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg)); 9.43 pte += gfn & LEVEL_MASK; 9.44 - dma_set_pte_addr(*pte, mfn << PAGE_SHIFT_4K); 9.45 + dma_set_pte_addr(*pte, (paddr_t)mfn << PAGE_SHIFT_4K); 9.46 dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE); 9.47 iommu_flush_cache_entry(iommu, pte); 9.48 unmap_domain_page(pte); 9.49 @@ -1581,7 +1577,7 @@ int iommu_map_page(struct domain *d, pad 9.50 iommu = drhd->iommu; 9.51 if ( cap_caching_mode(iommu->cap) ) 9.52 iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d), 9.53 - gfn << PAGE_SHIFT_4K, 1, 0); 9.54 + (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0); 9.55 else if ( cap_rwbf(iommu->cap) ) 9.56 iommu_flush_write_buffer(iommu); 9.57 } 9.58 @@ -1589,7 +1585,7 @@ int iommu_map_page(struct domain *d, pad 9.59 return 0; 9.60 } 9.61 9.62 -int iommu_unmap_page(struct domain *d, dma_addr_t gfn) 9.63 +int intel_iommu_unmap_page(struct domain *d, unsigned long gfn) 9.64 { 9.65 struct acpi_drhd_unit *drhd; 9.66 struct iommu *iommu; 9.67 @@ -1603,12 +1599,12 @@ int iommu_unmap_page(struct domain *d, d 9.68 return 0; 9.69 #endif 9.70 9.71 - dma_pte_clear_one(d, gfn << PAGE_SHIFT_4K); 9.72 + dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K); 9.73 9.74 return 0; 9.75 } 9.76 9.77 -int iommu_page_mapping(struct domain *domain, dma_addr_t iova, 9.78 +int iommu_page_mapping(struct domain *domain, paddr_t iova, 9.79 void *hpa, size_t size, int prot) 9.80 { 9.81 struct acpi_drhd_unit *drhd; 9.82 @@ -1655,14 +1651,14 @@ int iommu_page_mapping(struct domain *do 9.83 return 0; 9.84 } 9.85 9.86 -int iommu_page_unmapping(struct domain *domain, dma_addr_t addr, size_t size) 9.87 +int iommu_page_unmapping(struct domain *domain, paddr_t addr, size_t size) 9.88 { 9.89 dma_pte_clear_range(domain, addr, addr + size); 9.90 9.91 return 0; 9.92 } 9.93 9.94 -void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry) 9.95 +void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry) 9.96 { 9.97 struct acpi_drhd_unit *drhd; 9.98 struct iommu *iommu = NULL; 9.99 @@ -1673,7 +1669,7 @@ void iommu_flush(struct domain *d, dma_a 9.100 iommu = drhd->iommu; 
9.101 if ( cap_caching_mode(iommu->cap) ) 9.102 iommu_flush_iotlb_psi(iommu, domain_iommu_domid(d), 9.103 - gfn << PAGE_SHIFT_4K, 1, 0); 9.104 + (paddr_t)gfn << PAGE_SHIFT_4K, 1, 0); 9.105 else if ( cap_rwbf(iommu->cap) ) 9.106 iommu_flush_write_buffer(iommu); 9.107 } 9.108 @@ -1921,7 +1917,7 @@ int device_assigned(u8 bus, u8 devfn) 9.109 return 1; 9.110 } 9.111 9.112 -int assign_device(struct domain *d, u8 bus, u8 devfn) 9.113 +int intel_iommu_assign_device(struct domain *d, u8 bus, u8 devfn) 9.114 { 9.115 struct acpi_rmrr_unit *rmrr; 9.116 struct pci_dev *pdev; 9.117 @@ -2151,6 +2147,14 @@ int iommu_resume(void) 9.118 return 0; 9.119 } 9.120 9.121 +struct iommu_ops intel_iommu_ops = { 9.122 + .init = intel_iommu_domain_init, 9.123 + .assign_device = intel_iommu_assign_device, 9.124 + .teardown = iommu_domain_teardown, 9.125 + .map_page = intel_iommu_map_page, 9.126 + .unmap_page = intel_iommu_unmap_page, 9.127 +}; 9.128 + 9.129 /* 9.130 * Local variables: 9.131 * mode: C
10.1 --- a/xen/arch/x86/hvm/vmx/vtd/io.c Thu Feb 14 10:36:47 2008 +0000 10.2 +++ b/xen/arch/x86/hvm/vmx/vtd/io.c Thu Feb 14 11:14:17 2008 +0000 10.3 @@ -141,7 +141,7 @@ int hvm_do_IRQ_dpci(struct domain *d, un 10.4 { 10.5 struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq; 10.6 10.7 - if ( !vtd_enabled || (d == dom0) || (hvm_irq->dpci == NULL) || 10.8 + if ( !iommu_enabled || (d == dom0) || (hvm_irq->dpci == NULL) || 10.9 !hvm_irq->dpci->mirq[mirq].valid ) 10.10 return 0; 10.11 10.12 @@ -167,7 +167,7 @@ static void hvm_dpci_isairq_eoi(struct d 10.13 int i; 10.14 10.15 ASSERT(isairq < NR_ISAIRQS); 10.16 - if ( !vtd_enabled || !dpci || 10.17 + if ( !iommu_enabled || !dpci || 10.18 !test_bit(isairq, dpci->isairq_map) ) 10.19 return; 10.20 10.21 @@ -205,7 +205,7 @@ void hvm_dpci_eoi(struct domain *d, unsi 10.22 struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci; 10.23 uint32_t device, intx, machine_gsi; 10.24 10.25 - if ( !vtd_enabled || (hvm_irq_dpci == NULL) || 10.26 + if ( !iommu_enabled || (hvm_irq_dpci == NULL) || 10.27 (guest_gsi >= NR_ISAIRQS && 10.28 !hvm_irq_dpci->girq[guest_gsi].valid) ) 10.29 return; 10.30 @@ -235,50 +235,3 @@ void hvm_dpci_eoi(struct domain *d, unsi 10.31 else 10.32 spin_unlock(&hvm_irq_dpci->dirq_lock); 10.33 } 10.34 - 10.35 -void iommu_domain_destroy(struct domain *d) 10.36 -{ 10.37 - struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci; 10.38 - uint32_t i; 10.39 - struct hvm_iommu *hd = domain_hvm_iommu(d); 10.40 - struct list_head *ioport_list, *digl_list, *tmp; 10.41 - struct g2m_ioport *ioport; 10.42 - struct dev_intx_gsi_link *digl; 10.43 - 10.44 - if ( !vtd_enabled ) 10.45 - return; 10.46 - 10.47 - if ( hvm_irq_dpci != NULL ) 10.48 - { 10.49 - for ( i = 0; i < NR_IRQS; i++ ) 10.50 - if ( hvm_irq_dpci->mirq[i].valid ) 10.51 - { 10.52 - pirq_guest_unbind(d, i); 10.53 - kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]); 10.54 - 10.55 - list_for_each_safe ( digl_list, tmp, 10.56 - &hvm_irq_dpci->mirq[i].digl_list ) 10.57 - { 10.58 - digl = list_entry(digl_list, 10.59 - struct dev_intx_gsi_link, list); 10.60 - list_del(&digl->list); 10.61 - xfree(digl); 10.62 - } 10.63 - } 10.64 - 10.65 - d->arch.hvm_domain.irq.dpci = NULL; 10.66 - xfree(hvm_irq_dpci); 10.67 - } 10.68 - 10.69 - if ( hd ) 10.70 - { 10.71 - list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list ) 10.72 - { 10.73 - ioport = list_entry(ioport_list, struct g2m_ioport, list); 10.74 - list_del(&ioport->list); 10.75 - xfree(ioport); 10.76 - } 10.77 - } 10.78 - 10.79 - iommu_domain_teardown(d); 10.80 -}
--- a/xen/arch/x86/mm/p2m.c  Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/arch/x86/mm/p2m.c  Thu Feb 14 11:14:17 2008 +0000
@@ -255,8 +255,21 @@ set_p2m_entry(struct domain *d, unsigned
     /* level 1 entry */
     paging_write_p2m_entry(d, gfn, p2m_entry, table_mfn, entry_content, 1);
 
-    if ( vtd_enabled && (p2mt == p2m_mmio_direct) && is_hvm_domain(d) )
-        iommu_flush(d, gfn, (u64*)p2m_entry);
+    if ( iommu_enabled && is_hvm_domain(d) )
+    {
+        if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+        {
+            if ( (p2mt == p2m_mmio_direct) )
+                iommu_flush(d, gfn, (u64*)p2m_entry);
+        }
+        else if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+        {
+            if ( p2mt == p2m_ram_rw )
+                iommu_map_page(d, gfn, mfn_x(mfn));
+            else
+                iommu_unmap_page(d, gfn);
+        }
+    }
 
     /* Success */
     rv = 1;
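In the set_p2m_entry() hunk above, the two vendors are handled asymmetrically: VT-d only needs an IOTLB flush when a direct-MMIO entry is written, whereas the AMD IOMMU keeps its own I/O page table and therefore mirrors every p2m update with an explicit iommu_map_page()/iommu_unmap_page(). A hypothetical helper (not part of this changeset, shown only to restate that split; all symbols come from the patched headers and p2m.c context) might look like this:

/*
 * Hypothetical helper, not in the changeset: captures the vendor split
 * from the set_p2m_entry() hunk above. Return values of the map/unmap
 * wrappers are ignored, as in the original hunk.
 */
static void iommu_sync_p2m_entry(struct domain *d, unsigned long gfn,
                                 mfn_t mfn, p2m_type_t p2mt, u64 *p2m_entry)
{
    if ( !iommu_enabled || !is_hvm_domain(d) )
        return;

    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
    {
        /* VT-d: only direct-MMIO entries need an immediate IOTLB flush. */
        if ( p2mt == p2m_mmio_direct )
            iommu_flush(d, gfn, p2m_entry);
    }
    else if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
    {
        /* AMD: keep the IOMMU page table in sync with the p2m. */
        if ( p2mt == p2m_ram_rw )
            iommu_map_page(d, gfn, mfn_x(mfn));
        else
            iommu_unmap_page(d, gfn);
    }
}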
--- a/xen/include/asm-x86/hvm/iommu.h  Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/include/asm-x86/hvm/iommu.h  Thu Feb 14 11:14:17 2008 +0000
@@ -48,6 +48,9 @@ struct hvm_iommu {
     int domain_id;
     int paging_mode;
     void *root_table;
+
+    /* iommu_ops */
+    struct iommu_ops *platform_ops;
 };
 
 #endif // __ASM_X86_HVM_IOMMU_H__
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h  Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-defs.h  Thu Feb 14 11:14:17 2008 +0000
@@ -263,6 +263,10 @@
 #define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK    0xFFFFFFFF
 #define IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT   0
 
+/* INVALIDATE_DEVTAB_ENTRY command */
+#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK   0x0000FFFF
+#define IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT  0
+
 /* Event Log */
 #define IOMMU_EVENT_LOG_BASE_LOW_OFFSET     0x10
 #define IOMMU_EVENT_LOG_BASE_HIGH_OFFSET    0x14
@@ -415,5 +419,6 @@
 #define IOMMU_PAGE_TABLE_LEVEL_4    4
 #define IOMMU_IO_WRITE_ENABLED      1
 #define IOMMU_IO_READ_ENABLED       1
+#define HACK_BIOS_SETTINGS          0
 
 #endif /* _ASM_X86_64_AMD_IOMMU_DEFS_H */
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h  Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h  Thu Feb 14 11:14:17 2008 +0000
@@ -27,13 +27,15 @@
     list_for_each_entry(amd_iommu, \
                         &amd_iommu_head, list)
 
+#define for_each_pdev(domain, pdev) \
+    list_for_each_entry(pdev, \
+                        &(domain->arch.hvm_domain.hvm_iommu.pdev_list), list)
+
 #define DMA_32BIT_MASK      0x00000000ffffffffULL
 #define PAGE_ALIGN(addr)    (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
-#define PAGE_SHIFT_4K       (12)
-#define PAGE_SIZE_4K        (1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K        (((u64)-1) << PAGE_SHIFT_4K)
 
-typedef int (*iommu_detect_callback_ptr_t)(u8 bus, u8 dev, u8 func, u8 cap_ptr);
+typedef int (*iommu_detect_callback_ptr_t)(
+    u8 bus, u8 dev, u8 func, u8 cap_ptr);
 
 /* amd-iommu-detect functions */
 int __init scan_for_iommu(iommu_detect_callback_ptr_t iommu_detect_callback);
@@ -49,16 +51,20 @@ void __init register_iommu_cmd_buffer_in
 void __init enable_iommu(struct amd_iommu *iommu);
 
 /* mapping functions */
-int amd_iommu_map_page(struct domain *d, unsigned long gfn,
-                       unsigned long mfn);
+int amd_iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
 int amd_iommu_unmap_page(struct domain *d, unsigned long gfn);
+void *amd_iommu_get_vptr_from_page_table_entry(u32 *entry);
 
 /* device table functions */
 void amd_iommu_set_dev_table_entry(u32 *dte,
        u64 root_ptr, u16 domain_id, u8 paging_mode);
+int amd_iommu_is_dte_page_translation_valid(u32 *entry);
+void invalidate_dev_table_entry(struct amd_iommu *iommu,
+                                u16 devic_id);
 
 /* send cmd to iommu */
 int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]);
+void flush_command_buffer(struct amd_iommu *iommu);
 
 /* iommu domain funtions */
 int amd_iommu_domain_init(struct domain *domain);
--- a/xen/include/asm-x86/hvm/vmx/intel-iommu.h  Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/include/asm-x86/hvm/vmx/intel-iommu.h  Thu Feb 14 11:14:17 2008 +0000
@@ -422,8 +422,6 @@ struct poll_info {
 #define VTD_PAGE_TABLE_LEVEL_3  3
 #define VTD_PAGE_TABLE_LEVEL_4  4
 
-typedef paddr_t dma_addr_t;
-
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
 #define MAX_IOMMUS 32
 #define MAX_IOMMU_REGS 0xc0
@@ -447,8 +445,10 @@ struct ir_ctrl {
 };
 
 struct iommu_flush {
-    int (*context)(void *iommu, u16 did, u16 source_id, u8 function_mask, u64 type, int non_present_entry_flush);
-    int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order, u64 type, int non_present_entry_flush);
+    int (*context)(void *iommu, u16 did, u16 source_id,
+                   u8 function_mask, u64 type, int non_present_entry_flush);
+    int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order,
+                 u64 type, int non_present_entry_flush);
 };
 
 struct intel_iommu {
--- a/xen/include/asm-x86/iommu.h  Thu Feb 14 10:36:47 2008 +0000
+++ b/xen/include/asm-x86/iommu.h  Thu Feb 14 11:14:17 2008 +0000
@@ -28,7 +28,9 @@
 #include <public/domctl.h>
 
 extern int vtd_enabled;
+extern int amd_iommu_enabled;
 
+#define iommu_enabled ( amd_iommu_enabled || vtd_enabled )
 #define domain_hvm_iommu(d)     (&d->arch.hvm_domain.hvm_iommu)
 #define domain_vmx_iommu(d)     (&d->arch.hvm_domain.hvm_iommu.vmx_iommu)
 #define iommu_qi_ctrl(iommu)    (&(iommu->intel.qi_ctrl));
@@ -72,9 +74,9 @@ int iommu_domain_init(struct domain *d);
 void iommu_domain_destroy(struct domain *d);
 int device_assigned(u8 bus, u8 devfn);
 int assign_device(struct domain *d, u8 bus, u8 devfn);
-int iommu_map_page(struct domain *d, dma_addr_t gfn, dma_addr_t mfn);
-int iommu_unmap_page(struct domain *d, dma_addr_t gfn);
-void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry);
+int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn);
+int iommu_unmap_page(struct domain *d, unsigned long gfn);
+void iommu_flush(struct domain *d, unsigned long gfn, u64 *p2m_entry);
 void iommu_set_pgd(struct domain *d);
 void iommu_domain_teardown(struct domain *d);
 int hvm_do_IRQ_dpci(struct domain *d, unsigned int irq);
@@ -89,4 +91,12 @@ void io_apic_write_remap_rte(unsigned in
 #define PT_IRQ_TIME_OUT MILLISECS(8)
 #define VTDPREFIX "[VT-D]"
 
+struct iommu_ops {
+    int (*init)(struct domain *d);
+    int (*assign_device)(struct domain *d, u8 bus, u8 devfn);
+    void (*teardown)(struct domain *d);
+    int (*map_page)(struct domain *d, unsigned long gfn, unsigned long mfn);
+    int (*unmap_page)(struct domain *d, unsigned long gfn);
+};
+
 #endif /* _IOMMU_H_ */
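For illustration only: a third vendor would plug into the interface above by providing its own ops table, in the same way this changeset defines amd_iommu_ops (pci-amd-iommu.c) and intel_iommu_ops (intel-iommu.c). Every name below is hypothetical; the stub bodies stand in for real hardware driver code.

/* Hypothetical third backend against the struct iommu_ops above. */
static int foo_iommu_domain_init(struct domain *d) { return 0; }
static int foo_iommu_assign_device(struct domain *d, u8 bus, u8 devfn) { return 0; }
static void foo_iommu_domain_destroy(struct domain *d) { }
static int foo_iommu_map_page(struct domain *d, unsigned long gfn,
                              unsigned long mfn) { return 0; }
static int foo_iommu_unmap_page(struct domain *d, unsigned long gfn) { return 0; }

struct iommu_ops foo_iommu_ops = {
    .init          = foo_iommu_domain_init,
    .assign_device = foo_iommu_assign_device,
    .teardown      = foo_iommu_domain_destroy,
    .map_page      = foo_iommu_map_page,
    .unmap_page    = foo_iommu_unmap_page,
};

A real backend would also need a matching case in the x86_vendor switch in iommu_domain_init() (xen/arch/x86/hvm/iommu.c above), which is where the per-domain platform_ops pointer gets aimed at the vendor's table.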