debuggers.hg

changeset 22871:969f26450ad5

amd iommu: reduce io page level for hvm guest (1/3)

Since in most cases, 2- or 3-level IO page tables are sufficient, this
patch updates the page table level used for device assignment to reduce
the overhead of DMA translation.
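The table depth comes from the existing get_paging_mode() helper in
pci_amd_iommu.c (shown in the diff below), now driven by the guest's own
page count instead of the host's max_page. A minimal standalone sketch of
that calculation, assuming PTE_PER_TABLE_SHIFT is 9 (512 PTEs per table
level, as in the AMD IOMMU definitions):

    /* Sketch: compute the IO page table depth needed to cover a guest of
     * `entries` page frames.  Mirrors the get_paging_mode() loop in
     * xen/drivers/passthrough/amd/pci_amd_iommu.c after this patch. */
    #include <stdio.h>

    #define PTE_PER_TABLE_SHIFT 9                 /* 9 bits of GFN per level */
    #define PTE_PER_TABLE_SIZE  (1UL << PTE_PER_TABLE_SHIFT)

    static int get_paging_mode(unsigned long entries)
    {
        int level = 1;

        /* Each additional level resolves another 9 bits of the frame number. */
        while ( entries > PTE_PER_TABLE_SIZE )
        {
            entries >>= PTE_PER_TABLE_SHIFT;
            ++level;
        }

        return level;
    }

    int main(void)
    {
        /* A 512MiB guest (0x20000 frames) fits in a 2-level table,
         * a 4GiB guest (0x100000 frames) in a 3-level table. */
        printf("512MiB guest: %d levels\n", get_paging_mode(1UL << 17));
        printf("4GiB guest:   %d levels\n", get_paging_mode(1UL << 20));
        return 0;
    }

With the guest's max_pages driving the depth, a typical HVM guest gets a
2- or 3-level table rather than one sized for host memory, which is where
the reduced DMA translation overhead comes from.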

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Wei Wang <wei.wang2@amd.com>
date Thu Jan 27 16:10:52 2011 +0000 (2011-01-27)
parents f346343cb978
children 699f22481479
files xen/drivers/passthrough/amd/iommu_map.c xen/drivers/passthrough/amd/pci_amd_iommu.c
line diff
     1.1 --- a/xen/drivers/passthrough/amd/iommu_map.c	Thu Jan 27 14:59:04 2011 +0000
     1.2 +++ b/xen/drivers/passthrough/amd/iommu_map.c	Thu Jan 27 16:10:52 2011 +0000
     1.3 @@ -411,10 +411,14 @@ static u64 iommu_l2e_from_pfn(struct pag
     1.4      void *pde = NULL;
     1.5      void *table_vaddr;
     1.6      u64 next_table_maddr = 0;
     1.7 +    unsigned int lowest = 1;
     1.8  
     1.9 -    BUG_ON( table == NULL || level == 0 );
    1.10 +    BUG_ON( table == NULL || level < lowest );
    1.11  
    1.12 -    while ( level > 1 )
    1.13 +    if ( level == lowest )
    1.14 +        return page_to_maddr(table);
    1.15 +
    1.16 +    while ( level > lowest )
    1.17      {
    1.18          offset = io_pfn >> ((PTE_PER_TABLE_SHIFT *
    1.19                               (level - IOMMU_PAGING_MODE_LEVEL_1)));
     2.1 --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Thu Jan 27 14:59:04 2011 +0000
     2.2 +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Thu Jan 27 16:10:52 2011 +0000
     2.3 @@ -190,10 +190,7 @@ static int get_paging_mode(unsigned long
     2.4  {
     2.5      int level = 1;
     2.6  
     2.7 -    BUG_ON(!max_page);
     2.8 -
     2.9 -    if ( entries > max_page )
    2.10 -        entries = max_page;
    2.11 +    BUG_ON( !entries );
    2.12  
    2.13      while ( entries > PTE_PER_TABLE_SIZE )
    2.14      {
    2.15 @@ -278,6 +275,7 @@ static int reassign_device( struct domai
    2.16      struct pci_dev *pdev;
    2.17      struct amd_iommu *iommu;
    2.18      int bdf;
    2.19 +    struct hvm_iommu *t = domain_hvm_iommu(target);
    2.20  
    2.21      ASSERT(spin_is_locked(&pcidevs_lock));
    2.22      pdev = pci_get_pdev_by_domain(source, bus, devfn);
    2.23 @@ -300,6 +298,9 @@ static int reassign_device( struct domai
    2.24      list_move(&pdev->domain_list, &target->arch.pdev_list);
    2.25      pdev->domain = target;
    2.26  
    2.27 +    if ( target->max_pages > 0 )
    2.28 +        t->paging_mode = get_paging_mode(target->max_pages);
    2.29 +
    2.30      amd_iommu_setup_domain_device(target, iommu, bdf);
    2.31      AMD_IOMMU_DEBUG("Re-assign %02x:%02x.%x from domain %d to domain %d\n",
    2.32                      bus, PCI_SLOT(devfn), PCI_FUNC(devfn),