debuggers.hg

changeset 22873:3b00ee057c4a

amd iommu: reduce io page level for hvm guest (3/3)

Clean up the invalidate_iommu_page function. Make it suitable for variable
page sizes so that it can be reused by invalidate_all_iommu_pages.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Wei Wang <wei.wang2@amd.com>
date Thu Jan 27 16:12:07 2011 +0000 (2011-01-27)
parents 699f22481479
children 6067a17114bc
files xen/drivers/passthrough/amd/iommu_map.c
line diff
     1.1 --- a/xen/drivers/passthrough/amd/iommu_map.c	Thu Jan 27 16:11:37 2011 +0000
     1.2 +++ b/xen/drivers/passthrough/amd/iommu_map.c	Thu Jan 27 16:12:07 2011 +0000
     1.3 @@ -71,11 +71,29 @@ int send_iommu_command(struct amd_iommu 
     1.4      return 0;
     1.5  }
     1.6  
     1.7 -static void invalidate_iommu_page(struct amd_iommu *iommu,
     1.8 -                                  u64 io_addr, u16 domain_id)
     1.9 +static void invalidate_iommu_pages(struct amd_iommu *iommu,
    1.10 +                                   u64 io_addr, u16 domain_id, u16 order)
    1.11  {
    1.12      u64 addr_lo, addr_hi;
    1.13      u32 cmd[4], entry;
    1.14 +    u64 mask = 0;
    1.15 +    int sflag = 0, pde = 0;
    1.16 +
    1.17 +    /* If sflag == 1, the size of the invalidate command is determined
    1.18 +     by the first zero bit in the address starting from Address[12] */
    1.19 +    if ( order == 9 || order == 18 )
    1.20 +    {
    1.21 +        mask = ((1ULL << (order - 1)) - 1) << PAGE_SHIFT;
    1.22 +        io_addr |= mask;
    1.23 +        sflag = 1;
    1.24 +    }
    1.25 +
    1.26 +    /* All pages associated with the domainID are invalidated */
    1.27 +    else if ( io_addr == 0x7FFFFFFFFFFFF000ULL )
    1.28 +    {
    1.29 +        sflag = 1;
    1.30 +        pde = 1;
    1.31 +    }
    1.32  
    1.33      addr_lo = io_addr & DMA_32BIT_MASK;
    1.34      addr_hi = io_addr >> 32;
    1.35 @@ -88,10 +106,10 @@ static void invalidate_iommu_page(struct
    1.36                           &entry);
    1.37      cmd[1] = entry;
    1.38  
    1.39 -    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, 0,
    1.40 +    set_field_in_reg_u32(sflag, 0,
    1.41                           IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
    1.42                           IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
    1.43 -    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
    1.44 +    set_field_in_reg_u32(pde, entry,
    1.45                           IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
    1.46                           IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
    1.47      set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
    1.48 @@ -510,7 +528,7 @@ int amd_iommu_unmap_page(struct domain *
    1.49      for_each_amd_iommu ( iommu )
    1.50      {
    1.51          spin_lock_irqsave(&iommu->lock, flags);
    1.52 -        invalidate_iommu_page(iommu, (u64)gfn << PAGE_SHIFT, hd->domain_id);
    1.53 +        invalidate_iommu_pages(iommu, (u64)gfn << PAGE_SHIFT, hd->domain_id, 0);
    1.54          flush_command_buffer(iommu);
    1.55          spin_unlock_irqrestore(&iommu->lock, flags);
    1.56      }
    1.57 @@ -543,43 +561,14 @@ int amd_iommu_reserve_domain_unity_map(s
    1.58  
    1.59  void invalidate_all_iommu_pages(struct domain *d)
    1.60  {
    1.61 -    u32 cmd[4], entry;
    1.62      unsigned long flags;
    1.63      struct amd_iommu *iommu;
    1.64 -    int domain_id = d->domain_id;
    1.65 -    u64 addr_lo = 0x7FFFFFFFFFFFF000ULL & DMA_32BIT_MASK;
    1.66 -    u64 addr_hi = 0x7FFFFFFFFFFFF000ULL >> 32;
    1.67 -
    1.68 -    set_field_in_reg_u32(domain_id, 0,
    1.69 -                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
    1.70 -                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry);
    1.71 -    set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry,
    1.72 -                         IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT,
    1.73 -                         &entry);
    1.74 -    cmd[1] = entry;
    1.75 -
    1.76 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
    1.77 -                         IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK,
    1.78 -                         IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry);
    1.79 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
    1.80 -                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK,
    1.81 -                         IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry);
    1.82 -    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry,
    1.83 -                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK,
    1.84 -                         IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry);
    1.85 -    cmd[2] = entry;
    1.86 -
    1.87 -    set_field_in_reg_u32((u32)addr_hi, 0,
    1.88 -                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK,
    1.89 -                         IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry);
    1.90 -    cmd[3] = entry;
    1.91 -
    1.92 -    cmd[0] = 0;
    1.93  
    1.94      for_each_amd_iommu ( iommu )
    1.95      {
    1.96          spin_lock_irqsave(&iommu->lock, flags);
    1.97 -        send_iommu_command(iommu, cmd);
    1.98 +        invalidate_iommu_pages(iommu, 0x7FFFFFFFFFFFF000ULL,
    1.99 +                               d->domain_id, 0);
   1.100          flush_command_buffer(iommu);
   1.101          spin_unlock_irqrestore(&iommu->lock, flags);
   1.102      }