debuggers.hg

changeset 19939:0ab211e699e6

AMD IOMMU: Add suspend and resume support for amd iommu.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Jul 06 11:57:18 2009 +0100 (2009-07-06)
parents ff5bc91d0057
children d768628c28a4
files xen/drivers/passthrough/amd/iommu_init.c
line diff
     1.1 --- a/xen/drivers/passthrough/amd/iommu_init.c	Mon Jul 06 11:56:51 2009 +0100
     1.2 +++ b/xen/drivers/passthrough/amd/iommu_init.c	Mon Jul 06 11:57:18 2009 +0100
     1.3 @@ -716,3 +716,83 @@ error_out:
     1.4      }
     1.5      return -ENOMEM;
     1.6  }
     1.7 +
     1.8 +static void disable_iommu(struct amd_iommu *iommu)
     1.9 +{
    1.10 +    unsigned long flags;
    1.11 +
    1.12 +    spin_lock_irqsave(&iommu->lock, flags);
    1.13 +
    1.14 +    if ( !iommu->enabled )
    1.15 +    {
    1.16 +        spin_unlock_irqrestore(&iommu->lock, flags); 
    1.17 +        return;
    1.18 +    }
    1.19 +
    1.20 +    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
    1.21 +    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_DISABLED);
    1.22 +    set_iommu_event_log_control(iommu, IOMMU_CONTROL_DISABLED);
    1.23 +    set_iommu_translation_control(iommu, IOMMU_CONTROL_DISABLED);
    1.24 +
    1.25 +    iommu->enabled = 0;
    1.26 +
    1.27 +    spin_unlock_irqrestore(&iommu->lock, flags);
    1.28 +
    1.29 +}
    1.30 +
    1.31 +static void invalidate_all_domain_pages(void)
    1.32 +{
    1.33 +    struct domain *d;
    1.34 +    for_each_domain( d )
    1.35 +        invalidate_all_iommu_pages(d);
    1.36 +}
    1.37 +
    1.38 +static void invalidate_all_devices(void)
    1.39 +{
    1.40 +    u16 bus, devfn, bdf, req_id;
    1.41 +    unsigned long flags;
    1.42 +    struct amd_iommu *iommu;
    1.43 +
    1.44 +    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
    1.45 +    {
    1.46 +        bus = bdf >> 8;
    1.47 +        devfn = bdf & 0xFF;
    1.48 +        iommu = find_iommu_for_device(bus, devfn);
    1.49 +        req_id = ivrs_mappings[bdf].dte_requestor_id;
    1.50 +        if ( iommu )
    1.51 +        {
    1.52 +            spin_lock_irqsave(&iommu->lock, flags);
    1.53 +            invalidate_dev_table_entry(iommu, req_id);
    1.54 +            invalidate_interrupt_table(iommu, req_id);
    1.55 +            flush_command_buffer(iommu);
    1.56 +            spin_unlock_irqrestore(&iommu->lock, flags);
    1.57 +        }
    1.58 +    }
    1.59 +}
    1.60 +
    1.61 +void amd_iommu_suspend(void)
    1.62 +{
    1.63 +    struct amd_iommu *iommu;
    1.64 +
    1.65 +    for_each_amd_iommu ( iommu )
    1.66 +        disable_iommu(iommu);
    1.67 +}
    1.68 +
    1.69 +void amd_iommu_resume(void)
    1.70 +{
    1.71 +    struct amd_iommu *iommu;
    1.72 +
    1.73 +    for_each_amd_iommu ( iommu )
    1.74 +    {
     1.75 +        /*
     1.76 +         * Disable first to guarantee the IOMMU state has not
     1.77 +         * been touched before it is re-enabled.
     1.78 +         */
    1.79 +        disable_iommu(iommu);
    1.80 +        enable_iommu(iommu);
    1.81 +    }
    1.82 +
     1.83 +    /* Flush all cached translations after the IOMMUs are re-enabled. */
    1.84 +    invalidate_all_devices();
    1.85 +    invalidate_all_domain_pages();
    1.86 +}