xcp-1.6-updates/xen-4.1.hg

changeset 23263:b01091775dd9

iommu: Move IOMMU faults handling into softirq for AMD-Vi.

Dealing with interrupts from AMD-Vi IOMMU(s) is deferred to a
softirq-tasklet, raised by the actual IRQ handler. To avoid more
interrupts being generated (because of further faults), they must be
masked in the IOMMU within the low level IRQ handler and enabled back
in the tasklet body. Notice that this may cause the log to overflow,
but none of the existing entries will be overwritten.

Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>
Committed-by: Keir Fraser <keir@xen.org>
xen-unstable changeset: 24527:028230eb2359
xen-unstable date: Fri Jan 20 10:20:32 2012 +0000
author Dario Faggioli <dario.faggioli@citrix.com>
date Thu Mar 08 10:05:04 2012 +0000 (2012-03-08)
parents 7c0d02aa4742
children 3f158dd40b31
files xen/drivers/passthrough/amd/iommu_init.c
line diff
     1.1 --- a/xen/drivers/passthrough/amd/iommu_init.c	Thu Mar 08 10:04:22 2012 +0000
     1.2 +++ b/xen/drivers/passthrough/amd/iommu_init.c	Thu Mar 08 10:05:04 2012 +0000
     1.3 @@ -33,6 +33,8 @@ static int nr_amd_iommus;
     1.4  static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
     1.5  static long amd_iommu_event_log_entries = IOMMU_EVENT_LOG_DEFAULT_ENTRIES;
     1.6  
     1.7 +static struct tasklet amd_iommu_irq_tasklet;
     1.8 +
     1.9  unsigned short ivrs_bdf_entries;
    1.10  struct ivrs_mappings *ivrs_mappings;
    1.11  struct list_head amd_iommu_head;
    1.12 @@ -517,34 +519,70 @@ static void parse_event_log_entry(u32 en
    1.13      }
    1.14  }
    1.15  
    1.16 +static void do_amd_iommu_irq(unsigned long data)
    1.17 +{
    1.18 +    struct amd_iommu *iommu;
    1.19 +
    1.20 +    if ( !iommu_found() )
    1.21 +    {
    1.22 +        AMD_IOMMU_DEBUG("no device found, something must be very wrong!\n");
    1.23 +        return;
    1.24 +   }
    1.25 +
    1.26 +    /*
    1.27 +     * No matter from where the interrupt came from, check all the
    1.28 +     * IOMMUs present in the system. This allows for having just one
    1.29 +     * tasklet (instead of one per each IOMMUs).
    1.30 +     */
    1.31 +    for_each_amd_iommu ( iommu )
    1.32 +    {
    1.33 +        u32 entry;
    1.34 +        unsigned long flags;
    1.35 +        int of;
    1.36 +
    1.37 +        spin_lock_irqsave(&iommu->lock, flags);
    1.38 +        amd_iommu_read_event_log(iommu);
    1.39 +
    1.40 +        /* check event overflow */
    1.41 +        entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    1.42 +        of = get_field_from_reg_u32(entry,
    1.43 +                                   IOMMU_STATUS_EVENT_OVERFLOW_MASK,
    1.44 +                                   IOMMU_STATUS_EVENT_OVERFLOW_SHIFT);
    1.45 +
    1.46 +        /* reset event log if event overflow */
    1.47 +        if ( of )
    1.48 +            amd_iommu_reset_event_log(iommu);
    1.49 +
    1.50 +        /* reset interrupt status bit */
    1.51 +        entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    1.52 +        set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
    1.53 +                             IOMMU_STATUS_EVENT_LOG_INT_MASK,
    1.54 +                             IOMMU_STATUS_EVENT_LOG_INT_SHIFT, &entry);
    1.55 +        writel(entry, iommu->mmio_base+IOMMU_STATUS_MMIO_OFFSET);
    1.56 +        spin_unlock_irqrestore(&iommu->lock, flags);
    1.57 +    }
    1.58 +}
    1.59 +
    1.60  static void amd_iommu_page_fault(int irq, void *dev_id,
    1.61                               struct cpu_user_regs *regs)
    1.62  {
    1.63      u32 entry;
    1.64      unsigned long flags;
    1.65 -    int of;
    1.66      struct amd_iommu *iommu = dev_id;
    1.67  
    1.68      spin_lock_irqsave(&iommu->lock, flags);
    1.69 -    amd_iommu_read_event_log(iommu);
    1.70  
    1.71 -    /*check event overflow */
    1.72 +    /* Silence interrupts from both event logging */
    1.73      entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    1.74 -    of = get_field_from_reg_u32(entry,
    1.75 -                               IOMMU_STATUS_EVENT_OVERFLOW_MASK,
    1.76 -                               IOMMU_STATUS_EVENT_OVERFLOW_SHIFT);
    1.77 -
    1.78 -    /* reset event log if event overflow */
    1.79 -    if ( of )
    1.80 -        amd_iommu_reset_event_log(iommu);
    1.81 -
    1.82 -    /* reset interrupt status bit */
    1.83 -    entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    1.84 -    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
    1.85 +    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
    1.86                           IOMMU_STATUS_EVENT_LOG_INT_MASK,
    1.87                           IOMMU_STATUS_EVENT_LOG_INT_SHIFT, &entry);
    1.88      writel(entry, iommu->mmio_base+IOMMU_STATUS_MMIO_OFFSET);
    1.89 +
    1.90      spin_unlock_irqrestore(&iommu->lock, flags);
    1.91 +
    1.92 +    /* It is the tasklet that will clear the logs and re-enable interrupts */
    1.93 +    tasklet_schedule(&amd_iommu_irq_tasklet);
    1.94  }
    1.95  
    1.96  static int set_iommu_interrupt_handler(struct amd_iommu *iommu)
    1.97 @@ -689,6 +727,8 @@ static int __init amd_iommu_init_one(str
    1.98      printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus );
    1.99      nr_amd_iommus++;
   1.100  
   1.101 +    softirq_tasklet_init(&amd_iommu_irq_tasklet, do_amd_iommu_irq, 0);
   1.102 +
   1.103      return 0;
   1.104  
   1.105  error_out: