changeset:   19937:7d5433600932
author:      Keir Fraser <keir.fraser@citrix.com>
date:        Mon Jul 06 11:56:17 2009 +0100
parents:     100b05eed0d5
children:    ff5bc91d0057
files:       xen/drivers/passthrough/amd/iommu_init.c

AMD IOMMU: Clean up hardware initialization functions to make them
more friendly to IOMMU suspend and resume operations.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
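The substance of the patch: the register_*_in_mmio_space() and set_*_control()
helpers drop their __init annotation so a suspend/resume path can call them
again after boot, and both ring controls (command buffer and event log) now
clear their head/tail pointers while the ring is still disabled, deferring a
single control-register write to the end instead of writing the register back
after every field update.

The following is a minimal sketch of that ordering, not the changeset's code:
readl()/writel() are stubbed over an in-memory buffer so the example compiles
and runs standalone, and the offsets and enable-bit position are illustrative
stand-ins rather than the real AMD MMIO layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CONTROL_OFFSET       0x00u       /* illustrative offsets, not AMD's */
#define CMD_BUF_HEAD_OFFSET  0x04u
#define CMD_BUF_TAIL_OFFSET  0x08u
#define CMD_BUF_ENABLE_BIT   (1u << 12)  /* illustrative bit position */

static uint8_t mmio[4096];               /* simulated MMIO window */

static uint32_t readl(const uint8_t *addr)
{
    uint32_t v;
    memcpy(&v, addr, sizeof(v));
    return v;
}

static void writel(uint32_t v, uint8_t *addr)
{
    memcpy(addr, &v, sizeof(v));
}

static void set_cmd_buffer_control(uint8_t *base, int enable)
{
    uint32_t entry = readl(base + CONTROL_OFFSET);

    if ( enable )
        entry |= CMD_BUF_ENABLE_BIT;
    else
        entry &= ~CMD_BUF_ENABLE_BIT;

    /* Reset head/tail while the ring is still disabled ... */
    if ( enable )
    {
        writel(0x0, base + CMD_BUF_HEAD_OFFSET);
        writel(0x0, base + CMD_BUF_TAIL_OFFSET);
    }

    /* ... then flip the enable bit with one final control write. */
    writel(entry, base + CONTROL_OFFSET);
}

int main(void)
{
    set_cmd_buffer_control(mmio, 1);
    printf("control=%#x head=%u tail=%u\n",
           (unsigned)readl(mmio + CONTROL_OFFSET),
           (unsigned)readl(mmio + CMD_BUF_HEAD_OFFSET),
           (unsigned)readl(mmio + CMD_BUF_TAIL_OFFSET));
    return 0;
}

Clearing the pointers first matters on resume: the hardware may still hold
pre-suspend head/tail values, so enabling the ring before zeroing them would
risk the IOMMU fetching from a stale position, which is presumably why the new
comment reads "reset head and tail pointer manually before enablement".
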
--- a/xen/drivers/passthrough/amd/iommu_init.c	Mon Jul 06 11:55:17 2009 +0100
+++ b/xen/drivers/passthrough/amd/iommu_init.c	Mon Jul 06 11:56:17 2009 +0100
@@ -67,7 +67,7 @@ static void __init unmap_iommu_mmio_regi
     }
 }

-static void __init register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
+static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
 {
     u64 addr_64, addr_lo, addr_hi;
     u32 entry;
@@ -90,7 +90,7 @@ static void __init register_iommu_dev_ta
     writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET);
 }

-static void __init register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
+static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
 {
     u64 addr_64, addr_lo, addr_hi;
     u32 power_of2_entries;
@@ -144,7 +144,7 @@ static void __init register_iommu_event_
     writel(entry, iommu->mmio_base+IOMMU_EVENT_LOG_BASE_HIGH_OFFSET);
 }

-static void __init set_iommu_translation_control(struct amd_iommu *iommu,
+static void set_iommu_translation_control(struct amd_iommu *iommu,
                                                  int enable)
 {
     u32 entry;
@@ -181,24 +181,28 @@ static void __init set_iommu_translation
     writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
 }

-static void __init set_iommu_command_buffer_control(struct amd_iommu *iommu,
+static void set_iommu_command_buffer_control(struct amd_iommu *iommu,
                                                     int enable)
 {
     u32 entry;

-    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
     set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                          IOMMU_CONTROL_DISABLED, entry,
                          IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
                          IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);
-    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);

-    /*reset head and tail pointer */
-    writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
-    writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
+    /*reset head and tail pointer manually before enablement */
+    if ( enable == IOMMU_CONTROL_ENABLED )
+    {
+        writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
+        writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
+    }
+
+    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
 }

-static void __init register_iommu_exclusion_range(struct amd_iommu *iommu)
+static void register_iommu_exclusion_range(struct amd_iommu *iommu)
 {
     u64 addr_lo, addr_hi;
     u32 entry;
@@ -238,32 +242,31 @@ static void __init register_iommu_exclus
     writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET);
 }

-static void __init set_iommu_event_log_control(struct amd_iommu *iommu,
+static void set_iommu_event_log_control(struct amd_iommu *iommu,
             int enable)
 {
     u32 entry;

-    entry = readl(iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
+    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
     set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                          IOMMU_CONTROL_DISABLED, entry,
                          IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK,
                          IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT, &entry);
-    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
-
     set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                          IOMMU_CONTROL_DISABLED, entry,
                          IOMMU_CONTROL_EVENT_LOG_INT_MASK,
                          IOMMU_CONTROL_EVENT_LOG_INT_SHIFT, &entry);
-    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);
-
     set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                          IOMMU_CONTROL_COMP_WAIT_INT_MASK,
                          IOMMU_CONTROL_COMP_WAIT_INT_SHIFT, &entry);
-    writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET);

-    /*reset head and tail pointer */
-    writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
-    writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);
+    /*reset head and tail pointer manually before enablement */
+    if ( enable == IOMMU_CONTROL_ENABLED )
+    {
+        writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
+        writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);
+    }
+    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
 }

 static int amd_iommu_read_event_log(struct amd_iommu *iommu, u32 event[])
@@ -502,7 +505,7 @@ static int set_iommu_interrupt_handler(s
     return vector;
 }

-void __init enable_iommu(struct amd_iommu *iommu)
+void enable_iommu(struct amd_iommu *iommu)
 {
     unsigned long flags;

@@ -514,10 +517,6 @@ void __init enable_iommu(struct amd_iomm
         return;
     }

-    iommu->dev_table.alloc_size = device_table.alloc_size;
-    iommu->dev_table.entries = device_table.entries;
-    iommu->dev_table.buffer = device_table.buffer;
-
     register_iommu_dev_table_in_mmio_space(iommu);
     register_iommu_cmd_buffer_in_mmio_space(iommu);
     register_iommu_event_log_in_mmio_space(iommu);
@@ -531,9 +530,6 @@ void __init enable_iommu(struct amd_iomm
     set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
     set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);

-    printk("AMD_IOV: IOMMU %d Enabled.\n", nr_amd_iommus );
-    nr_amd_iommus++;
-
     iommu->enabled = 1;
     spin_unlock_irqrestore(&iommu->lock, flags);

@@ -580,20 +576,24 @@ static int __init allocate_iommu_tables(
 {
     /* allocate 'command buffer' in power of 2 increments of 4K */
     iommu->cmd_buffer_tail = 0;
-    iommu->cmd_buffer.alloc_size = PAGE_SIZE << get_order_from_bytes(
-        PAGE_ALIGN(amd_iommu_cmd_buffer_entries * IOMMU_CMD_BUFFER_ENTRY_SIZE));
-    iommu->cmd_buffer.entries =
-        iommu->cmd_buffer.alloc_size / IOMMU_CMD_BUFFER_ENTRY_SIZE;
+    iommu->cmd_buffer.alloc_size = PAGE_SIZE <<
+                                   get_order_from_bytes(
+                                   PAGE_ALIGN(amd_iommu_cmd_buffer_entries *
+                                   IOMMU_CMD_BUFFER_ENTRY_SIZE));
+    iommu->cmd_buffer.entries = iommu->cmd_buffer.alloc_size /
+                                IOMMU_CMD_BUFFER_ENTRY_SIZE;

     if ( allocate_iommu_table_struct(&iommu->cmd_buffer, "Command Buffer") != 0 )
         goto error_out;

     /* allocate 'event log' in power of 2 increments of 4K */
     iommu->event_log_head = 0;
-    iommu->event_log.alloc_size = PAGE_SIZE << get_order_from_bytes(
-        PAGE_ALIGN(amd_iommu_event_log_entries * IOMMU_EVENT_LOG_ENTRY_SIZE));
-    iommu->event_log.entries =
-        iommu->event_log.alloc_size / IOMMU_EVENT_LOG_ENTRY_SIZE;
+    iommu->event_log.alloc_size = PAGE_SIZE <<
+                                  get_order_from_bytes(
+                                  PAGE_ALIGN(amd_iommu_event_log_entries *
+                                  IOMMU_EVENT_LOG_ENTRY_SIZE));
+    iommu->event_log.entries = iommu->event_log.alloc_size /
+                               IOMMU_EVENT_LOG_ENTRY_SIZE;

     if ( allocate_iommu_table_struct(&iommu->event_log, "Event Log") != 0 )
         goto error_out;
@@ -607,7 +607,6 @@ static int __init allocate_iommu_tables(

 int __init amd_iommu_init_one(struct amd_iommu *iommu)
 {
-
     if ( allocate_iommu_tables(iommu) != 0 )
         goto error_out;

@@ -617,7 +616,18 @@ int __init amd_iommu_init_one(struct amd
     if ( set_iommu_interrupt_handler(iommu) == 0 )
         goto error_out;

+    /* To make sure that device_table.buffer has been successfully allocated */
+    if ( device_table.buffer == NULL )
+        goto error_out;
+
+    iommu->dev_table.alloc_size = device_table.alloc_size;
+    iommu->dev_table.entries = device_table.entries;
+    iommu->dev_table.buffer = device_table.buffer;
+
     enable_iommu(iommu);
+    printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus );
+    nr_amd_iommus++;
+
     return 0;

 error_out:
@@ -670,9 +680,12 @@ static int __init init_ivrs_mapping(void
 static int __init amd_iommu_setup_device_table(void)
 {
     /* allocate 'device table' on a 4K boundary */
-    device_table.alloc_size = PAGE_SIZE << get_order_from_bytes(
-        PAGE_ALIGN(ivrs_bdf_entries * IOMMU_DEV_TABLE_ENTRY_SIZE));
-    device_table.entries = device_table.alloc_size / IOMMU_DEV_TABLE_ENTRY_SIZE;
+    device_table.alloc_size = PAGE_SIZE <<
+                              get_order_from_bytes(
+                              PAGE_ALIGN(ivrs_bdf_entries *
+                              IOMMU_DEV_TABLE_ENTRY_SIZE));
+    device_table.entries = device_table.alloc_size /
+                           IOMMU_DEV_TABLE_ENTRY_SIZE;

     return ( allocate_iommu_table_struct(&device_table, "Device Table") );
 }
@@ -681,7 +694,7 @@ int __init amd_iommu_setup_shared_tables
 {
     BUG_ON( !ivrs_bdf_entries );

-    if (init_ivrs_mapping() != 0 )
+    if ( init_ivrs_mapping() != 0 )
        goto error_out;

     if ( amd_iommu_setup_device_table() != 0 )
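
For reference, the size computation that the patch reflows in
allocate_iommu_tables() and amd_iommu_setup_device_table() (indentation only,
behavior unchanged) rounds each table up to a power-of-two number of 4K pages
and then derives the entry count from the rounded size, so a table can end up
holding more entries than were requested. Below is a standalone sketch under
stated assumptions: get_order_from_bytes() is reimplemented locally to match
its Xen semantics (smallest order such that PAGE_SIZE << order covers the
size), and the 512-entry request is an illustrative figure, not a value taken
from this changeset.

#include <stdio.h>

#define PAGE_SHIFT     12
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define IOMMU_CMD_BUFFER_ENTRY_SIZE 16UL  /* AMD IOMMU commands are 128-bit */

/* Local stand-in for Xen's get_order_from_bytes(). */
static unsigned int get_order_from_bytes(unsigned long size)
{
    unsigned long pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
    unsigned int order = 0;

    while ( (1UL << order) < pages )
        order++;
    return order;
}

int main(void)
{
    unsigned long requested = 512;  /* illustrative entry count */
    unsigned long alloc_size = PAGE_SIZE <<
        get_order_from_bytes(PAGE_ALIGN(requested *
                                        IOMMU_CMD_BUFFER_ENTRY_SIZE));
    unsigned long entries = alloc_size / IOMMU_CMD_BUFFER_ENTRY_SIZE;

    /* 512 * 16 bytes = 8KiB -> order 1 -> alloc_size=8192, entries=512 */
    printf("alloc_size=%lu entries=%lu\n", alloc_size, entries);
    return 0;
}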