debuggers.hg

view xen/drivers/passthrough/vtd/x86/ats.c @ 19804:133c889c21a7

vtd: ats and queued invalidation cleanup

Use iommu_qinval in place of the qinval_enabled flag. Use
ecap_queued_inval() to determine whether queued invalidation is
available on a given VT-d engine.

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Jun 16 13:41:17 2009 +0100 (2009-06-16)
parents 42fe00c6f8b4
children e39acea851f4
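
For reference, the two capability checks this cleanup leans on are single-bit
tests on the IOMMU's extended capability (ECAP) register. A minimal sketch,
assuming the bit positions given in the VT-d specification (queued
invalidation support in bit 1, Device-IOTLB support in bit 2):

    #define ecap_queued_inval(e)  (((e) >> 1) & 1)  /* queued invalidation */
    #define ecap_dev_iotlb(e)     (((e) >> 2) & 1)  /* Device-IOTLB        */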

/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Allen Kay <allen.m.kay@intel.com>
 */

#include <xen/sched.h>
#include <xen/iommu.h>
#include <xen/time.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <asm/msi.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"
#include "../extern.h"

LIST_HEAD(ats_dev_drhd_units);

#define ATS_REG_CAP            4
#define ATS_REG_CTL            6
#define ATS_QUEUE_DEPTH_MASK   0x1f
#define ATS_ENABLE             (1<<15)
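
/*
 * Layout of the PCIe ATS extended capability: the ATS Capability register
 * (offset 0x4) holds the Invalidate Queue Depth in bits [4:0]; the ATS
 * Control register (offset 0x6) holds the Enable bit in bit 15.
 */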

struct pci_ats_dev {
    struct list_head list;
    u8 bus;
    u8 devfn;
    u16 ats_queue_depth;    /* ATS device invalidation queue depth */
    spinlock_t lock;
};
static LIST_HEAD(ats_devices);
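
/*
 * "ats=" boot parameter: accepts comma-separated values; on/yes/true/1/enable
 * turn ATS support on, off/no/false/0/disable turn it off.
 */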
static void parse_ats_param(char *s);
custom_param("ats", parse_ats_param);

int ats_enabled = 1;

static void parse_ats_param(char *s)
{
    char *ss;

    do {
        ss = strchr(s, ',');
        if ( ss )
            *ss = '\0';

        if ( !strcmp(s, "off") || !strcmp(s, "no") || !strcmp(s, "false") ||
             !strcmp(s, "0") || !strcmp(s, "disable") )
            ats_enabled = 0;

        if ( !strcmp(s, "on") || !strcmp(s, "yes") || !strcmp(s, "true") ||
             !strcmp(s, "1") || !strcmp(s, "enable") )
            ats_enabled = 1;

        s = ss + 1;
    } while ( ss );
}

struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu)
{
    struct acpi_drhd_unit *drhd;
    list_for_each_entry ( drhd, &ats_dev_drhd_units, list )
    {
        if ( drhd->iommu == iommu )
            return drhd;
    }
    return NULL;
}
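
/*
 * Return the offset of the device's ATS extended capability if the device
 * can use ATS on this platform, or 0 otherwise.  As a side effect, record
 * the device's DRHD unit on ats_dev_drhd_units the first time an ATS-capable
 * device is seen behind that IOMMU.
 */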
int ats_device(int seg, int bus, int devfn)
{
    struct acpi_drhd_unit *drhd, *ats_drhd, *new_drhd;
    struct pci_dev *pdev;
    int pos = 0;

    if ( !ats_enabled || !iommu_qinval )
        return 0;

    pdev = pci_get_pdev(bus, devfn);
    drhd = acpi_find_matched_drhd_unit(pdev);
    if ( !ecap_queued_inval(drhd->iommu->ecap) ||
         !ecap_dev_iotlb(drhd->iommu->ecap) )
        return 0;

    if ( !acpi_find_matched_atsr_unit(bus, devfn) )
        return 0;

    ats_drhd = find_ats_dev_drhd(drhd->iommu);
    pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);

    if ( pos && (ats_drhd == NULL) )
    {
        new_drhd = xmalloc(struct acpi_drhd_unit);
        if ( !new_drhd )
            return 0;
        memcpy(new_drhd, drhd, sizeof(struct acpi_drhd_unit));
        list_add_tail(&new_drhd->list, &ats_dev_drhd_units);
    }
    return pos;
}

int enable_ats_device(int seg, int bus, int devfn)
{
    struct pci_ats_dev *pdev;
    u32 value;
    u16 queue_depth;
    int pos;

    pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);

    if ( !pos )
    {
        dprintk(XENLOG_ERR VTDPREFIX, "ats capability not found %x:%x:%x\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        return 0;
    }
    else
        dprintk(XENLOG_INFO VTDPREFIX, "ats capability found %x:%x:%x\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

    /* BUGBUG: add back seg when multi-seg platform support is enabled */
    value = pci_conf_read16(bus, PCI_SLOT(devfn),
                            PCI_FUNC(devfn), pos + ATS_REG_CAP);
    queue_depth = value & ATS_QUEUE_DEPTH_MASK;

    /* BUGBUG: add back seg when multi-seg platform support is enabled */
    value = pci_conf_read16(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                            pos + ATS_REG_CTL);
    value |= ATS_ENABLE;

    /* BUGBUG: add back seg when multi-seg platform support is enabled */
    pci_conf_write16(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
                     pos + ATS_REG_CTL, value);

    if ( acpi_find_matched_atsr_unit(bus, devfn) )
    {
        pdev = xmalloc(struct pci_ats_dev);
        if ( !pdev )
            return 0;
        pdev->bus = bus;
        pdev->devfn = devfn;
        pdev->ats_queue_depth = queue_depth;
        list_add(&(pdev->list), &ats_devices);
    }
    return pos;
}
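
/*
 * Check that the context entry for (pdev->bus, pdev->devfn) under this IOMMU
 * is present, maps to domain-id 'did', and has its translation type set to
 * Device-IOTLB.
 */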
static int device_in_domain(struct iommu *iommu, struct pci_ats_dev *pdev,
                            u16 did)
{
    struct root_entry *root_entry = NULL;
    struct context_entry *ctxt_entry = NULL;
    int tt, found = 0;

    root_entry = (struct root_entry *) map_vtd_domain_page(iommu->root_maddr);
    if ( !root_entry || !root_present(root_entry[pdev->bus]) )
        goto out;

    ctxt_entry = (struct context_entry *)
        map_vtd_domain_page(root_entry[pdev->bus].val);

    if ( ctxt_entry == NULL )
        goto out;

    if ( context_domain_id(ctxt_entry[pdev->devfn]) != did )
        goto out;

    tt = context_translation_type(ctxt_entry[pdev->devfn]);
    if ( tt != CONTEXT_TT_DEV_IOTLB )
        goto out;

    found = 1;
out:
    if ( root_entry )
        unmap_vtd_domain_page(root_entry);

    if ( ctxt_entry )
        unmap_vtd_domain_page(ctxt_entry);

    return found;
}
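
/*
 * Issue Device-IOTLB invalidations for every registered ATS device.  Per the
 * VT-d spec, the descriptor's S (size) bit selects between a single 4K page
 * (S=0) and a power-of-two range (S=1) whose size is encoded by the position
 * of the least significant zero bit in the address field.
 */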
int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
                         u64 addr, unsigned int size_order, u64 type)
{
    struct pci_ats_dev *pdev;
    int sbit, ret = 0;
    u16 sid;

    if ( !ecap_dev_iotlb(iommu->ecap) )
        return ret;

    list_for_each_entry( pdev, &ats_devices, list )
    {
        sid = (pdev->bus << 8) | pdev->devfn;

        switch ( type ) {
        case DMA_TLB_DSI_FLUSH:
            if ( !device_in_domain(iommu, pdev, did) )
                break;
            /* fall through if DSI condition met */
        case DMA_TLB_GLOBAL_FLUSH:
            /* invalidate all translations: sbit=1,bit_63=0,bit[62:12]=1 */
            sbit = 1;
            addr = (~(u64)0 << PAGE_SHIFT_4K) & 0x7FFFFFFFFFFFFFFFULL;
            ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
                                       sid, sbit, addr);
            break;
        case DMA_TLB_PSI_FLUSH:
            if ( !device_in_domain(iommu, pdev, did) )
                break;

            /* if size <= 4K, set sbit = 0, else set sbit = 1 */
            sbit = size_order ? 1 : 0;

            /* clear lower bits */
            addr &= ~(u64)0 << (PAGE_SHIFT + size_order);

            /* if sbit == 1, zero out size_order bit and set lower bits to 1 */
            if ( sbit )
            {
                addr &= ~((u64)1 << (PAGE_SHIFT + size_order - 1));
                addr |= ((u64)1 << (PAGE_SHIFT + size_order - 1)) - 1;
            }

            ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
                                       sid, sbit, addr);
            break;
        default:
            dprintk(XENLOG_WARNING VTDPREFIX, "invalid vt-d flush type\n");
            break;
        }
    }
    return ret;
}
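
To illustrate the S=1 address encoding in the PSI path above, here is a
minimal standalone sketch (ats_inv_addr is a hypothetical helper, not part of
the file; 4K pages assumed):

    /* Encode a 2^size_order page range into a Device-IOTLB invalidation
     * address: align down, zero the marker bit, set the bits below it. */
    static inline u64 ats_inv_addr(u64 addr, unsigned int size_order)
    {
        addr &= ~(u64)0 << (12 + size_order);              /* align to range */
        if ( size_order )                                  /* S = 1 */
        {
            addr &= ~((u64)1 << (12 + size_order - 1));    /* marker bit = 0 */
            addr |= ((u64)1 << (12 + size_order - 1)) - 1; /* ones below it  */
        }
        return addr;
    }

For example, size_order = 2 (four 4K pages) and addr = 0x12345678 yield
0x12345FFF: bits [12:0] are 1 and bit 13 is 0, so the least significant zero
bit encodes a 16K range starting at 0x12344000.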