debuggers.hg

view xen/drivers/passthrough/vtd/x86/ats.c @ 22823:1e7594758b28

VT-d/ATS: misc fixes

First of all there were three places potentially de-referencing NULL
(two after an allocation failure, and one after a failed lookup).

Second, if ATS_ENABLE was already set, the device would not have got
added to the ats_devices list, potentially resulting in
dev_invalidate_iotlb() doing an incomplete job.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Jan Beulich <jbeulich@novell.com>
date Tue Jan 18 12:28:10 2011 +0000 (2011-01-18)
parents e8acb9753ff1
children
line source
1 /*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Allen Kay <allen.m.kay@intel.com>
18 */
20 #include <xen/sched.h>
21 #include <xen/iommu.h>
22 #include <xen/time.h>
23 #include <xen/pci.h>
24 #include <xen/pci_regs.h>
25 #include <asm/msi.h>
26 #include "../iommu.h"
27 #include "../dmar.h"
28 #include "../vtd.h"
29 #include "../extern.h"
/* DRHD units copied for IOMMUs that have at least one ATS-capable
 * device behind them; looked up via find_ats_dev_drhd(). */
31 static LIST_HEAD(ats_dev_drhd_units);
/* Byte offsets of registers within the PCIe ATS extended capability,
 * and fields thereof (ATS_ENABLE lives in the control register). */
33 #define ATS_REG_CAP 4
34 #define ATS_REG_CTL 6
35 #define ATS_QUEUE_DEPTH_MASK 0xF
36 #define ATS_ENABLE (1<<15)
/* Per-device bookkeeping for each device on which ATS was enabled. */
38 struct pci_ats_dev {
39 struct list_head list;
40 u8 bus;
41 u8 devfn;
42 u16 ats_queue_depth; /* ATS device invalidation queue depth */
43 spinlock_t lock;
44 };
/* List of all tracked ATS-enabled devices (struct pci_ats_dev). */
45 static LIST_HEAD(ats_devices);
/* "ats=" boot-time parameter; ATS support defaults to enabled. */
47 static void parse_ats_param(char *s);
48 custom_param("ats", parse_ats_param);
50 bool_t __read_mostly ats_enabled = 1;
52 static void __init parse_ats_param(char *s)
53 {
54 char *ss;
56 do {
57 ss = strchr(s, ',');
58 if ( ss )
59 *ss = '\0';
61 switch ( parse_bool(s) )
62 {
63 case 0:
64 ats_enabled = 0;
65 break;
66 case 1:
67 ats_enabled = 1;
68 break;
69 }
71 s = ss + 1;
72 } while ( ss );
73 }
75 struct acpi_drhd_unit * find_ats_dev_drhd(struct iommu *iommu)
76 {
77 struct acpi_drhd_unit *drhd;
78 list_for_each_entry ( drhd, &ats_dev_drhd_units, list )
79 {
80 if ( drhd->iommu == iommu )
81 return drhd;
82 }
83 return NULL;
84 }
86 int ats_device(int seg, int bus, int devfn)
87 {
88 struct acpi_drhd_unit *drhd, *ats_drhd, *new_drhd;
89 struct pci_dev *pdev;
90 int pos = 0;
92 if ( !ats_enabled || !iommu_qinval )
93 return 0;
95 pdev = pci_get_pdev(bus, devfn);
96 if ( !pdev )
97 return 0;
99 drhd = acpi_find_matched_drhd_unit(pdev);
100 if ( !drhd )
101 return 0;
103 if ( !ecap_queued_inval(drhd->iommu->ecap) ||
104 !ecap_dev_iotlb(drhd->iommu->ecap) )
105 return 0;
107 if ( !acpi_find_matched_atsr_unit(bus, devfn) )
108 return 0;
110 ats_drhd = find_ats_dev_drhd(drhd->iommu);
111 pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
113 if ( pos && (ats_drhd == NULL) )
114 {
115 new_drhd = xmalloc(struct acpi_drhd_unit);
116 if ( !new_drhd )
117 return 0;
118 memcpy(new_drhd, drhd, sizeof(struct acpi_drhd_unit));
119 list_add_tail(&new_drhd->list, &ats_dev_drhd_units);
120 }
121 return pos;
122 }
124 int enable_ats_device(int seg, int bus, int devfn)
125 {
126 struct pci_ats_dev *pdev = NULL;
127 u32 value;
128 int pos;
130 if ( !acpi_find_matched_atsr_unit(bus, devfn) )
131 {
132 dprintk(XENLOG_WARNING VTDPREFIX,
133 "cannot find matched atsr for %x:%x.%x\n",
134 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
135 return 0;
136 }
138 pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
139 if ( !pos )
140 {
141 dprintk(XENLOG_ERR VTDPREFIX, "ats capability not found %x:%x.%x\n",
142 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
143 return 0;
144 }
145 else
146 dprintk(XENLOG_ERR VTDPREFIX, "ats capability found %x:%x.%x\n",
147 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
149 /* BUGBUG: add back seg when multi-seg platform support is enabled */
150 value = pci_conf_read16(bus, PCI_SLOT(devfn),
151 PCI_FUNC(devfn), pos + ATS_REG_CTL);
152 if ( value & ATS_ENABLE )
153 {
154 list_for_each_entry ( pdev, &ats_devices, list )
155 {
156 if ( pdev->bus == bus && pdev->devfn == devfn )
157 {
158 pos = 0;
159 break;
160 }
161 }
162 }
163 if ( pos )
164 pdev = xmalloc(struct pci_ats_dev);
165 if ( !pdev )
166 return -ENOMEM;
168 if ( !(value & ATS_ENABLE) )
169 {
170 value |= ATS_ENABLE;
171 pci_conf_write16(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
172 pos + ATS_REG_CTL, value);
173 }
175 if ( pos )
176 {
177 pdev->bus = bus;
178 pdev->devfn = devfn;
179 value = pci_conf_read16(bus, PCI_SLOT(devfn),
180 PCI_FUNC(devfn), pos + ATS_REG_CAP);
181 pdev->ats_queue_depth = value & ATS_QUEUE_DEPTH_MASK;
182 list_add(&pdev->list, &ats_devices);
183 }
185 if ( iommu_verbose )
186 dprintk(XENLOG_INFO VTDPREFIX, "%x:%x.%x: ATS %s enabled\n",
187 bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos ? "is" : "was");
189 return pos;
190 }
192 int disable_ats_device(int seg, int bus, int devfn)
193 {
194 struct list_head *pdev_list, *tmp;
195 struct pci_ats_dev *pdev;
196 u32 value;
197 int pos;
199 pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
200 if ( !pos )
201 return 0;
203 /* BUGBUG: add back seg when multi-seg platform support is enabled */
204 value = pci_conf_read16(bus, PCI_SLOT(devfn),
205 PCI_FUNC(devfn), pos + ATS_REG_CTL);
206 value &= ~ATS_ENABLE;
207 pci_conf_write16(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
208 pos + ATS_REG_CTL, value);
210 list_for_each_safe( pdev_list, tmp, &ats_devices )
211 {
212 pdev = list_entry(pdev_list, struct pci_ats_dev, list);
213 if ( pdev->bus == bus && pdev->devfn == devfn )
214 {
215 list_del(&pdev->list);
216 xfree(pdev);
217 break;
218 }
219 }
221 if ( iommu_verbose )
222 dprintk(XENLOG_INFO VTDPREFIX, "%x:%x.%x: ATS is disabled\n",
223 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
225 return 0;
226 }
229 static int device_in_domain(struct iommu *iommu, struct pci_ats_dev *pdev, u16 did)
230 {
231 struct root_entry *root_entry = NULL;
232 struct context_entry *ctxt_entry = NULL;
233 int tt, found = 0;
235 root_entry = (struct root_entry *) map_vtd_domain_page(iommu->root_maddr);
236 if ( !root_entry || !root_present(root_entry[pdev->bus]) )
237 goto out;
239 ctxt_entry = (struct context_entry *)
240 map_vtd_domain_page(root_entry[pdev->bus].val);
242 if ( ctxt_entry == NULL )
243 goto out;
245 if ( context_domain_id(ctxt_entry[pdev->devfn]) != did )
246 goto out;
248 tt = context_translation_type(ctxt_entry[pdev->devfn]);
249 if ( tt != CONTEXT_TT_DEV_IOTLB )
250 goto out;
252 found = 1;
253 out:
254 if ( root_entry )
255 unmap_vtd_domain_page(root_entry);
257 if ( ctxt_entry )
258 unmap_vtd_domain_page(ctxt_entry);
260 if ( found )
261 return 1;
263 return 0;
264 }
266 int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
267 u64 addr, unsigned int size_order, u64 type)
268 {
269 struct pci_ats_dev *pdev;
270 int sbit, ret = 0;
271 u16 sid;
273 if ( !ecap_dev_iotlb(iommu->ecap) )
274 return ret;
276 list_for_each_entry( pdev, &ats_devices, list )
277 {
278 sid = (pdev->bus << 8) | pdev->devfn;
280 switch ( type ) {
281 case DMA_TLB_DSI_FLUSH:
282 if ( !device_in_domain(iommu, pdev, did) )
283 break;
284 /* fall through if DSI condition met */
285 case DMA_TLB_GLOBAL_FLUSH:
286 /* invalidate all translations: sbit=1,bit_63=0,bit[62:12]=1 */
287 sbit = 1;
288 addr = (~0 << PAGE_SHIFT_4K) & 0x7FFFFFFFFFFFFFFF;
289 ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
290 sid, sbit, addr);
291 break;
292 case DMA_TLB_PSI_FLUSH:
293 if ( !device_in_domain(iommu, pdev, did) )
294 break;
296 addr &= ~0 << (PAGE_SHIFT + size_order);
298 /* if size <= 4K, set sbit = 0, else set sbit = 1 */
299 sbit = size_order ? 1 : 0;
301 /* clear lower bits */
302 addr &= (~0 << (PAGE_SHIFT + size_order));
304 /* if sbit == 1, zero out size_order bit and set lower bits to 1 */
305 if ( sbit )
306 addr &= (~0 & ~(1 << (PAGE_SHIFT + size_order)));
308 ret |= qinval_device_iotlb(iommu, pdev->ats_queue_depth,
309 sid, sbit, addr);
310 break;
311 default:
312 dprintk(XENLOG_WARNING VTDPREFIX, "invalid vt-d flush type\n");
313 break;
314 }
315 }
316 return ret;
317 }