
xen/drivers/passthrough/iommu.c @ 20870:07f95839e431

Enable IOMMU by default.

It can be disabled with the 'iommu=0' boot parameter.

Note that iommu_inclusive_mapping is now also enabled by default, to
deal with systems whose broken BIOS tables specify bad RMRRs. The old
behaviour can be restored with 'iommu_inclusive_mapping=0'.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author:   Keir Fraser <keir.fraser@citrix.com>
date:     Tue Jan 19 15:44:54 2010 +0000
parents:  bf43d35585fb
children: acd7d3f06d9a
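
As a usage note on the new defaults described above: the 'iommu' option accepts a
comma-separated list of sub-options (see the comment above parse_iommu_param() in
the source below), and iommu_inclusive_mapping is a separate boot parameter. The
GRUB (legacy) entry below is a hypothetical sketch of how a host could revert to
the old behaviour; the menu entry and kernel/initrd paths are illustrative only
and not part of this changeset:

    title Xen (IOMMU disabled, old RMRR handling)
        kernel /boot/xen.gz iommu=0 iommu_inclusive_mapping=0
        module /boot/vmlinuz-xen ro root=/dev/sda1
        module /boot/initrd-xen.img

A host that keeps the IOMMU enabled can still combine sub-options in a single
parameter, e.g. 'iommu=force,no-intremap'.
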
/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/sched.h>
#include <xen/iommu.h>
#include <asm/hvm/iommu.h>
#include <xen/paging.h>
#include <xen/guest_access.h>

static void parse_iommu_param(char *s);
static int iommu_populate_page_table(struct domain *d);
/*
 * The 'iommu' parameter controls the IOMMU.  An optional comma-separated
 * value may contain:
 *
 *   off|no|false|disable        Disable IOMMU (it is enabled by default)
 *   pv                          Enable IOMMU for PV domains
 *   no-pv                       Disable IOMMU for PV domains (default)
 *   force|required              Don't boot unless IOMMU is enabled
 *   passthrough                 Enable VT-d DMA passthrough (no DMA
 *                               translation for Dom0)
 *   no-snoop                    Disable VT-d Snoop Control
 *   no-qinval                   Disable VT-d Queued Invalidation
 *   no-intremap                 Disable VT-d Interrupt Remapping
 *   amd-iommu-debug             Enable verbose AMD IOMMU debug output
 *   amd-iommu-perdev-intremap   Use per-device interrupt remapping tables (AMD)
 */
custom_param("iommu", parse_iommu_param);
int iommu_enabled = 1;
int iommu_pv_enabled;
int force_iommu;
int iommu_passthrough;
int iommu_snoop = 1;
int iommu_qinval = 1;
int iommu_intremap = 1;
int amd_iommu_debug;
int amd_iommu_perdev_intremap;
static void __init parse_iommu_param(char *s)
{
    char *ss;

    do {
        ss = strchr(s, ',');
        if ( ss )
            *ss = '\0';

        if ( !strcmp(s, "off") || !strcmp(s, "no") || !strcmp(s, "false") ||
             !strcmp(s, "0") || !strcmp(s, "disable") )
            iommu_enabled = 0;
        else if ( !strcmp(s, "pv") )
            iommu_pv_enabled = 1;
        else if ( !strcmp(s, "no-pv") )
            iommu_pv_enabled = 0;
        else if ( !strcmp(s, "force") || !strcmp(s, "required") )
            force_iommu = 1;
        else if ( !strcmp(s, "passthrough") )
            iommu_passthrough = 1;
        else if ( !strcmp(s, "no-snoop") )
            iommu_snoop = 0;
        else if ( !strcmp(s, "no-qinval") )
            iommu_qinval = 0;
        else if ( !strcmp(s, "no-intremap") )
            iommu_intremap = 0;
        else if ( !strcmp(s, "amd-iommu-debug") )
            amd_iommu_debug = 1;
        else if ( !strcmp(s, "amd-iommu-perdev-intremap") )
            amd_iommu_perdev_intremap = 1;

        s = ss + 1;
    } while ( ss );
}
int iommu_domain_init(struct domain *domain)
{
    struct hvm_iommu *hd = domain_hvm_iommu(domain);

    spin_lock_init(&hd->mapping_lock);
    INIT_LIST_HEAD(&hd->g2m_ioport_list);
    INIT_LIST_HEAD(&hd->mapped_rmrrs);

    if ( !iommu_enabled )
        return 0;

    hd->platform_ops = iommu_get_ops();
    return hd->platform_ops->init(domain);
}
int iommu_add_device(struct pci_dev *pdev)
{
    struct hvm_iommu *hd;

    if ( !pdev->domain )
        return -EINVAL;

    ASSERT(spin_is_locked(&pcidevs_lock));

    hd = domain_hvm_iommu(pdev->domain);
    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->add_device(pdev);
}

int iommu_remove_device(struct pci_dev *pdev)
{
    struct hvm_iommu *hd;

    if ( !pdev->domain )
        return -EINVAL;

    hd = domain_hvm_iommu(pdev->domain);
    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->remove_device(pdev);
}
int assign_device(struct domain *d, u8 bus, u8 devfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    int rc = 0;

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    spin_lock(&pcidevs_lock);
    if ( (rc = hd->platform_ops->assign_device(d, bus, devfn)) )
        goto done;

    if ( has_arch_pdevs(d) && !need_iommu(d) )
    {
        d->need_iommu = 1;
        rc = iommu_populate_page_table(d);
        goto done;
    }

done:
    spin_unlock(&pcidevs_lock);
    return rc;
}
/*
 * Called the first time a device is assigned to a domain: map each page the
 * domain already owns (all pages for HVM guests, only writable pages for PV
 * guests) at its guest frame number in the IOMMU page tables.
 */
static int iommu_populate_page_table(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct page_info *page;
    int rc;

    spin_lock(&d->page_alloc_lock);

    page_list_for_each ( page, &d->page_list )
    {
        if ( is_hvm_domain(d) ||
             (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
        {
            BUG_ON(SHARED_M2P(mfn_to_gmfn(d, page_to_mfn(page))));
            rc = hd->platform_ops->map_page(
                d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page));
            if ( rc )
            {
                spin_unlock(&d->page_alloc_lock);
                hd->platform_ops->teardown(d);
                return rc;
            }
        }
    }

    spin_unlock(&d->page_alloc_lock);
    return 0;
}
void iommu_domain_destroy(struct domain *d)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct list_head *ioport_list, *rmrr_list, *tmp;
    struct g2m_ioport *ioport;
    struct mapped_rmrr *mrmrr;

    if ( !iommu_enabled || !hd->platform_ops )
        return;

    if ( need_iommu(d) )
    {
        d->need_iommu = 0;
        hd->platform_ops->teardown(d);
    }

    list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
    {
        ioport = list_entry(ioport_list, struct g2m_ioport, list);
        list_del(&ioport->list);
        xfree(ioport);
    }

    list_for_each_safe ( rmrr_list, tmp, &hd->mapped_rmrrs )
    {
        mrmrr = list_entry(rmrr_list, struct mapped_rmrr, list);
        list_del(&mrmrr->list);
        xfree(mrmrr);
    }
}
int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->map_page(d, gfn, mfn);
}

int iommu_unmap_page(struct domain *d, unsigned long gfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);

    if ( !iommu_enabled || !hd->platform_ops )
        return 0;

    return hd->platform_ops->unmap_page(d, gfn);
}
/* caller should hold the pcidevs_lock */
int deassign_device(struct domain *d, u8 bus, u8 devfn)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct pci_dev *pdev = NULL;

    if ( !iommu_enabled || !hd->platform_ops )
        return -EINVAL;

    ASSERT(spin_is_locked(&pcidevs_lock));
    pdev = pci_get_pdev(bus, devfn);
    if ( !pdev )
        return -ENODEV;

    if ( pdev->domain != d )
    {
        gdprintk(XENLOG_ERR VTDPREFIX,
                 "IOMMU: deassign a device not owned\n");
        return -EINVAL;
    }

    hd->platform_ops->reassign_device(d, dom0, bus, devfn);

    if ( !has_arch_pdevs(d) && need_iommu(d) )
    {
        d->need_iommu = 0;
        hd->platform_ops->teardown(d);
    }

    return 0;
}
int iommu_setup(void)
{
    int rc = -ENODEV;

    if ( iommu_enabled )
    {
        rc = iommu_hardware_setup();
        iommu_enabled = (rc == 0);
    }

    if ( force_iommu && !iommu_enabled )
        panic("IOMMU setup failed, crash Xen for security purpose!\n");

    if ( !iommu_enabled )
        iommu_pv_enabled = 0;

    printk("I/O virtualisation %sabled\n", iommu_enabled ? "en" : "dis");
    if ( iommu_enabled )
        printk("I/O virtualisation for PV guests %sabled\n",
               iommu_pv_enabled ? "en" : "dis");
    return rc;
}
int iommu_get_device_group(struct domain *d, u8 bus, u8 devfn,
                           XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs)
{
    struct hvm_iommu *hd = domain_hvm_iommu(d);
    struct pci_dev *pdev;
    int group_id, sdev_id;
    u32 bdf;
    int i = 0;
    const struct iommu_ops *ops = hd->platform_ops;

    if ( !iommu_enabled || !ops || !ops->get_device_group_id )
        return 0;

    group_id = ops->get_device_group_id(bus, devfn);

    spin_lock(&pcidevs_lock);
    for_each_pdev( d, pdev )
    {
        if ( (pdev->bus == bus) && (pdev->devfn == devfn) )
            continue;

        sdev_id = ops->get_device_group_id(pdev->bus, pdev->devfn);
        if ( (sdev_id == group_id) && (i < max_sdevs) )
        {
            bdf = 0;
            bdf |= (pdev->bus & 0xff) << 16;
            bdf |= (pdev->devfn & 0xff) << 8;
            if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
            {
                spin_unlock(&pcidevs_lock);
                return -1;
            }
            i++;
        }
    }
    spin_unlock(&pcidevs_lock);

    return i;
}
void iommu_update_ire_from_apic(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    const struct iommu_ops *ops = iommu_get_ops();
    ops->update_ire_from_apic(apic, reg, value);
}

void iommu_update_ire_from_msi(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    const struct iommu_ops *ops = iommu_get_ops();
    ops->update_ire_from_msi(msi_desc, msg);
}

void iommu_read_msi_from_ire(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    const struct iommu_ops *ops = iommu_get_ops();
    ops->read_msi_from_ire(msi_desc, msg);
}

unsigned int iommu_read_apic_from_ire(unsigned int apic, unsigned int reg)
{
    const struct iommu_ops *ops = iommu_get_ops();
    return ops->read_apic_from_ire(apic, reg);
}

void iommu_resume()
{
    const struct iommu_ops *ops = iommu_get_ops();
    if ( iommu_enabled )
        ops->resume();
}

void iommu_suspend()
{
    const struct iommu_ops *ops = iommu_get_ops();
    if ( iommu_enabled )
        ops->suspend();
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */