debuggers.hg
view xen/arch/x86/hvm/vmx/vtd/intel-iommu.c @ 16380:cb6675149af8

changeset    x86, vt-d: Clean up utils code.
             Signed-off-by: Weidong Han <weidong.han@intel.com>
author       Keir Fraser <keir@xensource.com>
date         Thu Nov 08 13:14:03 2007 +0000 (2007-11-08)
parents      b544448502a4
children     03d6d0f96e12
1 /*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
18 * Copyright (C) Shaohua Li <shaohua.li@intel.com>
19 * Copyright (C) Allen Kay <allen.m.kay@intel.com> - adapted to xen
20 */
22 #include <xen/init.h>
23 #include <xen/irq.h>
24 #include <xen/spinlock.h>
25 #include <xen/sched.h>
26 #include <xen/xmalloc.h>
27 #include <xen/domain_page.h>
28 #include <asm/delay.h>
29 #include <asm/string.h>
30 #include <asm/mm.h>
31 #include <asm/iommu.h>
32 #include <asm/hvm/vmx/intel-iommu.h>
33 #include "dmar.h"
34 #include "pci-direct.h"
35 #include "pci_regs.h"
36 #include "msi.h"
38 #define VTDPREFIX
39 extern void print_iommu_regs(struct acpi_drhd_unit *drhd);
40 extern void print_vtd_entries(struct domain *d, int bus, int devfn,
41 unsigned long gmfn);
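/*
 * Cache flushing helpers: when an IOMMU is not snoop-coherent (the
 * coherency bit in its extended capability register is clear), CPU
 * writes to in-memory VT-d structures (root/context entries and
 * page-table entries) must be flushed out of the CPU caches with
 * clflush before the IOMMU can be relied upon to see them.
 */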
43 unsigned int x86_clflush_size;
44 void clflush_cache_range(void *adr, int size)
45 {
46 int i;
47 for ( i = 0; i < size; i += x86_clflush_size )
48 clflush(adr + i);
49 }
51 static void __iommu_flush_cache(struct iommu *iommu, void *addr, int size)
52 {
53 if ( !ecap_coherent(iommu->ecap) )
54 clflush_cache_range(addr, size);
55 }
57 #define iommu_flush_cache_entry(iommu, addr) \
58 __iommu_flush_cache(iommu, addr, 8)
59 #define iommu_flush_cache_page(iommu, addr) \
60 __iommu_flush_cache(iommu, addr, PAGE_SIZE_4K)
62 int nr_iommus;
63 /* context entry handling */
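/*
 * The root table is indexed by PCI bus number; each present root entry
 * points to a 256-entry context table indexed by devfn. The lookup
 * below allocates and flushes a new context table on first use.
 */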
64 static struct context_entry * device_to_context_entry(struct iommu *iommu,
65 u8 bus, u8 devfn)
66 {
67 struct root_entry *root;
68 struct context_entry *context;
69 unsigned long phy_addr;
70 unsigned long flags;
72 spin_lock_irqsave(&iommu->lock, flags);
73 root = &iommu->root_entry[bus];
74 if ( !root_present(*root) )
75 {
76 phy_addr = (unsigned long) alloc_xenheap_page();
77 if ( !phy_addr )
78 {
79 spin_unlock_irqrestore(&iommu->lock, flags);
80 return NULL;
81 }
82 memset((void *) phy_addr, 0, PAGE_SIZE);
83 iommu_flush_cache_page(iommu, (void *)phy_addr);
84 phy_addr = virt_to_maddr((void *)phy_addr);
85 set_root_value(*root, phy_addr);
86 set_root_present(*root);
87 iommu_flush_cache_entry(iommu, root);
88 }
89 phy_addr = (unsigned long) get_context_addr(*root);
90 context = (struct context_entry *)maddr_to_virt(phy_addr);
91 spin_unlock_irqrestore(&iommu->lock, flags);
92 return &context[devfn];
93 }
95 static int device_context_mapped(struct iommu *iommu, u8 bus, u8 devfn)
96 {
97 struct root_entry *root;
98 struct context_entry *context;
99 unsigned long phy_addr;
100 int ret;
101 unsigned long flags;
103 spin_lock_irqsave(&iommu->lock, flags);
104 root = &iommu->root_entry[bus];
105 if ( !root_present(*root) )
106 {
107 ret = 0;
108 goto out;
109 }
110 phy_addr = get_context_addr(*root);
111 context = (struct context_entry *)maddr_to_virt(phy_addr);
112 ret = context_present(context[devfn]);
113 out:
114 spin_unlock_irqrestore(&iommu->lock, flags);
115 return ret;
116 }
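/*
 * Walk (and build, where missing) the VT-d page table for this domain,
 * allocating intermediate page-table pages as needed, and return the
 * page holding the last-level PTEs that cover 'addr'.
 */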
118 static struct page_info *addr_to_dma_page(struct domain *domain, u64 addr)
119 {
120 struct hvm_iommu *hd = domain_hvm_iommu(domain);
121 struct acpi_drhd_unit *drhd;
122 struct iommu *iommu;
123 int addr_width = agaw_to_width(hd->agaw);
124 struct dma_pte *parent, *pte = NULL, *pgd;
125 int level = agaw_to_level(hd->agaw);
126 int offset;
127 unsigned long flags;
128 struct page_info *pg = NULL;
129 u64 *vaddr = NULL;
131 drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
132 iommu = drhd->iommu;
134 addr &= (((u64)1) << addr_width) - 1;
135 spin_lock_irqsave(&hd->mapping_lock, flags);
136 if ( !hd->pgd )
137 {
138 pgd = (struct dma_pte *)alloc_xenheap_page();
139 if ( !pgd )
140 {
141 spin_unlock_irqrestore(&hd->mapping_lock, flags);
142 return NULL;
143 }
144 memset(pgd, 0, PAGE_SIZE);
145 hd->pgd = pgd;
146 }
148 parent = hd->pgd;
149 while ( level > 1 )
150 {
151 offset = address_level_offset(addr, level);
152 pte = &parent[offset];
154 if ( dma_pte_addr(*pte) == 0 )
155 {
156 pg = alloc_domheap_page(NULL);
157 vaddr = map_domain_page(page_to_mfn(pg));
158 if ( !vaddr )
159 {
160 spin_unlock_irqrestore(&hd->mapping_lock, flags);
161 return NULL;
162 }
163 memset(vaddr, 0, PAGE_SIZE);
164 iommu_flush_cache_page(iommu, vaddr);
166 dma_set_pte_addr(*pte, page_to_maddr(pg));
168 /*
169 * Higher-level table entries always set r/w; the last-level
170 * page table entries control the actual read/write access.
171 */
172 dma_set_pte_readable(*pte);
173 dma_set_pte_writable(*pte);
174 iommu_flush_cache_entry(iommu, pte);
175 }
176 else
177 {
178 pg = maddr_to_page(pte->val);
179 vaddr = map_domain_page(page_to_mfn(pg));
180 if ( !vaddr )
181 {
182 spin_unlock_irqrestore(&hd->mapping_lock, flags);
183 return NULL;
184 }
185 }
187 if ( parent != hd->pgd )
188 unmap_domain_page(parent);
190 if ( level == 2 && vaddr )
191 {
192 unmap_domain_page(vaddr);
193 break;
194 }
196 parent = (struct dma_pte *)vaddr;
197 vaddr = NULL;
198 level--;
199 }
201 spin_unlock_irqrestore(&hd->mapping_lock, flags);
202 return pg;
203 }
205 /* return the page backing this address at the specified level */
206 static struct page_info *dma_addr_level_page(struct domain *domain,
207 u64 addr, int level)
208 {
209 struct hvm_iommu *hd = domain_hvm_iommu(domain);
210 struct dma_pte *parent, *pte = NULL;
211 int total = agaw_to_level(hd->agaw);
212 int offset;
213 struct page_info *pg = NULL;
215 parent = hd->pgd;
216 while ( level <= total )
217 {
218 offset = address_level_offset(addr, total);
219 pte = &parent[offset];
220 if ( dma_pte_addr(*pte) == 0 )
221 {
222 if ( parent != hd->pgd )
223 unmap_domain_page(parent);
224 break;
225 }
227 pg = maddr_to_page(pte->val);
228 if ( parent != hd->pgd )
229 unmap_domain_page(parent);
231 if ( level == total )
232 return pg;
234 parent = map_domain_page(page_to_mfn(pg));
235 total--;
236 }
238 return NULL;
239 }
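/*
 * Write-buffer flushing: IOMMUs that advertise the RWBF capability
 * buffer writes to memory-resident structures, so software issues a
 * DMA_GCMD_WBF command and polls DMA_GSTS_WBFS until the flush
 * completes.
 */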
241 static void iommu_flush_write_buffer(struct iommu *iommu)
242 {
243 u32 val;
244 unsigned long flag;
245 unsigned long start_time;
247 if ( !cap_rwbf(iommu->cap) )
248 return;
249 val = iommu->gcmd | DMA_GCMD_WBF;
251 spin_lock_irqsave(&iommu->register_lock, flag);
252 dmar_writel(iommu->reg, DMAR_GCMD_REG, val);
254 /* Make sure the hardware has completed it */
255 start_time = jiffies;
256 for ( ; ; )
257 {
258 val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
259 if ( !(val & DMA_GSTS_WBFS) )
260 break;
261 if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
262 panic("DMAR hardware is malfunctioning,"
263 " please disable IOMMU\n");
264 cpu_relax();
265 }
266 spin_unlock_irqrestore(&iommu->register_lock, flag);
267 }
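/*
 * Context-cache invalidation is issued through DMAR_CCMD_REG at
 * global, domain-selective or device-selective granularity; the CPU
 * then polls the ICC bit until the hardware has completed the
 * invalidation.
 */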
269 /* return value determines whether we need a write buffer flush */
270 static int __iommu_flush_context(
271 struct iommu *iommu,
272 u16 did, u16 source_id, u8 function_mask, u64 type,
273 int non_present_entry_flush)
274 {
275 u64 val = 0;
276 unsigned long flag;
277 unsigned long start_time;
279 /*
280 * In the non-present entry flush case: if the hardware does not
281 * cache non-present entries we do nothing; if it does, we flush
282 * the entries of domain 0 (domain id 0 is used to tag any cached
283 * non-present entries).
284 */
285 if ( non_present_entry_flush )
286 {
287 if ( !cap_caching_mode(iommu->cap) )
288 return 1;
289 else
290 did = 0;
291 }
293 /* use register invalidation */
294 switch ( type )
295 {
296 case DMA_CCMD_GLOBAL_INVL:
297 val = DMA_CCMD_GLOBAL_INVL;
298 break;
299 case DMA_CCMD_DOMAIN_INVL:
300 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
301 break;
302 case DMA_CCMD_DEVICE_INVL:
303 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
304 |DMA_CCMD_SID(source_id)|DMA_CCMD_FM(function_mask);
305 break;
306 default:
307 BUG();
308 }
309 val |= DMA_CCMD_ICC;
311 spin_lock_irqsave(&iommu->register_lock, flag);
312 dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
314 /* Make sure the hardware has completed it */
315 start_time = jiffies;
316 for ( ; ; )
317 {
318 val = dmar_readq(iommu->reg, DMAR_CCMD_REG);
319 if ( !(val & DMA_CCMD_ICC) )
320 break;
321 if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
322 panic("DMAR hardware is malfunctioning, please disable IOMMU\n");
323 cpu_relax();
324 }
325 spin_unlock_irqrestore(&iommu->register_lock, flag);
326 /* flushing a context entry implicitly flushes the write buffer */
327 return 0;
328 }
330 static int inline iommu_flush_context_global(
331 struct iommu *iommu, int non_present_entry_flush)
332 {
333 return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
334 non_present_entry_flush);
335 }
337 static int inline iommu_flush_context_domain(
338 struct iommu *iommu, u16 did, int non_present_entry_flush)
339 {
340 return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL,
341 non_present_entry_flush);
342 }
344 static int inline iommu_flush_context_device(
345 struct iommu *iommu, u16 did, u16 source_id,
346 u8 function_mask, int non_present_entry_flush)
347 {
348 return __iommu_flush_context(iommu, did, source_id, function_mask,
349 DMA_CCMD_DEVICE_INVL,
350 non_present_entry_flush);
351 }
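/*
 * IOTLB invalidation is driven through the IOTLB registers located at
 * ecap_iotlb_offset(); global, domain-selective and page-selective
 * granularities are supported, with read/write drain requested when
 * the hardware advertises it.
 */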
353 /* return value determines whether we need a write buffer flush */
354 static int __iommu_flush_iotlb(struct iommu *iommu, u16 did,
355 u64 addr, unsigned int size_order, u64 type,
356 int non_present_entry_flush)
357 {
358 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
359 u64 val = 0, val_iva = 0;
360 unsigned long flag;
361 unsigned long start_time;
363 /*
364 * In the non-present entry flush case: if the hardware does not
365 * cache non-present entries we do nothing; if it does, we flush
366 * the entries of domain 0 (domain id 0 is used to tag any cached
367 * non-present entries).
368 */
369 if ( non_present_entry_flush )
370 {
371 if ( !cap_caching_mode(iommu->cap) )
372 return 1;
373 else
374 did = 0;
375 }
377 /* use register invalidation */
378 switch ( type )
379 {
380 case DMA_TLB_GLOBAL_FLUSH:
381 /* a global flush doesn't need to set IVA_REG */
382 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
383 break;
384 case DMA_TLB_DSI_FLUSH:
385 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
386 break;
387 case DMA_TLB_PSI_FLUSH:
388 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
389 /* Note: always flush non-leaf currently */
390 val_iva = size_order | addr;
391 break;
392 default:
393 BUG();
394 }
395 /* Note: set drain read/write */
396 if ( cap_read_drain(iommu->cap) )
397 val |= DMA_TLB_READ_DRAIN;
398 if ( cap_write_drain(iommu->cap) )
399 val |= DMA_TLB_WRITE_DRAIN;
401 spin_lock_irqsave(&iommu->register_lock, flag);
402 /* Note: Only uses first TLB reg currently */
403 if ( val_iva )
404 dmar_writeq(iommu->reg, tlb_offset, val_iva);
405 dmar_writeq(iommu->reg, tlb_offset + 8, val);
407 /* Make sure the hardware has completed it */
408 start_time = jiffies;
409 for ( ; ; )
410 {
411 val = dmar_readq(iommu->reg, tlb_offset + 8);
412 if ( !(val & DMA_TLB_IVT) )
413 break;
414 if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
415 panic("DMAR hardware is malfunctioning, please disable IOMMU\n");
416 cpu_relax();
417 }
418 spin_unlock_irqrestore(&iommu->register_lock, flag);
420 /* check IOTLB invalidation granularity */
421 if ( DMA_TLB_IAIG(val) == 0 )
422 printk(KERN_ERR VTDPREFIX "IOMMU: flush IOTLB failed\n");
423 if ( DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type) )
424 printk(KERN_ERR VTDPREFIX "IOMMU: tlb flush request %x, actual %x\n",
425 (u32)DMA_TLB_IIRG(type), (u32)DMA_TLB_IAIG(val));
426 /* flushing the IOTLB implicitly flushes the write buffer */
427 return 0;
428 }
430 static int inline iommu_flush_iotlb_global(struct iommu *iommu,
431 int non_present_entry_flush)
432 {
433 return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
434 non_present_entry_flush);
435 }
437 static int inline iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
438 int non_present_entry_flush)
439 {
440 return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
441 non_present_entry_flush);
442 }
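/*
 * get_alignment() returns the order of the smallest naturally aligned
 * power-of-two page range that covers [base, base + size). For
 * example, base pfn 8 with 4 pages gives 2 (an aligned 4-page range),
 * while base pfn 9 with 2 pages also gives 2, because the unaligned
 * 2-page range can only be covered by an aligned 4-page mask.
 */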
444 static int inline get_alignment(u64 base, unsigned int size)
445 {
446 int t = 0;
447 u64 end;
449 end = base + size - 1;
450 while ( base != end )
451 {
452 t++;
453 base >>= 1;
454 end >>= 1;
455 }
456 return t;
457 }
459 static int inline iommu_flush_iotlb_psi(
460 struct iommu *iommu, u16 did,
461 u64 addr, unsigned int pages, int non_present_entry_flush)
462 {
463 unsigned int align;
465 BUG_ON(addr & (~PAGE_MASK_4K));
466 BUG_ON(pages == 0);
468 /* Fall back to a domain-selective flush if there is no PSI support */
469 if ( !cap_pgsel_inv(iommu->cap) )
470 return iommu_flush_iotlb_dsi(iommu, did,
471 non_present_entry_flush);
473 /*
474 * PSI requires the number of pages to be a power of two and the
475 * base address to be naturally aligned to that size.
476 */
477 align = get_alignment(addr >> PAGE_SHIFT_4K, pages);
478 /* Fall back to a domain-selective flush if the size is too big */
479 if ( align > cap_max_amask_val(iommu->cap) )
480 return iommu_flush_iotlb_dsi(iommu, did,
481 non_present_entry_flush);
483 addr >>= PAGE_SHIFT_4K + align;
484 addr <<= PAGE_SHIFT_4K + align;
486 return __iommu_flush_iotlb(iommu, did, addr, align,
487 DMA_TLB_PSI_FLUSH, non_present_entry_flush);
488 }
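/*
 * Example (as used by iommu_map_page() and dma_pte_clear_one() below):
 * to invalidate the single 4K page at guest frame 'gfn' for domain 'd'
 * on a caching-mode IOMMU:
 *     iommu_flush_iotlb_psi(iommu, d->domain_id,
 *                           gfn << PAGE_SHIFT_4K, 1, 0);
 * This falls back to a domain-selective flush if PSI is unsupported or
 * the requested range is too large.
 */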
490 void iommu_flush_all(void)
491 {
492 struct acpi_drhd_unit *drhd;
493 struct iommu *iommu;
494 int i = 0;
496 wbinvd();
497 for_each_drhd_unit ( drhd )
498 {
499 iommu = drhd->iommu;
500 iommu_flush_context_global(iommu, 0);
501 iommu_flush_iotlb_global(iommu, 0);
502 i++;
503 }
504 }
506 /* clear one page's page table entry */
507 static void dma_pte_clear_one(struct domain *domain, u64 addr)
508 {
509 struct acpi_drhd_unit *drhd;
510 struct iommu *iommu;
511 struct dma_pte *pte = NULL;
512 struct page_info *pg = NULL;
514 drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
516 /* get last level pte */
517 pg = dma_addr_level_page(domain, addr, 1);
518 if ( !pg )
519 return;
520 pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
521 pte += address_level_offset(addr, 1);
522 if ( pte )
523 {
524 dma_clear_pte(*pte);
525 iommu_flush_cache_entry(drhd->iommu, pte);
527 for_each_drhd_unit ( drhd )
528 {
529 iommu = drhd->iommu;
530 if ( cap_caching_mode(iommu->cap) )
531 iommu_flush_iotlb_psi(iommu, domain->domain_id, addr, 1, 0);
532 else if (cap_rwbf(iommu->cap))
533 iommu_flush_write_buffer(iommu);
534 }
535 }
536 }
538 /* clear last-level ptes; a tlb flush should follow */
539 static void dma_pte_clear_range(struct domain *domain, u64 start, u64 end)
540 {
541 struct hvm_iommu *hd = domain_hvm_iommu(domain);
542 int addr_width = agaw_to_width(hd->agaw);
544 start &= (((u64)1) << addr_width) - 1;
545 end &= (((u64)1) << addr_width) - 1;
546 /* in case it's a partial page */
547 start = PAGE_ALIGN_4K(start);
548 end &= PAGE_MASK_4K;
550 /* we don't need a lock here; nobody else touches this iova range */
551 while ( start < end )
552 {
553 dma_pte_clear_one(domain, start);
554 start += PAGE_SIZE_4K;
555 }
556 }
558 /* free page-table pages; last-level ptes should already be cleared */
559 void dma_pte_free_pagetable(struct domain *domain, u64 start, u64 end)
560 {
561 struct acpi_drhd_unit *drhd;
562 struct hvm_iommu *hd = domain_hvm_iommu(domain);
563 struct iommu *iommu;
564 int addr_width = agaw_to_width(hd->agaw);
565 struct dma_pte *pte;
566 int total = agaw_to_level(hd->agaw);
567 int level;
568 u32 tmp;
569 struct page_info *pg = NULL;
571 drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
572 iommu = drhd->iommu;
574 start &= (((u64)1) << addr_width) - 1;
575 end &= (((u64)1) << addr_width) - 1;
577 /* we don't need a lock here; nobody else touches this iova range */
578 level = 2;
579 while ( level <= total )
580 {
581 tmp = align_to_level(start, level);
582 if ( (tmp >= end) || ((tmp + level_size(level)) > end) )
583 return;
585 while ( tmp < end )
586 {
587 pg = dma_addr_level_page(domain, tmp, level);
588 if ( !pg )
589 return;
590 pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
591 pte += address_level_offset(tmp, level);
592 dma_clear_pte(*pte);
593 iommu_flush_cache_entry(iommu, pte);
594 unmap_domain_page(pte);
595 free_domheap_page(pg);
597 tmp += level_size(level);
598 }
599 level++;
600 }
602 /* free pgd */
603 if ( start == 0 && end == ((((u64)1) << addr_width) - 1) )
604 {
605 free_xenheap_page((void *)hd->pgd);
606 hd->pgd = NULL;
607 }
608 }
610 /* iommu handling */
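/*
 * iommu_set_root_entry() publishes the root table to the hardware: it
 * allocates the root-entry page on first use, writes its machine
 * address to DMAR_RTADDR_REG, issues the Set Root Table Pointer
 * command (DMA_GCMD_SRTP) and waits for the RTPS status bit.
 */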
611 static int iommu_set_root_entry(struct iommu *iommu)
612 {
613 void *addr;
614 u32 cmd, sts;
615 struct root_entry *root;
616 unsigned long flags;
618 if ( iommu == NULL )
619 {
620 gdprintk(XENLOG_ERR VTDPREFIX,
621 "iommu_set_root_entry: iommu == NULL\n");
622 return -EINVAL;
623 }
625 if ( unlikely(!iommu->root_entry) )
626 {
627 root = (struct root_entry *)alloc_xenheap_page();
628 if ( root == NULL )
629 return -ENOMEM;
631 memset((u8*)root, 0, PAGE_SIZE);
632 iommu_flush_cache_page(iommu, root);
634 if ( cmpxchg((unsigned long *)&iommu->root_entry,
635 0, (unsigned long)root) != 0 )
636 free_xenheap_page((void *)root);
637 }
639 addr = iommu->root_entry;
641 spin_lock_irqsave(&iommu->register_lock, flags);
643 dmar_writeq(iommu->reg, DMAR_RTADDR_REG, virt_to_maddr(addr));
644 cmd = iommu->gcmd | DMA_GCMD_SRTP;
645 dmar_writel(iommu->reg, DMAR_GCMD_REG, cmd);
647 /* Make sure the hardware has completed it */
648 for ( ; ; )
649 {
650 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
651 if ( sts & DMA_GSTS_RTPS )
652 break;
653 cpu_relax();
654 }
656 spin_unlock_irqrestore(&iommu->register_lock, flags);
658 return 0;
659 }
661 static int iommu_enable_translation(struct iommu *iommu)
662 {
663 u32 sts;
664 unsigned long flags;
666 dprintk(XENLOG_INFO VTDPREFIX,
667 "iommu_enable_translation: enabling vt-d translation\n");
668 spin_lock_irqsave(&iommu->register_lock, flags);
669 iommu->gcmd |= DMA_GCMD_TE;
670 dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
671 /* Make sure the hardware has completed it */
672 for ( ; ; )
673 {
674 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
675 if ( sts & DMA_GSTS_TES )
676 break;
677 cpu_relax();
678 }
680 /* Disable PMRs when VT-d engine takes effect per spec definition */
681 disable_pmr(iommu);
682 spin_unlock_irqrestore(&iommu->register_lock, flags);
683 return 0;
684 }
686 int iommu_disable_translation(struct iommu *iommu)
687 {
688 u32 sts;
689 unsigned long flags;
691 spin_lock_irqsave(&iommu->register_lock, flags);
692 iommu->gcmd &= ~ DMA_GCMD_TE;
693 dmar_writel(iommu->reg, DMAR_GCMD_REG, iommu->gcmd);
695 /* Make sure the hardware has completed it */
696 for ( ; ; )
697 {
698 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
699 if ( !(sts & DMA_GSTS_TES) )
700 break;
701 cpu_relax();
702 }
703 spin_unlock_irqrestore(&iommu->register_lock, flags);
704 return 0;
705 }
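/*
 * Fault handling: each IOMMU raises its fault events as an MSI. The
 * handler below reads the fault status register, then walks the
 * primary fault recording registers, logging and clearing each
 * recorded fault, and finally clears any primary fault overflow.
 */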
707 static struct iommu *vector_to_iommu[NR_VECTORS];
708 static int iommu_page_fault_do_one(struct iommu *iommu, int type,
709 u8 fault_reason, u16 source_id, u32 addr)
710 {
711 dprintk(XENLOG_WARNING VTDPREFIX,
712 "iommu_page_fault:%s: DEVICE %x:%x.%x addr %x REASON %x\n",
713 (type ? "DMA Read" : "DMA Write"),
714 (source_id >> 8), PCI_SLOT(source_id & 0xFF),
715 PCI_FUNC(source_id & 0xFF), addr, fault_reason);
717 print_vtd_entries(current->domain, (source_id >> 8),(source_id & 0xff),
718 (addr >> PAGE_SHIFT));
719 return 0;
720 }
722 #define PRIMARY_FAULT_REG_LEN (16)
723 static void iommu_page_fault(int vector, void *dev_id,
724 struct cpu_user_regs *regs)
725 {
726 struct iommu *iommu = dev_id;
727 int reg, fault_index;
728 u32 fault_status;
729 unsigned long flags;
731 dprintk(XENLOG_WARNING VTDPREFIX,
732 "iommu_page_fault: iommu->reg = %p\n", iommu->reg);
734 spin_lock_irqsave(&iommu->register_lock, flags);
735 fault_status = dmar_readl(iommu->reg, DMAR_FSTS_REG);
736 spin_unlock_irqrestore(&iommu->register_lock, flags);
738 /* FIXME: ignore advanced fault log */
739 if ( !(fault_status & DMA_FSTS_PPF) )
740 return;
741 fault_index = dma_fsts_fault_record_index(fault_status);
742 reg = cap_fault_reg_offset(iommu->cap);
743 for ( ; ; )
744 {
745 u8 fault_reason;
746 u16 source_id;
747 u32 guest_addr, data;
748 int type;
750 /* highest 32 bits */
751 spin_lock_irqsave(&iommu->register_lock, flags);
752 data = dmar_readl(iommu->reg, reg +
753 fault_index * PRIMARY_FAULT_REG_LEN + 12);
754 if ( !(data & DMA_FRCD_F) )
755 {
756 spin_unlock_irqrestore(&iommu->register_lock, flags);
757 break;
758 }
760 fault_reason = dma_frcd_fault_reason(data);
761 type = dma_frcd_type(data);
763 data = dmar_readl(iommu->reg, reg +
764 fault_index * PRIMARY_FAULT_REG_LEN + 8);
765 source_id = dma_frcd_source_id(data);
767 guest_addr = dmar_readq(iommu->reg, reg +
768 fault_index * PRIMARY_FAULT_REG_LEN);
769 guest_addr = dma_frcd_page_addr(guest_addr);
770 /* clear the fault */
771 dmar_writel(iommu->reg, reg +
772 fault_index * PRIMARY_FAULT_REG_LEN + 12, DMA_FRCD_F);
773 spin_unlock_irqrestore(&iommu->register_lock, flags);
775 iommu_page_fault_do_one(iommu, type, fault_reason,
776 source_id, guest_addr);
778 fault_index++;
779 if ( fault_index > cap_num_fault_regs(iommu->cap) )
780 fault_index = 0;
781 }
783 /* clear primary fault overflow */
784 if ( fault_status & DMA_FSTS_PFO )
785 {
786 spin_lock_irqsave(&iommu->register_lock, flags);
787 dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_PFO);
788 spin_unlock_irqrestore(&iommu->register_lock, flags);
789 }
790 }
792 static void dma_msi_unmask(unsigned int vector)
793 {
794 struct iommu *iommu = vector_to_iommu[vector];
795 unsigned long flags;
797 /* unmask it */
798 spin_lock_irqsave(&iommu->register_lock, flags);
799 dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
800 spin_unlock_irqrestore(&iommu->register_lock, flags);
801 }
803 static void dma_msi_mask(unsigned int vector)
804 {
805 unsigned long flags;
806 struct iommu *iommu = vector_to_iommu[vector];
808 /* mask it */
809 spin_lock_irqsave(&iommu->register_lock, flags);
810 dmar_writel(iommu->reg, DMAR_FECTL_REG, DMA_FECTL_IM);
811 spin_unlock_irqrestore(&iommu->register_lock, flags);
812 }
814 static unsigned int dma_msi_startup(unsigned int vector)
815 {
816 dma_msi_unmask(vector);
817 return 0;
818 }
820 static void dma_msi_end(unsigned int vector)
821 {
822 dma_msi_unmask(vector);
823 ack_APIC_irq();
824 }
826 static void dma_msi_data_init(struct iommu *iommu, int vector)
827 {
828 u32 msi_data = 0;
829 unsigned long flags;
831 /* Fixed, edge, assert mode. Follow MSI setting */
832 msi_data |= vector & 0xff;
833 msi_data |= 1 << 14;
835 spin_lock_irqsave(&iommu->register_lock, flags);
836 dmar_writel(iommu->reg, DMAR_FEDATA_REG, msi_data);
837 spin_unlock_irqrestore(&iommu->register_lock, flags);
838 }
840 static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu)
841 {
842 u64 msi_address;
843 unsigned long flags;
845 /* Physical, dedicated cpu. Follow MSI setting */
846 msi_address = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
847 msi_address |= MSI_PHYSICAL_MODE << 2;
848 msi_address |= MSI_REDIRECTION_HINT_MODE << 3;
849 msi_address |= phy_cpu << MSI_TARGET_CPU_SHIFT;
851 spin_lock_irqsave(&iommu->register_lock, flags);
852 dmar_writel(iommu->reg, DMAR_FEADDR_REG, (u32)msi_address);
853 dmar_writel(iommu->reg, DMAR_FEUADDR_REG, (u32)(msi_address >> 32));
854 spin_unlock_irqrestore(&iommu->register_lock, flags);
855 }
857 static void dma_msi_set_affinity(unsigned int vector, cpumask_t dest)
858 {
859 struct iommu *iommu = vector_to_iommu[vector];
860 dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(dest)));
861 }
863 static struct hw_interrupt_type dma_msi_type = {
864 .typename = "DMA_MSI",
865 .startup = dma_msi_startup,
866 .shutdown = dma_msi_mask,
867 .enable = dma_msi_unmask,
868 .disable = dma_msi_mask,
869 .ack = dma_msi_mask,
870 .end = dma_msi_end,
871 .set_affinity = dma_msi_set_affinity,
872 };
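/*
 * iommu_set_interrupt() allocates a vector for the fault-event MSI,
 * wires it to the dma_msi_type irq handler and requests the irq with
 * iommu_page_fault() as its handler; the vector is later programmed
 * into the fault-event data/address registers by the caller.
 */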
874 int iommu_set_interrupt(struct iommu *iommu)
875 {
876 int vector, ret;
878 vector = assign_irq_vector(AUTO_ASSIGN);
879 vector_to_iommu[vector] = iommu;
881 /* The VT-d fault event is an MSI; make irq == vector */
882 irq_vector[vector] = vector;
883 vector_irq[vector] = vector;
885 if ( !vector )
886 {
887 gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no vectors\n");
888 return -EINVAL;
889 }
891 irq_desc[vector].handler = &dma_msi_type;
892 ret = request_irq(vector, iommu_page_fault, 0, "dmar", iommu);
893 if ( ret )
894 gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: can't request irq\n");
895 return vector;
896 }
898 struct iommu *iommu_alloc(void *hw_data)
899 {
900 struct acpi_drhd_unit *drhd = (struct acpi_drhd_unit *) hw_data;
901 struct iommu *iommu;
903 if ( nr_iommus > MAX_IOMMUS )
904 {
905 gdprintk(XENLOG_ERR VTDPREFIX,
906 "IOMMU: nr_iommus %d > MAX_IOMMUS\n", nr_iommus);
907 return NULL;
908 }
910 iommu = xmalloc(struct iommu);
911 if ( !iommu )
912 return NULL;
913 memset(iommu, 0, sizeof(struct iommu));
915 set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, drhd->address);
916 iommu->reg = (void *) fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
917 dprintk(XENLOG_INFO VTDPREFIX,
918 "iommu_alloc: iommu->reg = %p drhd->address = %lx\n",
919 iommu->reg, drhd->address);
920 nr_iommus++;
922 if ( !iommu->reg )
923 {
924 printk(KERN_ERR VTDPREFIX "IOMMU: can't map the region\n");
925 goto error;
926 }
928 iommu->cap = dmar_readq(iommu->reg, DMAR_CAP_REG);
929 iommu->ecap = dmar_readq(iommu->reg, DMAR_ECAP_REG);
931 spin_lock_init(&iommu->lock);
932 spin_lock_init(&iommu->register_lock);
934 drhd->iommu = iommu;
935 return iommu;
936 error:
937 xfree(iommu);
938 return NULL;
939 }
941 static void free_iommu(struct iommu *iommu)
942 {
943 if ( !iommu )
944 return;
945 if ( iommu->root_entry )
946 free_xenheap_page((void *)iommu->root_entry);
947 if ( iommu->reg )
948 iounmap(iommu->reg);
949 free_irq(iommu->vector);
950 xfree(iommu);
951 }
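/*
 * The macro below rounds a guest address width up to an "adjusted"
 * width in which the bits above the 4K page offset form a whole
 * number of 9-bit page-table levels. For example, a 36-bit guest
 * width becomes 39 ((36 - 12) % 9 == 6, so 3 bits are added), while
 * 39- and 48-bit widths are already level-aligned and are unchanged.
 */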
953 #define guestwidth_to_adjustwidth(gaw) ({ \
954 int agaw, r = (gaw - 12) % 9; \
955 agaw = (r == 0) ? gaw : (gaw + 9 - r); \
956 if ( agaw > 64 ) \
957 agaw = 64; \
958 agaw; })
960 int iommu_domain_init(struct domain *domain)
961 {
962 struct hvm_iommu *hd = domain_hvm_iommu(domain);
963 struct iommu *iommu = NULL;
964 int guest_width = DEFAULT_DOMAIN_ADDRESS_WIDTH;
965 int adjust_width, agaw;
966 unsigned long sagaw;
967 struct acpi_drhd_unit *drhd;
969 spin_lock_init(&hd->mapping_lock);
970 spin_lock_init(&hd->iommu_list_lock);
971 INIT_LIST_HEAD(&hd->pdev_list);
972 INIT_LIST_HEAD(&hd->g2m_ioport_list);
974 if ( !vtd_enabled || list_empty(&acpi_drhd_units) )
975 return 0;
977 for_each_drhd_unit ( drhd )
978 iommu = drhd->iommu ? : iommu_alloc(drhd);
980 /* calculate AGAW */
981 if (guest_width > cap_mgaw(iommu->cap))
982 guest_width = cap_mgaw(iommu->cap);
983 adjust_width = guestwidth_to_adjustwidth(guest_width);
984 agaw = width_to_agaw(adjust_width);
985 /* FIXME: hardware doesn't support it, choose a bigger one? */
986 sagaw = cap_sagaw(iommu->cap);
987 if ( !test_bit(agaw, &sagaw) )
988 {
989 gdprintk(XENLOG_ERR VTDPREFIX,
990 "IOMMU: hardware doesn't support the agaw\n");
991 agaw = find_next_bit(&sagaw, 5, agaw);
992 if ( agaw >= 5 )
993 return -ENODEV;
994 }
995 hd->agaw = agaw;
996 return 0;
997 }
999 static int domain_context_mapping_one(
1000 struct domain *domain,
1001 struct iommu *iommu,
1002 u8 bus, u8 devfn)
1004 struct hvm_iommu *hd = domain_hvm_iommu(domain);
1005 struct context_entry *context;
1006 unsigned long flags;
1007 int ret = 0;
1009 context = device_to_context_entry(iommu, bus, devfn);
1010 if ( !context )
1012 gdprintk(XENLOG_INFO VTDPREFIX,
1013 "domain_context_mapping_one:context == NULL:"
1014 "bdf = %x:%x:%x\n",
1015 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1016 return -ENOMEM;
1018 spin_lock_irqsave(&iommu->lock, flags);
1019 if ( context_present(*context) )
1021 spin_unlock_irqrestore(&iommu->lock, flags);
1022 gdprintk(XENLOG_INFO VTDPREFIX,
1023 "domain_context_mapping_one:context present:bdf=%x:%x:%x\n",
1024 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1025 return 0;
1028 #ifdef VTD_DEBUG
1029 dprintk(XENLOG_INFO VTDPREFIX,
1030 "context_mapping_one_1-%x:%x:%x-*context = %lx %lx\n",
1031 bus, PCI_SLOT(devfn), PCI_FUNC(devfn), context->hi, context->lo);
1032 #endif
1034 /*
1035 * domain_id 0 is not valid on Intel's IOMMU: domain ids must be
1036 * 1-based, as required by Intel's IOMMU hardware.
1037 */
1038 context_set_domain_id(*context, domain->domain_id);
1039 context_set_address_width(*context, hd->agaw);
1041 if ( ecap_pass_thru(iommu->ecap) )
1043 context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
1045 else
1047 context_set_address_root(*context, virt_to_maddr(hd->pgd));
1048 context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
1051 context_set_fault_enable(*context);
1052 context_set_present(*context);
1053 iommu_flush_cache_entry(iommu, context);
1055 #ifdef VTD_DEBUG
1056 dprintk(XENLOG_INFO VTDPREFIX,
1057 "context_mapping_one_2-%x:%x:%x-*context=%lx %lx hd->pgd = %p\n",
1058 bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
1059 context->hi, context->lo, hd->pgd);
1060 #endif
1062 if ( iommu_flush_context_device(iommu, domain->domain_id,
1063 (((u16)bus) << 8) | devfn,
1064 DMA_CCMD_MASK_NOBIT, 1) )
1065 iommu_flush_write_buffer(iommu);
1066 else
1067 iommu_flush_iotlb_dsi(iommu, domain->domain_id, 0);
1068 spin_unlock_irqrestore(&iommu->lock, flags);
1069 return ret;
1072 static int __pci_find_next_cap(u8 bus, unsigned int devfn, u8 pos, int cap)
1074 u8 id;
1075 int ttl = 48;
1077 while ( ttl-- )
1079 pos = read_pci_config_byte(bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pos);
1080 if ( pos < 0x40 )
1081 break;
1083 pos &= ~3;
1084 id = read_pci_config_byte(bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
1085 pos + PCI_CAP_LIST_ID);
1087 if ( id == 0xff )
1088 break;
1089 if ( id == cap )
1090 return pos;
1092 pos += PCI_CAP_LIST_NEXT;
1094 return 0;
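/*
 * pdev_type() classifies a device by reading its config space: PCI-PCI
 * bridges are identified by class code, devices exposing a PCI Express
 * capability are treated as PCIe endpoints, and everything else is
 * treated as legacy PCI (which domain_context_mapping() maps via its
 * parent bridge recorded in bus2bridge[]).
 */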
1097 #define PCI_BASE_CLASS_BRIDGE 0x06
1098 #define PCI_CLASS_BRIDGE_PCI 0x0604
1100 #define DEV_TYPE_PCIe_ENDPOINT 1
1101 #define DEV_TYPE_PCI_BRIDGE 2
1102 #define DEV_TYPE_PCI 3
1104 int pdev_type(struct pci_dev *dev)
1106 u16 class_device;
1107 u16 status;
1109 class_device = read_pci_config_16(dev->bus, PCI_SLOT(dev->devfn),
1110 PCI_FUNC(dev->devfn), PCI_CLASS_DEVICE);
1111 if ( class_device == PCI_CLASS_BRIDGE_PCI )
1112 return DEV_TYPE_PCI_BRIDGE;
1114 status = read_pci_config_16(dev->bus, PCI_SLOT(dev->devfn),
1115 PCI_FUNC(dev->devfn), PCI_STATUS);
1117 if ( !(status & PCI_STATUS_CAP_LIST) )
1118 return DEV_TYPE_PCI;
1120 if ( __pci_find_next_cap(dev->bus, dev->devfn,
1121 PCI_CAPABILITY_LIST, PCI_CAP_ID_EXP) )
1122 return DEV_TYPE_PCIe_ENDPOINT;
1124 return DEV_TYPE_PCI;
1127 #define MAX_BUSES 256
1128 struct pci_dev bus2bridge[MAX_BUSES];
1130 static int domain_context_mapping(
1131 struct domain *domain,
1132 struct iommu *iommu,
1133 struct pci_dev *pdev)
1135 int ret = 0;
1136 int dev, func, sec_bus, sub_bus;
1137 u32 type;
1139 type = pdev_type(pdev);
1140 switch ( type )
1142 case DEV_TYPE_PCI_BRIDGE:
1143 sec_bus = read_pci_config_byte(
1144 pdev->bus, PCI_SLOT(pdev->devfn),
1145 PCI_FUNC(pdev->devfn), PCI_SECONDARY_BUS);
1147 if ( bus2bridge[sec_bus].bus == 0 )
1149 bus2bridge[sec_bus].bus = pdev->bus;
1150 bus2bridge[sec_bus].devfn = pdev->devfn;
1153 sub_bus = read_pci_config_byte(
1154 pdev->bus, PCI_SLOT(pdev->devfn),
1155 PCI_FUNC(pdev->devfn), PCI_SUBORDINATE_BUS);
1157 if ( sec_bus != sub_bus )
1159 dprintk(XENLOG_INFO VTDPREFIX,
1160 "context_mapping: nested PCI bridge not supported\n");
1161 dprintk(XENLOG_INFO VTDPREFIX,
1162 " bdf = %x:%x:%x sec_bus = %x sub_bus = %x\n",
1163 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1164 sec_bus, sub_bus);
1166 break;
1167 case DEV_TYPE_PCIe_ENDPOINT:
1168 gdprintk(XENLOG_INFO VTDPREFIX,
1169 "domain_context_mapping:PCIe : bdf = %x:%x:%x\n",
1170 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1171 ret = domain_context_mapping_one(domain, iommu,
1172 (u8)(pdev->bus), (u8)(pdev->devfn));
1173 break;
1174 case DEV_TYPE_PCI:
1175 gdprintk(XENLOG_INFO VTDPREFIX,
1176 "domain_context_mapping:PCI: bdf = %x:%x:%x\n",
1177 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1179 if ( pdev->bus == 0 )
1181 ret = domain_context_mapping_one(
1182 domain, iommu, (u8)(pdev->bus), (u8)(pdev->devfn));
1184 else
1186 if ( bus2bridge[pdev->bus].bus != 0 )
1187 gdprintk(XENLOG_ERR VTDPREFIX,
1188 "domain_context_mapping:bus2bridge"
1189 "[pdev->bus].bus != 0\n");
1191 ret = domain_context_mapping_one(
1192 domain, iommu,
1193 (u8)(bus2bridge[pdev->bus].bus),
1194 (u8)(bus2bridge[pdev->bus].devfn));
1196 /* now map everything behind the PCI bridge */
1197 for ( dev = 0; dev < 32; dev++ )
1199 for ( func = 0; func < 8; func++ )
1201 ret = domain_context_mapping_one(
1202 domain, iommu,
1203 pdev->bus, (u8)PCI_DEVFN(dev, func));
1204 if ( ret )
1205 return ret;
1209 break;
1210 default:
1211 gdprintk(XENLOG_ERR VTDPREFIX,
1212 "domain_context_mapping:unknown type : bdf = %x:%x:%x\n",
1213 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1214 ret = -EINVAL;
1215 break;
1218 return ret;
1221 static int domain_context_unmap_one(
1222 struct domain *domain,
1223 struct iommu *iommu,
1224 u8 bus, u8 devfn)
1226 struct context_entry *context;
1227 unsigned long flags;
1229 context = device_to_context_entry(iommu, bus, devfn);
1230 if ( !context )
1232 gdprintk(XENLOG_INFO VTDPREFIX,
1233 "domain_context_unmap_one-%x:%x:%x- context == NULL:return\n",
1234 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1235 return -ENOMEM;
1238 spin_lock_irqsave(&iommu->lock, flags);
1239 if ( !context_present(*context) )
1241 spin_unlock_irqrestore(&iommu->lock, flags);
1242 gdprintk(XENLOG_INFO VTDPREFIX,
1243 "domain_context_unmap_one-%x:%x:%x- "
1244 "context NOT present:return\n",
1245 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1246 return 0;
1249 gdprintk(XENLOG_INFO VTDPREFIX,
1250 "domain_context_unmap_one_1:bdf = %x:%x:%x\n",
1251 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1253 context_clear_present(*context);
1254 context_clear_entry(*context);
1255 iommu_flush_cache_entry(iommu, context);
1256 iommu_flush_context_global(iommu, 0);
1257 iommu_flush_iotlb_global(iommu, 0);
1258 spin_unlock_irqrestore(&iommu->lock, flags);
1260 gdprintk(XENLOG_INFO VTDPREFIX,
1261 "domain_context_unmap_one_2:bdf = %x:%x:%x\n",
1262 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1264 return 0;
1267 static int domain_context_unmap(
1268 struct domain *domain,
1269 struct iommu *iommu,
1270 struct pci_dev *pdev)
1272 int ret = 0;
1273 int dev, func, sec_bus, sub_bus;
1274 u32 type;
1276 type = pdev_type(pdev);
1277 switch ( type )
1279 case DEV_TYPE_PCI_BRIDGE:
1280 sec_bus = read_pci_config_byte(
1281 pdev->bus, PCI_SLOT(pdev->devfn),
1282 PCI_FUNC(pdev->devfn), PCI_SECONDARY_BUS);
1283 sub_bus = read_pci_config_byte(
1284 pdev->bus, PCI_SLOT(pdev->devfn),
1285 PCI_FUNC(pdev->devfn), PCI_SUBORDINATE_BUS);
1287 gdprintk(XENLOG_INFO VTDPREFIX,
1288 "domain_context_unmap:BRIDGE:%x:%x:%x "
1289 "sec_bus=%x sub_bus=%x\n",
1290 pdev->bus, PCI_SLOT(pdev->devfn),
1291 PCI_FUNC(pdev->devfn), sec_bus, sub_bus);
1292 break;
1293 case DEV_TYPE_PCIe_ENDPOINT:
1294 gdprintk(XENLOG_INFO VTDPREFIX,
1295 "domain_context_unmap:PCIe : bdf = %x:%x:%x\n",
1296 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1297 ret = domain_context_unmap_one(domain, iommu,
1298 (u8)(pdev->bus), (u8)(pdev->devfn));
1299 break;
1300 case DEV_TYPE_PCI:
1301 gdprintk(XENLOG_INFO VTDPREFIX,
1302 "domain_context_unmap:PCI: bdf = %x:%x:%x\n",
1303 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1304 if ( pdev->bus == 0 )
1306 ret = domain_context_unmap_one(
1307 domain, iommu,
1308 (u8)(pdev->bus), (u8)(pdev->devfn));
1310 else
1312 if ( bus2bridge[pdev->bus].bus != 0 )
1313 gdprintk(XENLOG_INFO VTDPREFIX,
1314 "domain_context_mapping:"
1315 "bus2bridge[pdev->bus].bus != 0\n");
1317 ret = domain_context_unmap_one(domain, iommu,
1318 (u8)(bus2bridge[pdev->bus].bus),
1319 (u8)(bus2bridge[pdev->bus].devfn));
1321 /* now unmap everything behind the PCI bridge */
1322 for ( dev = 0; dev < 32; dev++ )
1324 for ( func = 0; func < 8; func++ )
1326 ret = domain_context_unmap_one(
1327 domain, iommu,
1328 pdev->bus, (u8)PCI_DEVFN(dev, func));
1329 if ( ret )
1330 return ret;
1334 break;
1335 default:
1336 gdprintk(XENLOG_ERR VTDPREFIX,
1337 "domain_context_unmap:unknown type: bdf = %x:%x:%x\n",
1338 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1339 ret = -EINVAL;
1340 break;
1343 return ret;
1346 void reassign_device_ownership(
1347 struct domain *source,
1348 struct domain *target,
1349 u8 bus, u8 devfn)
1351 struct hvm_iommu *source_hd = domain_hvm_iommu(source);
1352 struct hvm_iommu *target_hd = domain_hvm_iommu(target);
1353 struct pci_dev *pdev;
1354 struct acpi_drhd_unit *drhd;
1355 struct iommu *iommu;
1356 int status;
1357 unsigned long flags;
1359 gdprintk(XENLOG_ERR VTDPREFIX,
1360 "reassign_device-%x:%x:%x- source = %d target = %d\n",
1361 bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
1362 source->domain_id, target->domain_id);
1364 for_each_pdev( source, pdev )
1366 if ( (pdev->bus != bus) || (pdev->devfn != devfn) )
1367 continue;
1369 pdev->bus = bus;
1370 pdev->devfn = devfn;
1371 drhd = acpi_find_matched_drhd_unit(pdev);
1372 iommu = drhd->iommu;
1373 domain_context_unmap(source, iommu, pdev);
1375 /* Move pci device from the source domain to target domain. */
1376 spin_lock_irqsave(&source_hd->iommu_list_lock, flags);
1377 spin_lock_irqsave(&target_hd->iommu_list_lock, flags);
1378 list_move(&pdev->list, &target_hd->pdev_list);
1379 spin_unlock_irqrestore(&target_hd->iommu_list_lock, flags);
1380 spin_unlock_irqrestore(&source_hd->iommu_list_lock, flags);
1382 status = domain_context_mapping(target, iommu, pdev);
1383 if ( status != 0 )
1384 gdprintk(XENLOG_ERR VTDPREFIX, "domain_context_mapping failed\n");
1386 break;
1390 void return_devices_to_dom0(struct domain *d)
1392 struct hvm_iommu *hd = domain_hvm_iommu(d);
1393 struct pci_dev *pdev;
1395 while ( !list_empty(&hd->pdev_list) )
1397 pdev = list_entry(hd->pdev_list.next, typeof(*pdev), list);
1398 dprintk(XENLOG_INFO VTDPREFIX,
1399 "return_devices_to_dom0: bdf = %x:%x:%x\n",
1400 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1401 reassign_device_ownership(d, dom0, pdev->bus, pdev->devfn);
1404 #ifdef VTD_DEBUG
1405 for_each_pdev ( dom0, pdev )
1406 dprintk(XENLOG_INFO VTDPREFIX,
1407 "return_devices_to_dom0:%x: bdf = %x:%x:%x\n",
1408 dom0->domain_id, pdev->bus,
1409 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1410 #endif
1413 void iommu_domain_teardown(struct domain *d)
1415 if ( list_empty(&acpi_drhd_units) )
1416 return;
1418 #if CONFIG_PAGING_LEVELS == 3
1420 struct hvm_iommu *hd = domain_hvm_iommu(d);
1421 int level = agaw_to_level(hd->agaw);
1422 struct dma_pte *pgd = NULL;
1424 switch ( level )
1426 case VTD_PAGE_TABLE_LEVEL_3:
1427 if ( hd->pgd )
1428 free_xenheap_page((void *)hd->pgd);
1429 break;
1430 case VTD_PAGE_TABLE_LEVEL_4:
1431 if ( hd->pgd )
1433 pgd = hd->pgd;
1434 if ( pgd[0].val != 0 )
1435 free_xenheap_page((void*)maddr_to_virt(
1436 dma_pte_addr(pgd[0])));
1438 free_xenheap_page((void *)hd->pgd);
1440 break;
1441 default:
1442 gdprintk(XENLOG_ERR VTDPREFIX,
1443 "Unsupported p2m table sharing level!\n");
1444 break;
1447 #endif
1448 return_devices_to_dom0(d);
1451 static int domain_context_mapped(struct domain *domain, struct pci_dev *pdev)
1453 struct acpi_drhd_unit *drhd;
1454 struct iommu *iommu;
1455 int ret;
1457 for_each_drhd_unit ( drhd )
1459 iommu = drhd->iommu;
1460 ret = device_context_mapped(iommu, pdev->bus, pdev->devfn);
1461 if ( ret )
1462 return ret;
1465 return 0;
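/*
 * iommu_map_page() installs a 4K translation for the given gfn in the
 * domain's VT-d page table (and is a no-op for dom0 when the IOMMU
 * supports pass-through). After updating the PTE it either issues a
 * page-selective IOTLB flush on caching-mode hardware or a
 * write-buffer flush on RWBF hardware, mirroring dma_pte_clear_one()
 * on the unmap side.
 */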
1468 int iommu_map_page(struct domain *d, paddr_t gfn, paddr_t mfn)
1470 struct acpi_drhd_unit *drhd;
1471 struct iommu *iommu;
1472 struct dma_pte *pte = NULL;
1473 struct page_info *pg = NULL;
1475 drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
1476 iommu = drhd->iommu;
1478 /* do nothing for dom0 if the iommu supports pass-through */
1479 if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
1480 return 0;
1482 pg = addr_to_dma_page(d, gfn << PAGE_SHIFT_4K);
1483 if ( !pg )
1484 return -ENOMEM;
1485 pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
1486 pte += mfn & LEVEL_MASK;
1487 dma_set_pte_addr(*pte, mfn << PAGE_SHIFT_4K);
1488 dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
1489 iommu_flush_cache_entry(iommu, pte);
1490 unmap_domain_page(pte);
1492 for_each_drhd_unit ( drhd )
1494 iommu = drhd->iommu;
1495 if ( cap_caching_mode(iommu->cap) )
1496 iommu_flush_iotlb_psi(iommu, d->domain_id,
1497 gfn << PAGE_SHIFT_4K, 1, 0);
1498 else if ( cap_rwbf(iommu->cap) )
1499 iommu_flush_write_buffer(iommu);
1502 return 0;
1505 int iommu_unmap_page(struct domain *d, dma_addr_t gfn)
1507 struct acpi_drhd_unit *drhd;
1508 struct iommu *iommu;
1510 drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
1511 iommu = drhd->iommu;
1513 /* do nothing for dom0 if the iommu supports pass-through */
1514 if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
1515 return 0;
1517 dma_pte_clear_one(d, gfn << PAGE_SHIFT_4K);
1519 return 0;
1522 int iommu_page_mapping(struct domain *domain, dma_addr_t iova,
1523 void *hpa, size_t size, int prot)
1525 struct acpi_drhd_unit *drhd;
1526 struct iommu *iommu;
1527 unsigned long start_pfn, end_pfn;
1528 struct dma_pte *pte = NULL;
1529 int index;
1530 struct page_info *pg = NULL;
1532 drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
1533 iommu = drhd->iommu;
1534 if ( (prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0 )
1535 return -EINVAL;
1536 iova = (iova >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K;
1537 start_pfn = (unsigned long)(((unsigned long) hpa) >> PAGE_SHIFT_4K);
1538 end_pfn = (unsigned long)
1539 ((PAGE_ALIGN_4K(((unsigned long)hpa) + size)) >> PAGE_SHIFT_4K);
1540 index = 0;
1541 while ( start_pfn < end_pfn )
1543 pg = addr_to_dma_page(domain, iova + PAGE_SIZE_4K * index);
1544 if ( !pg )
1545 return -ENOMEM;
1546 pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
1547 pte += start_pfn & LEVEL_MASK;
1548 dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
1549 dma_set_pte_prot(*pte, prot);
1550 iommu_flush_cache_entry(iommu, pte);
1551 unmap_domain_page(pte);
1552 start_pfn++;
1553 index++;
1556 for_each_drhd_unit ( drhd )
1558 iommu = drhd->iommu;
1559 if ( cap_caching_mode(iommu->cap) )
1560 iommu_flush_iotlb_psi(iommu, domain->domain_id, iova, size, 0);
1561 else if ( cap_rwbf(iommu->cap) )
1562 iommu_flush_write_buffer(iommu);
1565 return 0;
1568 int iommu_page_unmapping(struct domain *domain, dma_addr_t addr, size_t size)
1570 dma_pte_clear_range(domain, addr, addr + size);
1572 return 0;
1575 void iommu_flush(struct domain *d, dma_addr_t gfn, u64 *p2m_entry)
1577 struct acpi_drhd_unit *drhd;
1578 struct iommu *iommu = NULL;
1579 struct dma_pte *pte = (struct dma_pte *) p2m_entry;
1581 for_each_drhd_unit ( drhd )
1583 iommu = drhd->iommu;
1584 if ( cap_caching_mode(iommu->cap) )
1585 iommu_flush_iotlb_psi(iommu, d->domain_id,
1586 gfn << PAGE_SHIFT_4K, 1, 0);
1587 else if ( cap_rwbf(iommu->cap) )
1588 iommu_flush_write_buffer(iommu);
1591 iommu_flush_cache_entry(iommu, pte);
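/*
 * RMRR handling: reserved memory regions reported by the ACPI DMAR
 * tables must remain identity-mapped for the devices that use them,
 * so the helper below maps [base_address, end_address] 1:1 read/write
 * and ensures the device's context entry is set up.
 */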
1594 static int iommu_prepare_rmrr_dev(
1595 struct domain *d,
1596 struct acpi_rmrr_unit *rmrr,
1597 struct pci_dev *pdev)
1599 struct acpi_drhd_unit *drhd;
1600 unsigned long size;
1601 int ret;
1603 /* page table init */
1604 size = rmrr->end_address - rmrr->base_address + 1;
1605 ret = iommu_page_mapping(d, rmrr->base_address,
1606 (void *)rmrr->base_address, size,
1607 DMA_PTE_READ|DMA_PTE_WRITE);
1608 if ( ret )
1609 return ret;
1611 if ( domain_context_mapped(d, pdev) == 0 )
1613 drhd = acpi_find_matched_drhd_unit(pdev);
1614 ret = domain_context_mapping(d, drhd->iommu, pdev);
1615 if ( !ret )
1616 return 0;
1619 return ret;
1622 void __init setup_dom0_devices(void)
1624 struct hvm_iommu *hd = domain_hvm_iommu(dom0);
1625 struct acpi_drhd_unit *drhd;
1626 struct pci_dev *pdev;
1627 int bus, dev, func, ret;
1628 u32 l;
1630 #ifdef DEBUG_VTD_CONTEXT_ENTRY
1631 for ( bus = 0; bus < 256; bus++ )
1633 for ( dev = 0; dev < 32; dev++ )
1635 for ( func = 0; func < 8; func++ )
1637 struct context_entry *context;
1638 struct pci_dev device;
1640 device.bus = bus;
1641 device.devfn = PCI_DEVFN(dev, func);
1642 drhd = acpi_find_matched_drhd_unit(&device);
1643 context = device_to_context_entry(drhd->iommu,
1644 bus, PCI_DEVFN(dev, func));
1645 if ( (context->lo != 0) || (context->hi != 0) )
1646 dprintk(XENLOG_INFO VTDPREFIX,
1647 "setup_dom0_devices-%x:%x:%x- context not 0\n",
1648 bus, dev, func);
1652 #endif
1654 for ( bus = 0; bus < 256; bus++ )
1656 for ( dev = 0; dev < 32; dev++ )
1658 for ( func = 0; func < 8; func++ )
1660 l = read_pci_config(bus, dev, func, PCI_VENDOR_ID);
1661 /* some broken boards return 0 or ~0 if a slot is empty: */
1662 if ( (l == 0xffffffff) || (l == 0x00000000) ||
1663 (l == 0x0000ffff) || (l == 0xffff0000) )
1664 continue;
1665 pdev = xmalloc(struct pci_dev);
1666 pdev->bus = bus;
1667 pdev->devfn = PCI_DEVFN(dev, func);
1668 list_add_tail(&pdev->list, &hd->pdev_list);
1670 drhd = acpi_find_matched_drhd_unit(pdev);
1671 ret = domain_context_mapping(dom0, drhd->iommu, pdev);
1672 if ( ret != 0 )
1673 gdprintk(XENLOG_ERR VTDPREFIX,
1674 "domain_context_mapping failed\n");
1679 for_each_pdev ( dom0, pdev )
1680 dprintk(XENLOG_INFO VTDPREFIX,
1681 "setup_dom0_devices: bdf = %x:%x:%x\n",
1682 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1685 void clear_fault_bit(struct iommu *iommu)
1687 u64 val;
1689 val = dmar_readq(
1690 iommu->reg,
1691 cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+0x8);
1692 dmar_writeq(
1693 iommu->reg,
1694 cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+8,
1695 val);
1696 dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_PFO);
1699 static int init_vtd_hw(void)
1701 struct acpi_drhd_unit *drhd;
1702 struct iommu *iommu;
1703 int ret;
1705 for_each_drhd_unit ( drhd )
1707 iommu = drhd->iommu;
1708 ret = iommu_set_root_entry(iommu);
1709 if ( ret )
1711 gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: set root entry failed\n");
1712 return -EIO;
1716 return 0;
1719 static int enable_vtd_translation(void)
1721 struct acpi_drhd_unit *drhd;
1722 struct iommu *iommu;
1723 int vector = 0;
1725 for_each_drhd_unit ( drhd )
1727 iommu = drhd->iommu;
1728 vector = iommu_set_interrupt(iommu);
1729 dma_msi_data_init(iommu, vector);
1730 dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
1731 iommu->vector = vector;
1732 clear_fault_bit(iommu);
1733 if ( iommu_enable_translation(iommu) )
1734 return -EIO;
1737 return 0;
1740 static void setup_dom0_rmrr(void)
1742 struct acpi_rmrr_unit *rmrr;
1743 struct pci_dev *pdev;
1744 int ret;
1746 for_each_rmrr_device ( rmrr, pdev )
1747 ret = iommu_prepare_rmrr_dev(dom0, rmrr, pdev);
1749 if ( ret )
1750 gdprintk(XENLOG_ERR VTDPREFIX,
1751 "IOMMU: mapping reserved region failed\n");
1753 end_for_each_rmrr_device ( rmrr, pdev )
1756 int iommu_setup(void)
1758 struct hvm_iommu *hd = domain_hvm_iommu(dom0);
1759 struct acpi_drhd_unit *drhd;
1760 struct iommu *iommu;
1761 unsigned long i;
1763 if ( !vtd_enabled )
1764 return 0;
1766 INIT_LIST_HEAD(&hd->pdev_list);
1768 /* start from scratch */
1769 iommu_flush_all();
1771 /* setup clflush size */
1772 x86_clflush_size = ((cpuid_ebx(1) >> 8) & 0xff) * 8;
1774 /*
1775 * allocate IO page directory page for the domain.
1776 */
1777 drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
1778 iommu = drhd->iommu;
1780 /* setup 1:1 page table for dom0 */
1781 for ( i = 0; i < max_page; i++ )
1782 iommu_map_page(dom0, i, i);
1784 if ( init_vtd_hw() )
1785 goto error;
1786 setup_dom0_devices();
1787 setup_dom0_rmrr();
1788 if ( enable_vtd_translation() )
1789 goto error;
1791 return 0;
1793 error:
1794 printk("iommu_setup() failed\n");
1795 for_each_drhd_unit ( drhd )
1797 iommu = drhd->iommu;
1798 free_iommu(iommu);
1800 return -EIO;
1803 /*
1804 * If the device isn't owned by dom0, it has either already been
1805 * assigned to another domain or it doesn't exist.
1806 */
1807 int device_assigned(u8 bus, u8 devfn)
1809 struct pci_dev *pdev;
1811 for_each_pdev( dom0, pdev )
1812 if ( (pdev->bus == bus ) && (pdev->devfn == devfn) )
1813 return 0;
1815 return 1;
1818 int assign_device(struct domain *d, u8 bus, u8 devfn)
1820 struct hvm_iommu *hd = domain_hvm_iommu(d);
1821 struct acpi_rmrr_unit *rmrr;
1822 struct pci_dev *pdev;
1823 int ret = 0;
1825 if ( list_empty(&acpi_drhd_units) )
1826 return ret;
1828 dprintk(XENLOG_INFO VTDPREFIX,
1829 "assign_device: bus = %x dev = %x func = %x\n",
1830 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
1832 reassign_device_ownership(dom0, d, bus, devfn);
1834 /* set up rmrr identity mappings just once per domain */
1835 if ( list_empty(&hd->pdev_list) )
1836 for_each_rmrr_device(rmrr, pdev)
1837 ret = iommu_prepare_rmrr_dev(d, rmrr, pdev);
1838 if ( ret )
1839 gdprintk(XENLOG_ERR VTDPREFIX,
1840 "IOMMU: mapping reserved region failed\n");
1841 end_for_each_rmrr_device(rmrr, pdev)
1842 return ret;
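/*
 * iommu_set_pgd() makes the domain's VT-d tables share the p2m table:
 * on PAE builds the (up to 8) p2m L3 entries are copied into a VT-d
 * top level (with one extra page synthesized for 4-level hardware),
 * while on 64-bit builds the p2m top level, or the level below it for
 * 3-level VT-d tables, is reused directly.
 */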
1845 void iommu_set_pgd(struct domain *d)
1847 struct hvm_iommu *hd = domain_hvm_iommu(d);
1848 unsigned long p2m_table;
1850 if ( hd->pgd )
1852 gdprintk(XENLOG_INFO VTDPREFIX,
1853 "iommu_set_pgd_1: hd->pgd = %p\n", hd->pgd);
1854 hd->pgd = NULL;
1856 p2m_table = mfn_x(pagetable_get_mfn(d->arch.phys_table));
1858 #if CONFIG_PAGING_LEVELS == 3
1859 if ( !hd->pgd )
1861 int level = agaw_to_level(hd->agaw);
1862 struct dma_pte *pmd = NULL;
1863 struct dma_pte *pgd = NULL;
1864 struct dma_pte *pte = NULL;
1865 l3_pgentry_t *l3e;
1866 unsigned long flags;
1867 int i;
1869 spin_lock_irqsave(&hd->mapping_lock, flags);
1870 if ( !hd->pgd )
1872 pgd = (struct dma_pte *)alloc_xenheap_page();
1873 memset((u8*)pgd, 0, PAGE_SIZE);
1874 if ( !hd->pgd )
1875 hd->pgd = pgd;
1876 else /* somebody else was faster */
1877 free_xenheap_page((void *) pgd);
1880 l3e = map_domain_page(p2m_table);
1881 switch ( level )
1883 case VTD_PAGE_TABLE_LEVEL_3: /* Weybridge */
1884 /* We only support 8 entries for the PAE L3 p2m table */
1885 for ( i = 0; i < 8 ; i++ )
1887 /* Don't create new L2 entry, use ones from p2m table */
1888 pgd[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
1890 break;
1892 case VTD_PAGE_TABLE_LEVEL_4: /* Stoakley */
1893 /* We allocate one more page for the top vtd page table. */
1894 pmd = (struct dma_pte *)alloc_xenheap_page();
1895 memset((u8*)pmd, 0, PAGE_SIZE);
1896 pte = &pgd[0];
1897 dma_set_pte_addr(*pte, virt_to_maddr(pmd));
1898 dma_set_pte_readable(*pte);
1899 dma_set_pte_writable(*pte);
1901 for ( i = 0; i < 8; i++ )
1903 /* Don't create new L2 entry, use ones from p2m table */
1904 pmd[i].val = l3e[i].l3 | _PAGE_PRESENT | _PAGE_RW;
1906 break;
1907 default:
1908 gdprintk(XENLOG_ERR VTDPREFIX,
1909 "iommu_set_pgd:Unsupported p2m table sharing level!\n");
1910 break;
1912 unmap_domain_page(l3e);
1913 spin_unlock_irqrestore(&hd->mapping_lock, flags);
1915 #elif CONFIG_PAGING_LEVELS == 4
1916 if ( !hd->pgd )
1918 int level = agaw_to_level(hd->agaw);
1919 l3_pgentry_t *l3e;
1920 mfn_t pgd_mfn;
1922 switch ( level )
1924 case VTD_PAGE_TABLE_LEVEL_3:
1925 l3e = map_domain_page(p2m_table);
1926 if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
1928 gdprintk(XENLOG_ERR VTDPREFIX,
1929 "iommu_set_pgd: second level wasn't there\n");
1930 unmap_domain_page(l3e);
1931 return;
1933 pgd_mfn = _mfn(l3e_get_pfn(*l3e));
1934 unmap_domain_page(l3e);
1935 hd->pgd = maddr_to_virt(pagetable_get_paddr(
1936 pagetable_from_mfn(pgd_mfn)));
1937 break;
1939 case VTD_PAGE_TABLE_LEVEL_4:
1940 pgd_mfn = _mfn(p2m_table);
1941 hd->pgd = maddr_to_virt(pagetable_get_paddr(
1942 pagetable_from_mfn(pgd_mfn)));
1943 break;
1944 default:
1945 gdprintk(XENLOG_ERR VTDPREFIX,
1946 "iommu_set_pgd:Unsupported p2m table sharing level!\n");
1947 break;
1950 #endif
1951 gdprintk(XENLOG_INFO VTDPREFIX,
1952 "iommu_set_pgd: hd->pgd = %p\n", hd->pgd);
1956 u8 iommu_state[MAX_IOMMU_REGS * MAX_IOMMUS];
1957 int iommu_suspend(void)
1959 struct acpi_drhd_unit *drhd;
1960 struct iommu *iommu;
1961 int i = 0;
1963 iommu_flush_all();
1965 for_each_drhd_unit ( drhd )
1967 iommu = drhd->iommu;
1968 iommu_state[DMAR_RTADDR_REG * i] =
1969 (u64) dmar_readq(iommu->reg, DMAR_RTADDR_REG);
1970 iommu_state[DMAR_FECTL_REG * i] =
1971 (u32) dmar_readl(iommu->reg, DMAR_FECTL_REG);
1972 iommu_state[DMAR_FEDATA_REG * i] =
1973 (u32) dmar_readl(iommu->reg, DMAR_FEDATA_REG);
1974 iommu_state[DMAR_FEADDR_REG * i] =
1975 (u32) dmar_readl(iommu->reg, DMAR_FEADDR_REG);
1976 iommu_state[DMAR_FEUADDR_REG * i] =
1977 (u32) dmar_readl(iommu->reg, DMAR_FEUADDR_REG);
1978 iommu_state[DMAR_PLMBASE_REG * i] =
1979 (u32) dmar_readl(iommu->reg, DMAR_PLMBASE_REG);
1980 iommu_state[DMAR_PLMLIMIT_REG * i] =
1981 (u32) dmar_readl(iommu->reg, DMAR_PLMLIMIT_REG);
1982 iommu_state[DMAR_PHMBASE_REG * i] =
1983 (u64) dmar_readq(iommu->reg, DMAR_PHMBASE_REG);
1984 iommu_state[DMAR_PHMLIMIT_REG * i] =
1985 (u64) dmar_readq(iommu->reg, DMAR_PHMLIMIT_REG);
1986 i++;
1989 return 0;
1992 int iommu_resume(void)
1994 struct acpi_drhd_unit *drhd;
1995 struct iommu *iommu;
1996 int i = 0;
1998 iommu_flush_all();
2000 init_vtd_hw();
2001 for_each_drhd_unit ( drhd )
2003 iommu = drhd->iommu;
2004 dmar_writeq( iommu->reg, DMAR_RTADDR_REG,
2005 (u64) iommu_state[DMAR_RTADDR_REG * i]);
2006 dmar_writel(iommu->reg, DMAR_FECTL_REG,
2007 (u32) iommu_state[DMAR_FECTL_REG * i]);
2008 dmar_writel(iommu->reg, DMAR_FEDATA_REG,
2009 (u32) iommu_state[DMAR_FEDATA_REG * i]);
2010 dmar_writel(iommu->reg, DMAR_FEADDR_REG,
2011 (u32) iommu_state[DMAR_FEADDR_REG * i]);
2012 dmar_writel(iommu->reg, DMAR_FEUADDR_REG,
2013 (u32) iommu_state[DMAR_FEUADDR_REG * i]);
2014 dmar_writel(iommu->reg, DMAR_PLMBASE_REG,
2015 (u32) iommu_state[DMAR_PLMBASE_REG * i]);
2016 dmar_writel(iommu->reg, DMAR_PLMLIMIT_REG,
2017 (u32) iommu_state[DMAR_PLMLIMIT_REG * i]);
2018 dmar_writeq(iommu->reg, DMAR_PHMBASE_REG,
2019 (u64) iommu_state[DMAR_PHMBASE_REG * i]);
2020 dmar_writeq(iommu->reg, DMAR_PHMLIMIT_REG,
2021 (u64) iommu_state[DMAR_PHMLIMIT_REG * i]);
2023 if ( iommu_enable_translation(iommu) )
2024 return -EIO;
2025 i++;
2027 return 0;
2030 /*
2031 * Local variables:
2032 * mode: C
2033 * c-set-style: "BSD"
2034 * c-basic-offset: 4
2035 * tab-width: 4
2036 * indent-tabs-mode: nil
2037 * End:
2038 */