/root/src/xen/xen/drivers/passthrough/vtd/x86/vtd.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2008, Intel Corporation. |
3 | | * |
4 | | * This program is free software; you can redistribute it and/or modify it |
5 | | * under the terms and conditions of the GNU General Public License, |
6 | | * version 2, as published by the Free Software Foundation. |
7 | | * |
8 | | * This program is distributed in the hope it will be useful, but WITHOUT |
9 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
10 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
11 | | * more details. |
12 | | * |
13 | | * You should have received a copy of the GNU General Public License along with |
14 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
15 | | * |
16 | | * Copyright (C) Allen Kay <allen.m.kay@intel.com> |
17 | | * Copyright (C) Weidong Han <weidong.han@intel.com> |
18 | | */ |
19 | | |
20 | | #include <xen/sched.h> |
21 | | #include <xen/softirq.h> |
22 | | #include <xen/domain_page.h> |
23 | | #include <asm/paging.h> |
24 | | #include <xen/iommu.h> |
25 | | #include <xen/irq.h> |
26 | | #include <xen/numa.h> |
27 | | #include <asm/fixmap.h> |
28 | | #include <asm/setup.h> |
29 | | #include "../iommu.h" |
30 | | #include "../dmar.h" |
31 | | #include "../vtd.h" |
32 | | #include "../extern.h" |
33 | | |
34 | | /* |
35 | | * iommu_inclusive_mapping: when set, all memory below 4GB is included in dom0 |
36 | | * 1:1 iommu mappings except xen and unusable regions. |
37 | | */ |
38 | | static bool_t __hwdom_initdata iommu_inclusive_mapping = 1; |
39 | | boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping); |
40 | | |
41 | | void *map_vtd_domain_page(u64 maddr) |
42 | 18.6M | { |
43 | 18.6M | return map_domain_page(_mfn(paddr_to_pfn(maddr))); |
44 | 18.6M | } |
45 | | |
/* Undo a mapping previously established via map_vtd_domain_page(). */
void unmap_vtd_domain_page(void *va)
{
    unmap_domain_page(va);
}
50 | | |
/*
 * Report the CPU's cache line size in bytes.
 *
 * CPUID leaf 1 returns, in EBX bits 15:8, the CLFLUSH line size expressed
 * in 8-byte quantities; scale it up to bytes before returning.
 */
unsigned int get_cache_line_size(void)
{
    unsigned int clflush_chunks = (cpuid_ebx(1) >> 8) & 0xff;

    return clflush_chunks * 8;
}
55 | | |
/* Flush the single cache line containing @addr back to memory. */
void cacheline_flush(char *addr)
{
    clflush(addr);
}
60 | | |
/*
 * Write back and invalidate the entire CPU cache (WBINVD).
 *
 * Fix: the original declarator used empty parentheses, which in C is an
 * old-style non-prototype declaration taking an unspecified number of
 * arguments; "(void)" makes it a proper prototype.  Callers are unaffected.
 */
void flush_all_cache(void)
{
    wbinvd();
}
65 | | |
/*
 * pt_pirq_iterate() callback invoked for each passed-through PIRQ of the
 * domain when the guest EOIs an ISA IRQ (the IRQ number arrives smuggled
 * through the opaque @arg pointer).
 *
 * For every device/INTx pair bound to this PIRQ whose PCI link is currently
 * routed to the ISA IRQ being EOIed, deassert the virtual INTx line.  Once
 * the PIRQ's pending count drops to zero, stop its timer (presumably the
 * EOI-timeout timer — confirm against the dpci timer setup site) and
 * propagate the EOI to the underlying guest-bound physical IRQ.
 *
 * Always returns 0 so that pt_pirq_iterate() keeps visiting all PIRQs.
 */
static int _hvm_dpci_isairq_eoi(struct domain *d,
                                struct hvm_pirq_dpci *pirq_dpci, void *arg)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int isairq = (long)arg; /* ISA IRQ number, cast back from void * */
    const struct dev_intx_gsi_link *digl;

    list_for_each_entry ( digl, &pirq_dpci->digl_list, list )
    {
        unsigned int link = hvm_pci_intx_link(digl->device, digl->intx);

        /* Only lines whose PCI link routes to the EOIed ISA IRQ matter. */
        if ( hvm_irq->pci_link.route[link] == isairq )
        {
            hvm_pci_intx_deassert(d, digl->device, digl->intx);
            if ( --pirq_dpci->pending == 0 )
            {
                /* Last outstanding instance: cancel timer, EOI the pirq. */
                stop_timer(&pirq_dpci->timer);
                pirq_guest_eoi(dpci_pirq(pirq_dpci));
            }
        }
    }

    return 0;
}
90 | | |
91 | | void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq) |
92 | 0 | { |
93 | 0 | struct hvm_irq_dpci *dpci = NULL; |
94 | 0 |
|
95 | 0 | ASSERT(isairq < NR_ISAIRQS); |
96 | 0 | if ( !iommu_enabled) |
97 | 0 | return; |
98 | 0 |
|
99 | 0 | spin_lock(&d->event_lock); |
100 | 0 |
|
101 | 0 | dpci = domain_get_irq_dpci(d); |
102 | 0 |
|
103 | 0 | if ( dpci && test_bit(isairq, dpci->isairq_map) ) |
104 | 0 | { |
105 | 0 | /* Multiple mirq may be mapped to one isa irq */ |
106 | 0 | pt_pirq_iterate(d, _hvm_dpci_isairq_eoi, (void *)(long)isairq); |
107 | 0 | } |
108 | 0 | spin_unlock(&d->event_lock); |
109 | 0 | } |
110 | | |
/*
 * Establish the hardware domain's 1:1 IOMMU mappings at boot.
 *
 * Iterates over every page-directory index (pdx) up to the larger of the
 * highest RAM pdx and the 4GB boundary, filters out pages that should not
 * be mapped, and installs identity (pfn -> pfn) IOMMU mappings for the
 * rest with read+write permission.
 */
void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
{
    unsigned long i, j, tmp, top;

    /* Only ever valid for the hardware domain (dom0). */
    BUG_ON(!is_hardware_domain(d));

    /* Cover all RAM, and in any case everything below 4GB. */
    top = max(max_pdx, pfn_to_pdx(0xffffffffUL >> PAGE_SHIFT) + 1);

    for ( i = 0; i < top; i++ )
    {
        int rc = 0;

        /*
         * Set up 1:1 mapping for dom0. Default to use only conventional RAM
         * areas and let RMRRs include needed reserved regions. When set, the
         * inclusive mapping maps in everything below 4GB except unusable
         * ranges.
         *
         * The nested ternary below encodes exactly that:
         *  - above 4GB: map only valid, conventional RAM;
         *  - below 4GB, inclusive mode: skip only unusable ranges;
         *  - below 4GB, default mode: map only conventional RAM.
         */
        unsigned long pfn = pdx_to_pfn(i);

        if ( pfn > (0xffffffffUL >> PAGE_SHIFT) ?
             (!mfn_valid(_mfn(pfn)) ||
              !page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL)) :
             iommu_inclusive_mapping ?
             page_is_ram_type(pfn, RAM_TYPE_UNUSABLE) :
             !page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
            continue;

        /* Exclude Xen bits */
        if ( xen_in_range(pfn) )
            continue;

        /*
         * Map each 4K sub-page of this (possibly larger) page; tmp is the
         * number of 4K frames per PAGE_SIZE frame.
         */
        tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
        for ( j = 0; j < tmp; j++ )
        {
            int ret = iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
                                     IOMMUF_readable|IOMMUF_writable);

            /* Remember only the first failure for this page. */
            if ( !rc )
                rc = ret;
        }

        if ( rc )
            printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping failed: %d\n",
                   d->domain_id, rc);

        /* Periodically yield so long boot-time loops don't starve softirqs. */
        if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
            process_pending_softirqs();
    }
}
161 | | |