/root/src/xen/xen/drivers/passthrough/amd/iommu_init.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (C) 2007 Advanced Micro Devices, Inc. |
3 | | * Author: Leo Duran <leo.duran@amd.com> |
4 | | * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen |
5 | | * |
6 | | * This program is free software; you can redistribute it and/or modify |
7 | | * it under the terms of the GNU General Public License as published by |
8 | | * the Free Software Foundation; either version 2 of the License, or |
9 | | * (at your option) any later version. |
10 | | * |
11 | | * This program is distributed in the hope that it will be useful, |
12 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | | * GNU General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU General Public License |
17 | | * along with this program; If not, see <http://www.gnu.org/licenses/>. |
18 | | */ |
19 | | |
20 | | #include <xen/errno.h> |
21 | | #include <xen/acpi.h> |
22 | | #include <xen/pci.h> |
23 | | #include <xen/pci_regs.h> |
24 | | #include <xen/irq.h> |
25 | | #include <asm/amd-iommu.h> |
26 | | #include <asm/msi.h> |
27 | | #include <asm/hvm/svm/amd-iommu-proto.h> |
28 | | #include <asm-x86/fixmap.h> |
29 | | #include <mach_apic.h> |
30 | | #include <xen/delay.h> |
31 | | |
32 | | static int __initdata nr_amd_iommus; |
33 | | |
34 | | static struct tasklet amd_iommu_irq_tasklet; |
35 | | |
36 | | unsigned int __read_mostly ivrs_bdf_entries; |
37 | | u8 __read_mostly ivhd_type; |
38 | | static struct radix_tree_root ivrs_maps; |
39 | | struct list_head amd_iommu_head; |
40 | | struct table_struct device_table; |
41 | | bool_t iommuv2_enabled; |
42 | | |
43 | | static int iommu_has_ht_flag(struct amd_iommu *iommu, u8 mask) |
44 | 0 | { |
45 | 0 | return iommu->ht_flags & mask; |
46 | 0 | } |
47 | | |
48 | | static int __init map_iommu_mmio_region(struct amd_iommu *iommu) |
49 | 0 | { |
50 | 0 | iommu->mmio_base = ioremap(iommu->mmio_base_phys, |
51 | 0 | IOMMU_MMIO_REGION_LENGTH); |
52 | 0 | if ( !iommu->mmio_base ) |
53 | 0 | return -ENOMEM; |
54 | 0 |
|
55 | 0 | memset(iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH); |
56 | 0 |
|
57 | 0 | return 0; |
58 | 0 | } |
59 | | |
60 | | static void __init unmap_iommu_mmio_region(struct amd_iommu *iommu) |
61 | 0 | { |
62 | 0 | if ( iommu->mmio_base ) |
63 | 0 | { |
64 | 0 | iounmap(iommu->mmio_base); |
65 | 0 | iommu->mmio_base = NULL; |
66 | 0 | } |
67 | 0 | } |
68 | | |
69 | | static void set_iommu_ht_flags(struct amd_iommu *iommu) |
70 | 0 | { |
71 | 0 | u32 entry; |
72 | 0 | entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
73 | 0 |
|
74 | 0 | /* Setup HT flags */ |
75 | 0 | if ( iommu_has_cap(iommu, PCI_CAP_HT_TUNNEL_SHIFT) ) |
76 | 0 | iommu_has_ht_flag(iommu, ACPI_IVHD_TT_ENABLE) ? |
77 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT) : |
78 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT); |
79 | 0 |
|
80 | 0 | iommu_has_ht_flag(iommu, ACPI_IVHD_RES_PASS_PW) ? |
81 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT): |
82 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT); |
83 | 0 |
|
84 | 0 | iommu_has_ht_flag(iommu, ACPI_IVHD_ISOC) ? |
85 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT): |
86 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_ISOCHRONOUS_SHIFT); |
87 | 0 |
|
88 | 0 | iommu_has_ht_flag(iommu, ACPI_IVHD_PASS_PW) ? |
89 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT): |
90 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT); |
91 | 0 |
|
92 | 0 | /* Force coherent */ |
93 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_COHERENT_SHIFT); |
94 | 0 |
|
95 | 0 | writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); |
96 | 0 | } |
97 | | |
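set_iommu_ht_flags() follows the usual read-modify-write pattern: read the control register, set or clear one bit per IVHD flag, and write the whole word back. A minimal stand-alone sketch of that pattern follows; the helper functions and bit positions are stand-ins chosen for illustration, not the real iommu_set_bit()/iommu_clear_bit() or register layout.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the iommu_set_bit()/iommu_clear_bit() helpers. */
static void set_bit32(uint32_t *reg, unsigned int shift)
{
    *reg |= 1u << shift;
}

static void clear_bit32(uint32_t *reg, unsigned int shift)
{
    *reg &= ~(1u << shift);
}

int main(void)
{
    /* Hypothetical bit positions, for illustration only. */
    enum { HT_TUNNEL_SHIFT = 1, COHERENT_SHIFT = 10 };
    uint32_t control = 0x5;     /* pretend this came from readl() */
    int ivhd_tt_enable = 1;     /* flag parsed from the ACPI IVHD entry */

    /* Same ternary set-or-clear shape as set_iommu_ht_flags(). */
    ivhd_tt_enable ? set_bit32(&control, HT_TUNNEL_SHIFT)
                   : clear_bit32(&control, HT_TUNNEL_SHIFT);
    set_bit32(&control, COHERENT_SHIFT);    /* force coherent */

    printf("value to write back: %#x\n", control);
    return 0;
}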
98 | | static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu) |
99 | 0 | { |
100 | 0 | u64 addr_64, addr_lo, addr_hi; |
101 | 0 | u32 entry; |
102 | 0 |
|
103 | 0 | ASSERT( iommu->dev_table.buffer ); |
104 | 0 |
|
105 | 0 | addr_64 = (u64)virt_to_maddr(iommu->dev_table.buffer); |
106 | 0 | addr_lo = addr_64 & DMA_32BIT_MASK; |
107 | 0 | addr_hi = addr_64 >> 32; |
108 | 0 |
|
109 | 0 | entry = 0; |
110 | 0 | iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT); |
111 | 0 | set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1, |
112 | 0 | entry, IOMMU_DEV_TABLE_SIZE_MASK, |
113 | 0 | IOMMU_DEV_TABLE_SIZE_SHIFT, &entry); |
114 | 0 | writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET); |
115 | 0 |
|
116 | 0 | entry = 0; |
117 | 0 | iommu_set_addr_hi_to_reg(&entry, addr_hi); |
118 | 0 | writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET); |
119 | 0 | } |
120 | | |
121 | | static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu) |
122 | 0 | { |
123 | 0 | u64 addr_64; |
124 | 0 | u32 addr_lo, addr_hi; |
125 | 0 | u32 power_of2_entries; |
126 | 0 | u32 entry; |
127 | 0 |
|
128 | 0 | ASSERT( iommu->cmd_buffer.buffer ); |
129 | 0 |
|
130 | 0 | addr_64 = virt_to_maddr(iommu->cmd_buffer.buffer); |
131 | 0 | addr_lo = addr_64; |
132 | 0 | addr_hi = addr_64 >> 32; |
133 | 0 |
|
134 | 0 | entry = 0; |
135 | 0 | iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT); |
136 | 0 | writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET); |
137 | 0 |
|
138 | 0 | power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) + |
139 | 0 | IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE; |
140 | 0 |
|
141 | 0 | entry = 0; |
142 | 0 | iommu_set_addr_hi_to_reg(&entry, addr_hi); |
143 | 0 | set_field_in_reg_u32(power_of2_entries, entry, |
144 | 0 | IOMMU_CMD_BUFFER_LENGTH_MASK, |
145 | 0 | IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry); |
146 | 0 | writel(entry, iommu->mmio_base+IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET); |
147 | 0 | } |
148 | | |
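The length field programmed above encodes the command buffer size as log2 of the number of entries: the allocation's page order plus log2 of the commands that fit in one page. A small sketch of that arithmetic follows; the 4 KiB page size, 16-byte command size, 512-entry default and the resulting per-page constant of 8 are assumptions for the example, not values taken from this file.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Same contract as get_order_from_bytes(): smallest n with PAGE_SIZE << n >= bytes. */
static unsigned int order_from_bytes(uint64_t bytes)
{
    unsigned int order = 0;

    while ( ((uint64_t)PAGE_SIZE << order) < bytes )
        order++;
    return order;
}

int main(void)
{
    uint64_t entries = 512, entry_size = 16;          /* assumed defaults */
    unsigned int order = order_from_bytes(entries * entry_size);
    unsigned int entries_per_page_log2 = 8;           /* log2(4096 / 16) */
    unsigned int length_field = order + entries_per_page_log2;

    /* 512 * 16 = 8 KiB -> order 1 -> length field 9, i.e. 2^9 = 512 entries. */
    printf("order = %u, length field = %u (2^%u entries)\n",
           order, length_field, length_field);
    return 0;
}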
149 | | static void register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu) |
150 | 0 | { |
151 | 0 | u64 addr_64; |
152 | 0 | u32 addr_lo, addr_hi; |
153 | 0 | u32 power_of2_entries; |
154 | 0 | u32 entry; |
155 | 0 |
|
156 | 0 | ASSERT( iommu->event_log.buffer ); |
157 | 0 |
|
158 | 0 | addr_64 = virt_to_maddr(iommu->event_log.buffer); |
159 | 0 | addr_lo = addr_64; |
160 | 0 | addr_hi = addr_64 >> 32; |
161 | 0 |
|
162 | 0 | entry = 0; |
163 | 0 | iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT); |
164 | 0 | writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET); |
165 | 0 |
|
166 | 0 | power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) + |
167 | 0 | IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE; |
168 | 0 |
|
169 | 0 | entry = 0; |
170 | 0 | iommu_set_addr_hi_to_reg(&entry, addr_hi); |
171 | 0 | set_field_in_reg_u32(power_of2_entries, entry, |
172 | 0 | IOMMU_EVENT_LOG_LENGTH_MASK, |
173 | 0 | IOMMU_EVENT_LOG_LENGTH_SHIFT, &entry); |
174 | 0 | writel(entry, iommu->mmio_base+IOMMU_EVENT_LOG_BASE_HIGH_OFFSET); |
175 | 0 | } |
176 | | |
177 | | static void register_iommu_ppr_log_in_mmio_space(struct amd_iommu *iommu) |
178 | 0 | { |
179 | 0 | u64 addr_64; |
180 | 0 | u32 addr_lo, addr_hi; |
181 | 0 | u32 power_of2_entries; |
182 | 0 | u32 entry; |
183 | 0 |
|
184 | 0 | ASSERT ( iommu->ppr_log.buffer ); |
185 | 0 |
|
186 | 0 | addr_64 = virt_to_maddr(iommu->ppr_log.buffer); |
187 | 0 | addr_lo = addr_64; |
188 | 0 | addr_hi = addr_64 >> 32; |
189 | 0 |
|
190 | 0 | entry = 0; |
191 | 0 | iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT); |
192 | 0 | writel(entry, iommu->mmio_base + IOMMU_PPR_LOG_BASE_LOW_OFFSET); |
193 | 0 |
|
194 | 0 | power_of2_entries = get_order_from_bytes(iommu->ppr_log.alloc_size) + |
195 | 0 | IOMMU_PPR_LOG_POWER_OF2_ENTRIES_PER_PAGE; |
196 | 0 |
|
197 | 0 | entry = 0; |
198 | 0 | iommu_set_addr_hi_to_reg(&entry, addr_hi); |
199 | 0 | set_field_in_reg_u32(power_of2_entries, entry, |
200 | 0 | IOMMU_PPR_LOG_LENGTH_MASK, |
201 | 0 | IOMMU_PPR_LOG_LENGTH_SHIFT, &entry); |
202 | 0 | writel(entry, iommu->mmio_base + IOMMU_PPR_LOG_BASE_HIGH_OFFSET); |
203 | 0 | } |
204 | | |
205 | | |
206 | | static void set_iommu_translation_control(struct amd_iommu *iommu, |
207 | | int enable) |
208 | 0 | { |
209 | 0 | u32 entry; |
210 | 0 |
|
211 | 0 | entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
212 | 0 |
|
213 | 0 | enable ? |
214 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT) : |
215 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT); |
216 | 0 |
|
217 | 0 | writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); |
218 | 0 | } |
219 | | |
220 | | static void set_iommu_guest_translation_control(struct amd_iommu *iommu, |
221 | | int enable) |
222 | 0 | { |
223 | 0 | u32 entry; |
224 | 0 |
|
225 | 0 | entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
226 | 0 |
|
227 | 0 | enable ? |
228 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_GT_ENABLE_SHIFT) : |
229 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_GT_ENABLE_SHIFT); |
230 | 0 |
|
231 | 0 | writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); |
232 | 0 |
|
233 | 0 | if ( enable ) |
234 | 0 | AMD_IOMMU_DEBUG("Guest Translation Enabled.\n"); |
235 | 0 | } |
236 | | |
237 | | static void set_iommu_command_buffer_control(struct amd_iommu *iommu, |
238 | | int enable) |
239 | 0 | { |
240 | 0 | u32 entry; |
241 | 0 |
|
242 | 0 | entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
243 | 0 |
|
244 | 0 | /* reset head and tail pointers manually before enablement */ |
245 | 0 | if ( enable ) |
246 | 0 | { |
247 | 0 | writeq(0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET); |
248 | 0 | writeq(0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET); |
249 | 0 |
|
250 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT); |
251 | 0 | } |
252 | 0 | else |
253 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT); |
254 | 0 |
|
255 | 0 | writel(entry, iommu->mmio_base+IOMMU_CONTROL_MMIO_OFFSET); |
256 | 0 | } |
257 | | |
258 | | static void register_iommu_exclusion_range(struct amd_iommu *iommu) |
259 | 0 | { |
260 | 0 | u32 addr_lo, addr_hi; |
261 | 0 | u32 entry; |
262 | 0 |
|
263 | 0 | addr_lo = iommu->exclusion_limit; |
264 | 0 | addr_hi = iommu->exclusion_limit >> 32; |
265 | 0 |
|
266 | 0 | set_field_in_reg_u32((u32)addr_hi, 0, |
267 | 0 | IOMMU_EXCLUSION_LIMIT_HIGH_MASK, |
268 | 0 | IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry); |
269 | 0 | writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET); |
270 | 0 |
|
271 | 0 | set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0, |
272 | 0 | IOMMU_EXCLUSION_LIMIT_LOW_MASK, |
273 | 0 | IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry); |
274 | 0 | writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_LIMIT_LOW_OFFSET); |
275 | 0 |
|
276 | 0 | addr_lo = iommu->exclusion_base & DMA_32BIT_MASK; |
277 | 0 | addr_hi = iommu->exclusion_base >> 32; |
278 | 0 |
|
279 | 0 | entry = 0; |
280 | 0 | iommu_set_addr_hi_to_reg(&entry, addr_hi); |
281 | 0 | writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_HIGH_OFFSET); |
282 | 0 |
|
283 | 0 | entry = 0; |
284 | 0 | iommu_set_addr_lo_to_reg(&entry, addr_lo >> PAGE_SHIFT); |
285 | 0 |
|
286 | 0 | set_field_in_reg_u32(iommu->exclusion_allow_all, entry, |
287 | 0 | IOMMU_EXCLUSION_ALLOW_ALL_MASK, |
288 | 0 | IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry); |
289 | 0 |
|
290 | 0 | set_field_in_reg_u32(iommu->exclusion_enable, entry, |
291 | 0 | IOMMU_EXCLUSION_RANGE_ENABLE_MASK, |
292 | 0 | IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry); |
293 | 0 | writel(entry, iommu->mmio_base+IOMMU_EXCLUSION_BASE_LOW_OFFSET); |
294 | 0 | } |
295 | | |
296 | | static void set_iommu_event_log_control(struct amd_iommu *iommu, |
297 | | int enable) |
298 | 0 | { |
299 | 0 | u32 entry; |
300 | 0 |
|
301 | 0 | entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
302 | 0 |
|
303 | 0 | /* reset head and tail pointers manually before enablement */ |
304 | 0 | if ( enable ) |
305 | 0 | { |
306 | 0 | writeq(0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET); |
307 | 0 | writeq(0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET); |
308 | 0 |
|
309 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_EVENT_LOG_INT_SHIFT); |
310 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT); |
311 | 0 | } |
312 | 0 | else |
313 | 0 | { |
314 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_EVENT_LOG_INT_SHIFT); |
315 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT); |
316 | 0 | } |
317 | 0 |
|
318 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_COMP_WAIT_INT_SHIFT); |
319 | 0 |
|
320 | 0 | writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
321 | 0 | } |
322 | | |
323 | | static void set_iommu_ppr_log_control(struct amd_iommu *iommu, |
324 | | int enable) |
325 | 0 | { |
326 | 0 | u32 entry; |
327 | 0 |
|
328 | 0 | entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
329 | 0 |
|
330 | 0 | /* reset head and tail pointers manually before enablement */ |
331 | 0 | if ( enable ) |
332 | 0 | { |
333 | 0 | writeq(0, iommu->mmio_base + IOMMU_PPR_LOG_HEAD_OFFSET); |
334 | 0 | writeq(0, iommu->mmio_base + IOMMU_PPR_LOG_TAIL_OFFSET); |
335 | 0 |
|
336 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_PPR_ENABLE_SHIFT); |
337 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_PPR_LOG_INT_SHIFT); |
338 | 0 | iommu_set_bit(&entry, IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT); |
339 | 0 | } |
340 | 0 | else |
341 | 0 | { |
342 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_ENABLE_SHIFT); |
343 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_LOG_INT_SHIFT); |
344 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT); |
345 | 0 | } |
346 | 0 |
|
347 | 0 | writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
348 | 0 | if ( enable ) |
349 | 0 | AMD_IOMMU_DEBUG("PPR Log Enabled.\n"); |
350 | 0 | } |
351 | | |
352 | | /* read event log or ppr log from iommu ring buffer */ |
353 | | static int iommu_read_log(struct amd_iommu *iommu, |
354 | | struct ring_buffer *log, |
355 | | unsigned int entry_size, |
356 | | void (*parse_func)(struct amd_iommu *, u32 *)) |
357 | 0 | { |
358 | 0 | u32 tail, head, *entry, tail_offset, head_offset; |
359 | 0 |
|
360 | 0 | BUG_ON(!iommu || ((log != &iommu->event_log) && (log != &iommu->ppr_log))); |
361 | 0 | |
362 | 0 | spin_lock(&log->lock); |
363 | 0 |
|
364 | 0 | /* make sure there's an entry in the log */ |
365 | 0 | tail_offset = ( log == &iommu->event_log ) ? |
366 | 0 | IOMMU_EVENT_LOG_TAIL_OFFSET : |
367 | 0 | IOMMU_PPR_LOG_TAIL_OFFSET; |
368 | 0 |
|
369 | 0 | head_offset = ( log == &iommu->event_log ) ? |
370 | 0 | IOMMU_EVENT_LOG_HEAD_OFFSET : |
371 | 0 | IOMMU_PPR_LOG_HEAD_OFFSET; |
372 | 0 |
|
373 | 0 | tail = readl(iommu->mmio_base + tail_offset); |
374 | 0 | tail = iommu_get_rb_pointer(tail); |
375 | 0 |
|
376 | 0 | while ( tail != log->head ) |
377 | 0 | { |
378 | 0 | /* read next log entry */ |
379 | 0 | entry = (u32 *)(log->buffer + log->head * entry_size); |
380 | 0 |
|
381 | 0 | parse_func(iommu, entry); |
382 | 0 | if ( ++log->head == log->entries ) |
383 | 0 | log->head = 0; |
384 | 0 |
|
385 | 0 | /* update head pointer */ |
386 | 0 | head = 0; |
387 | 0 | iommu_set_rb_pointer(&head, log->head); |
388 | 0 |
|
389 | 0 | writel(head, iommu->mmio_base + head_offset); |
390 | 0 | } |
391 | 0 |
|
392 | 0 | spin_unlock(&log->lock); |
393 | 0 | |
394 | 0 | return 0; |
395 | 0 | } |
396 | | |
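The consumer loop above works on a simple ring: entries between the software head and the hardware tail are parsed, and the new head is written back so the IOMMU can reuse those slots. A user-space sketch of the same loop, with a plain array standing in for the MMIO-backed log and a printf standing in for the parse callback:

#include <stdint.h>
#include <stdio.h>

#define LOG_ENTRIES 8

struct log {
    uint32_t entry[LOG_ENTRIES];
    unsigned int head;                  /* software head, like log->head above */
};

static void parse(uint32_t e)
{
    printf("consumed entry %#x\n", e);
}

/* Consume everything between the software head and the hardware tail. */
static void read_log(struct log *log, unsigned int tail)
{
    while ( log->head != tail )
    {
        parse(log->entry[log->head]);
        if ( ++log->head == LOG_ENTRIES )
            log->head = 0;
        /* The real code writes the new head back to the head register here. */
    }
}

int main(void)
{
    struct log log = { .entry = { 0x11, 0x22, 0x33 }, .head = 0 };

    read_log(&log, 3);      /* pretend the tail register read back 3 */
    read_log(&log, 3);      /* head already caught up: nothing to do */
    return 0;
}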
397 | | /* reset event log or ppr log when it overflows */ |
398 | | static void iommu_reset_log(struct amd_iommu *iommu, |
399 | | struct ring_buffer *log, |
400 | | void (*ctrl_func)(struct amd_iommu *iommu, int)) |
401 | 0 | { |
402 | 0 | u32 entry; |
403 | 0 | int log_run, run_bit; |
404 | 0 | int loop_count = 1000; |
405 | 0 |
|
406 | 0 | BUG_ON(!iommu || ((log != &iommu->event_log) && (log != &iommu->ppr_log))); |
407 | 0 |
|
408 | 0 | run_bit = ( log == &iommu->event_log ) ? |
409 | 0 | IOMMU_STATUS_EVENT_LOG_RUN_SHIFT : |
410 | 0 | IOMMU_STATUS_PPR_LOG_RUN_SHIFT; |
411 | 0 |
|
412 | 0 | /* wait until the log's Run bit = 0 */ |
413 | 0 | do { |
414 | 0 | entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); |
415 | 0 | log_run = iommu_get_bit(entry, run_bit); |
416 | 0 | loop_count--; |
417 | 0 | } while ( log_run && loop_count ); |
418 | 0 |
|
419 | 0 | if ( log_run ) |
420 | 0 | { |
421 | 0 | AMD_IOMMU_DEBUG("Warning: Log Run bit %d is not cleared " |
422 | 0 | "before reset!\n", run_bit); |
423 | 0 | return; |
424 | 0 | } |
425 | 0 |
|
426 | 0 | ctrl_func(iommu, IOMMU_CONTROL_DISABLED); |
427 | 0 |
|
428 | 0 | /* RW1C overflow bit */ |
429 | 0 | writel(log == &iommu->event_log ? IOMMU_STATUS_EVENT_OVERFLOW_MASK |
430 | 0 | : IOMMU_STATUS_PPR_LOG_OVERFLOW_MASK, |
431 | 0 | iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); |
432 | 0 |
|
433 | 0 | /* reset the log's software head pointer */ |
434 | 0 | log->head = 0; |
435 | 0 |
|
436 | 0 | ctrl_func(iommu, IOMMU_CONTROL_ENABLED); |
437 | 0 | } |
438 | | |
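Before disabling and re-enabling an overflowed log, iommu_reset_log() polls the status register a bounded number of times waiting for the Run bit to drop. The sketch below reproduces that bounded-poll shape against a plain variable; since nothing here ever clears the bit, it exercises the timeout/warning path. The bit position is arbitrary.

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t fake_status = 1u << 3;    /* stand-in for the status register */

static int bit_is_set(uint32_t reg, unsigned int shift)
{
    return (reg >> shift) & 1;
}

int main(void)
{
    int log_run;
    unsigned int loop_count = 1000;

    /* Same shape as the wait loop in iommu_reset_log(). */
    do {
        log_run = bit_is_set(fake_status, 3);
        loop_count--;
    } while ( log_run && loop_count );

    if ( log_run )
        printf("Warning: Run bit not cleared before reset\n");
    else
        printf("safe to disable the log, clear the overflow bit and re-enable\n");
    return 0;
}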
439 | | static void amd_iommu_msi_enable(struct amd_iommu *iommu, int flag) |
440 | 0 | { |
441 | 0 | __msi_set_enable(iommu->seg, PCI_BUS(iommu->bdf), PCI_SLOT(iommu->bdf), |
442 | 0 | PCI_FUNC(iommu->bdf), iommu->msi.msi_attrib.pos, flag); |
443 | 0 | } |
444 | | |
445 | | static void iommu_msi_unmask(struct irq_desc *desc) |
446 | 0 | { |
447 | 0 | unsigned long flags; |
448 | 0 | struct amd_iommu *iommu = desc->action->dev_id; |
449 | 0 |
|
450 | 0 | spin_lock_irqsave(&iommu->lock, flags); |
451 | 0 | amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED); |
452 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
453 | 0 | iommu->msi.msi_attrib.host_masked = 0; |
454 | 0 | } |
455 | | |
456 | | static void iommu_msi_mask(struct irq_desc *desc) |
457 | 0 | { |
458 | 0 | unsigned long flags; |
459 | 0 | struct amd_iommu *iommu = desc->action->dev_id; |
460 | 0 |
|
461 | 0 | irq_complete_move(desc); |
462 | 0 |
|
463 | 0 | spin_lock_irqsave(&iommu->lock, flags); |
464 | 0 | amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED); |
465 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
466 | 0 | iommu->msi.msi_attrib.host_masked = 1; |
467 | 0 | } |
468 | | |
469 | | static unsigned int iommu_msi_startup(struct irq_desc *desc) |
470 | 0 | { |
471 | 0 | iommu_msi_unmask(desc); |
472 | 0 | return 0; |
473 | 0 | } |
474 | | |
475 | | static void iommu_msi_end(struct irq_desc *desc, u8 vector) |
476 | 0 | { |
477 | 0 | iommu_msi_unmask(desc); |
478 | 0 | ack_APIC_irq(); |
479 | 0 | } |
480 | | |
481 | | |
482 | | static hw_irq_controller iommu_msi_type = { |
483 | | .typename = "AMD-IOMMU-MSI", |
484 | | .startup = iommu_msi_startup, |
485 | | .shutdown = iommu_msi_mask, |
486 | | .enable = iommu_msi_unmask, |
487 | | .disable = iommu_msi_mask, |
488 | | .ack = iommu_msi_mask, |
489 | | .end = iommu_msi_end, |
490 | | .set_affinity = set_msi_affinity, |
491 | | }; |
492 | | |
493 | | static unsigned int iommu_maskable_msi_startup(struct irq_desc *desc) |
494 | 0 | { |
495 | 0 | iommu_msi_unmask(desc); |
496 | 0 | unmask_msi_irq(desc); |
497 | 0 | return 0; |
498 | 0 | } |
499 | | |
500 | | static void iommu_maskable_msi_shutdown(struct irq_desc *desc) |
501 | 0 | { |
502 | 0 | mask_msi_irq(desc); |
503 | 0 | iommu_msi_mask(desc); |
504 | 0 | } |
505 | | |
506 | | /* |
507 | | * While the names may appear mismatched, we indeed want to use the non- |
508 | | * maskable flavors here, as we want the ACK to be issued in ->end(). |
509 | | */ |
510 | | #define iommu_maskable_msi_ack ack_nonmaskable_msi_irq |
511 | | #define iommu_maskable_msi_end end_nonmaskable_msi_irq |
512 | | |
513 | | static hw_irq_controller iommu_maskable_msi_type = { |
514 | | .typename = "IOMMU-M-MSI", |
515 | | .startup = iommu_maskable_msi_startup, |
516 | | .shutdown = iommu_maskable_msi_shutdown, |
517 | | .enable = unmask_msi_irq, |
518 | | .disable = mask_msi_irq, |
519 | | .ack = iommu_maskable_msi_ack, |
520 | | .end = iommu_maskable_msi_end, |
521 | | .set_affinity = set_msi_affinity, |
522 | | }; |
523 | | |
524 | | static void parse_event_log_entry(struct amd_iommu *iommu, u32 entry[]) |
525 | 0 | { |
526 | 0 | u16 domain_id, device_id, flags; |
527 | 0 | unsigned int bdf; |
528 | 0 | u32 code; |
529 | 0 | u64 *addr; |
530 | 0 | int count = 0; |
531 | 0 | static const char *const event_str[] = { |
532 | 0 | #define EVENT_STR(name) [IOMMU_EVENT_##name - 1] = #name |
533 | 0 | EVENT_STR(ILLEGAL_DEV_TABLE_ENTRY), |
534 | 0 | EVENT_STR(IO_PAGE_FAULT), |
535 | 0 | EVENT_STR(DEV_TABLE_HW_ERROR), |
536 | 0 | EVENT_STR(PAGE_TABLE_HW_ERROR), |
537 | 0 | EVENT_STR(ILLEGAL_COMMAND_ERROR), |
538 | 0 | EVENT_STR(COMMAND_HW_ERROR), |
539 | 0 | EVENT_STR(IOTLB_INV_TIMEOUT), |
540 | 0 | EVENT_STR(INVALID_DEV_REQUEST) |
541 | 0 | #undef EVENT_STR |
542 | 0 | }; |
543 | 0 |
|
544 | 0 | code = get_field_from_reg_u32(entry[1], IOMMU_EVENT_CODE_MASK, |
545 | 0 | IOMMU_EVENT_CODE_SHIFT); |
546 | 0 |
|
547 | 0 | /* |
548 | 0 | * Workaround for erratum 732: |
549 | 0 | * It can happen that the tail pointer is updated before the actual entry |
550 | 0 | * got written. As suggested by RevGuide, we initialize the event log |
551 | 0 | * buffer to all zeros and clear event log entries after processing them. |
552 | 0 | */ |
553 | 0 | while ( code == 0 ) |
554 | 0 | { |
555 | 0 | if ( unlikely(++count == IOMMU_LOG_ENTRY_TIMEOUT) ) |
556 | 0 | { |
557 | 0 | AMD_IOMMU_DEBUG("AMD-Vi: No event written to log\n"); |
558 | 0 | return; |
559 | 0 | } |
560 | 0 | udelay(1); |
561 | 0 | barrier(); /* Prevent hoisting of the entry[] read. */ |
562 | 0 | code = get_field_from_reg_u32(entry[1], IOMMU_EVENT_CODE_MASK, |
563 | 0 | IOMMU_EVENT_CODE_SHIFT); |
564 | 0 | } |
565 | 0 |
|
566 | 0 | if ( code == IOMMU_EVENT_IO_PAGE_FAULT ) |
567 | 0 | { |
568 | 0 | device_id = iommu_get_devid_from_event(entry[0]); |
569 | 0 | domain_id = get_field_from_reg_u32(entry[1], |
570 | 0 | IOMMU_EVENT_DOMAIN_ID_MASK, |
571 | 0 | IOMMU_EVENT_DOMAIN_ID_SHIFT); |
572 | 0 | flags = get_field_from_reg_u32(entry[1], |
573 | 0 | IOMMU_EVENT_FLAGS_MASK, |
574 | 0 | IOMMU_EVENT_FLAGS_SHIFT); |
575 | 0 | addr= (u64*) (entry + 2); |
576 | 0 | printk(XENLOG_ERR "AMD-Vi: " |
577 | 0 | "%s: domain = %d, device id = %#x, " |
578 | 0 | "fault address = %#"PRIx64", flags = %#x\n", |
579 | 0 | event_str[code-1], domain_id, device_id, *addr, flags); |
580 | 0 |
|
581 | 0 | for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ ) |
582 | 0 | if ( get_dma_requestor_id(iommu->seg, bdf) == device_id ) |
583 | 0 | pci_check_disable_device(iommu->seg, PCI_BUS(bdf), |
584 | 0 | PCI_DEVFN2(bdf)); |
585 | 0 | } |
586 | 0 | else |
587 | 0 | { |
588 | 0 | AMD_IOMMU_DEBUG("%s %08x %08x %08x %08x\n", |
589 | 0 | code <= ARRAY_SIZE(event_str) ? event_str[code - 1] |
590 | 0 | : "event", |
591 | 0 | entry[0], entry[1], entry[2], entry[3]); |
592 | 0 | } |
593 | 0 |
|
594 | 0 | memset(entry, 0, IOMMU_EVENT_LOG_ENTRY_SIZE); |
595 | 0 | } |
596 | | |
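The device id reported in an IO_PAGE_FAULT entry is a 16-bit PCI BDF; the PCI_BUS()/PCI_SLOT()/PCI_FUNC()/PCI_DEVFN2() uses above just slice it into the standard 8-bit bus, 5-bit slot and 3-bit function fields. A tiny sketch of that decoding (local macro names, generic PCI arithmetic):

#include <stdint.h>
#include <stdio.h>

/* Standard PCI BDF split: 8-bit bus, 5-bit device (slot), 3-bit function. */
#define BUS(bdf)   (((bdf) >> 8) & 0xff)
#define DEVFN(bdf) ((bdf) & 0xff)
#define SLOT(bdf)  (((bdf) >> 3) & 0x1f)
#define FUNC(bdf)  ((bdf) & 0x7)

int main(void)
{
    uint16_t device_id = 0x0a10;    /* example value, as found in entry[0] */

    printf("%02x:%02x.%u (devfn %#x)\n",
           BUS(device_id), SLOT(device_id), FUNC(device_id), DEVFN(device_id));
    return 0;
}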
597 | | static void iommu_check_event_log(struct amd_iommu *iommu) |
598 | 0 | { |
599 | 0 | u32 entry; |
600 | 0 | unsigned long flags; |
601 | 0 |
|
602 | 0 | /* RW1C interrupt status bit */ |
603 | 0 | writel(IOMMU_STATUS_EVENT_LOG_INT_MASK, |
604 | 0 | iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); |
605 | 0 |
|
606 | 0 | iommu_read_log(iommu, &iommu->event_log, |
607 | 0 | sizeof(event_entry_t), parse_event_log_entry); |
608 | 0 |
|
609 | 0 | spin_lock_irqsave(&iommu->lock, flags); |
610 | 0 | |
611 | 0 | /* Check event overflow. */ |
612 | 0 | entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); |
613 | 0 | if ( iommu_get_bit(entry, IOMMU_STATUS_EVENT_OVERFLOW_SHIFT) ) |
614 | 0 | iommu_reset_log(iommu, &iommu->event_log, set_iommu_event_log_control); |
615 | 0 | else |
616 | 0 | { |
617 | 0 | entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
618 | 0 | if ( !(entry & IOMMU_CONTROL_EVENT_LOG_INT_MASK) ) |
619 | 0 | { |
620 | 0 | entry |= IOMMU_CONTROL_EVENT_LOG_INT_MASK; |
621 | 0 | writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
622 | 0 | /* |
623 | 0 | * Re-schedule the tasklet to handle eventual log entries added |
624 | 0 | * between reading the log above and re-enabling the interrupt. |
625 | 0 | */ |
626 | 0 | tasklet_schedule(&amd_iommu_irq_tasklet); |
627 | 0 | } |
628 | 0 | } |
629 | 0 |
|
630 | 0 | /* |
631 | 0 | * Workaround for erratum 787: |
632 | 0 | * Re-check to make sure the bit has been cleared. |
633 | 0 | */ |
634 | 0 | entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); |
635 | 0 | if ( entry & IOMMU_STATUS_EVENT_LOG_INT_MASK ) |
636 | 0 | tasklet_schedule(&amd_iommu_irq_tasklet); |
637 | 0 |
|
638 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
639 | 0 | } |
640 | | |
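The writes to the status register above depend on those bits being RW1C ("write 1 to clear"): writing the interrupt mask back acknowledges just that bit and leaves any other pending bits alone. A minimal model of RW1C semantics over a plain variable, with arbitrary bit positions:

#include <stdint.h>
#include <stdio.h>

static uint32_t status;

/* Model an RW1C register: bits set in 'val' are cleared, the rest untouched. */
static void status_write(uint32_t val)
{
    status &= ~val;
}

int main(void)
{
    status = (1u << 1) | (1u << 5);     /* e.g. event log int plus some other bit */

    status_write(1u << 1);              /* ack only the event log interrupt */
    printf("status after ack: %#x\n", status);   /* bit 5 still pending */
    return 0;
}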
641 | | void parse_ppr_log_entry(struct amd_iommu *iommu, u32 entry[]) |
642 | 0 | { |
643 | 0 |
|
644 | 0 | u16 device_id; |
645 | 0 | u8 bus, devfn, code; |
646 | 0 | struct pci_dev *pdev; |
647 | 0 | int count = 0; |
648 | 0 |
|
649 | 0 | code = get_field_from_reg_u32(entry[1], IOMMU_PPR_LOG_CODE_MASK, |
650 | 0 | IOMMU_PPR_LOG_CODE_SHIFT); |
651 | 0 |
|
652 | 0 | /* |
653 | 0 | * Workaround for erratum 733: |
654 | 0 | * It can happen that the tail pointer is updated before the actual entry |
655 | 0 | * got written. As suggested by RevGuide, we initialize the PPR log |
656 | 0 | * buffer to all zeros and clear PPR log entries after processing them. |
657 | 0 | */ |
658 | 0 | while ( code == 0 ) |
659 | 0 | { |
660 | 0 | if ( unlikely(++count == IOMMU_LOG_ENTRY_TIMEOUT) ) |
661 | 0 | { |
662 | 0 | AMD_IOMMU_DEBUG("AMD-Vi: No ppr written to log\n"); |
663 | 0 | return; |
664 | 0 | } |
665 | 0 | udelay(1); |
666 | 0 | barrier(); /* Prevent hoisting of the entry[] read. */ |
667 | 0 | code = get_field_from_reg_u32(entry[1], IOMMU_PPR_LOG_CODE_MASK, |
668 | 0 | IOMMU_PPR_LOG_CODE_SHIFT); |
669 | 0 | } |
670 | 0 |
|
671 | 0 | /* here device_id is a physical (host) value */ |
672 | 0 | device_id = iommu_get_devid_from_cmd(entry[0]); |
673 | 0 | bus = PCI_BUS(device_id); |
674 | 0 | devfn = PCI_DEVFN2(device_id); |
675 | 0 |
|
676 | 0 | pcidevs_lock(); |
677 | 0 | pdev = pci_get_real_pdev(iommu->seg, bus, devfn); |
678 | 0 | pcidevs_unlock(); |
679 | 0 |
|
680 | 0 | if ( pdev ) |
681 | 0 | guest_iommu_add_ppr_log(pdev->domain, entry); |
682 | 0 |
|
683 | 0 | memset(entry, 0, IOMMU_PPR_LOG_ENTRY_SIZE); |
684 | 0 | } |
685 | | |
686 | | static void iommu_check_ppr_log(struct amd_iommu *iommu) |
687 | 0 | { |
688 | 0 | u32 entry; |
689 | 0 | unsigned long flags; |
690 | 0 |
|
691 | 0 | /* RW1C interrupt status bit */ |
692 | 0 | writel(IOMMU_STATUS_PPR_LOG_INT_MASK, |
693 | 0 | iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); |
694 | 0 |
|
695 | 0 | iommu_read_log(iommu, &iommu->ppr_log, |
696 | 0 | sizeof(ppr_entry_t), parse_ppr_log_entry); |
697 | 0 | |
698 | 0 | spin_lock_irqsave(&iommu->lock, flags); |
699 | 0 |
|
700 | 0 | /* Check PPR log overflow. */ |
701 | 0 | entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); |
702 | 0 | if ( iommu_get_bit(entry, IOMMU_STATUS_PPR_LOG_OVERFLOW_SHIFT) ) |
703 | 0 | iommu_reset_log(iommu, &iommu->ppr_log, set_iommu_ppr_log_control); |
704 | 0 | else |
705 | 0 | { |
706 | 0 | entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
707 | 0 | if ( !(entry & IOMMU_CONTROL_PPR_LOG_INT_MASK) ) |
708 | 0 | { |
709 | 0 | entry |= IOMMU_CONTROL_PPR_LOG_INT_MASK; |
710 | 0 | writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
711 | 0 | /* |
712 | 0 | * Re-schedule the tasklet to handle eventual log entries added |
713 | 0 | * between reading the log above and re-enabling the interrupt. |
714 | 0 | */ |
715 | 0 | tasklet_schedule(&amd_iommu_irq_tasklet); |
716 | 0 | } |
717 | 0 | } |
718 | 0 |
|
719 | 0 | /* |
720 | 0 | * Workaround for erratum 787: |
721 | 0 | * Re-check to make sure the bit has been cleared. |
722 | 0 | */ |
723 | 0 | entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); |
724 | 0 | if ( entry & IOMMU_STATUS_PPR_LOG_INT_MASK ) |
725 | 0 | tasklet_schedule(&amd_iommu_irq_tasklet); |
726 | 0 |
|
727 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
728 | 0 | } |
729 | | |
730 | | static void do_amd_iommu_irq(unsigned long data) |
731 | 0 | { |
732 | 0 | struct amd_iommu *iommu; |
733 | 0 |
|
734 | 0 | if ( !iommu_found() ) |
735 | 0 | { |
736 | 0 | AMD_IOMMU_DEBUG("no device found, something must be very wrong!\n"); |
737 | 0 | return; |
738 | 0 | } |
739 | 0 |
|
740 | 0 | /* |
741 | 0 | * No matter where the interrupt came from, check all the |
742 | 0 | * IOMMUs present in the system. This allows for having just one |
743 | 0 | * tasklet (instead of one per IOMMU). |
744 | 0 | */ |
745 | 0 | for_each_amd_iommu ( iommu ) { |
746 | 0 | iommu_check_event_log(iommu); |
747 | 0 |
|
748 | 0 | if ( iommu->ppr_log.buffer != NULL ) |
749 | 0 | iommu_check_ppr_log(iommu); |
750 | 0 | } |
751 | 0 | } |
752 | | |
753 | | static void iommu_interrupt_handler(int irq, void *dev_id, |
754 | | struct cpu_user_regs *regs) |
755 | 0 | { |
756 | 0 | u32 entry; |
757 | 0 | unsigned long flags; |
758 | 0 | struct amd_iommu *iommu = dev_id; |
759 | 0 |
|
760 | 0 | spin_lock_irqsave(&iommu->lock, flags); |
761 | 0 |
|
762 | 0 | /* |
763 | 0 | * Silence interrupts from both the event and PPR logs by clearing |
764 | 0 | * their interrupt enable bits in the control register |
765 | 0 | */ |
766 | 0 | entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
767 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_EVENT_LOG_INT_SHIFT); |
768 | 0 | iommu_clear_bit(&entry, IOMMU_CONTROL_PPR_LOG_INT_SHIFT); |
769 | 0 | writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET); |
770 | 0 |
|
771 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
772 | 0 |
|
773 | 0 | /* It is the tasklet that will clear the logs and re-enable interrupts */ |
774 | 0 | tasklet_schedule(&amd_iommu_irq_tasklet); |
775 | 0 | } |
776 | | |
777 | | static bool_t __init set_iommu_interrupt_handler(struct amd_iommu *iommu) |
778 | 0 | { |
779 | 0 | int irq, ret; |
780 | 0 | hw_irq_controller *handler; |
781 | 0 | u16 control; |
782 | 0 |
|
783 | 0 | irq = create_irq(NUMA_NO_NODE); |
784 | 0 | if ( irq <= 0 ) |
785 | 0 | { |
786 | 0 | dprintk(XENLOG_ERR, "IOMMU: no irqs\n"); |
787 | 0 | return 0; |
788 | 0 | } |
789 | 0 |
|
790 | 0 | pcidevs_lock(); |
791 | 0 | iommu->msi.dev = pci_get_pdev(iommu->seg, PCI_BUS(iommu->bdf), |
792 | 0 | PCI_DEVFN2(iommu->bdf)); |
793 | 0 | pcidevs_unlock(); |
794 | 0 | if ( !iommu->msi.dev ) |
795 | 0 | { |
796 | 0 | AMD_IOMMU_DEBUG("IOMMU: no pdev for %04x:%02x:%02x.%u\n", |
797 | 0 | iommu->seg, PCI_BUS(iommu->bdf), |
798 | 0 | PCI_SLOT(iommu->bdf), PCI_FUNC(iommu->bdf)); |
799 | 0 | return 0; |
800 | 0 | } |
801 | 0 | control = pci_conf_read16(iommu->seg, PCI_BUS(iommu->bdf), |
802 | 0 | PCI_SLOT(iommu->bdf), PCI_FUNC(iommu->bdf), |
803 | 0 | iommu->msi.msi_attrib.pos + PCI_MSI_FLAGS); |
804 | 0 | iommu->msi.msi.nvec = 1; |
805 | 0 | if ( is_mask_bit_support(control) ) |
806 | 0 | { |
807 | 0 | iommu->msi.msi_attrib.maskbit = 1; |
808 | 0 | iommu->msi.msi.mpos = msi_mask_bits_reg(iommu->msi.msi_attrib.pos, |
809 | 0 | is_64bit_address(control)); |
810 | 0 | handler = &iommu_maskable_msi_type; |
811 | 0 | } |
812 | 0 | else |
813 | 0 | handler = &iommu_msi_type; |
814 | 0 | ret = __setup_msi_irq(irq_to_desc(irq), &iommu->msi, handler); |
815 | 0 | if ( !ret ) |
816 | 0 | ret = request_irq(irq, 0, iommu_interrupt_handler, "amd_iommu", iommu); |
817 | 0 | if ( ret ) |
818 | 0 | { |
819 | 0 | destroy_irq(irq); |
820 | 0 | AMD_IOMMU_DEBUG("can't request irq\n"); |
821 | 0 | return 0; |
822 | 0 | } |
823 | 0 |
|
824 | 0 | iommu->msi.irq = irq; |
825 | 0 |
|
826 | 0 | return 1; |
827 | 0 | } |
828 | | |
829 | | /* |
830 | | * Family 15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) |
831 | | * Workaround: |
832 | | * BIOS should disable L2B miscellaneous clock gating by setting |
833 | | * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b |
834 | | */ |
835 | | static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) |
836 | 0 | { |
837 | 0 | u32 value; |
838 | 0 | u8 bus = PCI_BUS(iommu->bdf); |
839 | 0 | u8 dev = PCI_SLOT(iommu->bdf); |
840 | 0 | u8 func = PCI_FUNC(iommu->bdf); |
841 | 0 |
|
842 | 0 | if ( (boot_cpu_data.x86 != 0x15) || |
843 | 0 | (boot_cpu_data.x86_model < 0x10) || |
844 | 0 | (boot_cpu_data.x86_model > 0x1f) ) |
845 | 0 | return; |
846 | 0 |
|
847 | 0 | pci_conf_write32(iommu->seg, bus, dev, func, 0xf0, 0x90); |
848 | 0 | value = pci_conf_read32(iommu->seg, bus, dev, func, 0xf4); |
849 | 0 |
|
850 | 0 | if ( value & (1 << 2) ) |
851 | 0 | return; |
852 | 0 |
|
853 | 0 | /* Select NB indirect register 0x90 and enable writing */ |
854 | 0 | pci_conf_write32(iommu->seg, bus, dev, func, 0xf0, 0x90 | (1 << 8)); |
855 | 0 |
|
856 | 0 | pci_conf_write32(iommu->seg, bus, dev, func, 0xf4, value | (1 << 2)); |
857 | 0 | printk(XENLOG_INFO |
858 | 0 | "AMD-Vi: Applying erratum 746 workaround for IOMMU at %04x:%02x:%02x.%u\n", |
859 | 0 | iommu->seg, bus, dev, func); |
860 | 0 |
|
861 | 0 | /* Clear the enable writing bit */ |
862 | 0 | pci_conf_write32(iommu->seg, bus, dev, func, 0xf0, 0x90); |
863 | 0 | } |
864 | | |
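The workaround above goes through the northbridge's indirect register pair: write the register index to D0F2 config offset 0xf0 (with bit 8 set to allow writes), then access the selected register's value through offset 0xf4. The sketch below replays that select/read/write/deselect sequence against a fake config space; the config accessors are stubs, only the offsets and bit positions mirror the function above.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_cfg[64];            /* fake PCI config space, dword indexed */
static uint32_t indirect_regs[256];      /* fake NB indirect register file */

static void cfg_write32(unsigned int off, uint32_t val)
{
    fake_cfg[off / 4] = val;
    /* Writes to 0xf4 land in the selected register only if bit 8 of 0xf0 is set. */
    if ( off == 0xf4 && (fake_cfg[0xf0 / 4] & (1u << 8)) )
        indirect_regs[fake_cfg[0xf0 / 4] & 0xff] = val;
}

static uint32_t cfg_read32(unsigned int off)
{
    if ( off == 0xf4 )
        return indirect_regs[fake_cfg[0xf0 / 4] & 0xff];
    return fake_cfg[off / 4];
}

int main(void)
{
    uint32_t value;

    cfg_write32(0xf0, 0x90);                    /* select indirect register 0x90 */
    value = cfg_read32(0xf4);
    if ( !(value & (1u << 2)) )                 /* CKGateL2BMiscDisable clear? */
    {
        cfg_write32(0xf0, 0x90 | (1u << 8));    /* re-select with write enable */
        cfg_write32(0xf4, value | (1u << 2));   /* set the disable bit */
        cfg_write32(0xf0, 0x90);                /* drop the write-enable bit */
    }
    printf("indirect register 0x90 = %#x\n", cfg_read32(0xf4));
    return 0;
}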
865 | | static void enable_iommu(struct amd_iommu *iommu) |
866 | 0 | { |
867 | 0 | unsigned long flags; |
868 | 0 | struct irq_desc *desc; |
869 | 0 |
|
870 | 0 | spin_lock_irqsave(&iommu->lock, flags); |
871 | 0 |
|
872 | 0 | if ( iommu->enabled ) |
873 | 0 | { |
874 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
875 | 0 | return; |
876 | 0 | } |
877 | 0 |
|
878 | 0 | amd_iommu_erratum_746_workaround(iommu); |
879 | 0 |
|
880 | 0 | register_iommu_dev_table_in_mmio_space(iommu); |
881 | 0 | register_iommu_cmd_buffer_in_mmio_space(iommu); |
882 | 0 | register_iommu_event_log_in_mmio_space(iommu); |
883 | 0 | register_iommu_exclusion_range(iommu); |
884 | 0 |
|
885 | 0 | if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_PPRSUP_SHIFT) ) |
886 | 0 | register_iommu_ppr_log_in_mmio_space(iommu); |
887 | 0 |
|
888 | 0 | desc = irq_to_desc(iommu->msi.irq); |
889 | 0 | spin_lock(&desc->lock); |
890 | 0 | set_msi_affinity(desc, &cpu_online_map); |
891 | 0 | spin_unlock(&desc->lock); |
892 | 0 |
|
893 | 0 | amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED); |
894 | 0 |
|
895 | 0 | set_iommu_ht_flags(iommu); |
896 | 0 | set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED); |
897 | 0 | set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED); |
898 | 0 |
|
899 | 0 | if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_PPRSUP_SHIFT) ) |
900 | 0 | set_iommu_ppr_log_control(iommu, IOMMU_CONTROL_ENABLED); |
901 | 0 |
|
902 | 0 | if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_GTSUP_SHIFT) ) |
903 | 0 | set_iommu_guest_translation_control(iommu, IOMMU_CONTROL_ENABLED); |
904 | 0 |
|
905 | 0 | set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED); |
906 | 0 |
|
907 | 0 | if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_IASUP_SHIFT) ) |
908 | 0 | amd_iommu_flush_all_caches(iommu); |
909 | 0 |
|
910 | 0 | iommu->enabled = 1; |
911 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
912 | 0 |
|
913 | 0 | } |
914 | | |
915 | | static void __init deallocate_buffer(void *buf, uint32_t sz) |
916 | 0 | { |
917 | 0 | int order = 0; |
918 | 0 | if ( buf ) |
919 | 0 | { |
920 | 0 | order = get_order_from_bytes(sz); |
921 | 0 | __free_amd_iommu_tables(buf, order); |
922 | 0 | } |
923 | 0 | } |
924 | | |
925 | | static void __init deallocate_device_table(struct table_struct *table) |
926 | 0 | { |
927 | 0 | deallocate_buffer(table->buffer, table->alloc_size); |
928 | 0 | table->buffer = NULL; |
929 | 0 | } |
930 | | |
931 | | static void __init deallocate_ring_buffer(struct ring_buffer *ring_buf) |
932 | 0 | { |
933 | 0 | deallocate_buffer(ring_buf->buffer, ring_buf->alloc_size); |
934 | 0 | ring_buf->buffer = NULL; |
935 | 0 | ring_buf->head = 0; |
936 | 0 | ring_buf->tail = 0; |
937 | 0 | } |
938 | | |
939 | | static void * __init allocate_buffer(uint32_t alloc_size, const char *name) |
940 | 0 | { |
941 | 0 | void * buffer; |
942 | 0 | int order = get_order_from_bytes(alloc_size); |
943 | 0 |
|
944 | 0 | buffer = __alloc_amd_iommu_tables(order); |
945 | 0 |
|
946 | 0 | if ( buffer == NULL ) |
947 | 0 | { |
948 | 0 | AMD_IOMMU_DEBUG("Error allocating %s\n", name); |
949 | 0 | return NULL; |
950 | 0 | } |
951 | 0 |
|
952 | 0 | memset(buffer, 0, PAGE_SIZE * (1UL << order)); |
953 | 0 | return buffer; |
954 | 0 | } |
955 | | |
956 | | static void * __init allocate_ring_buffer(struct ring_buffer *ring_buf, |
957 | | uint32_t entry_size, |
958 | | uint64_t entries, const char *name) |
959 | 0 | { |
960 | 0 | ring_buf->head = 0; |
961 | 0 | ring_buf->tail = 0; |
962 | 0 |
|
963 | 0 | spin_lock_init(&ring_buf->lock); |
964 | 0 | |
965 | 0 | ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes(entries * |
966 | 0 | entry_size); |
967 | 0 | ring_buf->entries = ring_buf->alloc_size / entry_size; |
968 | 0 | ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name); |
969 | 0 | return ring_buf->buffer; |
970 | 0 | } |
971 | | |
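allocate_ring_buffer() rounds the requested size up to a power-of-2 number of pages and then recomputes the usable entry count from the actual allocation, so a ring can end up holding more entries than were asked for. A short sketch of that rounding with a hypothetical request of 768 entries of 16 bytes:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int order_from_bytes(uint64_t bytes)
{
    unsigned int order = 0;

    while ( ((uint64_t)PAGE_SIZE << order) < bytes )
        order++;
    return order;
}

int main(void)
{
    uint64_t requested = 768, entry_size = 16;      /* hypothetical request */
    uint64_t alloc_size = (uint64_t)PAGE_SIZE <<
                          order_from_bytes(requested * entry_size);
    uint64_t entries = alloc_size / entry_size;

    /* 12 KiB rounds up to an order-2 (16 KiB) allocation: 1024 usable entries. */
    printf("alloc_size = %llu bytes, entries = %llu\n",
           (unsigned long long)alloc_size, (unsigned long long)entries);
    return 0;
}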
972 | | static void * __init allocate_cmd_buffer(struct amd_iommu *iommu) |
973 | 0 | { |
974 | 0 | /* allocate 'command buffer' in power of 2 increments of 4K */ |
975 | 0 | return allocate_ring_buffer(&iommu->cmd_buffer, sizeof(cmd_entry_t), |
976 | 0 | IOMMU_CMD_BUFFER_DEFAULT_ENTRIES, |
977 | 0 | "Command Buffer"); |
978 | 0 | } |
979 | | |
980 | | static void * __init allocate_event_log(struct amd_iommu *iommu) |
981 | 0 | { |
982 | 0 | /* allocate 'event log' in power of 2 increments of 4K */ |
983 | 0 | return allocate_ring_buffer(&iommu->event_log, sizeof(event_entry_t), |
984 | 0 | IOMMU_EVENT_LOG_DEFAULT_ENTRIES, "Event Log"); |
985 | 0 | } |
986 | | |
987 | | static void * __init allocate_ppr_log(struct amd_iommu *iommu) |
988 | 0 | { |
989 | 0 | /* allocate 'ppr log' in power of 2 increments of 4K */ |
990 | 0 | return allocate_ring_buffer(&iommu->ppr_log, sizeof(ppr_entry_t), |
991 | 0 | IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log"); |
992 | 0 | } |
993 | | |
994 | | static int __init amd_iommu_init_one(struct amd_iommu *iommu) |
995 | 0 | { |
996 | 0 | if ( map_iommu_mmio_region(iommu) != 0 ) |
997 | 0 | goto error_out; |
998 | 0 |
|
999 | 0 | get_iommu_features(iommu); |
1000 | 0 |
|
1001 | 0 | if ( iommu->features ) |
1002 | 0 | iommuv2_enabled = 1; |
1003 | 0 |
|
1004 | 0 | if ( allocate_cmd_buffer(iommu) == NULL ) |
1005 | 0 | goto error_out; |
1006 | 0 |
|
1007 | 0 | if ( allocate_event_log(iommu) == NULL ) |
1008 | 0 | goto error_out; |
1009 | 0 |
|
1010 | 0 | if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_PPRSUP_SHIFT) ) |
1011 | 0 | if ( allocate_ppr_log(iommu) == NULL ) |
1012 | 0 | goto error_out; |
1013 | 0 |
|
1014 | 0 | if ( !set_iommu_interrupt_handler(iommu) ) |
1015 | 0 | goto error_out; |
1016 | 0 |
|
1017 | 0 | /* Make sure that device_table.buffer has been successfully allocated */ |
1018 | 0 | if ( device_table.buffer == NULL ) |
1019 | 0 | goto error_out; |
1020 | 0 |
|
1021 | 0 | iommu->dev_table.alloc_size = device_table.alloc_size; |
1022 | 0 | iommu->dev_table.entries = device_table.entries; |
1023 | 0 | iommu->dev_table.buffer = device_table.buffer; |
1024 | 0 |
|
1025 | 0 | enable_iommu(iommu); |
1026 | 0 | printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus ); |
1027 | 0 | nr_amd_iommus++; |
1028 | 0 |
|
1029 | 0 | softirq_tasklet_init(&amd_iommu_irq_tasklet, do_amd_iommu_irq, 0); |
1030 | 0 |
|
1031 | 0 | return 0; |
1032 | 0 |
|
1033 | 0 | error_out: |
1034 | 0 | return -ENODEV; |
1035 | 0 | } |
1036 | | |
1037 | | static void __init amd_iommu_init_cleanup(void) |
1038 | 0 | { |
1039 | 0 | struct amd_iommu *iommu, *next; |
1040 | 0 |
|
1041 | 0 | /* free amd iommu list */ |
1042 | 0 | list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list ) |
1043 | 0 | { |
1044 | 0 | list_del(&iommu->list); |
1045 | 0 | if ( iommu->enabled ) |
1046 | 0 | { |
1047 | 0 | deallocate_ring_buffer(&iommu->cmd_buffer); |
1048 | 0 | deallocate_ring_buffer(&iommu->event_log); |
1049 | 0 | deallocate_ring_buffer(&iommu->ppr_log); |
1050 | 0 | unmap_iommu_mmio_region(iommu); |
1051 | 0 | } |
1052 | 0 | xfree(iommu); |
1053 | 0 | } |
1054 | 0 |
|
1055 | 0 | /* free interrupt remapping table */ |
1056 | 0 | iterate_ivrs_entries(amd_iommu_free_intremap_table); |
1057 | 0 |
|
1058 | 0 | /* free device table */ |
1059 | 0 | deallocate_device_table(&device_table); |
1060 | 0 |
|
1061 | 0 | /* free ivrs_mappings[] */ |
1062 | 0 | radix_tree_destroy(&ivrs_maps, xfree); |
1063 | 0 |
|
1064 | 0 | iommu_enabled = 0; |
1065 | 0 | iommu_passthrough = 0; |
1066 | 0 | iommu_intremap = 0; |
1067 | 0 | iommuv2_enabled = 0; |
1068 | 0 | } |
1069 | | |
1070 | | /* |
1071 | | * We allocate an extra array element to store the segment number |
1072 | | * (and in the future perhaps other global information). |
1073 | | */ |
1074 | 0 | #define IVRS_MAPPINGS_SEG(m) m[ivrs_bdf_entries].dte_requestor_id |
1075 | | |
1076 | | struct ivrs_mappings *get_ivrs_mappings(u16 seg) |
1077 | 0 | { |
1078 | 0 | return radix_tree_lookup(&ivrs_maps, seg); |
1079 | 0 | } |
1080 | | |
1081 | | int iterate_ivrs_mappings(int (*handler)(u16 seg, struct ivrs_mappings *)) |
1082 | 0 | { |
1083 | 0 | u16 seg = 0; |
1084 | 0 | int rc = 0; |
1085 | 0 |
|
1086 | 0 | do { |
1087 | 0 | struct ivrs_mappings *map; |
1088 | 0 |
|
1089 | 0 | if ( !radix_tree_gang_lookup(&ivrs_maps, (void **)&map, seg, 1) ) |
1090 | 0 | break; |
1091 | 0 | seg = IVRS_MAPPINGS_SEG(map); |
1092 | 0 | rc = handler(seg, map); |
1093 | 0 | } while ( !rc && ++seg ); |
1094 | 0 |
|
1095 | 0 | return rc; |
1096 | 0 | } |
1097 | | |
1098 | | int iterate_ivrs_entries(int (*handler)(u16 seg, struct ivrs_mappings *)) |
1099 | 0 | { |
1100 | 0 | u16 seg = 0; |
1101 | 0 | int rc = 0; |
1102 | 0 |
|
1103 | 0 | do { |
1104 | 0 | struct ivrs_mappings *map; |
1105 | 0 | unsigned int bdf; |
1106 | 0 |
|
1107 | 0 | if ( !radix_tree_gang_lookup(&ivrs_maps, (void **)&map, seg, 1) ) |
1108 | 0 | break; |
1109 | 0 | seg = IVRS_MAPPINGS_SEG(map); |
1110 | 0 | for ( bdf = 0; !rc && bdf < ivrs_bdf_entries; ++bdf ) |
1111 | 0 | rc = handler(seg, map + bdf); |
1112 | 0 | } while ( !rc && ++seg ); |
1113 | 0 |
|
1114 | 0 | return rc; |
1115 | 0 | } |
1116 | | |
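Both iterators above walk every PCI segment by asking the radix tree for the first mapping whose key is >= the current segment, recovering the real segment number from the extra array slot (IVRS_MAPPINGS_SEG), and resuming from the next key. The sketch below shows the same "find first key >= seg" contract over a sorted array; the lookup helper is an illustration, not Xen's radix_tree_gang_lookup().

#include <stdio.h>

struct seg_map { unsigned int seg; const char *name; };

/* Return the first element whose key is >= 'first', or NULL if none exists. */
static const struct seg_map *lookup_ge(const struct seg_map *maps,
                                       unsigned int n, unsigned int first)
{
    unsigned int i;

    for ( i = 0; i < n; i++ )
        if ( maps[i].seg >= first )
            return &maps[i];
    return NULL;
}

int main(void)
{
    const struct seg_map maps[] = { { 0, "seg 0000" }, { 2, "seg 0002" } };
    unsigned int seg = 0;

    do {
        const struct seg_map *m = lookup_ge(maps, 2, seg);

        if ( !m )
            break;
        seg = m->seg;                /* resume from the key actually found */
        printf("handler(%04x, %s)\n", seg, m->name);
    } while ( ++seg );

    return 0;
}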
1117 | | static int __init alloc_ivrs_mappings(u16 seg) |
1118 | 0 | { |
1119 | 0 | struct ivrs_mappings *ivrs_mappings; |
1120 | 0 | unsigned int bdf; |
1121 | 0 |
|
1122 | 0 | BUG_ON( !ivrs_bdf_entries ); |
1123 | 0 |
|
1124 | 0 | if ( get_ivrs_mappings(seg) ) |
1125 | 0 | return 0; |
1126 | 0 |
|
1127 | 0 | ivrs_mappings = xzalloc_array(struct ivrs_mappings, ivrs_bdf_entries + 1); |
1128 | 0 | if ( ivrs_mappings == NULL ) |
1129 | 0 | { |
1130 | 0 | AMD_IOMMU_DEBUG("Error allocating IVRS Mappings table\n"); |
1131 | 0 | return -ENOMEM; |
1132 | 0 | } |
1133 | 0 | IVRS_MAPPINGS_SEG(ivrs_mappings) = seg; |
1134 | 0 |
|
1135 | 0 | /* assign default values for device entries */ |
1136 | 0 | for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ ) |
1137 | 0 | { |
1138 | 0 | ivrs_mappings[bdf].dte_requestor_id = bdf; |
1139 | 0 | ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_DISABLED; |
1140 | 0 | ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_DISABLED; |
1141 | 0 | ivrs_mappings[bdf].iommu = NULL; |
1142 | 0 |
|
1143 | 0 | ivrs_mappings[bdf].intremap_table = NULL; |
1144 | 0 | ivrs_mappings[bdf].device_flags = 0; |
1145 | 0 |
|
1146 | 0 | if ( amd_iommu_perdev_intremap ) |
1147 | 0 | spin_lock_init(&ivrs_mappings[bdf].intremap_lock); |
1148 | 0 | } |
1149 | 0 |
|
1150 | 0 | radix_tree_insert(&ivrs_maps, seg, ivrs_mappings); |
1151 | 0 |
|
1152 | 0 | return 0; |
1153 | 0 | } |
1154 | | |
1155 | | static int __init amd_iommu_setup_device_table( |
1156 | | u16 seg, struct ivrs_mappings *ivrs_mappings) |
1157 | 0 | { |
1158 | 0 | unsigned int bdf; |
1159 | 0 | void *intr_tb, *dte; |
1160 | 0 |
|
1161 | 0 | BUG_ON( (ivrs_bdf_entries == 0) ); |
1162 | 0 |
|
1163 | 0 | /* allocate 'device table' on a 4K boundary */ |
1164 | 0 | device_table.alloc_size = PAGE_SIZE << |
1165 | 0 | get_order_from_bytes( |
1166 | 0 | PAGE_ALIGN(ivrs_bdf_entries * |
1167 | 0 | IOMMU_DEV_TABLE_ENTRY_SIZE)); |
1168 | 0 | device_table.entries = device_table.alloc_size / |
1169 | 0 | IOMMU_DEV_TABLE_ENTRY_SIZE; |
1170 | 0 |
|
1171 | 0 | device_table.buffer = allocate_buffer(device_table.alloc_size, |
1172 | 0 | "Device Table"); |
1173 | 0 | if ( device_table.buffer == NULL ) |
1174 | 0 | return -ENOMEM; |
1175 | 0 |
|
1176 | 0 | /* Add device table entries */ |
1177 | 0 | for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ ) |
1178 | 0 | { |
1179 | 0 | intr_tb = ivrs_mappings[bdf].intremap_table; |
1180 | 0 |
|
1181 | 0 | if ( intr_tb ) |
1182 | 0 | { |
1183 | 0 | /* add device table entry */ |
1184 | 0 | dte = device_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE); |
1185 | 0 | iommu_dte_add_device_entry(dte, &ivrs_mappings[bdf]); |
1186 | 0 |
|
1187 | 0 | amd_iommu_set_intremap_table( |
1188 | 0 | dte, (u64)virt_to_maddr(intr_tb), iommu_intremap); |
1189 | 0 | } |
1190 | 0 | } |
1191 | 0 |
|
1192 | 0 | return 0; |
1193 | 0 | } |
1194 | | |
1195 | | /* Check whether SP5100 SATA Combined mode is on */ |
1196 | | static bool_t __init amd_sp5100_erratum28(void) |
1197 | 0 | { |
1198 | 0 | u32 bus, id; |
1199 | 0 | u16 vendor_id, dev_id; |
1200 | 0 | u8 byte; |
1201 | 0 |
|
1202 | 0 | for (bus = 0; bus < 256; bus++) |
1203 | 0 | { |
1204 | 0 | id = pci_conf_read32(0, bus, 0x14, 0, PCI_VENDOR_ID); |
1205 | 0 |
|
1206 | 0 | vendor_id = id & 0xffff; |
1207 | 0 | dev_id = (id >> 16) & 0xffff; |
1208 | 0 |
|
1209 | 0 | /* SP5100 SMBus module sets Combined mode on */ |
1210 | 0 | if (vendor_id != 0x1002 || dev_id != 0x4385) |
1211 | 0 | continue; |
1212 | 0 |
|
1213 | 0 | byte = pci_conf_read8(0, bus, 0x14, 0, 0xad); |
1214 | 0 | if ( (byte >> 3) & 1 ) |
1215 | 0 | { |
1216 | 0 | printk(XENLOG_WARNING "AMD-Vi: SP5100 erratum 28 detected, disabling IOMMU.\n" |
1217 | 0 | "If possible, disable SATA Combined mode in BIOS or contact your vendor for BIOS update.\n"); |
1218 | 0 | return 1; |
1219 | 0 | } |
1220 | 0 | } |
1221 | 0 |
|
1222 | 0 | return 0; |
1223 | 0 | } |
1224 | | |
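The probe above reads the 32-bit ID dword at config offset 0 (vendor in the low 16 bits, device in the high 16 bits) looking for the SP5100 SMBus function (vendor 0x1002, device 0x4385), then tests bit 3 of config byte 0xad for SATA Combined mode. A tiny sketch of just that decoding; the register values below are made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t id = 0x43851002;    /* device 0x4385 in the high half, vendor 0x1002 low */
    uint8_t byte = 0x08;         /* pretend value of config byte 0xad */
    uint16_t vendor_id = id & 0xffff;
    uint16_t dev_id = (id >> 16) & 0xffff;

    if ( vendor_id == 0x1002 && dev_id == 0x4385 && ((byte >> 3) & 1) )
        printf("SP5100 SATA Combined mode is on: erratum 28 applies\n");
    else
        printf("erratum 28 does not apply\n");
    return 0;
}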
1225 | | int __init amd_iommu_init(void) |
1226 | 0 | { |
1227 | 0 | struct amd_iommu *iommu; |
1228 | 0 | int rc = -ENODEV; |
1229 | 0 |
|
1230 | 0 | BUG_ON( !iommu_found() ); |
1231 | 0 |
|
1232 | 0 | if ( iommu_intremap && amd_iommu_perdev_intremap && |
1233 | 0 | amd_sp5100_erratum28() ) |
1234 | 0 | goto error_out; |
1235 | 0 |
|
1236 | 0 | /* Assume no IOMMU if ACPI indicates no MSI support. */ |
1237 | 0 | if ( unlikely(acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) ) |
1238 | 0 | goto error_out; |
1239 | 0 |
|
1240 | 0 | rc = amd_iommu_get_supported_ivhd_type(); |
1241 | 0 | if ( rc < 0 ) |
1242 | 0 | goto error_out; |
1243 | 0 | ivhd_type = rc; |
1244 | 0 |
|
1245 | 0 | rc = amd_iommu_get_ivrs_dev_entries(); |
1246 | 0 | if ( !rc ) |
1247 | 0 | rc = -ENODEV; |
1248 | 0 | if ( rc < 0 ) |
1249 | 0 | goto error_out; |
1250 | 0 | ivrs_bdf_entries = rc; |
1251 | 0 |
|
1252 | 0 | radix_tree_init(&ivrs_maps); |
1253 | 0 | for_each_amd_iommu ( iommu ) |
1254 | 0 | { |
1255 | 0 | rc = alloc_ivrs_mappings(iommu->seg); |
1256 | 0 | if ( rc ) |
1257 | 0 | goto error_out; |
1258 | 0 | } |
1259 | 0 |
|
1260 | 0 | rc = amd_iommu_update_ivrs_mapping_acpi(); |
1261 | 0 | if ( rc ) |
1262 | 0 | goto error_out; |
1263 | 0 |
|
1264 | 0 | /* initialize io-apic interrupt remapping entries */ |
1265 | 0 | if ( iommu_intremap ) |
1266 | 0 | rc = amd_iommu_setup_ioapic_remapping(); |
1267 | 0 | if ( rc ) |
1268 | 0 | goto error_out; |
1269 | 0 |
|
1270 | 0 | /* allocate and initialize a global device table shared by all iommus */ |
1271 | 0 | rc = iterate_ivrs_mappings(amd_iommu_setup_device_table); |
1272 | 0 | if ( rc ) |
1273 | 0 | goto error_out; |
1274 | 0 |
|
1275 | 0 | /* |
1276 | 0 | * Disable sharing HAP page tables with AMD IOMMU, |
1277 | 0 | * since it only supports p2m_ram_rw, and this would |
1278 | 0 | * prevent doing IO to/from mapped grant frames. |
1279 | 0 | */ |
1280 | 0 | iommu_hap_pt_share = 0; |
1281 | 0 | printk(XENLOG_DEBUG "AMD-Vi: Disabled HAP memory map sharing with IOMMU\n"); |
1282 | 0 |
|
1283 | 0 | /* per iommu initialization */ |
1284 | 0 | for_each_amd_iommu ( iommu ) |
1285 | 0 | { |
1286 | 0 | rc = amd_iommu_init_one(iommu); |
1287 | 0 | if ( rc ) |
1288 | 0 | goto error_out; |
1289 | 0 | } |
1290 | 0 |
|
1291 | 0 | return 0; |
1292 | 0 |
|
1293 | 0 | error_out: |
1294 | 0 | amd_iommu_init_cleanup(); |
1295 | 0 | return rc; |
1296 | 0 | } |
1297 | | |
1298 | | static void disable_iommu(struct amd_iommu *iommu) |
1299 | 0 | { |
1300 | 0 | unsigned long flags; |
1301 | 0 |
|
1302 | 0 | spin_lock_irqsave(&iommu->lock, flags); |
1303 | 0 |
|
1304 | 0 | if ( !iommu->enabled ) |
1305 | 0 | { |
1306 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
1307 | 0 | return; |
1308 | 0 | } |
1309 | 0 |
|
1310 | 0 | amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED); |
1311 | 0 | set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_DISABLED); |
1312 | 0 | set_iommu_event_log_control(iommu, IOMMU_CONTROL_DISABLED); |
1313 | 0 |
|
1314 | 0 | if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_PPRSUP_SHIFT) ) |
1315 | 0 | set_iommu_ppr_log_control(iommu, IOMMU_CONTROL_DISABLED); |
1316 | 0 |
|
1317 | 0 | if ( amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_GTSUP_SHIFT) ) |
1318 | 0 | set_iommu_guest_translation_control(iommu, IOMMU_CONTROL_DISABLED); |
1319 | 0 |
|
1320 | 0 | set_iommu_translation_control(iommu, IOMMU_CONTROL_DISABLED); |
1321 | 0 |
|
1322 | 0 | iommu->enabled = 0; |
1323 | 0 |
|
1324 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
1325 | 0 |
|
1326 | 0 | } |
1327 | | |
1328 | | static void invalidate_all_domain_pages(void) |
1329 | 0 | { |
1330 | 0 | struct domain *d; |
1331 | 0 | for_each_domain( d ) |
1332 | 0 | amd_iommu_flush_all_pages(d); |
1333 | 0 | } |
1334 | | |
1335 | | static int _invalidate_all_devices( |
1336 | | u16 seg, struct ivrs_mappings *ivrs_mappings) |
1337 | 0 | { |
1338 | 0 | unsigned int bdf; |
1339 | 0 | u16 req_id; |
1340 | 0 | unsigned long flags; |
1341 | 0 | struct amd_iommu *iommu; |
1342 | 0 |
|
1343 | 0 | for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ ) |
1344 | 0 | { |
1345 | 0 | iommu = find_iommu_for_device(seg, bdf); |
1346 | 0 | req_id = ivrs_mappings[bdf].dte_requestor_id; |
1347 | 0 | if ( iommu ) |
1348 | 0 | { |
1349 | 0 | spin_lock_irqsave(&iommu->lock, flags); |
1350 | 0 | amd_iommu_flush_device(iommu, req_id); |
1351 | 0 | amd_iommu_flush_intremap(iommu, req_id); |
1352 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
1353 | 0 | } |
1354 | 0 | } |
1355 | 0 |
|
1356 | 0 | return 0; |
1357 | 0 | } |
1358 | | |
1359 | | static void invalidate_all_devices(void) |
1360 | 0 | { |
1361 | 0 | iterate_ivrs_mappings(_invalidate_all_devices); |
1362 | 0 | } |
1363 | | |
1364 | | int amd_iommu_suspend(void) |
1365 | 0 | { |
1366 | 0 | amd_iommu_crash_shutdown(); |
1367 | 0 |
|
1368 | 0 | return 0; |
1369 | 0 | } |
1370 | | |
1371 | | void amd_iommu_crash_shutdown(void) |
1372 | 0 | { |
1373 | 0 | struct amd_iommu *iommu; |
1374 | 0 |
|
1375 | 0 | for_each_amd_iommu ( iommu ) |
1376 | 0 | disable_iommu(iommu); |
1377 | 0 | } |
1378 | | |
1379 | | void amd_iommu_resume(void) |
1380 | 0 | { |
1381 | 0 | struct amd_iommu *iommu; |
1382 | 0 |
|
1383 | 0 | for_each_amd_iommu ( iommu ) |
1384 | 0 | { |
1385 | 0 | /* |
1386 | 0 | * Disable the IOMMU first, to make sure it is in a known |
1387 | 0 | * clean state before re-enablement |
1388 | 0 | */ |
1389 | 0 | disable_iommu(iommu); |
1390 | 0 | enable_iommu(iommu); |
1391 | 0 | } |
1392 | 0 |
|
1393 | 0 | /* flush all cache entries after the IOMMUs have been re-enabled */ |
1394 | 0 | if ( !amd_iommu_has_feature(iommu, IOMMU_EXT_FEATURE_IASUP_SHIFT) ) |
1395 | 0 | { |
1396 | 0 | invalidate_all_devices(); |
1397 | 0 | invalidate_all_domain_pages(); |
1398 | 0 | } |
1399 | 0 | } |