/root/src/xen/xen/drivers/passthrough/amd/iommu_intr.c
/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Wei Wang <wei.wang2@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/err.h>
#include <xen/sched.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <asm/io_apic.h>
#include <xen/keyhandler.h>

#define INTREMAP_TABLE_ORDER 1
#define INTREMAP_LENGTH 0xB
#define INTREMAP_ENTRIES (1 << INTREMAP_LENGTH)

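/*
 * Editor's note (illustrative arithmetic, not in the original source): an
 * order-1 table allocation is 2 pages = 8192 bytes on x86; at sizeof(u32)
 * = 4 bytes per IRTE that is 8192 / 4 = 2048 = 1 << 0xB entries, so
 * INTREMAP_TABLE_ORDER and INTREMAP_LENGTH agree.
 */
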
struct ioapic_sbdf ioapic_sbdf[MAX_IO_APICS];
struct hpet_sbdf hpet_sbdf;
void *shared_intremap_table;
unsigned long *shared_intremap_inuse;
static DEFINE_SPINLOCK(shared_intremap_lock);
unsigned int nr_ioapic_sbdf;

static void dump_intremap_tables(unsigned char key);

unsigned int ioapic_id_to_index(unsigned int apic_id)
{
    unsigned int idx;

    for ( idx = 0; idx < nr_ioapic_sbdf; idx++ )
        if ( ioapic_sbdf[idx].id == apic_id )
            break;

    if ( idx == nr_ioapic_sbdf )
        return MAX_IO_APICS;

    return idx;
}

unsigned int __init get_next_ioapic_sbdf_index(void)
{
    if ( nr_ioapic_sbdf < MAX_IO_APICS )
        return nr_ioapic_sbdf++;

    return MAX_IO_APICS;
}

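/*
 * With per-device tables (amd_iommu_perdev_intremap, the default) each
 * requestor ID has its own remapping table and lock; otherwise all devices
 * share shared_intremap_table and serialize on shared_intremap_lock.
 */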
static spinlock_t *get_intremap_lock(int seg, int req_id)
{
    return (amd_iommu_perdev_intremap ?
            &get_ivrs_mappings(seg)[req_id].intremap_lock :
            &shared_intremap_lock);
}

static int get_intremap_requestor_id(int seg, int bdf)
{
    ASSERT( bdf < ivrs_bdf_entries );
    return get_ivrs_mappings(seg)[bdf].dte_requestor_id;
}

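/*
 * Allocate nr (a power of two) contiguous IRTEs, aligned to nr.  Worked
 * example (illustrative, not from the original source): with entries 0-2
 * in use and nr = 4, the first zero bit is slot 3; rounding up with
 * (slot + nr - 1) & ~(nr - 1) yields slot 4, entries 4-7 are free, so
 * bits 4-7 get set and 4 is returned.  On exhaustion the returned slot is
 * >= INTREMAP_ENTRIES, which callers treat as failure.
 */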
static unsigned int alloc_intremap_entry(int seg, int bdf, unsigned int nr)
{
    unsigned long *inuse = get_ivrs_mappings(seg)[bdf].intremap_inuse;
    unsigned int slot = find_first_zero_bit(inuse, INTREMAP_ENTRIES);

    for ( ; ; )
    {
        unsigned int end;

        if ( slot >= INTREMAP_ENTRIES )
            break;
        end = find_next_bit(inuse, INTREMAP_ENTRIES, slot + 1);
        if ( end > INTREMAP_ENTRIES )
            end = INTREMAP_ENTRIES;
        slot = (slot + nr - 1) & ~(nr - 1);
        if ( slot + nr <= end )
        {
            while ( nr-- )
                __set_bit(slot + nr, inuse);
            break;
        }
        slot = (end + nr) & ~(nr - 1);
        if ( slot >= INTREMAP_ENTRIES )
            break;
        slot = find_next_zero_bit(inuse, INTREMAP_ENTRIES, slot);
    }

    return slot;
}

static u32 *get_intremap_entry(int seg, int bdf, int offset)
{
    u32 *table = get_ivrs_mappings(seg)[bdf].intremap_table;

    ASSERT( (table != NULL) && (offset < INTREMAP_ENTRIES) );

    return table + offset;
}

static void free_intremap_entry(int seg, int bdf, int offset)
{
    u32 *entry = get_intremap_entry(seg, bdf, offset);

    memset(entry, 0, sizeof(u32));
    __clear_bit(offset, get_ivrs_mappings(seg)[bdf].intremap_inuse);
}

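/*
 * Program one 32-bit IRTE.  Reading the field names from the masks used
 * below: RemapEn (enable), SupIOPF (suppress IO page faults, left
 * disabled), IntType (delivery mode), RqEoi (left disabled), DM
 * (destination mode), Destination and Vector; INT_REMAP_ENTRY_DEST_MAST
 * is the headers' spelling.  The enable bit is written into a zeroed
 * value first, so the entry is rebuilt from scratch on every update.
 */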
static void update_intremap_entry(u32 *entry, u8 vector, u8 int_type,
                                  u8 dest_mode, u8 dest)
{
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0,
                         INT_REMAP_ENTRY_REMAPEN_MASK,
                         INT_REMAP_ENTRY_REMAPEN_SHIFT, entry);
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *entry,
                         INT_REMAP_ENTRY_SUPIOPF_MASK,
                         INT_REMAP_ENTRY_SUPIOPF_SHIFT, entry);
    set_field_in_reg_u32(int_type, *entry,
                         INT_REMAP_ENTRY_INTTYPE_MASK,
                         INT_REMAP_ENTRY_INTTYPE_SHIFT, entry);
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, *entry,
                         INT_REMAP_ENTRY_REQEOI_MASK,
                         INT_REMAP_ENTRY_REQEOI_SHIFT, entry);
    set_field_in_reg_u32((u32)dest_mode, *entry,
                         INT_REMAP_ENTRY_DM_MASK,
                         INT_REMAP_ENTRY_DM_SHIFT, entry);
    set_field_in_reg_u32((u32)dest, *entry,
                         INT_REMAP_ENTRY_DEST_MAST,
                         INT_REMAP_ENTRY_DEST_SHIFT, entry);
    set_field_in_reg_u32((u32)vector, *entry,
                         INT_REMAP_ENTRY_VECTOR_MASK,
                         INT_REMAP_ENTRY_VECTOR_SHIFT, entry);
}

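/*
 * Once an RTE is remapped its low 32 bits are interpreted by the IOMMU
 * rather than delivered directly, so the 11-bit IRTE index is stashed in
 * the vector (bits 0-7) and delivery_mode (bits 8-10) fields and
 * recovered via these two helpers.
 */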
static inline int get_rte_index(const struct IO_APIC_route_entry *rte)
{
    return rte->vector | (rte->delivery_mode << 8);
}

static inline void set_rte_index(struct IO_APIC_route_entry *rte, int offset)
{
    rte->vector = (u8)offset;
    rte->delivery_mode = offset >> 8;
}

static int update_intremap_entry_from_ioapic(
    int bdf,
    struct amd_iommu *iommu,
    struct IO_APIC_route_entry *rte,
    bool_t lo_update,
    u16 *index)
{
    unsigned long flags;
    u32 *entry;
    u8 delivery_mode, dest, vector, dest_mode;
    int req_id;
    spinlock_t *lock;
    unsigned int offset;

    req_id = get_intremap_requestor_id(iommu->seg, bdf);
    lock = get_intremap_lock(iommu->seg, req_id);

    delivery_mode = rte->delivery_mode;
    vector = rte->vector;
    dest_mode = rte->dest_mode;
    dest = rte->dest.logical.logical_dest;

    spin_lock_irqsave(lock, flags);

    offset = *index;
    if ( offset >= INTREMAP_ENTRIES )
    {
        offset = alloc_intremap_entry(iommu->seg, req_id, 1);
        if ( offset >= INTREMAP_ENTRIES )
        {
            spin_unlock_irqrestore(lock, flags);
            rte->mask = 1;
            return -ENOSPC;
        }
        *index = offset;
        lo_update = 1;
    }

    entry = get_intremap_entry(iommu->seg, req_id, offset);
    if ( !lo_update )
    {
        /*
         * The low half of the incoming RTE is already in remapped format,
         * so we need to recover the vector and delivery mode from the IRTE.
         */
        ASSERT(get_rte_index(rte) == offset);
        vector = get_field_from_reg_u32(*entry,
                                        INT_REMAP_ENTRY_VECTOR_MASK,
                                        INT_REMAP_ENTRY_VECTOR_SHIFT);
        delivery_mode = get_field_from_reg_u32(*entry,
                                               INT_REMAP_ENTRY_INTTYPE_MASK,
                                               INT_REMAP_ENTRY_INTTYPE_SHIFT);
    }
    update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);

    spin_unlock_irqrestore(lock, flags);

    if ( iommu->enabled )
    {
        spin_lock_irqsave(&iommu->lock, flags);
        amd_iommu_flush_intremap(iommu, req_id);
        spin_unlock_irqrestore(&iommu->lock, flags);
    }

    set_rte_index(rte, offset);

    return 0;
}

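/*
 * Boot-time pass over the IO-APICs: every RTE that firmware left unmasked
 * gets an IRTE allocated and is rewritten to carry the remap index, so the
 * table is already consistent when the IOMMU starts translating interrupts.
 */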
int __init amd_iommu_setup_ioapic_remapping(void)
{
    struct IO_APIC_route_entry rte;
    unsigned long flags;
    u32 *entry;
    int apic, pin;
    u8 delivery_mode, dest, vector, dest_mode;
    u16 seg, bdf, req_id;
    struct amd_iommu *iommu;
    spinlock_t *lock;
    unsigned int offset;

    /* Read IO-APIC entries and update the remapping table accordingly. */
    for ( apic = 0; apic < nr_ioapics; apic++ )
    {
        for ( pin = 0; pin < nr_ioapic_entries[apic]; pin++ )
        {
            unsigned int idx;

            rte = __ioapic_read_entry(apic, pin, 1);
            if ( rte.mask == 1 )
                continue;

            /* Get the device ID of the IO-APIC. */
            idx = ioapic_id_to_index(IO_APIC_ID(apic));
            if ( idx == MAX_IO_APICS )
                return -EINVAL;

            bdf = ioapic_sbdf[idx].bdf;
            seg = ioapic_sbdf[idx].seg;
            iommu = find_iommu_for_device(seg, bdf);
            if ( !iommu )
            {
                AMD_IOMMU_DEBUG("Failed to find IOMMU for IO-APIC "
                                "device id = %04x:%04x\n", seg, bdf);
                continue;
            }

            req_id = get_intremap_requestor_id(iommu->seg, bdf);
            lock = get_intremap_lock(iommu->seg, req_id);

            delivery_mode = rte.delivery_mode;
            vector = rte.vector;
            dest_mode = rte.dest_mode;
            dest = rte.dest.logical.logical_dest;

            spin_lock_irqsave(lock, flags);
            offset = alloc_intremap_entry(seg, req_id, 1);
            BUG_ON(offset >= INTREMAP_ENTRIES);
            entry = get_intremap_entry(iommu->seg, req_id, offset);
            update_intremap_entry(entry, vector,
                                  delivery_mode, dest_mode, dest);
            spin_unlock_irqrestore(lock, flags);

            set_rte_index(&rte, offset);
            ioapic_sbdf[idx].pin_2_idx[pin] = offset;
            __ioapic_write_entry(apic, pin, 1, rte);

            if ( iommu->enabled )
            {
                spin_lock_irqsave(&iommu->lock, flags);
                amd_iommu_flush_intremap(iommu, req_id);
                spin_unlock_irqrestore(&iommu->lock, flags);
            }
        }
    }

    register_keyhandler('V', &dump_intremap_tables,
                        "dump IOMMU intremap tables", 0);

    return 0;
}

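/*
 * Intercept an IO-APIC RTE write.  The protocol below: mask the pin if it
 * was unmasked, update (or allocate) the IRTE, write the half the caller
 * asked for, then unmask again.  On -ENOSPC the pin stays masked.
 */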
void amd_iommu_ioapic_update_ire(
    unsigned int apic, unsigned int reg, unsigned int value)
{
    struct IO_APIC_route_entry old_rte = { 0 };
    struct IO_APIC_route_entry new_rte = { 0 };
    unsigned int rte_lo = (reg & 1) ? reg - 1 : reg;
    unsigned int pin = (reg - 0x10) / 2;
    int saved_mask, seg, bdf, rc;
    struct amd_iommu *iommu;
    unsigned int idx;

    if ( !iommu_intremap )
    {
        __io_apic_write(apic, reg, value);
        return;
    }

    idx = ioapic_id_to_index(IO_APIC_ID(apic));
    if ( idx == MAX_IO_APICS )
        return;

    /* Get the device ID of the IO-APIC. */
    bdf = ioapic_sbdf[idx].bdf;
    seg = ioapic_sbdf[idx].seg;
    iommu = find_iommu_for_device(seg, bdf);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("Failed to find IOMMU for IO-APIC device id ="
                        " %04x:%04x\n", seg, bdf);
        __io_apic_write(apic, reg, value);
        return;
    }

    /* Save the lower 32 bits of the IO-APIC RTE. */
    *((u32 *)&old_rte) = __io_apic_read(apic, rte_lo);
    saved_mask = old_rte.mask;

    if ( reg == rte_lo )
    {
        *((u32 *)&new_rte) = value;
        /* Read the upper 32 bits from the IO-APIC RTE. */
        *(((u32 *)&new_rte) + 1) = __io_apic_read(apic, reg + 1);
    }
    else
    {
        *((u32 *)&new_rte) = *((u32 *)&old_rte);
        *(((u32 *)&new_rte) + 1) = value;
    }

    if ( new_rte.mask &&
         ioapic_sbdf[idx].pin_2_idx[pin] >= INTREMAP_ENTRIES )
    {
        ASSERT(saved_mask);
        __io_apic_write(apic, reg, value);
        return;
    }

    /* Mask the interrupt while we change the intremap table. */
    if ( !saved_mask )
    {
        old_rte.mask = 1;
        __io_apic_write(apic, rte_lo, *((u32 *)&old_rte));
    }

    /* Update the interrupt remapping entry. */
    rc = update_intremap_entry_from_ioapic(
             bdf, iommu, &new_rte, reg == rte_lo,
             &ioapic_sbdf[idx].pin_2_idx[pin]);

    __io_apic_write(apic, reg, ((u32 *)&new_rte)[reg != rte_lo]);

    if ( rc )
    {
        /* Keep the entry masked. */
        printk(XENLOG_ERR "Remapping IO-APIC %#x pin %u failed (%d)\n",
               IO_APIC_ID(apic), pin, rc);
        return;
    }

    /* For lower-bits access, return directly to avoid a double write. */
    if ( reg == rte_lo )
        return;

    /* Unmask the interrupt after we have updated the intremap table. */
    if ( !saved_mask )
    {
        old_rte.mask = saved_mask;
        __io_apic_write(apic, rte_lo, *((u32 *)&old_rte));
    }
}

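/*
 * Intercept an IO-APIC RTE read: for the low half of a remapped pin, fold
 * the real vector and delivery mode from the IRTE back into the returned
 * value, so callers never see the raw remap index.
 */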
unsigned int amd_iommu_read_ioapic_from_ire(
    unsigned int apic, unsigned int reg)
{
    unsigned int idx;
    unsigned int offset;
    unsigned int val = __io_apic_read(apic, reg);
    unsigned int pin = (reg - 0x10) / 2;

    idx = ioapic_id_to_index(IO_APIC_ID(apic));
    if ( idx == MAX_IO_APICS )
        return -EINVAL;

    offset = ioapic_sbdf[idx].pin_2_idx[pin];

    if ( !(reg & 1) && offset < INTREMAP_ENTRIES )
    {
        u16 bdf = ioapic_sbdf[idx].bdf;
        u16 seg = ioapic_sbdf[idx].seg;
        u16 req_id = get_intremap_requestor_id(seg, bdf);
        const u32 *entry = get_intremap_entry(seg, req_id, offset);

        ASSERT(offset == (val & (INTREMAP_ENTRIES - 1)));
        val &= ~(INTREMAP_ENTRIES - 1);
        val |= get_field_from_reg_u32(*entry,
                                      INT_REMAP_ENTRY_INTTYPE_MASK,
                                      INT_REMAP_ENTRY_INTTYPE_SHIFT) << 8;
        val |= get_field_from_reg_u32(*entry,
                                      INT_REMAP_ENTRY_VECTOR_MASK,
                                      INT_REMAP_ENTRY_VECTOR_SHIFT);
    }

    return val;
}

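/*
 * Update (msg != NULL) or tear down (msg == NULL) the IRTEs backing an
 * MSI.  On success the low log2(INTREMAP_ENTRIES) bits of *data carry the
 * first IRTE index, which is what the device will actually signal.
 */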
static int update_intremap_entry_from_msi_msg(
    struct amd_iommu *iommu, u16 bdf, unsigned int nr,
    int *remap_index, const struct msi_msg *msg, u32 *data)
{
    unsigned long flags;
    u32 *entry;
    u16 req_id, alias_id;
    u8 delivery_mode, dest, vector, dest_mode;
    spinlock_t *lock;
    unsigned int offset, i;

    req_id = get_dma_requestor_id(iommu->seg, bdf);
    alias_id = get_intremap_requestor_id(iommu->seg, bdf);

    if ( msg == NULL )
    {
        lock = get_intremap_lock(iommu->seg, req_id);
        spin_lock_irqsave(lock, flags);
        for ( i = 0; i < nr; ++i )
            free_intremap_entry(iommu->seg, req_id, *remap_index + i);
        spin_unlock_irqrestore(lock, flags);
        goto done;
    }

    lock = get_intremap_lock(iommu->seg, req_id);

    spin_lock_irqsave(lock, flags);
    dest_mode = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
    delivery_mode = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
    vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK;
    dest = (msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff;
    offset = *remap_index;
    if ( offset >= INTREMAP_ENTRIES )
    {
        ASSERT(nr);
        offset = alloc_intremap_entry(iommu->seg, bdf, nr);
        if ( offset >= INTREMAP_ENTRIES )
        {
            spin_unlock_irqrestore(lock, flags);
            return -ENOSPC;
        }
        *remap_index = offset;
    }

    entry = get_intremap_entry(iommu->seg, req_id, offset);
    update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
    spin_unlock_irqrestore(lock, flags);

    *data = (msg->data & ~(INTREMAP_ENTRIES - 1)) | offset;

    /*
     * In some special cases, a PCIe device (e.g. a SATA controller in IDE
     * mode) will use its alias ID to index the interrupt remapping table.
     * We have to set up a secondary interrupt remapping entry to satisfy
     * such devices.
     */
    if ( ( req_id != alias_id ) &&
         get_ivrs_mappings(iommu->seg)[alias_id].intremap_table != NULL )
    {
        BUG_ON(get_ivrs_mappings(iommu->seg)[req_id].intremap_table !=
               get_ivrs_mappings(iommu->seg)[alias_id].intremap_table);
    }

 done:
    if ( iommu->enabled )
    {
        spin_lock_irqsave(&iommu->lock, flags);
        amd_iommu_flush_intremap(iommu, req_id);
        if ( alias_id != req_id )
            amd_iommu_flush_intremap(iommu, alias_id);
        spin_unlock_irqrestore(&iommu->lock, flags);
    }

    return 0;
}

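/*
 * Distinguish three cases for an MSI source: NULL when bdf is an IOMMU
 * itself (its own interrupts are never remapped), the owning IOMMU when
 * one exists, and ERR_PTR(-EINVAL) when no IOMMU covers the device.
 */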
static struct amd_iommu *_find_iommu_for_device(int seg, int bdf)
{
    struct amd_iommu *iommu;

    list_for_each_entry ( iommu, &amd_iommu_head, list )
        if ( iommu->seg == seg && iommu->bdf == bdf )
            return NULL;

    iommu = find_iommu_for_device(seg, bdf);
    if ( iommu )
        return iommu;

    AMD_IOMMU_DEBUG("No IOMMU for MSI dev = %04x:%02x:%02x.%u\n",
                    seg, PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf));
    return ERR_PTR(-EINVAL);
}

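/*
 * Multi-vector MSI uses nr consecutive IRTEs, and phantom functions of
 * the device are walked via pdev->phantom_stride so every alias BDF stays
 * in sync with the same remap index.
 */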
int amd_iommu_msi_msg_update_ire(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    struct pci_dev *pdev = msi_desc->dev;
    int bdf, seg, rc;
    struct amd_iommu *iommu;
    unsigned int i, nr = 1;
    u32 data;

    bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
    seg = pdev ? pdev->seg : hpet_sbdf.seg;

    iommu = _find_iommu_for_device(seg, bdf);
    if ( IS_ERR_OR_NULL(iommu) )
        return PTR_ERR(iommu);

    if ( msi_desc->msi_attrib.type == PCI_CAP_ID_MSI )
        nr = msi_desc->msi.nvec;

    if ( msi_desc->remap_index >= 0 && !msg )
    {
        do {
            update_intremap_entry_from_msi_msg(iommu, bdf, nr,
                                               &msi_desc->remap_index,
                                               NULL, NULL);
            if ( !pdev || !pdev->phantom_stride )
                break;
            bdf += pdev->phantom_stride;
        } while ( PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );

        for ( i = 0; i < nr; ++i )
            msi_desc[i].remap_index = -1;
        if ( pdev )
            bdf = PCI_BDF2(pdev->bus, pdev->devfn);
    }

    if ( !msg )
        return 0;

    do {
        rc = update_intremap_entry_from_msi_msg(iommu, bdf, nr,
                                                &msi_desc->remap_index,
                                                msg, &data);
        if ( rc || !pdev || !pdev->phantom_stride )
            break;
        bdf += pdev->phantom_stride;
    } while ( PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );

    if ( !rc )
    {
        for ( i = 1; i < nr; ++i )
            msi_desc[i].remap_index = msi_desc->remap_index + i;
        msg->data = data;
    }

    return rc;
}

void amd_iommu_read_msi_from_ire(
    struct msi_desc *msi_desc, struct msi_msg *msg)
{
    unsigned int offset = msg->data & (INTREMAP_ENTRIES - 1);
    const struct pci_dev *pdev = msi_desc->dev;
    u16 bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
    u16 seg = pdev ? pdev->seg : hpet_sbdf.seg;
    const u32 *entry;

    if ( IS_ERR_OR_NULL(_find_iommu_for_device(seg, bdf)) )
        return;

    entry = get_intremap_entry(seg, get_dma_requestor_id(seg, bdf), offset);

    if ( msi_desc->msi_attrib.type == PCI_CAP_ID_MSI )
    {
        int nr = msi_desc->msi_attrib.entry_nr;

        ASSERT(!(offset & (msi_desc[-nr].msi.nvec - 1)));
        offset |= nr;
    }

    msg->data &= ~(INTREMAP_ENTRIES - 1);
    msg->data |= get_field_from_reg_u32(*entry,
                                        INT_REMAP_ENTRY_INTTYPE_MASK,
                                        INT_REMAP_ENTRY_INTTYPE_SHIFT) << 8;
    msg->data |= get_field_from_reg_u32(*entry,
                                        INT_REMAP_ENTRY_VECTOR_MASK,
                                        INT_REMAP_ENTRY_VECTOR_SHIFT);
}

int __init amd_iommu_free_intremap_table(
    u16 seg, struct ivrs_mappings *ivrs_mapping)
{
    void *tb = ivrs_mapping->intremap_table;

    if ( tb )
    {
        __free_amd_iommu_tables(tb, INTREMAP_TABLE_ORDER);
        ivrs_mapping->intremap_table = NULL;
    }

    return 0;
}

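/*
 * Allocation failure below is treated as fatal (BUG_ON) rather than
 * reported: this runs only during boot-time IOMMU bring-up, where there
 * is no caller that could usefully recover.
 */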
void *__init amd_iommu_alloc_intremap_table(unsigned long **inuse_map)
{
    void *tb;

    tb = __alloc_amd_iommu_tables(INTREMAP_TABLE_ORDER);
    BUG_ON(tb == NULL);
    memset(tb, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
    *inuse_map = xzalloc_array(unsigned long, BITS_TO_LONGS(INTREMAP_ENTRIES));
    BUG_ON(*inuse_map == NULL);
    return tb;
}

int __init amd_setup_hpet_msi(struct msi_desc *msi_desc)
{
    spinlock_t *lock;
    unsigned long flags;
    int rc = 0;

    if ( hpet_sbdf.init == HPET_NONE )
    {
        AMD_IOMMU_DEBUG("Failed to set up HPET MSI remapping:"
                        " missing IVRS HPET info\n");
        return -ENODEV;
    }
    if ( msi_desc->hpet_id != hpet_sbdf.id )
    {
        AMD_IOMMU_DEBUG("Failed to set up HPET MSI remapping:"
                        " wrong HPET\n");
        return -ENODEV;
    }

    lock = get_intremap_lock(hpet_sbdf.seg, hpet_sbdf.bdf);
    spin_lock_irqsave(lock, flags);

    msi_desc->remap_index = alloc_intremap_entry(hpet_sbdf.seg,
                                                 hpet_sbdf.bdf, 1);
    if ( msi_desc->remap_index >= INTREMAP_ENTRIES )
    {
        msi_desc->remap_index = -1;
        rc = -ENXIO;
    }

    spin_unlock_irqrestore(lock, flags);

    return rc;
}

static void dump_intremap_table(const u32 *table)
{
    u32 count;

    if ( !table )
        return;

    for ( count = 0; count < INTREMAP_ENTRIES; count++ )
    {
        if ( !table[count] )
            continue;
        printk(" IRTE[%03x] %08x\n", count, table[count]);
    }
}

static int dump_intremap_mapping(u16 seg, struct ivrs_mappings *ivrs_mapping)
{
    unsigned long flags;

    if ( !ivrs_mapping )
        return 0;

    printk(" %04x:%02x:%02x:%u:\n", seg,
           PCI_BUS(ivrs_mapping->dte_requestor_id),
           PCI_SLOT(ivrs_mapping->dte_requestor_id),
           PCI_FUNC(ivrs_mapping->dte_requestor_id));

    spin_lock_irqsave(&(ivrs_mapping->intremap_lock), flags);
    dump_intremap_table(ivrs_mapping->intremap_table);
    spin_unlock_irqrestore(&(ivrs_mapping->intremap_lock), flags);

    return 0;
}

static void dump_intremap_tables(unsigned char key)
{
    unsigned long flags;

    printk("--- Dumping Per-dev IOMMU Interrupt Remapping Table ---\n");

    iterate_ivrs_entries(dump_intremap_mapping);

    printk("--- Dumping Shared IOMMU Interrupt Remapping Table ---\n");

    spin_lock_irqsave(&shared_intremap_lock, flags);
    dump_intremap_table(shared_intremap_table);
    spin_unlock_irqrestore(&shared_intremap_lock, flags);
}