debuggers.hg: xen/drivers/passthrough/amd/iommu_init.c @ 19939:0ab211e699e6

AMD IOMMU: Add suspend and resume support for amd iommu.

Signed-off-by: Wei Wang <wei.wang2@amd.com>

author    Keir Fraser <keir.fraser@citrix.com>
date      Mon Jul 06 11:57:18 2009 +0100
parents   7d5433600932
children  722c7e94e764
/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <asm/amd-iommu.h>
#include <asm/msi.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <asm-x86/fixmap.h>
static struct amd_iommu *vector_to_iommu[NR_VECTORS];
static int nr_amd_iommus;
static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
static long amd_iommu_event_log_entries = IOMMU_EVENT_LOG_DEFAULT_ENTRIES;

unsigned short ivrs_bdf_entries;
struct ivrs_mappings *ivrs_mappings;
struct list_head amd_iommu_head;
struct table_struct device_table;
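
/*
 * Map an IOMMU's MMIO register region into Xen's address space.  Each
 * detected IOMMU gets its own block of fixmap slots starting at
 * FIX_IOMMU_MMIO_BASE_0, spaced MMIO_PAGES_PER_IOMMU pages apart and mapped
 * uncacheable so that register accesses are not served from the cache.
 */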
static int __init map_iommu_mmio_region(struct amd_iommu *iommu)
{
    unsigned long mfn;

    if ( nr_amd_iommus > MAX_AMD_IOMMUS )
    {
        amd_iov_error("nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
        return -ENOMEM;
    }

    iommu->mmio_base = (void *)fix_to_virt(
        FIX_IOMMU_MMIO_BASE_0 + nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
    mfn = (unsigned long)(iommu->mmio_base_phys >> PAGE_SHIFT);
    map_pages_to_xen((unsigned long)iommu->mmio_base, mfn,
                     MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);

    memset(iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);

    return 0;
}

static void __init unmap_iommu_mmio_region(struct amd_iommu *iommu)
{
    if ( iommu->mmio_base )
    {
        iounmap(iommu->mmio_base);
        iommu->mmio_base = NULL;
    }
}
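
/*
 * The register_iommu_*_in_mmio_space() helpers below publish the in-memory
 * data structures (device table, command buffer, event log) to the hardware:
 * each splits the table's machine address into low/high 32-bit halves and
 * writes them, together with an encoded table size, into the corresponding
 * base-address registers.
 */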
static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->dev_table.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_DEV_TABLE_BASE_LOW_MASK,
                         IOMMU_DEV_TABLE_BASE_LOW_SHIFT, &entry);
    set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1,
                         entry, IOMMU_DEV_TABLE_SIZE_MASK,
                         IOMMU_DEV_TABLE_SIZE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET);

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_DEV_TABLE_BASE_HIGH_MASK,
                         IOMMU_DEV_TABLE_BASE_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET);
}

static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 power_of2_entries;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->cmd_buffer.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_CMD_BUFFER_BASE_LOW_MASK,
                         IOMMU_CMD_BUFFER_BASE_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);

    power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
        IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_CMD_BUFFER_BASE_HIGH_MASK,
                         IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(power_of2_entries, entry,
                         IOMMU_CMD_BUFFER_LENGTH_MASK,
                         IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
}

static void __init register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 power_of2_entries;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->event_log.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EVENT_LOG_BASE_LOW_MASK,
                         IOMMU_EVENT_LOG_BASE_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);

    power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
        IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EVENT_LOG_BASE_HIGH_MASK,
                         IOMMU_EVENT_LOG_BASE_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(power_of2_entries, entry,
                         IOMMU_EVENT_LOG_LENGTH_MASK,
                         IOMMU_EVENT_LOG_LENGTH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_HIGH_OFFSET);
}
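
/*
 * Turn DMA translation on or off via the control register.  When enabling,
 * the per-IOMMU feature flags (ht_tunnel_support, isochronous, coherent,
 * res_pass_pw) are programmed as well, while the PassPW bit is deliberately
 * left clear (see the comment in the function body).
 */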
static void set_iommu_translation_control(struct amd_iommu *iommu,
                                          int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);

    if ( enable )
    {
        set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
                             IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
        set_field_in_reg_u32(iommu->isochronous ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_ISOCHRONOUS_MASK,
                             IOMMU_CONTROL_ISOCHRONOUS_SHIFT, &entry);
        set_field_in_reg_u32(iommu->coherent ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_COHERENT_MASK,
                             IOMMU_CONTROL_COHERENT_SHIFT, &entry);
        set_field_in_reg_u32(iommu->res_pass_pw ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_MASK,
                             IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT, &entry);
        /* do not set PassPW bit */
        set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_PASS_POSTED_WRITE_MASK,
                             IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT, &entry);
    }
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
                         IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}

static void set_iommu_command_buffer_control(struct amd_iommu *iommu,
                                             int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);

    /* Reset head and tail pointer manually before enablement */
    if ( enable == IOMMU_CONTROL_ENABLED )
    {
        writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
        writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
    }

    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}
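
/*
 * Program the exclusion range registers from the values recorded for this
 * IOMMU.  Device accesses falling inside an enabled exclusion range are not
 * translated; the enable/allow-all bits share the base-low register, which
 * is therefore written last.
 */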
static void register_iommu_exclusion_range(struct amd_iommu *iommu)
{
    u64 addr_lo, addr_hi;
    u32 entry;

    addr_lo = iommu->exclusion_limit & DMA_32BIT_MASK;
    addr_hi = iommu->exclusion_limit >> 32;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EXCLUSION_LIMIT_HIGH_MASK,
                         IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET);

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EXCLUSION_LIMIT_LOW_MASK,
                         IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_LIMIT_LOW_OFFSET);

    addr_lo = iommu->exclusion_base & DMA_32BIT_MASK;
    addr_hi = iommu->exclusion_base >> 32;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EXCLUSION_BASE_HIGH_MASK,
                         IOMMU_EXCLUSION_BASE_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_BASE_HIGH_OFFSET);

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EXCLUSION_BASE_LOW_MASK,
                         IOMMU_EXCLUSION_BASE_LOW_SHIFT, &entry);

    set_field_in_reg_u32(iommu->exclusion_allow_all, entry,
                         IOMMU_EXCLUSION_ALLOW_ALL_MASK,
                         IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry);

    set_field_in_reg_u32(iommu->exclusion_enable, entry,
                         IOMMU_EXCLUSION_RANGE_ENABLE_MASK,
                         IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_BASE_LOW_OFFSET);
}

static void set_iommu_event_log_control(struct amd_iommu *iommu,
                                        int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK,
                         IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT, &entry);
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_EVENT_LOG_INT_MASK,
                         IOMMU_CONTROL_EVENT_LOG_INT_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_COMP_WAIT_INT_MASK,
                         IOMMU_CONTROL_COMP_WAIT_INT_SHIFT, &entry);

    /* Reset head and tail pointer manually before enablement */
    if ( enable == IOMMU_CONTROL_ENABLED )
    {
        writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
        writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);
    }
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}
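
/*
 * Consume one entry from the event log ring buffer.  The hardware advances
 * the tail pointer as it logs events; software reads from event_log_head,
 * wraps at event_log.entries, and writes the new head index back to the
 * MMIO head register.  Returns -EFAULT when the log is empty.
 */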
static int amd_iommu_read_event_log(struct amd_iommu *iommu, u32 event[])
{
    u32 tail, head, *event_log;
    int i;

    BUG_ON( !iommu || !event );

    /* make sure there's an entry in the log */
    tail = get_field_from_reg_u32(
        readl(iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET),
        IOMMU_EVENT_LOG_TAIL_MASK,
        IOMMU_EVENT_LOG_TAIL_SHIFT);
    if ( tail != iommu->event_log_head )
    {
        /* read event log entry */
        event_log = (u32 *)(iommu->event_log.buffer +
                            (iommu->event_log_head *
                             IOMMU_EVENT_LOG_ENTRY_SIZE));
        for ( i = 0; i < IOMMU_EVENT_LOG_U32_PER_ENTRY; i++ )
            event[i] = event_log[i];
        if ( ++iommu->event_log_head == iommu->event_log.entries )
            iommu->event_log_head = 0;

        /* update head pointer */
        set_field_in_reg_u32(iommu->event_log_head, 0,
                             IOMMU_EVENT_LOG_HEAD_MASK,
                             IOMMU_EVENT_LOG_HEAD_SHIFT, &head);
        writel(head, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
        return 0;
    }

    return -EFAULT;
}
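
/*
 * MSI setup for the IOMMU's event log interrupt.  The IOMMU is itself a PCI
 * function, so its MSI capability is programmed directly through PCI config
 * space using the bus/dev/func derived from iommu->bdf.
 */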
static void amd_iommu_msi_data_init(struct amd_iommu *iommu)
{
    u32 msi_data;
    u8 bus = (iommu->bdf >> 8) & 0xff;
    u8 dev = PCI_SLOT(iommu->bdf & 0xff);
    u8 func = PCI_FUNC(iommu->bdf & 0xff);
    int vector = iommu->vector;

    msi_data = MSI_DATA_TRIGGER_EDGE |
        MSI_DATA_LEVEL_ASSERT |
        MSI_DATA_DELIVERY_FIXED |
        MSI_DATA_VECTOR(vector);

    pci_conf_write32(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_DATA_64, msi_data);
}

static void amd_iommu_msi_addr_init(struct amd_iommu *iommu, int phy_cpu)
{
    int bus = (iommu->bdf >> 8) & 0xff;
    int dev = PCI_SLOT(iommu->bdf & 0xff);
    int func = PCI_FUNC(iommu->bdf & 0xff);

    u32 address_hi = 0;
    u32 address_lo = MSI_ADDR_HEADER |
        MSI_ADDR_DESTMODE_PHYS |
        MSI_ADDR_REDIRECTION_CPU |
        MSI_ADDR_DEST_ID(phy_cpu);

    pci_conf_write32(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_ADDRESS_LO, address_lo);
    pci_conf_write32(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_ADDRESS_HI, address_hi);
}

static void amd_iommu_msi_enable(struct amd_iommu *iommu, int flag)
{
    u16 control;
    int bus = (iommu->bdf >> 8) & 0xff;
    int dev = PCI_SLOT(iommu->bdf & 0xff);
    int func = PCI_FUNC(iommu->bdf & 0xff);

    control = pci_conf_read16(bus, dev, func,
                              iommu->msi_cap + PCI_MSI_FLAGS);
    control &= ~(1);
    if ( flag )
        control |= flag;
    pci_conf_write16(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_FLAGS, control);
}
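
/*
 * irq_desc callbacks for the IOMMU MSI.  Masking is approximated by toggling
 * the MSI enable bit; IOMMUs that advertise per-vector mask bits are skipped
 * for now (see the FIXME notes below).
 */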
static void iommu_msi_unmask(unsigned int vector)
{
    unsigned long flags;
    struct amd_iommu *iommu = vector_to_iommu[vector];

    /* FIXME: do not support mask bits at the moment */
    if ( iommu->maskbit )
        return;

    spin_lock_irqsave(&iommu->lock, flags);
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
    spin_unlock_irqrestore(&iommu->lock, flags);
}

static void iommu_msi_mask(unsigned int vector)
{
    unsigned long flags;
    struct amd_iommu *iommu = vector_to_iommu[vector];

    /* FIXME: do not support mask bits at the moment */
    if ( iommu->maskbit )
        return;

    spin_lock_irqsave(&iommu->lock, flags);
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
    spin_unlock_irqrestore(&iommu->lock, flags);
}

static unsigned int iommu_msi_startup(unsigned int vector)
{
    iommu_msi_unmask(vector);
    return 0;
}

static void iommu_msi_end(unsigned int vector)
{
    iommu_msi_unmask(vector);
    ack_APIC_irq();
}

static void iommu_msi_set_affinity(unsigned int vector, cpumask_t dest)
{
    struct amd_iommu *iommu = vector_to_iommu[vector];
    amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(dest)));
}

static struct hw_interrupt_type iommu_msi_type = {
    .typename = "AMD_IOV_MSI",
    .startup = iommu_msi_startup,
    .shutdown = iommu_msi_mask,
    .enable = iommu_msi_unmask,
    .disable = iommu_msi_mask,
    .ack = iommu_msi_mask,
    .end = iommu_msi_end,
    .set_affinity = iommu_msi_set_affinity,
};
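
/*
 * Event log interrupt path: amd_iommu_page_fault() is the irq handler.  It
 * reads one event log entry, clears the event-log interrupt status bit, and
 * passes the entry to parse_event_log_entry(), which currently decodes and
 * prints details only for IO_PAGE_FALT events.
 */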
static void parse_event_log_entry(u32 entry[])
{
    u16 domain_id, device_id;
    u32 code;
    u64 *addr;
    char * event_str[] = {"ILLEGAL_DEV_TABLE_ENTRY",
                          "IO_PAGE_FALT",
                          "DEV_TABLE_HW_ERROR",
                          "PAGE_TABLE_HW_ERROR",
                          "ILLEGAL_COMMAND_ERROR",
                          "COMMAND_HW_ERROR",
                          "IOTLB_INV_TIMEOUT",
                          "INVALID_DEV_REQUEST"};

    code = get_field_from_reg_u32(entry[1], IOMMU_EVENT_CODE_MASK,
                                  IOMMU_EVENT_CODE_SHIFT);

    if ( (code > IOMMU_EVENT_INVALID_DEV_REQUEST) ||
         (code < IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY) )
    {
        amd_iov_error("Invalid event log entry!\n");
        return;
    }

    if ( code == IOMMU_EVENT_IO_PAGE_FALT )
    {
        device_id = get_field_from_reg_u32(entry[0],
                                           IOMMU_EVENT_DEVICE_ID_MASK,
                                           IOMMU_EVENT_DEVICE_ID_SHIFT);
        domain_id = get_field_from_reg_u32(entry[1],
                                           IOMMU_EVENT_DOMAIN_ID_MASK,
                                           IOMMU_EVENT_DOMAIN_ID_SHIFT);
        addr = (u64 *)(entry + 2);
        printk(XENLOG_ERR "AMD_IOV: "
               "%s: domain:%d, device id:0x%x, fault address:0x%"PRIx64"\n",
               event_str[code-1], domain_id, device_id, *addr);
    }
}

static void amd_iommu_page_fault(int vector, void *dev_id,
                                 struct cpu_user_regs *regs)
{
    u32 event[4];
    u32 entry;
    unsigned long flags;
    int ret = 0;
    struct amd_iommu *iommu = dev_id;

    spin_lock_irqsave(&iommu->lock, flags);
    ret = amd_iommu_read_event_log(iommu, event);
    /* reset interrupt status bit */
    entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_STATUS_EVENT_LOG_INT_MASK,
                         IOMMU_STATUS_EVENT_LOG_INT_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    spin_unlock_irqrestore(&iommu->lock, flags);

    if ( ret != 0 )
        return;
    parse_event_log_entry(event);
}

static int set_iommu_interrupt_handler(struct amd_iommu *iommu)
{
    int vector, ret;

    vector = assign_irq_vector(AUTO_ASSIGN_IRQ);
    if ( vector <= 0 )
    {
        gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no vectors\n");
        return 0;
    }

    irq_desc[vector].handler = &iommu_msi_type;
    vector_to_iommu[vector] = iommu;
    ret = request_irq_vector(vector, amd_iommu_page_fault, 0,
                             "amd_iommu", iommu);
    if ( ret )
    {
        irq_desc[vector].handler = &no_irq_type;
        vector_to_iommu[vector] = NULL;
        free_irq_vector(vector);
        amd_iov_error("can't request irq\n");
        return 0;
    }

    /* Make sure that vector is never re-used. */
    vector_irq[vector] = NEVER_ASSIGN_IRQ;
    iommu->vector = vector;
    return vector;
}
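
/*
 * Bring one IOMMU online: program the device table, command buffer, event
 * log and exclusion range registers, set up the event log MSI, then enable
 * the command buffer, event log and finally translation itself.
 */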
void enable_iommu(struct amd_iommu *iommu)
{
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);

    if ( iommu->enabled )
    {
        spin_unlock_irqrestore(&iommu->lock, flags);
        return;
    }

    register_iommu_dev_table_in_mmio_space(iommu);
    register_iommu_cmd_buffer_in_mmio_space(iommu);
    register_iommu_event_log_in_mmio_space(iommu);
    register_iommu_exclusion_range(iommu);

    amd_iommu_msi_data_init(iommu);
    amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);

    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
    set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
    set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);

    iommu->enabled = 1;
    spin_unlock_irqrestore(&iommu->lock, flags);
}

static void __init deallocate_iommu_table_struct(
    struct table_struct *table)
{
    int order = 0;
    if ( table->buffer )
    {
        order = get_order_from_bytes(table->alloc_size);
        __free_amd_iommu_tables(table->buffer, order);
        table->buffer = NULL;
    }
}

static void __init deallocate_iommu_tables(struct amd_iommu *iommu)
{
    deallocate_iommu_table_struct(&iommu->cmd_buffer);
    deallocate_iommu_table_struct(&iommu->event_log);
}

static int __init allocate_iommu_table_struct(struct table_struct *table,
                                              const char *name)
{
    int order = 0;
    if ( table->buffer == NULL )
    {
        order = get_order_from_bytes(table->alloc_size);
        table->buffer = __alloc_amd_iommu_tables(order);

        if ( table->buffer == NULL )
        {
            amd_iov_error("Error allocating %s\n", name);
            return -ENOMEM;
        }
        memset(table->buffer, 0, PAGE_SIZE * (1UL << order));
    }
    return 0;
}
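
/*
 * The per-IOMMU command buffer and event log are sized from the configured
 * entry counts and rounded up to a power-of-two number of 4K pages, since
 * the hardware length fields encode the size as an order of pages.
 */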
static int __init allocate_iommu_tables(struct amd_iommu *iommu)
{
    /* allocate 'command buffer' in power of 2 increments of 4K */
    iommu->cmd_buffer_tail = 0;
    iommu->cmd_buffer.alloc_size = PAGE_SIZE <<
        get_order_from_bytes(
            PAGE_ALIGN(amd_iommu_cmd_buffer_entries *
                       IOMMU_CMD_BUFFER_ENTRY_SIZE));
    iommu->cmd_buffer.entries = iommu->cmd_buffer.alloc_size /
        IOMMU_CMD_BUFFER_ENTRY_SIZE;

    if ( allocate_iommu_table_struct(&iommu->cmd_buffer, "Command Buffer") != 0 )
        goto error_out;

    /* allocate 'event log' in power of 2 increments of 4K */
    iommu->event_log_head = 0;
    iommu->event_log.alloc_size = PAGE_SIZE <<
        get_order_from_bytes(
            PAGE_ALIGN(amd_iommu_event_log_entries *
                       IOMMU_EVENT_LOG_ENTRY_SIZE));
    iommu->event_log.entries = iommu->event_log.alloc_size /
        IOMMU_EVENT_LOG_ENTRY_SIZE;

    if ( allocate_iommu_table_struct(&iommu->event_log, "Event Log") != 0 )
        goto error_out;

    return 0;

 error_out:
    deallocate_iommu_tables(iommu);
    return -ENOMEM;
}

int __init amd_iommu_init_one(struct amd_iommu *iommu)
{
    if ( allocate_iommu_tables(iommu) != 0 )
        goto error_out;

    if ( map_iommu_mmio_region(iommu) != 0 )
        goto error_out;

    if ( set_iommu_interrupt_handler(iommu) == 0 )
        goto error_out;

    /* Make sure that device_table.buffer has been successfully allocated */
    if ( device_table.buffer == NULL )
        goto error_out;

    iommu->dev_table.alloc_size = device_table.alloc_size;
    iommu->dev_table.entries = device_table.entries;
    iommu->dev_table.buffer = device_table.buffer;

    enable_iommu(iommu);
    printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus);
    nr_amd_iommus++;

    return 0;

 error_out:
    return -ENODEV;
}

void __init amd_iommu_init_cleanup(void)
{
    struct amd_iommu *iommu, *next;

    list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
    {
        list_del(&iommu->list);
        if ( iommu->enabled )
        {
            deallocate_iommu_tables(iommu);
            unmap_iommu_mmio_region(iommu);
        }
        xfree(iommu);
    }
}
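
/*
 * Shared, platform-wide state (as opposed to per-IOMMU state): ivrs_mappings
 * holds one entry per BDF covered by the IVRS ACPI table, and device_table is
 * a single device table shared by all IOMMUs (see amd_iommu_init_one()).
 * amd_iommu_setup_shared_tables() builds both, plus the interrupt remapping
 * table.
 */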
static int __init init_ivrs_mapping(void)
{
    int bdf;

    BUG_ON( !ivrs_bdf_entries );

    ivrs_mappings = xmalloc_array(struct ivrs_mappings, ivrs_bdf_entries);
    if ( ivrs_mappings == NULL )
    {
        amd_iov_error("Error allocating IVRS Mappings table\n");
        return -ENOMEM;
    }
    memset(ivrs_mappings, 0, ivrs_bdf_entries * sizeof(struct ivrs_mappings));

    /* assign default values for device entries */
    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
    {
        ivrs_mappings[bdf].dte_requestor_id = bdf;
        ivrs_mappings[bdf].dte_sys_mgt_enable =
            IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED;
        ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].iommu = NULL;
    }
    return 0;
}

static int __init amd_iommu_setup_device_table(void)
{
    /* allocate 'device table' on a 4K boundary */
    device_table.alloc_size = PAGE_SIZE <<
        get_order_from_bytes(
            PAGE_ALIGN(ivrs_bdf_entries *
                       IOMMU_DEV_TABLE_ENTRY_SIZE));
    device_table.entries = device_table.alloc_size /
        IOMMU_DEV_TABLE_ENTRY_SIZE;

    return ( allocate_iommu_table_struct(&device_table, "Device Table") );
}

int __init amd_iommu_setup_shared_tables(void)
{
    BUG_ON( !ivrs_bdf_entries );

    if ( init_ivrs_mapping() != 0 )
        goto error_out;

    if ( amd_iommu_setup_device_table() != 0 )
        goto error_out;

    if ( amd_iommu_setup_intremap_table() != 0 )
        goto error_out;

    return 0;

 error_out:
    deallocate_intremap_table();
    deallocate_iommu_table_struct(&device_table);

    if ( ivrs_mappings )
    {
        xfree(ivrs_mappings);
        ivrs_mappings = NULL;
    }
    return -ENOMEM;
}
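
/*
 * Suspend/resume support (added by this changeset).  On suspend each IOMMU is
 * simply disabled.  On resume each IOMMU is disabled again defensively and
 * then re-enabled, after which all device table entries, interrupt tables and
 * per-domain page translations are invalidated to flush any stale state the
 * IOMMU may still be caching.
 */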
static void disable_iommu(struct amd_iommu *iommu)
{
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);

    if ( !iommu->enabled )
    {
        spin_unlock_irqrestore(&iommu->lock, flags);
        return;
    }

    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_DISABLED);
    set_iommu_event_log_control(iommu, IOMMU_CONTROL_DISABLED);
    set_iommu_translation_control(iommu, IOMMU_CONTROL_DISABLED);

    iommu->enabled = 0;

    spin_unlock_irqrestore(&iommu->lock, flags);
}

static void invalidate_all_domain_pages(void)
{
    struct domain *d;
    for_each_domain( d )
        invalidate_all_iommu_pages(d);
}

static void invalidate_all_devices(void)
{
    u16 bus, devfn, bdf, req_id;
    unsigned long flags;
    struct amd_iommu *iommu;

    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
    {
        bus = bdf >> 8;
        devfn = bdf & 0xFF;
        iommu = find_iommu_for_device(bus, devfn);
        req_id = ivrs_mappings[bdf].dte_requestor_id;
        if ( iommu )
        {
            spin_lock_irqsave(&iommu->lock, flags);
            invalidate_dev_table_entry(iommu, req_id);
            invalidate_interrupt_table(iommu, req_id);
            flush_command_buffer(iommu);
            spin_unlock_irqrestore(&iommu->lock, flags);
        }
    }
}

void amd_iommu_suspend(void)
{
    struct amd_iommu *iommu;

    for_each_amd_iommu ( iommu )
        disable_iommu(iommu);
}

void amd_iommu_resume(void)
{
    struct amd_iommu *iommu;

    for_each_amd_iommu ( iommu )
    {
        /*
         * Make sure the IOMMU is fully disabled and has not been touched
         * before re-enabling it.
         */
        disable_iommu(iommu);
        enable_iommu(iommu);
    }

    /* flush all cache entries after iommu re-enabled */
    invalidate_all_devices();
    invalidate_all_domain_pages();
}