
view xen/drivers/passthrough/vtd/intremap.c @ 21045:e9d4cb54c551

vt-d: ensure x2apic is not enabled accidentally if no DRHD at all.

Thanks to Jan Beulich for pointing this out.

Signed-off-by: Dexuan Cui <dexuan.cui@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Mar 05 14:31:14 2010 +0000 (2010-03-05)
parents 04037c99b5f1
children 3198a2e81d38
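
The fix itself is the early-exit guard in iommu_supports_eim() (visible in the
source below): when ACPI enumerates no DRHD units at all, EIM support must be
reported as absent, since interrupt remapping cannot be enabled on such a
system and x2APIC therefore must not be turned on. The added check reads:

    if ( list_empty(&acpi_drhd_units) )
        return 0;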
line source
1 /*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
18 * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com>
19 */
21 #include <xen/irq.h>
22 #include <xen/sched.h>
23 #include <xen/iommu.h>
24 #include <asm/hvm/iommu.h>
25 #include <xen/time.h>
26 #include <xen/list.h>
27 #include <xen/pci.h>
28 #include <xen/pci_regs.h>
29 #include "iommu.h"
30 #include "dmar.h"
31 #include "vtd.h"
32 #include "extern.h"
34 #ifdef __ia64__
35 #define nr_ioapics iosapic_get_nr_iosapics()
36 #define nr_ioapic_registers(i) iosapic_get_nr_pins(i)
37 #else
38 #define nr_ioapic_registers(i) nr_ioapic_registers[i]
39 #endif
41 /*
42 * source validation type (SVT)
43 */
44 #define SVT_NO_VERIFY 0x0 /* no verification is required */
45 #define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */
46 #define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
48 /*
49 * source-id qualifier (SQ)
50 */
51 #define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
52 #define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
53 * the third least significant bit
54 */
55 #define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
56 * the second and third least significant bits
57 */
58 #define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
59 * the three least significant bits
60 */
62 /* apic_pin_2_ir_idx[apicid][pin] = interrupt remapping table index */
63 static int **apic_pin_2_ir_idx;
65 static int init_apic_pin_2_ir_idx(void)
66 {
67 int *_apic_pin_2_ir_idx;
68 unsigned int nr_pins, i;
70 /* Here we shouldn't need to re-init when resuming from S3. */
71 if ( apic_pin_2_ir_idx != NULL )
72 return 0;
74 nr_pins = 0;
75 for ( i = 0; i < nr_ioapics; i++ )
76 nr_pins += nr_ioapic_registers(i);
78 _apic_pin_2_ir_idx = xmalloc_array(int, nr_pins);
79 apic_pin_2_ir_idx = xmalloc_array(int *, nr_ioapics);
80 if ( (_apic_pin_2_ir_idx == NULL) || (apic_pin_2_ir_idx == NULL) )
81 {
82 xfree(_apic_pin_2_ir_idx);
83 xfree(apic_pin_2_ir_idx);
84 return -ENOMEM;
85 }
87 for ( i = 0; i < nr_pins; i++ )
88 _apic_pin_2_ir_idx[i] = -1;
90 nr_pins = 0;
91 for ( i = 0; i < nr_ioapics; i++ )
92 {
93 apic_pin_2_ir_idx[i] = &_apic_pin_2_ir_idx[nr_pins];
94 nr_pins += nr_ioapic_registers(i);
95 }
97 return 0;
98 }
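/*
 * Illustrative layout (example values, not from the source): with two
 * IO-APICs of 24 pins each, _apic_pin_2_ir_idx holds 48 ints all set to -1,
 * apic_pin_2_ir_idx[0] points at slot 0 and apic_pin_2_ir_idx[1] at slot 24,
 * so apic_pin_2_ir_idx[apic][pin] indexes the flat array directly.
 */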
100 static u16 apicid_to_bdf(int apic_id)
101 {
102 struct acpi_drhd_unit *drhd = ioapic_to_drhd(apic_id);
103 struct acpi_ioapic_unit *acpi_ioapic_unit;
105 list_for_each_entry ( acpi_ioapic_unit, &drhd->ioapic_list, list )
106 if ( acpi_ioapic_unit->apic_id == apic_id )
107 return acpi_ioapic_unit->ioapic.info;
109 dprintk(XENLOG_ERR VTDPREFIX, "Didn't find the bdf for the apic_id!\n");
110 return 0;
111 }
113 static void set_ire_sid(struct iremap_entry *ire,
114 unsigned int svt, unsigned int sq, unsigned int sid)
115 {
116 ire->hi.svt = svt;
117 ire->hi.sq = sq;
118 ire->hi.sid = sid;
119 }
121 static void set_ioapic_source_id(int apic_id, struct iremap_entry *ire)
122 {
123 set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16,
124 apicid_to_bdf(apic_id));
125 }
127 int iommu_supports_eim(void)
128 {
129 struct acpi_drhd_unit *drhd;
131 if ( !iommu_enabled || !iommu_qinval || !iommu_intremap )
132 return 0;
134 if ( list_empty(&acpi_drhd_units) )
135 return 0;
137 for_each_drhd_unit ( drhd )
138 if ( !ecap_queued_inval(drhd->ecap) ||
139 !ecap_intr_remap(drhd->ecap) ||
140 !ecap_eim(drhd->ecap) )
141 return 0;
143 return 1;
144 }
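/*
 * EIM (extended interrupt mode) is what allows 32-bit destination IDs and
 * hence x2APIC; the function above therefore requires every DRHD unit to
 * support queued invalidation, interrupt remapping and EIM, and reports no
 * support when no DRHD units exist at all.
 */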
146 /* Mark specified intr remap entry as free */
147 static void free_remap_entry(struct iommu *iommu, int index)
148 {
149 struct iremap_entry *iremap_entry = NULL, *iremap_entries;
150 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
152 if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
153 return;
155 ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );
157 GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
158 iremap_entries, iremap_entry);
160 memset(iremap_entry, 0, sizeof(struct iremap_entry));
161 iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
162 iommu_flush_iec_index(iommu, 0, index);
164 unmap_vtd_domain_page(iremap_entries);
165 ir_ctrl->iremap_num--;
166 }
168 /*
169 * Look for a free intr remap entry.
170 * The caller must hold iremap_lock, and must set up the returned entry before releasing the lock.
171 */
172 static int alloc_remap_entry(struct iommu *iommu)
173 {
174 struct iremap_entry *iremap_entries = NULL;
175 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
176 int i;
178 ASSERT( spin_is_locked(&ir_ctrl->iremap_lock) );
180 for ( i = 0; i < IREMAP_ENTRY_NR; i++ )
181 {
182 struct iremap_entry *p;
183 if ( i % (1 << IREMAP_ENTRY_ORDER) == 0 )
184 {
185 /* This entry crosses a page boundary */
186 if ( iremap_entries )
187 unmap_vtd_domain_page(iremap_entries);
189 GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, i,
190 iremap_entries, p);
191 }
192 else
193 p = &iremap_entries[i % (1 << IREMAP_ENTRY_ORDER)];
195 if ( p->lo_val == 0 && p->hi_val == 0 ) /* a free entry */
196 break;
197 }
199 if ( iremap_entries )
200 unmap_vtd_domain_page(iremap_entries);
202 ir_ctrl->iremap_num++;
203 return i;
204 }
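/*
 * Note on the loop above: GET_IREMAP_ENTRY maps one page of the remap table
 * at a time, so whenever the index crosses a page boundary the previously
 * mapped page is unmapped and the next one mapped.  Assuming 16-byte
 * (128-bit) entries, (1 << IREMAP_ENTRY_ORDER) is the number of entries per
 * mapped page.
 */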
206 static int remap_entry_to_ioapic_rte(
207 struct iommu *iommu, int index, struct IO_xAPIC_route_entry *old_rte)
208 {
209 struct iremap_entry *iremap_entry = NULL, *iremap_entries;
210 unsigned long flags;
211 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
213 if ( ir_ctrl == NULL )
214 {
215 dprintk(XENLOG_ERR VTDPREFIX,
216 "remap_entry_to_ioapic_rte: ir_ctl is not ready\n");
217 return -EFAULT;
218 }
220 if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
221 {
222 dprintk(XENLOG_ERR VTDPREFIX,
223 "%s: index (%d) for remap table is invalid !\n",
224 __func__, index);
225 return -EFAULT;
226 }
228 spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
230 GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
231 iremap_entries, iremap_entry);
233 if ( iremap_entry->hi_val == 0 && iremap_entry->lo_val == 0 )
234 {
235 dprintk(XENLOG_ERR VTDPREFIX, "%s: index (%d) points to an empty entry!\n",
236 __func__, index);
237 unmap_vtd_domain_page(iremap_entries);
238 spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
239 return -EFAULT;
240 }
241 old_rte->vector = iremap_entry->lo.vector;
242 old_rte->delivery_mode = iremap_entry->lo.dlm;
243 old_rte->dest_mode = iremap_entry->lo.dm;
244 old_rte->trigger = iremap_entry->lo.tm;
245 old_rte->__reserved_2 = 0;
246 old_rte->dest.logical.__reserved_1 = 0;
247 old_rte->dest.logical.logical_dest = iremap_entry->lo.dst >> 8;
249 unmap_vtd_domain_page(iremap_entries);
250 spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
251 return 0;
252 }
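/*
 * The function above is the read path's helper: it reconstructs a plain
 * xAPIC-format RTE (vector, delivery/dest mode, trigger, destination) from
 * the remap entry so callers reading the IO-APIC see the effective values.
 */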
254 static int ioapic_rte_to_remap_entry(struct iommu *iommu,
255 int apic, unsigned int ioapic_pin, struct IO_xAPIC_route_entry *old_rte,
256 unsigned int rte_upper, unsigned int value)
257 {
258 struct iremap_entry *iremap_entry = NULL, *iremap_entries;
259 struct iremap_entry new_ire;
260 struct IO_APIC_route_remap_entry *remap_rte;
261 struct IO_xAPIC_route_entry new_rte;
262 int index;
263 unsigned long flags;
264 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
266 remap_rte = (struct IO_APIC_route_remap_entry *) old_rte;
267 spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
269 index = apic_pin_2_ir_idx[apic][ioapic_pin];
270 if ( index < 0 )
271 {
272 index = alloc_remap_entry(iommu);
273 apic_pin_2_ir_idx[apic][ioapic_pin] = index;
274 }
276 if ( index > IREMAP_ENTRY_NR - 1 )
277 {
278 dprintk(XENLOG_ERR VTDPREFIX,
279 "%s: intremap index (%d) is larger than"
280 " the maximum index (%d)!\n",
281 __func__, index, IREMAP_ENTRY_NR - 1);
282 spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
283 return -EFAULT;
284 }
286 GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
287 iremap_entries, iremap_entry);
289 memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));
291 if ( rte_upper )
292 {
293 #if defined(__i386__) || defined(__x86_64__)
294 if ( x2apic_enabled )
295 new_ire.lo.dst = value;
296 else
297 new_ire.lo.dst = (value >> 24) << 8;
298 #else /* __ia64__ */
299 new_ire.lo.dst = value >> 16;
300 #endif
301 }
302 else
303 {
304 *(((u32 *)&new_rte) + 0) = value;
305 new_ire.lo.fpd = 0;
306 new_ire.lo.dm = new_rte.dest_mode;
307 new_ire.lo.rh = 0;
308 new_ire.lo.tm = new_rte.trigger;
309 new_ire.lo.dlm = new_rte.delivery_mode;
310 new_ire.lo.avail = 0;
311 new_ire.lo.res_1 = 0;
312 new_ire.lo.vector = new_rte.vector;
313 new_ire.lo.res_2 = 0;
315 set_ioapic_source_id(IO_APIC_ID(apic), &new_ire);
316 new_ire.hi.res_1 = 0;
317 new_ire.lo.p = 1; /* finally, set present bit */
319 /* now construct new ioapic rte entry */
320 remap_rte->vector = new_rte.vector;
321 remap_rte->delivery_mode = 0; /* has to be 0 for remap format */
322 remap_rte->index_15 = (index >> 15) & 0x1;
323 remap_rte->index_0_14 = index & 0x7fff;
325 remap_rte->delivery_status = new_rte.delivery_status;
326 remap_rte->polarity = new_rte.polarity;
327 remap_rte->irr = new_rte.irr;
328 remap_rte->trigger = new_rte.trigger;
329 remap_rte->mask = new_rte.mask;
330 remap_rte->reserved = 0;
331 remap_rte->format = 1; /* indicate remap format */
332 }
334 memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
335 iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
336 iommu_flush_iec_index(iommu, 0, index);
337 invalidate_sync(iommu);
339 unmap_vtd_domain_page(iremap_entries);
340 spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
341 return 0;
342 }
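/*
 * In remap format (format = 1) the RTE's destination is taken from the
 * interrupt remapping table rather than from the RTE itself; the 16-bit
 * table index is split across index_0_14 and index_15 as above.  For a
 * hypothetical index of 0x1234: index_0_14 = 0x1234 & 0x7fff = 0x1234 and
 * index_15 = (0x1234 >> 15) & 0x1 = 0.
 */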
344 unsigned int io_apic_read_remap_rte(
345 unsigned int apic, unsigned int reg)
346 {
347 unsigned int ioapic_pin = (reg - 0x10) / 2;
348 int index;
349 struct IO_xAPIC_route_entry old_rte = { 0 };
350 struct IO_APIC_route_remap_entry *remap_rte;
351 int rte_upper = (reg & 1) ? 1 : 0;
352 struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
353 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
355 if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 ||
356 (ir_ctrl->iremap_num == 0) ||
357 ( (index = apic_pin_2_ir_idx[apic][ioapic_pin]) < 0 ) )
358 {
359 *IO_APIC_BASE(apic) = reg;
360 return *(IO_APIC_BASE(apic)+4);
361 }
363 if ( rte_upper )
364 reg--;
366 /* read the lower and upper 32 bits of the RTE */
367 *IO_APIC_BASE(apic) = reg;
368 *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
369 *IO_APIC_BASE(apic) = reg + 1;
370 *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);
372 remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;
374 if ( remap_entry_to_ioapic_rte(iommu, index, &old_rte) )
375 {
376 *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
377 return *(IO_APIC_BASE(apic)+4);
378 }
380 if ( rte_upper )
381 return (*(((u32 *)&old_rte) + 1));
382 else
383 return (*(((u32 *)&old_rte) + 0));
384 }
386 void io_apic_write_remap_rte(
387 unsigned int apic, unsigned int reg, unsigned int value)
388 {
389 unsigned int ioapic_pin = (reg - 0x10) / 2;
390 struct IO_xAPIC_route_entry old_rte = { 0 };
391 struct IO_APIC_route_remap_entry *remap_rte;
392 unsigned int rte_upper = (reg & 1) ? 1 : 0;
393 struct iommu *iommu = ioapic_to_iommu(IO_APIC_ID(apic));
394 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
395 int saved_mask;
397 if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
398 {
399 *IO_APIC_BASE(apic) = reg;
400 *(IO_APIC_BASE(apic)+4) = value;
401 return;
402 }
404 if ( rte_upper )
405 reg--;
407 /* read both the lower and upper 32 bits of the RTE */
408 *IO_APIC_BASE(apic) = reg;
409 *(((u32 *)&old_rte) + 0) = *(IO_APIC_BASE(apic)+4);
410 *IO_APIC_BASE(apic) = reg + 1;
411 *(((u32 *)&old_rte) + 1) = *(IO_APIC_BASE(apic)+4);
413 remap_rte = (struct IO_APIC_route_remap_entry *) &old_rte;
415 /* mask the interrupt while we change the intremap table */
416 saved_mask = remap_rte->mask;
417 remap_rte->mask = 1;
418 *IO_APIC_BASE(apic) = reg;
419 *(IO_APIC_BASE(apic)+4) = *(((int *)&old_rte)+0);
420 remap_rte->mask = saved_mask;
422 if ( ioapic_rte_to_remap_entry(iommu, apic, ioapic_pin,
423 &old_rte, rte_upper, value) )
424 {
425 *IO_APIC_BASE(apic) = rte_upper ? (reg + 1) : reg;
426 *(IO_APIC_BASE(apic)+4) = value;
427 return;
428 }
430 /* write new entry to ioapic */
431 *IO_APIC_BASE(apic) = reg;
432 *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+0);
433 *IO_APIC_BASE(apic) = reg + 1;
434 *(IO_APIC_BASE(apic)+4) = *(((u32 *)&old_rte)+1);
435 }
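/*
 * Both accessors above use the standard indirect IO-APIC access scheme:
 * writing a register number to *IO_APIC_BASE(apic) selects the register and
 * *(IO_APIC_BASE(apic) + 4) is the 32-bit data window; each RTE occupies two
 * consecutive registers (reg for the low half, reg + 1 for the high half).
 */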
437 #if defined(__i386__) || defined(__x86_64__)
439 static void set_msi_source_id(struct pci_dev *pdev, struct iremap_entry *ire)
440 {
441 int type;
442 u8 bus, devfn, secbus;
443 int ret;
445 if ( !pdev || !ire )
446 return;
448 bus = pdev->bus;
449 devfn = pdev->devfn;
450 type = pdev_type(bus, devfn);
451 switch ( type )
452 {
453 case DEV_TYPE_PCIe_BRIDGE:
454 case DEV_TYPE_PCIe2PCI_BRIDGE:
455 case DEV_TYPE_LEGACY_PCI_BRIDGE:
456 break;
458 case DEV_TYPE_PCIe_ENDPOINT:
459 set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16, PCI_BDF2(bus, devfn));
460 break;
462 case DEV_TYPE_PCI:
463 ret = find_upstream_bridge(&bus, &devfn, &secbus);
464 if ( ret == 0 ) /* integrated PCI device */
465 {
466 set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16,
467 PCI_BDF2(bus, devfn));
468 }
469 else if ( ret == 1 ) /* found upstream bridge */
470 {
471 if ( pdev_type(bus, devfn) == DEV_TYPE_PCIe2PCI_BRIDGE )
472 set_ire_sid(ire, SVT_VERIFY_BUS, SQ_ALL_16,
473 (bus << 8) | pdev->bus);
474 else if ( pdev_type(bus, devfn) == DEV_TYPE_LEGACY_PCI_BRIDGE )
475 set_ire_sid(ire, SVT_VERIFY_BUS, SQ_ALL_16,
476 PCI_BDF2(bus, devfn));
477 }
478 break;
480 default:
481 gdprintk(XENLOG_WARNING VTDPREFIX,
482 "set_msi_source_id: unknown type : bdf = %x:%x.%x\n",
483 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
484 break;
485 }
486 }
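/*
 * For the SVT_VERIFY_BUS cases above, the SID field is interpreted as a bus
 * range rather than an exact requester ID: per the VT-d source-validation
 * rules (SVT = 2), the upper byte is treated as the start bus and the lower
 * byte as the end bus from which the hardware accepts interrupt requests.
 */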
488 static int remap_entry_to_msi_msg(
489 struct iommu *iommu, struct msi_msg *msg)
490 {
491 struct iremap_entry *iremap_entry = NULL, *iremap_entries;
492 struct msi_msg_remap_entry *remap_rte;
493 int index;
494 unsigned long flags;
495 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
497 if ( ir_ctrl == NULL )
498 {
499 dprintk(XENLOG_ERR VTDPREFIX,
500 "remap_entry_to_msi_msg: ir_ctl == NULL");
501 return -EFAULT;
502 }
504 remap_rte = (struct msi_msg_remap_entry *) msg;
505 index = (remap_rte->address_lo.index_15 << 15) |
506 remap_rte->address_lo.index_0_14;
508 if ( index < 0 || index > IREMAP_ENTRY_NR - 1 )
509 {
510 dprintk(XENLOG_ERR VTDPREFIX,
511 "%s: index (%d) for remap table is invalid !\n",
512 __func__, index);
513 return -EFAULT;
514 }
516 spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
518 GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
519 iremap_entries, iremap_entry);
521 if ( iremap_entry->hi_val == 0 && iremap_entry->lo_val == 0 )
522 {
523 dprintk(XENLOG_ERR VTDPREFIX, "%s: index (%d) points to an empty entry!\n",
524 __func__, index);
525 unmap_vtd_domain_page(iremap_entries);
526 spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
527 return -EFAULT;
528 }
529 msg->address_hi = MSI_ADDR_BASE_HI;
530 msg->address_lo =
531 MSI_ADDR_BASE_LO |
532 ((iremap_entry->lo.dm == 0) ?
533 MSI_ADDR_DESTMODE_PHYS:
534 MSI_ADDR_DESTMODE_LOGIC) |
535 ((iremap_entry->lo.dlm != dest_LowestPrio) ?
536 MSI_ADDR_REDIRECTION_CPU:
537 MSI_ADDR_REDIRECTION_LOWPRI);
538 if ( x2apic_enabled )
539 msg->dest32 = iremap_entry->lo.dst;
540 else
541 msg->address_lo |=
542 ((iremap_entry->lo.dst >> 8) & 0xff ) << MSI_ADDR_DEST_ID_SHIFT;
544 msg->data =
545 MSI_DATA_TRIGGER_EDGE |
546 MSI_DATA_LEVEL_ASSERT |
547 ((iremap_entry->lo.dlm != dest_LowestPrio) ?
548 MSI_DATA_DELIVERY_FIXED:
549 MSI_DATA_DELIVERY_LOWPRI) |
550 iremap_entry->lo.vector;
552 unmap_vtd_domain_page(iremap_entries);
553 spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
554 return 0;
555 }
557 static int msi_msg_to_remap_entry(
558 struct iommu *iommu, struct pci_dev *pdev,
559 struct msi_desc *msi_desc, struct msi_msg *msg)
560 {
561 struct iremap_entry *iremap_entry = NULL, *iremap_entries;
562 struct iremap_entry new_ire;
563 struct msi_msg_remap_entry *remap_rte;
564 int index;
565 unsigned long flags;
566 struct ir_ctrl *ir_ctrl = iommu_ir_ctrl(iommu);
568 remap_rte = (struct msi_msg_remap_entry *) msg;
569 spin_lock_irqsave(&ir_ctrl->iremap_lock, flags);
571 if ( msg == NULL )
572 {
573 /* Free specified unused IRTE */
574 free_remap_entry(iommu, msi_desc->remap_index);
575 spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
576 return 0;
577 }
579 if ( msi_desc->remap_index < 0 )
580 {
581 /*
582 * TODO: Multiple-vector MSI requires allocating multiple contiguous
583 * entries and configuring the addr/data of msi_msg differently, so
584 * alloc_remap_entry will need to change if multiple-vector MSI is
585 * enabled in the future.
586 */
587 index = alloc_remap_entry(iommu);
588 msi_desc->remap_index = index;
589 }
590 else
591 index = msi_desc->remap_index;
593 if ( index > IREMAP_ENTRY_NR - 1 )
594 {
595 dprintk(XENLOG_ERR VTDPREFIX,
596 "%s: intremap index (%d) is larger than"
597 " the maximum index (%d)!\n",
598 __func__, index, IREMAP_ENTRY_NR - 1);
599 msi_desc->remap_index = -1;
600 spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
601 return -EFAULT;
602 }
604 GET_IREMAP_ENTRY(ir_ctrl->iremap_maddr, index,
605 iremap_entries, iremap_entry);
607 memcpy(&new_ire, iremap_entry, sizeof(struct iremap_entry));
609 /* Set interrupt remapping table entry */
610 new_ire.lo.fpd = 0;
611 new_ire.lo.dm = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
612 new_ire.lo.rh = 0;
613 new_ire.lo.tm = (msg->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
614 new_ire.lo.dlm = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
615 new_ire.lo.avail = 0;
616 new_ire.lo.res_1 = 0;
617 new_ire.lo.vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) &
618 MSI_DATA_VECTOR_MASK;
619 new_ire.lo.res_2 = 0;
620 if ( x2apic_enabled )
621 new_ire.lo.dst = msg->dest32;
622 else
623 new_ire.lo.dst = ((msg->address_lo >> MSI_ADDR_DEST_ID_SHIFT)
624 & 0xff) << 8;
626 set_msi_source_id(pdev, &new_ire);
627 new_ire.hi.res_1 = 0;
628 new_ire.lo.p = 1; /* finally, set present bit */
630 /* now construct new MSI/MSI-X rte entry */
631 remap_rte->address_lo.dontcare = 0;
632 remap_rte->address_lo.index_15 = (index >> 15) & 0x1;
633 remap_rte->address_lo.index_0_14 = index & 0x7fff;
634 remap_rte->address_lo.SHV = 1;
635 remap_rte->address_lo.format = 1;
637 remap_rte->address_hi = 0;
638 remap_rte->data = 0;
640 memcpy(iremap_entry, &new_ire, sizeof(struct iremap_entry));
641 iommu_flush_cache_entry(iremap_entry, sizeof(struct iremap_entry));
642 iommu_flush_iec_index(iommu, 0, index);
643 invalidate_sync(iommu);
645 unmap_vtd_domain_page(iremap_entries);
646 spin_unlock_irqrestore(&ir_ctrl->iremap_lock, flags);
647 return 0;
648 }
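/*
 * With format = 1 and SHV = 1, the MSI address carries the remap-table index
 * (index_0_14/index_15) instead of a destination ID, and the data register
 * carries the subhandle; it is written as 0 above since only one vector per
 * device is handled here (see the TODO about multiple-vector MSI).
 */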
650 void msi_msg_read_remap_rte(
651 struct msi_desc *msi_desc, struct msi_msg *msg)
652 {
653 struct pci_dev *pdev = msi_desc->dev;
654 struct acpi_drhd_unit *drhd = NULL;
655 struct iommu *iommu = NULL;
656 struct ir_ctrl *ir_ctrl;
658 if ( (drhd = acpi_find_matched_drhd_unit(pdev)) == NULL )
659 return;
660 iommu = drhd->iommu;
662 ir_ctrl = iommu_ir_ctrl(iommu);
663 if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
664 return;
666 remap_entry_to_msi_msg(iommu, msg);
667 }
669 void msi_msg_write_remap_rte(
670 struct msi_desc *msi_desc, struct msi_msg *msg)
671 {
672 struct pci_dev *pdev = msi_desc->dev;
673 struct acpi_drhd_unit *drhd = NULL;
674 struct iommu *iommu = NULL;
675 struct ir_ctrl *ir_ctrl;
677 if ( (drhd = acpi_find_matched_drhd_unit(pdev)) == NULL )
678 return;
679 iommu = drhd->iommu;
681 ir_ctrl = iommu_ir_ctrl(iommu);
682 if ( !iommu || !ir_ctrl || ir_ctrl->iremap_maddr == 0 )
683 return;
685 msi_msg_to_remap_entry(iommu, pdev, msi_desc, msg);
686 }
687 #elif defined(__ia64__)
688 void msi_msg_read_remap_rte(
689 struct msi_desc *msi_desc, struct msi_msg *msg)
690 {
691 /* TODO. */
692 }
694 void msi_msg_write_remap_rte(
695 struct msi_desc *msi_desc, struct msi_msg *msg)
696 {
697 /* TODO. */
698 }
699 #endif
701 int enable_intremap(struct iommu *iommu)
702 {
703 struct acpi_drhd_unit *drhd;
704 struct ir_ctrl *ir_ctrl;
705 u32 sts, gcmd;
706 unsigned long flags;
708 ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
710 ir_ctrl = iommu_ir_ctrl(iommu);
711 if ( ir_ctrl->iremap_maddr == 0 )
712 {
713 drhd = iommu_to_drhd(iommu);
714 ir_ctrl->iremap_maddr = alloc_pgtable_maddr(drhd, IREMAP_ARCH_PAGE_NR);
715 if ( ir_ctrl->iremap_maddr == 0 )
716 {
717 dprintk(XENLOG_WARNING VTDPREFIX,
718 "Cannot allocate memory for ir_ctrl->iremap_maddr\n");
719 return -ENOMEM;
720 }
721 ir_ctrl->iremap_num = 0;
722 }
724 #ifdef CONFIG_X86
725 /* set extended interrupt mode bit */
726 ir_ctrl->iremap_maddr |=
727 x2apic_enabled ? (1 << IRTA_REG_EIME_SHIFT) : 0;
728 #endif
729 spin_lock_irqsave(&iommu->register_lock, flags);
731 /* set size of the interrupt remapping table */
732 ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
733 dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);
735 /* set SIRTP */
736 gcmd = dmar_readl(iommu->reg, DMAR_GSTS_REG);
737 gcmd |= DMA_GCMD_SIRTP;
738 dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);
740 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
741 (sts & DMA_GSTS_SIRTPS), sts);
742 spin_unlock_irqrestore(&iommu->register_lock, flags);
744 /* After setting SIRTP, we must globally invalidate the interrupt entry cache */
745 iommu_flush_iec_global(iommu);
747 spin_lock_irqsave(&iommu->register_lock, flags);
748 /* enable interrupt remapping hardware */
749 gcmd |= DMA_GCMD_IRE;
750 dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);
752 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
753 (sts & DMA_GSTS_IRES), sts);
754 spin_unlock_irqrestore(&iommu->register_lock, flags);
756 return init_apic_pin_2_ir_idx();
757 }
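/*
 * Note that DMAR_IRTA_REG encodes more than the table base: the size field
 * and, on x86 with x2APIC enabled, the EIME bit are OR'ed directly into the
 * low bits of iremap_maddr before the value is written back, which is why
 * the variable is modified in place above.
 */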
759 void disable_intremap(struct iommu *iommu)
760 {
761 u32 sts;
762 unsigned long flags;
764 ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
766 spin_lock_irqsave(&iommu->register_lock, flags);
767 sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
768 dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));
770 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
771 !(sts & DMA_GSTS_IRES), sts);
772 spin_unlock_irqrestore(&iommu->register_lock, flags);
773 }