/root/src/xen/xen/include/xen/iommu.h
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2006, Intel Corporation. |
3 | | * |
4 | | * This program is free software; you can redistribute it and/or modify it |
5 | | * under the terms and conditions of the GNU General Public License, |
6 | | * version 2, as published by the Free Software Foundation. |
7 | | * |
8 | | * This program is distributed in the hope it will be useful, but WITHOUT |
9 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
10 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
11 | | * more details. |
12 | | * |
13 | | * You should have received a copy of the GNU General Public License along with |
14 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
15 | | * |
16 | | * Copyright (C) Allen Kay <allen.m.kay@intel.com> |
17 | | */ |
18 | | |
19 | | #ifndef _IOMMU_H_ |
20 | | #define _IOMMU_H_ |
21 | | |
22 | | #include <xen/init.h> |
23 | | #include <xen/page-defs.h> |
24 | | #include <xen/spinlock.h> |
25 | | #include <xen/pci.h> |
26 | | #include <public/hvm/ioreq.h> |
27 | | #include <public/domctl.h> |
28 | | #include <asm/device.h> |
29 | | #include <asm/iommu.h> |
30 | | |
31 | | extern bool_t iommu_enable, iommu_enabled; |
32 | | extern bool_t force_iommu, iommu_verbose; |
33 | | extern bool_t iommu_workaround_bios_bug, iommu_igfx, iommu_passthrough; |
34 | | extern bool_t iommu_snoop, iommu_qinval, iommu_intremap, iommu_intpost; |
35 | | extern bool_t iommu_hap_pt_share; |
36 | | extern bool_t iommu_debug; |
37 | | extern bool_t amd_iommu_perdev_intremap; |
38 | | |
39 | | extern unsigned int iommu_dev_iotlb_timeout; |
40 | | |
41 | | int iommu_setup(void); |
42 | | |
43 | | int iommu_domain_init(struct domain *d); |
44 | | void iommu_hwdom_init(struct domain *d); |
45 | | void iommu_domain_destroy(struct domain *d); |
46 | | int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn); |
47 | | |
48 | | void arch_iommu_domain_destroy(struct domain *d); |
49 | | int arch_iommu_domain_init(struct domain *d); |
50 | | int arch_iommu_populate_page_table(struct domain *d); |
51 | | void arch_iommu_check_autotranslated_hwdom(struct domain *d); |
52 | | |
53 | | int iommu_construct(struct domain *d); |
54 | | |
55 | | /* Internal helper: callers outside the IOMMU code should use iommu_domain_destroy(). */ |
56 | | void iommu_teardown(struct domain *d); |
57 | | |
58 | | /* iommu_map_page() takes flags to direct the mapping operation. */ |
59 | 850k | #define _IOMMUF_readable 0 |
60 | 850k | #define IOMMUF_readable (1u<<_IOMMUF_readable) |
61 | 850k | #define _IOMMUF_writable 1 |
62 | 850k | #define IOMMUF_writable (1u<<_IOMMUF_writable) |
63 | | int __must_check iommu_map_page(struct domain *d, unsigned long gfn, |
64 | | unsigned long mfn, unsigned int flags); |
65 | | int __must_check iommu_unmap_page(struct domain *d, unsigned long gfn); |
66 | | |
67 | | enum iommu_feature |
68 | | { |
 | | /* Hardware keeps its page-table walks coherent with CPU caches — |
 | |  * presumably no explicit cache flush is needed after PTE updates |
 | |  * (NOTE(review): confirm against the per-vendor drivers). */ |
69 | | IOMMU_FEAT_COHERENT_WALK, |
 | | /* Sentinel: number of feature bits. Must remain last — it sizes the |
 | |  * features bitmap in struct domain_iommu. */ |
70 | | IOMMU_FEAT_count |
71 | | }; |
72 | | |
73 | | bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature); |
74 | | |
 | | /* Per-domain IOMMU state, embedded in struct domain (see dom_iommu()). */ |
75 | | struct domain_iommu { |
 | | /* Architecture-specific IOMMU state for this domain. */ |
76 | | struct arch_iommu arch; |
77 | | |
78 | | /* Hook table of the platform IOMMU driver handling this domain. */ |
79 | | const struct iommu_ops *platform_ops; |
80 | | |
81 | | #ifdef CONFIG_HAS_DEVICE_TREE |
82 | | /* List of DT devices assigned to this domain */ |
83 | | struct list_head dt_devices; |
84 | | #endif |
85 | | |
86 | | /* Features supported by the IOMMU (bit per enum iommu_feature; |
87 | |  * tested via iommu_has_feature(), set via iommu_set_feature()). */ |
86 | | DECLARE_BITMAP(features, IOMMU_FEAT_count); |
88 | | }; |
89 | | |
90 | 18.2M | #define dom_iommu(d) (&(d)->iommu) |
91 | | #define iommu_set_feature(d, f) set_bit(f, dom_iommu(d)->features) |
92 | | #define iommu_clear_feature(d, f) clear_bit(f, dom_iommu(d)->features) |
93 | | |
94 | | #ifdef CONFIG_HAS_PCI |
95 | | void pt_pci_init(void); |
96 | | |
97 | | struct pirq; |
98 | | int hvm_do_IRQ_dpci(struct domain *, struct pirq *); |
99 | | int pt_irq_create_bind(struct domain *, const struct xen_domctl_bind_pt_irq *); |
100 | | int pt_irq_destroy_bind(struct domain *, const struct xen_domctl_bind_pt_irq *); |
101 | | |
102 | | void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq); |
103 | | struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *); |
104 | | void free_hvm_irq_dpci(struct hvm_irq_dpci *dpci); |
105 | | bool_t pt_irq_need_timer(uint32_t flags); |
106 | | |
107 | | struct msi_desc; |
108 | | struct msi_msg; |
109 | | |
110 | | int iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg); |
111 | | void iommu_read_msi_from_ire(struct msi_desc *msi_desc, struct msi_msg *msg); |
112 | | |
113 | 301 | #define PT_IRQ_TIME_OUT MILLISECS(8) |
114 | | #endif /* HAS_PCI */ |
115 | | |
116 | | #ifdef CONFIG_HAS_DEVICE_TREE |
117 | | #include <xen/device_tree.h> |
118 | | |
119 | | int iommu_assign_dt_device(struct domain *d, struct dt_device_node *dev); |
120 | | int iommu_deassign_dt_device(struct domain *d, struct dt_device_node *dev); |
121 | | int iommu_dt_domain_init(struct domain *d); |
122 | | int iommu_release_dt_devices(struct domain *d); |
123 | | |
124 | | int iommu_do_dt_domctl(struct xen_domctl *, struct domain *, |
125 | | XEN_GUEST_HANDLE_PARAM(xen_domctl_t)); |
126 | | |
127 | | #endif /* HAS_DEVICE_TREE */ |
128 | | |
129 | | struct page_info; |
130 | | |
131 | | /* |
132 | | * Any non-zero value returned from callbacks of this type will cause the |
133 | | * function the callback was handed to terminate its iteration. Assigning |
134 | | * meaning of these non-zero values is left to the top level caller / |
135 | | * callback pair. |
136 | | */ |
137 | | typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, u32 id, void *ctxt); |
138 | | |
 | | /* |
 | |  * Hook table implemented by each IOMMU driver; installed as |
 | |  * domain_iommu.platform_ops. Hooks returning int use the usual Xen |
 | |  * convention (0 on success, negative errno-style value on failure) — |
 | |  * NOTE(review): confirm per hook against the vendor implementations. |
 | |  */ |
139 | | struct iommu_ops { |
 | | /* Domain lifecycle: per-domain setup / hardware-domain setup. */ |
140 | | int (*init)(struct domain *d); |
141 | | void (*hwdom_init)(struct domain *d); |
 | | /* Device hot-add/remove and (re)assignment between domains. */ |
142 | | int (*add_device)(u8 devfn, device_t *dev); |
143 | | int (*enable_device)(device_t *dev); |
144 | | int (*remove_device)(u8 devfn, device_t *dev); |
145 | | int (*assign_device)(struct domain *, u8 devfn, device_t *dev, u32 flag); |
 | | /* Move a device from domain s to domain t. */ |
146 | | int (*reassign_device)(struct domain *s, struct domain *t, |
147 | | u8 devfn, device_t *dev); |
148 | | #ifdef CONFIG_HAS_PCI |
 | | /* PCI-only: device grouping and MSI interrupt-remapping-entry updates. */ |
149 | | int (*get_device_group_id)(u16 seg, u8 bus, u8 devfn); |
150 | | int (*update_ire_from_msi)(struct msi_desc *msi_desc, struct msi_msg *msg); |
151 | | void (*read_msi_from_ire)(struct msi_desc *msi_desc, struct msi_msg *msg); |
152 | | #endif /* HAS_PCI */ |
153 | | |
154 | | void (*teardown)(struct domain *d); |
 | | /* Map/unmap a single gfn->mfn translation; flags are IOMMUF_* bits. */ |
155 | | int __must_check (*map_page)(struct domain *d, unsigned long gfn, |
156 | | unsigned long mfn, unsigned int flags); |
157 | | int __must_check (*unmap_page)(struct domain *d, unsigned long gfn); |
158 | | void (*free_page_table)(struct page_info *); |
159 | | #ifdef CONFIG_X86 |
 | | /* x86-only: IO-APIC RTE / HPET MSI interrupt-remapping hooks. */ |
160 | | void (*update_ire_from_apic)(unsigned int apic, unsigned int reg, unsigned int value); |
161 | | unsigned int (*read_apic_from_ire)(unsigned int apic, unsigned int reg); |
162 | | int (*setup_hpet_msi)(struct msi_desc *); |
163 | | #endif /* CONFIG_X86 */ |
 | | /* Power management and crash-path shutdown. */ |
164 | | int __must_check (*suspend)(void); |
165 | | void (*resume)(void); |
166 | | void (*share_p2m)(struct domain *d); |
167 | | void (*crash_shutdown)(void); |
 | | /* IOTLB invalidation: a gfn range, or everything for the domain. */ |
168 | | int __must_check (*iotlb_flush)(struct domain *d, unsigned long gfn, |
169 | | unsigned int page_count); |
170 | | int __must_check (*iotlb_flush_all)(struct domain *d); |
 | | /* Enumerate reserved device memory regions via the given callback. */ |
171 | | int (*get_reserved_device_memory)(iommu_grdm_t *, void *); |
 | | /* Debug aid: dump the IOMMU page tables for the domain. */ |
172 | | void (*dump_p2m_table)(struct domain *d); |
173 | | }; |
174 | | |
175 | | int __must_check iommu_suspend(void); |
176 | | void iommu_resume(void); |
177 | | void iommu_crash_shutdown(void); |
178 | | int iommu_get_reserved_device_memory(iommu_grdm_t *, void *); |
179 | | |
180 | | void iommu_share_p2m_table(struct domain *d); |
181 | | |
182 | | #ifdef CONFIG_HAS_PCI |
183 | | int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d, |
184 | | XEN_GUEST_HANDLE_PARAM(xen_domctl_t)); |
185 | | #endif |
186 | | |
187 | | int iommu_do_domctl(struct xen_domctl *, struct domain *d, |
188 | | XEN_GUEST_HANDLE_PARAM(xen_domctl_t)); |
189 | | |
190 | | int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn, |
191 | | unsigned int page_count); |
192 | | int __must_check iommu_iotlb_flush_all(struct domain *d); |
193 | | |
194 | | void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev); |
195 | | |
196 | | /* |
197 | | * The purpose of the iommu_dont_flush_iotlb optional cpu flag is to |
198 | | * avoid unnecessary iotlb_flush in the low level IOMMU code. |
199 | | * |
200 | | * iommu_map_page/iommu_unmap_page must flush the iotlb but sometimes |
201 | | * this operation can be really expensive. This flag will be set by the |
202 | | * caller to notify the low level IOMMU code to avoid the iotlb flushes. |
203 | | * iommu_iotlb_flush/iommu_iotlb_flush_all will be explicitly called by |
204 | | * the caller. |
205 | | */ |
206 | | DECLARE_PER_CPU(bool_t, iommu_dont_flush_iotlb); |
207 | | |
208 | | extern struct spinlock iommu_pt_cleanup_lock; |
209 | | extern struct page_list_head iommu_pt_cleanup_list; |
210 | | |
211 | | #endif /* _IOMMU_H_ */ |