debuggers.hg

view xen/include/asm-x86/hvm/vmx/intel-iommu.h @ 16569:32237d8517b1

vt-d: Use bitmap to solve domain-id limitation issue.

The Capability register reports the domain-id width supported by
hardware. For implementations supporting less than 16-bit domainids,
unused bits of domain identifier field(87:72) in Context entry are
treated as reserved by hardware. For example, for an implementation
supporting 4-bit domain-ids, bits 87:76 of this field are treated as
reserved. Since as few as 16 domain-ids may be available, overflow can
easily happen. What's more,
context-entries programmed with the same domain identifier must always
reference the same address translation structure (through the ASR
field). So Dom16 will conflict with Dom0, and device assignment fails.

This patch implements a domain-id bitmap to solve the above issue.

Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Dec 05 10:53:47 2007 +0000 (2007-12-05)
parents f173cd885ffb
children 2633dc4f55d4
line source
1 /*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) Ashok Raj <ashok.raj@intel.com>
18 */
20 #ifndef _INTEL_IOMMU_H_
21 #define _INTEL_IOMMU_H_
23 #include <xen/types.h>
25 /*
26 * Intel IOMMU register specification per version 1.0 public spec.
27 */
/*
 * MMIO register offsets, relative to a DRHD unit's register base, as laid
 * out in the VT-d architecture specification (version 1.0).
 */
#define DMAR_VER_REG      0x0   /* architecture version supported */
#define DMAR_CAP_REG      0x8   /* hardware capabilities */
#define DMAR_ECAP_REG     0x10  /* extended capabilities */
#define DMAR_GCMD_REG     0x18  /* global command */
#define DMAR_GSTS_REG     0x1c  /* global status */
#define DMAR_RTADDR_REG   0x20  /* root-entry table address */
#define DMAR_CCMD_REG     0x28  /* context command */
#define DMAR_FSTS_REG     0x34  /* fault status */
#define DMAR_FECTL_REG    0x38  /* fault-event control */
#define DMAR_FEDATA_REG   0x3c  /* fault-event interrupt data */
#define DMAR_FEADDR_REG   0x40  /* fault-event interrupt address */
#define DMAR_FEUADDR_REG  0x44  /* fault-event upper address */
#define DMAR_AFLOG_REG    0x58  /* advanced fault log */
#define DMAR_PMEN_REG     0x64  /* protected-memory enable */
#define DMAR_PLMBASE_REG  0x68  /* protected low-memory base */
#define DMAR_PLMLIMIT_REG 0x6c  /* protected low-memory limit */
#define DMAR_PHMBASE_REG  0x70  /* protected high-memory base */
#define DMAR_PHMLIMIT_REG 0x78  /* protected high-memory limit */
#define DMAR_IQH_REG      0x80  /* invalidation queue head */
#define DMAR_IQT_REG      0x88  /* invalidation queue tail */
#define DMAR_IQA_REG      0x90  /* invalidation queue address */
#define DMAR_IRTA_REG     0xB8  /* interrupt-remapping table address */
#define OFFSET_STRIDE (9)

/*
 * 32-bit MMIO accessors for DMAR registers.  64-bit registers are accessed
 * as two 32-bit halves (low word first).  All macro arguments are fully
 * parenthesised so that callers may pass arbitrary expressions for the
 * base, offset and value without precedence surprises.
 */
#define dmar_readl(dmar, reg) readl((dmar) + (reg))
#define dmar_writel(dmar, reg, val) writel((val), (dmar) + (reg))
#define dmar_readq(dmar, reg) ({ \
    u32 lo, hi; \
    lo = dmar_readl(dmar, reg); \
    hi = dmar_readl(dmar, (reg) + 4); \
    (((u64) hi) << 32) + lo; })
#define dmar_writeq(dmar, reg, val) do {\
    dmar_writel(dmar, reg, (u32)(val)); \
    dmar_writel(dmar, (reg) + 4, (u32)((u64)(val) >> 32)); \
} while (0)
/* Version register: major in bits 7:4, minor in bits 3:0. */
#define VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define VER_MINOR(v) ((v) & 0x0f)

/*
 * Capability Register decoding.
 */
#define cap_read_drain(c)     (((c) >> 55) & 1)
#define cap_write_drain(c)    (((c) >> 54) & 1)
#define cap_max_amask_val(c)  (((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)      (((c) >> 39) & 1)

#define cap_super_page_val(c) (((c) >> 34) & 0xf)
/* NOTE(review): takes the address of a macro expansion result, which is
 * not an lvalue in standard C -- confirm this still builds, or switch to
 * an ffs()-style computation. */
#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
                              * OFFSET_STRIDE) + 21)

/* Fault-recording register offset, in bytes from the register base. */
#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)

#define cap_isoch(c)        (((c) >> 23) & 1)
#define cap_qos(c)          (((c) >> 22) & 1)
#define cap_mgaw(c)         ((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)        (((c) >> 8) & 0x1f)
#define cap_caching_mode(c) (((c) >> 7) & 1)
#define cap_phmr(c)         (((c) >> 6) & 1)
#define cap_plmr(c)         (((c) >> 5) & 1)
#define cap_rwbf(c)         (((c) >> 4) & 1)
#define cap_afl(c)          (((c) >> 3) & 1)
/* Number of domain-ids supported: 2^(4 + 2*nd), nd = 3-bit ND field. */
#define cap_ndoms(c)        (1 << (4 + 2 * ((c) & 0x7)))
/*
 * Extended Capability Register decoding.  Every macro argument is
 * parenthesised before use so that expression arguments (e.g. "a | b")
 * decode correctly; the previous forms applied ">>" to the raw token
 * stream and mis-evaluated such arguments.
 */
#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
#define ecap_iotlb_offset(e)  ((((e) >> 8) & 0x3ff) * 16)
#define ecap_coherent(e)      (((e) >> 0) & 0x1)
#define ecap_queued_inval(e)  (((e) >> 1) & 0x1)
#define ecap_dev_iotlb(e)     (((e) >> 2) & 0x1)
#define ecap_intr_remap(e)    (((e) >> 3) & 0x1)
#define ecap_ext_intr(e)      (((e) >> 4) & 0x1)
#define ecap_cache_hints(e)   (((e) >> 5) & 0x1)
#define ecap_pass_thru(e)     (((e) >> 6) & 0x1)
/* Minimal (4KiB) VT-d page granularity and round-up helper. */
#define PAGE_SHIFT_4K       (12)
#define PAGE_SIZE_4K        (1UL << PAGE_SHIFT_4K)
#define PAGE_MASK_4K        (((u64)-1) << PAGE_SHIFT_4K)
#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
/* IOTLB invalidate register (IOTLB_REG) field encodings. */
#define DMA_TLB_FLUSH_GRANU_OFFSET 60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(x) (((x) >> 60) & 7)   /* requested granularity */
#define DMA_TLB_IAIG(val) (((val) >> 57) & 7) /* actual granularity */
/* Domain-ID field, bits 47:32.  The argument is parenthesised before
 * masking so expression arguments are handled correctly. */
#define DMA_TLB_DID(x) (((u64)((x) & 0xffff)) << 32)

#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_IVT (((u64)1) << 63)        /* invalidate IOTLB trigger */
#define DMA_TLB_IVA_ADDR(x) ((((u64)(x)) >> 12) << 12) /* 4K-align address */
#define DMA_TLB_IVA_HINT(x) ((((u64)(x)) & 1) << 6)
/* Global command register (GCMD_REG) bits. */
#define DMA_GCMD_TE    (((u64)1) << 31)  /* translation enable */
#define DMA_GCMD_SRTP  (((u64)1) << 30)  /* set root-table pointer */
#define DMA_GCMD_SFL   (((u64)1) << 29)  /* set fault log */
#define DMA_GCMD_EAFL  (((u64)1) << 28)  /* enable advanced fault log */
#define DMA_GCMD_WBF   (((u64)1) << 27)  /* write-buffer flush */
#define DMA_GCMD_QIE   (((u64)1) << 26)  /* queued-invalidation enable */
#define DMA_GCMD_IRE   (((u64)1) << 25)  /* interrupt-remapping enable */
#define DMA_GCMD_SIRTP (((u64)1) << 24)  /* set intr-remap table pointer */

/* Global status register (GSTS_REG) bits; each mirrors a GCMD bit. */
#define DMA_GSTS_TES   (((u64)1) << 31)
#define DMA_GSTS_RTPS  (((u64)1) << 30)
#define DMA_GSTS_FLS   (((u64)1) << 29)
#define DMA_GSTS_AFLS  (((u64)1) << 28)
#define DMA_GSTS_WBFS  (((u64)1) << 27)
#define DMA_GSTS_IRTPS (((u64)1) << 24)
#define DMA_GSTS_QIES  (((u64)1) << 26)
#define DMA_GSTS_IRES  (((u64)1) << 25)

/* Protected-memory enable register (PMEN_REG) bits. */
#define DMA_PMEN_EPM (((u32)1) << 31)  /* enable protected memory */
#define DMA_PMEN_PRS (((u32)1) << 0)   /* protected region status */
/* Context-cache invalidate register (CCMD_REG) field encodings. */
#define DMA_CCMD_INVL_GRANU_OFFSET 61
#define DMA_CCMD_ICC (((u64)1) << 63)        /* invalidate trigger */
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32) /* function mask */
/* Extract the requested invalidation granularity (bits 62:61); the
 * argument is parenthesised so expression arguments mask correctly. */
#define DMA_CCMD_CIRG(x) ((((u64)3) << 61) & (x))
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16) /* source-id */
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))         /* domain-id */

/* Actual invalidation granularity (bits 60:59) reported by hardware. */
#define DMA_CCMD_CAIG_MASK(x) (((u64)(x)) & ((u64) 0x3 << 59))
/* Fault-event control register: interrupt mask bit. */
#define DMA_FECTL_IM (((u64)1) << 31)

/* Fault status register. */
#define DMA_FSTS_PPF ((u64)2)   /* primary pending fault */
#define DMA_FSTS_PFO ((u64)1)   /* primary fault overflow */
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* Fault recording registers (32-bit access).  Arguments are
 * parenthesised before masking/shifting so expression arguments are
 * decoded correctly. */
#define DMA_FRCD_F (((u64)1) << 31)           /* fault-record valid */
#define dma_frcd_type(d) (((d) >> 30) & 1)
#define dma_frcd_fault_reason(c) ((c) & 0xff)
#define dma_frcd_source_id(c) ((c) & 0xffff)
#define dma_frcd_page_addr(d) ((d) & (((u64)-1) << 12)) /* low 64 bit */
185 /*
186 * 0: Present
187 * 1-11: Reserved
188 * 12-63: Context Ptr (12 - (haw-1))
189 * 64-127: Reserved
190 */
191 struct root_entry {
/* Low quadword: present bit plus 4K-aligned context-table pointer. */
192 u64 val;
/* High quadword: reserved, must be zero. */
193 u64 rsvd1;
194 };
/* Bit 0 of val is the Present flag. */
195 #define root_present(root) ((root).val & 1)
196 #define set_root_present(root) do {(root).val |= 1;} while(0)
/* Context-table pointer occupies the 4K-aligned address bits of val. */
197 #define get_context_addr(root) ((root).val & PAGE_MASK_4K)
/* NOTE(review): ORs the new address in without clearing any previous
 * value -- presumably only ever applied to freshly zeroed entries;
 * confirm against the callers. */
198 #define set_root_value(root, value) \
199 do {(root).val |= ((value) & PAGE_MASK_4K);} while(0)
201 struct context_entry {
/* Entry bits 63:0: present (0), fault-disable (1), translation type
 * (3:2) and the 4K-aligned address-space-root pointer (63:12). */
202 u64 lo;
/* Entry bits 127:64: address width in hi bits 2:0, domain-id in hi
 * bits 23:8 (entry bits 87:72). */
203 u64 hi;
204 };
/* Number of root entries that fit in one 4K page. */
205 #define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
206 #define context_present(c) ((c).lo & 1)
207 #define context_fault_disable(c) (((c).lo >> 1) & 1)
208 #define context_translation_type(c) (((c).lo >> 2) & 3)
209 #define context_address_root(c) ((c).lo & PAGE_MASK_4K)
210 #define context_address_width(c) ((c).hi & 7)
/* Domain identifier: entry bits 87:72, i.e. hi bits 23:8. */
211 #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
/* Context-entry field setters (struct context_entry). */
#define context_set_present(c) do {(c).lo |= 1;} while(0)
#define context_clear_present(c) do {(c).lo &= ~1;} while(0)
/* Clear only the fault-processing-disable bit (bit 1). */
#define context_set_fault_enable(c) \
    do {(c).lo &= (((u64)-1) << 2) | 1;} while(0)

/* Replace the 2-bit translation-type field (lo bits 3:2); the argument
 * is parenthesised so expression arguments mask correctly. */
#define context_set_translation_type(c, val) do { \
        (c).lo &= (((u64)-1) << 4) | 3; \
        (c).lo |= ((val) & 3) << 2; \
    } while(0)
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THRU 2

/* Install the 4K-aligned address-space root, keeping the flag bits. */
#define context_set_address_root(c, val) \
    do {(c).lo &= 0xfff; (c).lo |= (val) & PAGE_MASK_4K ;} while(0)
/*
 * Address width lives in hi bits 2:0.  Mask with a 64-bit constant: the
 * previous 32-bit 0xfffffff8 zero-extended to 64 bits and so silently
 * cleared hi bits 63:32 as a side effect.
 */
#define context_set_address_width(c, val) \
    do {(c).hi &= ~(u64)7; (c).hi |= (val) & 7;} while(0)
#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while(0)
/* page table handling */
#define LEVEL_STRIDE (9)                    /* 512 entries per level */
#define LEVEL_MASK ((1 << LEVEL_STRIDE) - 1)
#define agaw_to_level(val) ((val) + 2)
#define agaw_to_width(val) (30 + (val) * LEVEL_STRIDE)
#define width_to_agaw(w) (((w) - 30)/LEVEL_STRIDE)
#define level_to_offset_bits(l) (12 + ((l) - 1) * LEVEL_STRIDE)
#define address_level_offset(addr, level) \
    (((addr) >> level_to_offset_bits(level)) & LEVEL_MASK)
#define level_mask(l) (((u64)(-1)) << level_to_offset_bits(l))
/* Shift a 64-bit 1: for level >= 3 the shift count exceeds 31, and
 * left-shifting a plain int that far is undefined behaviour. */
#define level_size(l) (((u64)1) << level_to_offset_bits(l))
#define align_to_level(addr, l) (((addr) + level_size(l) - 1) & level_mask(l))
245 /*
246 * 0: readable
247 * 1: writable
248 * 2-6: reserved
249 * 7: super page
250 * 8-11: available
251 * 12-63: Host physical address
252 */
253 struct dma_pte {
/* Raw 64-bit VT-d page-table entry; see the bit layout above. */
254 u64 val;
255 };
/* Accessors for struct dma_pte (see the field layout above). */
#define dma_clear_pte(p) do {(p).val = 0;} while(0)
#define dma_set_pte_readable(p) do {(p).val |= 1;} while(0)
#define dma_set_pte_writable(p) do {(p).val |= 2;} while(0)
/* Super-page flag is bit 7 per the layout comment above and the VT-d
 * spec; the previous value 8 set reserved bit 3 instead. */
#define dma_set_pte_superpage(p) do {(p).val |= (1 << 7);} while(0)
/* Replace the R/W bits (1:0) while preserving all other bits. */
#define dma_set_pte_prot(p, prot) \
    do { (p).val = (((p).val >> 2) << 2) | ((prot) & 3); } while (0)
#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
/* NOTE(review): ORs the 4K-aligned address in without clearing any
 * previous value -- presumably applied only to zeroed entries. */
#define dma_set_pte_addr(p, addr) \
    do {(p).val |= ((addr) >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K;} while(0)
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define dma_pte_present(p) (((p).val & 3) != 0)
267 /* interrupt remap entry */
268 struct iremap_entry {
269 struct {
/* Low quadword: present/fault-disable flags, delivery attributes,
 * vector, and the 32-bit destination field of the IRTE. */
270 u64 present : 1,
271 fpd : 1,
272 dm : 1,
273 rh : 1,
274 tm : 1,
275 dlm : 3,
276 avail : 4,
277 res_1 : 4,
278 vector : 8,
279 res_2 : 8,
280 dst : 32;
281 }lo;
282 struct {
/* High quadword: source-id validation fields; remainder reserved. */
283 u64 sid : 16,
284 sq : 2,
285 svt : 2,
286 res_1 : 44;
287 }hi;
288 };
/* Number of IRTEs that fit in one 4K page. */
289 #define IREMAP_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct iremap_entry))
/* NOTE(review): (v).lo is a bit-field struct, so "(v).lo & 1" cannot
 * compile when v is a struct iremap_entry -- these macros appear to
 * expect a raw two-u64 view of the entry; confirm before use. */
290 #define iremap_present(v) ((v).lo & 1)
291 #define iremap_fault_disable(v) (((v).lo >> 1) & 1)
293 #define iremap_set_present(v) do {(v).lo |= 1;} while(0)
294 #define iremap_clear_present(v) do {(v).lo &= ~1;} while(0)
296 /* queue invalidation entry */
297 struct qinval_entry {
298 union {
/* Context-cache invalidate descriptor. */
299 struct {
300 struct {
301 u64 type : 4,
302 granu : 2,
303 res_1 : 10,
304 did : 16,
305 sid : 16,
306 fm : 2,
307 res_2 : 14;
308 }lo;
309 struct {
310 u64 res;
311 }hi;
312 }cc_inv_dsc;
/* IOTLB invalidate descriptor. */
313 struct {
314 struct {
315 u64 type : 4,
316 granu : 2,
317 dw : 1,
318 dr : 1,
319 res_1 : 8,
320 did : 16,
321 res_2 : 32;
322 }lo;
323 struct {
324 u64 am : 6,
325 ih : 1,
326 res_1 : 5,
327 addr : 52;
328 }hi;
329 }iotlb_inv_dsc;
/* Device-IOTLB invalidate descriptor. */
330 struct {
331 struct {
332 u64 type : 4,
333 res_1 : 12,
334 max_invs_pend: 5,
335 res_2 : 11,
336 sid : 16,
337 res_3 : 16;
338 }lo;
339 struct {
340 u64 size : 1,
341 res_1 : 11,
342 addr : 52;
343 }hi;
344 }dev_iotlb_inv_dsc;
/* Interrupt-entry-cache invalidate descriptor. */
345 struct {
346 struct {
347 u64 type : 4,
348 granu : 1,
349 res_1 : 22,
350 im : 5,
351 iidx : 16,
352 res_2 : 16;
353 }lo;
354 struct {
355 u64 res;
356 }hi;
357 }iec_inv_dsc;
/* Invalidation-wait descriptor: optionally writes sdata to saddr
 * when all preceding descriptors have completed. */
358 struct {
359 struct {
360 u64 type : 4,
361 iflag : 1,
362 sw : 1,
363 fn : 1,
364 res_1 : 25,
365 sdata : 32;
366 }lo;
367 struct {
368 u64 res_1 : 2,
369 saddr : 62;
370 }hi;
371 }inv_wait_dsc;
372 }q;
373 };
375 struct poll_info {
/* Status address an invalidation-wait descriptor writes to. */
376 u64 saddr;
/* Status data value stored at saddr on completion. */
377 u32 udata;
378 };
/* Number of qinval descriptors that fit in one 4K page. */
380 #define QINVAL_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct qinval_entry))
/* NOTE(review): struct qinval_entry has no member named "lo" (its only
 * member is the union "q"), so these four macros cannot be applied to a
 * struct qinval_entry directly -- confirm the intended operand type. */
381 #define qinval_present(v) ((v).lo & 1)
382 #define qinval_fault_disable(v) (((v).lo >> 1) & 1)
384 #define qinval_set_present(v) do {(v).lo |= 1;} while(0)
385 #define qinval_clear_present(v) do {(v).lo &= ~1;} while(0)
387 #define RESERVED_VAL 0
/* Invalidation descriptor type codes (the 4-bit "type" fields above). */
389 #define TYPE_INVAL_CONTEXT 1
390 #define TYPE_INVAL_IOTLB 2
391 #define TYPE_INVAL_DEVICE_IOTLB 3
392 #define TYPE_INVAL_IEC 4
393 #define TYPE_INVAL_WAIT 5
/* Invalidation-wait descriptor flag values. */
395 #define NOTIFY_TYPE_POLL 1
/* NOTE(review): same value as NOTIFY_TYPE_POLL -- looks like it was
 * meant to be a distinct code; verify against users of these macros. */
396 #define NOTIFY_TYPE_INTR 1
/* NOTE(review): name misspells "INTERRUPT"; kept as-is because renaming
 * would break users of this header. */
397 #define INTERRUTP_FLAG 1
398 #define STATUS_WRITE 1
399 #define FENCE_FLAG 1
/* Interrupt-entry-cache invalidation granularity. */
401 #define IEC_GLOBAL_INVL 0
402 #define IEC_INDEX_INVL 1
/* Supported page-table depths (3 levels = 39-bit, 4 levels = 48-bit). */
404 #define VTD_PAGE_TABLE_LEVEL_3 3
405 #define VTD_PAGE_TABLE_LEVEL_4 4
/* DMA addresses are host physical addresses. */
407 typedef paddr_t dma_addr_t;
409 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
/* Upper bounds on DRHD units and per-unit register space. */
410 #define MAX_IOMMUS 32
411 #define MAX_IOMMU_REGS 0xc0
/* ACPI-discovered DMAR structure lists, defined in the VT-d driver. */
413 extern struct list_head acpi_drhd_units;
414 extern struct list_head acpi_rmrr_units;
415 extern struct list_head acpi_ioapic_units;
417 #endif