/root/src/xen/xen/drivers/passthrough/vtd/qinval.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (c) 2006, Intel Corporation. |
3 | | * |
4 | | * This program is free software; you can redistribute it and/or modify it |
5 | | * under the terms and conditions of the GNU General Public License, |
6 | | * version 2, as published by the Free Software Foundation. |
7 | | * |
8 | | * This program is distributed in the hope it will be useful, but WITHOUT |
9 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
10 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
11 | | * more details. |
12 | | * |
13 | | * You should have received a copy of the GNU General Public License along with |
14 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
15 | | * |
16 | | * Copyright (C) Allen Kay <allen.m.kay@intel.com> |
17 | | * Copyright (C) Xiaohui Xin <xiaohui.xin@intel.com> |
18 | | */ |
19 | | |
20 | | |
21 | | #include <xen/sched.h> |
22 | | #include <xen/iommu.h> |
23 | | #include <xen/time.h> |
24 | | #include <xen/pci.h> |
25 | | #include <xen/pci_regs.h> |
26 | | #include "iommu.h" |
27 | | #include "dmar.h" |
28 | | #include "vtd.h" |
29 | | #include "extern.h" |
30 | | #include "../ats.h" |
31 | | |
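 | | /* Timeout, in milliseconds, for a queued invalidation wait descriptor (device-IOTLB flushes use iommu_dev_iotlb_timeout instead). */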
32 | | #define VTD_QI_TIMEOUT 1 |
33 | | |
34 | | static int __must_check invalidate_sync(struct iommu *iommu); |
35 | | |
36 | | static void print_qi_regs(struct iommu *iommu) |
37 | 0 | { |
38 | 0 | u64 val; |
 39 | 0 |
40 | 0 | val = dmar_readq(iommu->reg, DMAR_IQA_REG); |
41 | 0 | printk("DMAR_IQA_REG = %"PRIx64"\n", val); |
 42 | 0 |
43 | 0 | val = dmar_readq(iommu->reg, DMAR_IQH_REG); |
44 | 0 | printk("DMAR_IQH_REG = %"PRIx64"\n", val); |
 45 | 0 |
46 | 0 | val = dmar_readq(iommu->reg, DMAR_IQT_REG); |
47 | 0 | printk("DMAR_IQT_REG = %"PRIx64"\n", val); |
48 | 0 | } |
49 | | |
50 | | static unsigned int qinval_next_index(struct iommu *iommu) |
51 | 436k | { |
52 | 436k | u64 tail; |
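 | | /* IQH/IQT hold the head/tail offsets in descriptor-sized units starting at bit QINVAL_INDEX_SHIFT; shift down to get plain entry indexes. */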
53 | 436k | |
54 | 436k | tail = dmar_readq(iommu->reg, DMAR_IQT_REG); |
55 | 436k | tail >>= QINVAL_INDEX_SHIFT; |
56 | 436k | |
57 | 436k | /* (tail+1 == head) indicates a full queue, wait for HW */ |
58 | 436k | while ( ( tail + 1 ) % QINVAL_ENTRY_NR == |
59 | 436k | ( dmar_readq(iommu->reg, DMAR_IQH_REG) >> QINVAL_INDEX_SHIFT ) ) |
60 | 0 | cpu_relax(); |
61 | 436k | |
62 | 436k | return tail; |
63 | 436k | } |
64 | | |
65 | | static void qinval_update_qtail(struct iommu *iommu, unsigned int index) |
66 | 436k | { |
67 | 436k | u64 val; |
68 | 436k | |
 69 | 436k | /* Must hold the register lock when updating the tail */
70 | 436k | ASSERT( spin_is_locked(&iommu->register_lock) ); |
71 | 436k | val = (index + 1) % QINVAL_ENTRY_NR; |
72 | 436k | dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << QINVAL_INDEX_SHIFT)); |
73 | 436k | } |
74 | | |
75 | | static int __must_check queue_invalidate_context_sync(struct iommu *iommu, |
76 | | u16 did, u16 source_id, |
77 | | u8 function_mask, |
78 | | u8 granu) |
79 | 2 | { |
80 | 2 | unsigned long flags; |
81 | 2 | unsigned int index; |
82 | 2 | u64 entry_base; |
83 | 2 | struct qinval_entry *qinval_entry, *qinval_entries; |
84 | 2 | |
85 | 2 | spin_lock_irqsave(&iommu->register_lock, flags); |
86 | 2 | index = qinval_next_index(iommu); |
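 | | /* Locate the queue page holding this index: each 4KB queue page holds (1 << QINVAL_ENTRY_ORDER) descriptors. */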
87 | 2 | entry_base = iommu_qi_ctrl(iommu)->qinval_maddr + |
88 | 2 | ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT); |
89 | 2 | qinval_entries = map_vtd_domain_page(entry_base); |
90 | 2 | qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)]; |
91 | 2 | |
92 | 2 | qinval_entry->q.cc_inv_dsc.lo.type = TYPE_INVAL_CONTEXT; |
93 | 2 | qinval_entry->q.cc_inv_dsc.lo.granu = granu; |
94 | 2 | qinval_entry->q.cc_inv_dsc.lo.res_1 = 0; |
95 | 2 | qinval_entry->q.cc_inv_dsc.lo.did = did; |
96 | 2 | qinval_entry->q.cc_inv_dsc.lo.sid = source_id; |
97 | 2 | qinval_entry->q.cc_inv_dsc.lo.fm = function_mask; |
98 | 2 | qinval_entry->q.cc_inv_dsc.lo.res_2 = 0; |
99 | 2 | qinval_entry->q.cc_inv_dsc.hi.res = 0; |
100 | 2 | |
101 | 2 | qinval_update_qtail(iommu, index); |
102 | 2 | spin_unlock_irqrestore(&iommu->register_lock, flags); |
103 | 2 | |
104 | 2 | unmap_vtd_domain_page(qinval_entries); |
105 | 2 | |
106 | 2 | return invalidate_sync(iommu); |
107 | 2 | } |
108 | | |
109 | | static int __must_check queue_invalidate_iotlb_sync(struct iommu *iommu, |
110 | | u8 granu, u8 dr, u8 dw, |
111 | | u16 did, u8 am, u8 ih, |
112 | | u64 addr) |
113 | 218k | { |
114 | 218k | unsigned long flags; |
115 | 218k | unsigned int index; |
116 | 218k | u64 entry_base; |
117 | 218k | struct qinval_entry *qinval_entry, *qinval_entries; |
118 | 218k | |
119 | 218k | spin_lock_irqsave(&iommu->register_lock, flags); |
120 | 218k | index = qinval_next_index(iommu); |
121 | 218k | entry_base = iommu_qi_ctrl(iommu)->qinval_maddr + |
122 | 218k | ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT); |
123 | 218k | qinval_entries = map_vtd_domain_page(entry_base); |
124 | 218k | qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)]; |
125 | 218k | |
126 | 218k | qinval_entry->q.iotlb_inv_dsc.lo.type = TYPE_INVAL_IOTLB; |
127 | 218k | qinval_entry->q.iotlb_inv_dsc.lo.granu = granu; |
128 | 218k | qinval_entry->q.iotlb_inv_dsc.lo.dr = dr; |
129 | 218k | qinval_entry->q.iotlb_inv_dsc.lo.dw = dw; |
130 | 218k | qinval_entry->q.iotlb_inv_dsc.lo.res_1 = 0; |
131 | 218k | qinval_entry->q.iotlb_inv_dsc.lo.did = did; |
132 | 218k | qinval_entry->q.iotlb_inv_dsc.lo.res_2 = 0; |
133 | 218k | |
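 | | /* am encodes the address mask for page-selective flushes: 2^am pages starting at the 4KB-aligned addr. */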
134 | 218k | qinval_entry->q.iotlb_inv_dsc.hi.am = am; |
135 | 218k | qinval_entry->q.iotlb_inv_dsc.hi.ih = ih; |
136 | 218k | qinval_entry->q.iotlb_inv_dsc.hi.res_1 = 0; |
137 | 218k | qinval_entry->q.iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K; |
138 | 218k | |
139 | 218k | unmap_vtd_domain_page(qinval_entries); |
140 | 218k | qinval_update_qtail(iommu, index); |
141 | 218k | spin_unlock_irqrestore(&iommu->register_lock, flags); |
142 | 218k | |
143 | 218k | return invalidate_sync(iommu); |
144 | 218k | } |
145 | | |
146 | | static int __must_check queue_invalidate_wait(struct iommu *iommu, |
147 | | u8 iflag, u8 sw, u8 fn, |
148 | | bool_t flush_dev_iotlb) |
149 | 218k | { |
150 | 218k | volatile u32 poll_slot = QINVAL_STAT_INIT; |
151 | 218k | unsigned int index; |
152 | 218k | unsigned long flags; |
153 | 218k | u64 entry_base; |
154 | 218k | struct qinval_entry *qinval_entry, *qinval_entries; |
155 | 218k | |
156 | 218k | spin_lock_irqsave(&iommu->register_lock, flags); |
157 | 218k | index = qinval_next_index(iommu); |
158 | 218k | entry_base = iommu_qi_ctrl(iommu)->qinval_maddr + |
159 | 218k | ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT); |
160 | 218k | qinval_entries = map_vtd_domain_page(entry_base); |
161 | 218k | qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)]; |
162 | 218k | |
163 | 218k | qinval_entry->q.inv_wait_dsc.lo.type = TYPE_INVAL_WAIT; |
164 | 218k | qinval_entry->q.inv_wait_dsc.lo.iflag = iflag; |
165 | 218k | qinval_entry->q.inv_wait_dsc.lo.sw = sw; |
166 | 218k | qinval_entry->q.inv_wait_dsc.lo.fn = fn; |
167 | 218k | qinval_entry->q.inv_wait_dsc.lo.res_1 = 0; |
168 | 218k | qinval_entry->q.inv_wait_dsc.lo.sdata = QINVAL_STAT_DONE; |
169 | 218k | qinval_entry->q.inv_wait_dsc.hi.res_1 = 0; |
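 | | /* The Status Address field holds bits [63:2] of the 4-byte aligned poll slot; hardware writes sdata there once the wait descriptor completes. */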
170 | 218k | qinval_entry->q.inv_wait_dsc.hi.saddr = virt_to_maddr(&poll_slot) >> 2; |
171 | 218k | |
172 | 218k | unmap_vtd_domain_page(qinval_entries); |
173 | 218k | qinval_update_qtail(iommu, index); |
174 | 218k | spin_unlock_irqrestore(&iommu->register_lock, flags); |
175 | 218k | |
 176 | 218k | /* We don't support the interrupt-based completion method yet */
177 | 218k | if ( sw ) |
178 | 218k | { |
179 | 218k | s_time_t timeout; |
180 | 218k | |
 181 | 218k | /* In case all wait descriptors write the same data to the same address */
182 | 218k | timeout = NOW() + MILLISECS(flush_dev_iotlb ? |
183 | 218k | iommu_dev_iotlb_timeout : VTD_QI_TIMEOUT); |
184 | 218k | |
185 | 218k | while ( poll_slot != QINVAL_STAT_DONE ) |
186 | 0 | { |
187 | 0 | if ( NOW() > timeout ) |
188 | 0 | { |
189 | 0 | print_qi_regs(iommu); |
190 | 0 | printk(XENLOG_WARNING VTDPREFIX |
191 | 0 | " Queue invalidate wait descriptor timed out\n"); |
192 | 0 | return -ETIMEDOUT; |
193 | 0 | } |
194 | 0 | cpu_relax(); |
195 | 0 | } |
196 | 218k | return 0; |
197 | 218k | } |
198 | 218k | |
199 | 0 | return -EOPNOTSUPP; |
200 | 218k | } |
201 | | |
202 | | static int __must_check invalidate_sync(struct iommu *iommu) |
203 | 218k | { |
204 | 218k | struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu); |
205 | 218k | |
206 | 218k | ASSERT(qi_ctrl->qinval_maddr); |
207 | 218k | |
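 | | /* iflag = 0 (no interrupt), sw = 1 (status write), fn = 1 (fence), no device-IOTLB flush. */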
208 | 218k | return queue_invalidate_wait(iommu, 0, 1, 1, 0); |
209 | 218k | } |
210 | | |
211 | | static int __must_check dev_invalidate_sync(struct iommu *iommu, |
212 | | struct pci_dev *pdev, u16 did) |
213 | 0 | { |
214 | 0 | struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu); |
215 | 0 | int rc; |
 216 | 0 |
217 | 0 | ASSERT(qi_ctrl->qinval_maddr); |
218 | 0 | rc = queue_invalidate_wait(iommu, 0, 1, 1, 1); |
219 | 0 | if ( rc == -ETIMEDOUT ) |
220 | 0 | { |
221 | 0 | struct domain *d = NULL; |
 222 | 0 |
223 | 0 | if ( test_bit(did, iommu->domid_bitmap) ) |
224 | 0 | d = rcu_lock_domain_by_id(iommu->domid_map[did]); |
 225 | 0 |
226 | 0 | /* |
 227 | 0 | * If the domain has been freed or the IOMMU domid bitmap is
 228 | 0 | * not valid, the device no longer belongs to this domain.
229 | 0 | */ |
230 | 0 | if ( d == NULL ) |
231 | 0 | return rc; |
 232 | 0 |
233 | 0 | iommu_dev_iotlb_flush_timeout(d, pdev); |
234 | 0 | rcu_unlock_domain(d); |
235 | 0 | } |
 236 | 0 |
237 | 0 | return rc; |
238 | 0 | } |
239 | | |
240 | | int qinval_device_iotlb_sync(struct iommu *iommu, struct pci_dev *pdev, |
241 | | u16 did, u16 size, u64 addr) |
242 | 0 | { |
243 | 0 | unsigned long flags; |
244 | 0 | unsigned int index; |
245 | 0 | u64 entry_base; |
246 | 0 | struct qinval_entry *qinval_entry, *qinval_entries; |
 247 | 0 |
248 | 0 | ASSERT(pdev); |
249 | 0 | spin_lock_irqsave(&iommu->register_lock, flags); |
250 | 0 | index = qinval_next_index(iommu); |
251 | 0 | entry_base = iommu_qi_ctrl(iommu)->qinval_maddr + |
252 | 0 | ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT); |
253 | 0 | qinval_entries = map_vtd_domain_page(entry_base); |
254 | 0 | qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)]; |
 255 | 0 |
256 | 0 | qinval_entry->q.dev_iotlb_inv_dsc.lo.type = TYPE_INVAL_DEVICE_IOTLB; |
257 | 0 | qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0; |
258 | 0 | qinval_entry->q.dev_iotlb_inv_dsc.lo.max_invs_pend = pdev->ats.queue_depth; |
259 | 0 | qinval_entry->q.dev_iotlb_inv_dsc.lo.res_2 = 0; |
260 | 0 | qinval_entry->q.dev_iotlb_inv_dsc.lo.sid = PCI_BDF2(pdev->bus, pdev->devfn); |
261 | 0 | qinval_entry->q.dev_iotlb_inv_dsc.lo.res_3 = 0; |
 262 | 0 |
263 | 0 | qinval_entry->q.dev_iotlb_inv_dsc.hi.size = size; |
264 | 0 | qinval_entry->q.dev_iotlb_inv_dsc.hi.res_1 = 0; |
265 | 0 | qinval_entry->q.dev_iotlb_inv_dsc.hi.addr = addr >> PAGE_SHIFT_4K; |
 266 | 0 |
267 | 0 | unmap_vtd_domain_page(qinval_entries); |
268 | 0 | qinval_update_qtail(iommu, index); |
269 | 0 | spin_unlock_irqrestore(&iommu->register_lock, flags); |
 270 | 0 |
271 | 0 | return dev_invalidate_sync(iommu, pdev, did); |
272 | 0 | } |
273 | | |
274 | | static int __must_check queue_invalidate_iec_sync(struct iommu *iommu, |
275 | | u8 granu, u8 im, u16 iidx) |
276 | 178 | { |
277 | 178 | unsigned long flags; |
278 | 178 | unsigned int index; |
279 | 178 | u64 entry_base; |
280 | 178 | struct qinval_entry *qinval_entry, *qinval_entries; |
281 | 178 | int ret; |
282 | 178 | |
283 | 178 | spin_lock_irqsave(&iommu->register_lock, flags); |
284 | 178 | index = qinval_next_index(iommu); |
285 | 178 | entry_base = iommu_qi_ctrl(iommu)->qinval_maddr + |
286 | 178 | ((index >> QINVAL_ENTRY_ORDER) << PAGE_SHIFT); |
287 | 178 | qinval_entries = map_vtd_domain_page(entry_base); |
288 | 178 | qinval_entry = &qinval_entries[index % (1 << QINVAL_ENTRY_ORDER)]; |
289 | 178 | |
290 | 178 | qinval_entry->q.iec_inv_dsc.lo.type = TYPE_INVAL_IEC; |
291 | 178 | qinval_entry->q.iec_inv_dsc.lo.granu = granu; |
292 | 178 | qinval_entry->q.iec_inv_dsc.lo.res_1 = 0; |
293 | 178 | qinval_entry->q.iec_inv_dsc.lo.im = im; |
294 | 178 | qinval_entry->q.iec_inv_dsc.lo.iidx = iidx; |
295 | 178 | qinval_entry->q.iec_inv_dsc.lo.res_2 = 0; |
296 | 178 | qinval_entry->q.iec_inv_dsc.hi.res = 0; |
297 | 178 | |
298 | 178 | unmap_vtd_domain_page(qinval_entries); |
299 | 178 | qinval_update_qtail(iommu, index); |
300 | 178 | spin_unlock_irqrestore(&iommu->register_lock, flags); |
301 | 178 | |
302 | 178 | ret = invalidate_sync(iommu); |
303 | 178 | |
304 | 178 | /* |
 305 | 178 | * Reading a VT-d architectural register ensures that
 306 | 178 | * draining happens in an implementation-independent way.
307 | 178 | */ |
308 | 178 | (void)dmar_readq(iommu->reg, DMAR_CAP_REG); |
309 | 178 | |
310 | 178 | return ret; |
311 | 178 | } |
312 | | |
313 | | int iommu_flush_iec_global(struct iommu *iommu) |
314 | 1 | { |
315 | 1 | return queue_invalidate_iec_sync(iommu, IEC_GLOBAL_INVL, 0, 0); |
316 | 1 | } |
317 | | |
318 | | int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx) |
319 | 177 | { |
320 | 177 | return queue_invalidate_iec_sync(iommu, IEC_INDEX_INVL, im, iidx); |
321 | 177 | } |
322 | | |
323 | | static int __must_check flush_context_qi(void *_iommu, u16 did, |
324 | | u16 sid, u8 fm, u64 type, |
325 | | bool_t flush_non_present_entry) |
326 | 59 | { |
327 | 59 | struct iommu *iommu = (struct iommu *)_iommu; |
328 | 59 | struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu); |
329 | 59 | |
330 | 59 | ASSERT(qi_ctrl->qinval_maddr); |
331 | 59 | |
332 | 59 | /* |
 333 | 59 | * In the non-present entry flush case: if the hardware doesn't cache
 334 | 59 | * non-present entries we do nothing, and if it does, we flush the
 335 | 59 | * entries of domain 0 (that domain id is used to tag any cached
 336 | 59 | * non-present entries).
337 | 59 | */ |
338 | 59 | if ( flush_non_present_entry ) |
339 | 57 | { |
340 | 57 | if ( !cap_caching_mode(iommu->cap) ) |
341 | 57 | return 1; |
342 | 57 | else |
343 | 0 | did = 0; |
344 | 57 | } |
345 | 59 | |
346 | 2 | return queue_invalidate_context_sync(iommu, did, sid, fm, |
347 | 2 | type >> DMA_CCMD_INVL_GRANU_OFFSET); |
348 | 59 | } |
349 | | |
350 | | static int __must_check flush_iotlb_qi(void *_iommu, u16 did, u64 addr, |
351 | | unsigned int size_order, u64 type, |
352 | | bool_t flush_non_present_entry, |
353 | | bool_t flush_dev_iotlb) |
354 | 4.56M | { |
355 | 4.56M | u8 dr = 0, dw = 0; |
356 | 4.56M | int ret = 0, rc; |
357 | 4.56M | struct iommu *iommu = (struct iommu *)_iommu; |
358 | 4.56M | struct qi_ctrl *qi_ctrl = iommu_qi_ctrl(iommu); |
359 | 4.56M | |
360 | 4.56M | ASSERT(qi_ctrl->qinval_maddr); |
361 | 4.56M | |
362 | 4.56M | /* |
 363 | 4.56M | * In the non-present entry flush case: if the hardware doesn't cache
 364 | 4.56M | * non-present entries we do nothing, and if it does, we flush the
 365 | 4.56M | * entries of domain 0 (that domain id is used to tag any cached
 366 | 4.56M | * non-present entries).
367 | 4.56M | */ |
368 | 4.56M | if ( flush_non_present_entry ) |
369 | 4.34M | { |
370 | 4.34M | if ( !cap_caching_mode(iommu->cap) ) |
371 | 4.34M | return 1; |
372 | 4.34M | else |
373 | 0 | did = 0; |
374 | 4.34M | } |
375 | 4.56M | |
376 | 4.56M | /* use queued invalidation */ |
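 | | /* dr/dw ask the hardware to drain pending DMA reads/writes as part of the IOTLB invalidation, when the capability allows it. */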
377 | 218k | if (cap_write_drain(iommu->cap)) |
378 | 218k | dw = 1; |
379 | 218k | if (cap_read_drain(iommu->cap)) |
380 | 218k | dr = 1; |
 381 | 218k | /* Need to consider the ih bit later */
382 | 218k | rc = queue_invalidate_iotlb_sync(iommu, |
383 | 218k | type >> DMA_TLB_FLUSH_GRANU_OFFSET, |
384 | 218k | dr, dw, did, size_order, 0, addr); |
385 | 218k | if ( !ret ) |
386 | 218k | ret = rc; |
387 | 218k | |
388 | 218k | if ( flush_dev_iotlb ) |
389 | 0 | { |
390 | 0 | rc = dev_invalidate_iotlb(iommu, did, addr, size_order, type); |
391 | 0 | if ( !ret ) |
392 | 0 | ret = rc; |
393 | 0 | } |
394 | 218k | return ret; |
395 | 4.56M | } |
396 | | |
397 | | int enable_qinval(struct iommu *iommu) |
398 | 2 | { |
399 | 2 | struct acpi_drhd_unit *drhd; |
400 | 2 | struct qi_ctrl *qi_ctrl; |
401 | 2 | struct iommu_flush *flush; |
402 | 2 | u32 sts; |
403 | 2 | unsigned long flags; |
404 | 2 | |
405 | 2 | if ( !ecap_queued_inval(iommu->ecap) || !iommu_qinval ) |
406 | 0 | return -ENOENT; |
407 | 2 | |
408 | 2 | qi_ctrl = iommu_qi_ctrl(iommu); |
409 | 2 | flush = iommu_get_flush(iommu); |
410 | 2 | |
411 | 2 | /* Return if already enabled by Xen */ |
412 | 2 | sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); |
413 | 2 | if ( (sts & DMA_GSTS_QIES) && qi_ctrl->qinval_maddr ) |
414 | 1 | return 0; |
415 | 2 | |
416 | 1 | if ( qi_ctrl->qinval_maddr == 0 ) |
417 | 1 | { |
418 | 1 | drhd = iommu_to_drhd(iommu); |
419 | 1 | qi_ctrl->qinval_maddr = alloc_pgtable_maddr(drhd, QINVAL_ARCH_PAGE_NR); |
420 | 1 | if ( qi_ctrl->qinval_maddr == 0 ) |
421 | 0 | { |
422 | 0 | dprintk(XENLOG_WARNING VTDPREFIX, |
423 | 0 | "Cannot allocate memory for qi_ctrl->qinval_maddr\n"); |
424 | 0 | return -ENOMEM; |
425 | 0 | } |
426 | 1 | } |
427 | 1 | |
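 | | /* From now on, route context and IOTLB flushes through the queued invalidation paths. */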
428 | 1 | flush->context = flush_context_qi; |
429 | 1 | flush->iotlb = flush_iotlb_qi; |
430 | 1 | |
 431 | 1 | /* Set up the Invalidation Queue Address (IQA) register with the
 432 | 1 | * address of the page we just allocated.  The QS field at
 433 | 1 | * bits[2:0] indicates that the queue is one 4KB page in size,
 434 | 1 | * i.e. 256 entries.  The Queue Head (IQH) and Queue Tail (IQT)
 435 | 1 | * registers are automatically reset to 0 by a write to the
 436 | 1 | * IQA register.
 437 | 1 | */
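 | | /* The QS (queue size) field occupies the low bits of IQA, so OR the page order into the page-aligned queue address. */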
438 | 1 | qi_ctrl->qinval_maddr |= QINVAL_PAGE_ORDER; |
439 | 1 | |
440 | 1 | spin_lock_irqsave(&iommu->register_lock, flags); |
441 | 1 | dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr); |
442 | 1 | |
443 | 1 | dmar_writeq(iommu->reg, DMAR_IQT_REG, 0); |
444 | 1 | |
445 | 1 | /* enable queued invalidation hardware */ |
446 | 1 | sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); |
447 | 1 | dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | DMA_GCMD_QIE); |
448 | 1 | |
 449 | 1 | /* Make sure the hardware has completed it */
450 | 1 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, |
451 | 1 | (sts & DMA_GSTS_QIES), sts); |
452 | 1 | spin_unlock_irqrestore(&iommu->register_lock, flags); |
453 | 1 | |
454 | 1 | return 0; |
455 | 1 | } |
456 | | |
457 | | void disable_qinval(struct iommu *iommu) |
458 | 1 | { |
459 | 1 | u32 sts; |
460 | 1 | unsigned long flags; |
461 | 1 | |
462 | 1 | if ( !ecap_queued_inval(iommu->ecap) ) |
463 | 0 | return; |
464 | 1 | |
465 | 1 | spin_lock_irqsave(&iommu->register_lock, flags); |
466 | 1 | sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); |
467 | 1 | if ( !(sts & DMA_GSTS_QIES) ) |
468 | 1 | goto out; |
469 | 1 | |
470 | 0 | dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE)); |
 471 | 0 |
 472 | 0 | /* Make sure the hardware has completed it */
473 | 0 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, |
474 | 0 | !(sts & DMA_GSTS_QIES), sts); |
475 | 1 | out: |
476 | 1 | spin_unlock_irqrestore(&iommu->register_lock, flags); |
477 | 1 | } |