/root/src/xen/xen/drivers/passthrough/amd/iommu_cmd.c
Line | Count | Source |
1 | | /* |
2 | | * Copyright (C) 2011 Advanced Micro Devices, Inc. |
3 | | * Author: Leo Duran <leo.duran@amd.com> |
4 | | * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen |
5 | | * |
6 | | * This program is free software; you can redistribute it and/or modify |
7 | | * it under the terms of the GNU General Public License as published by |
8 | | * the Free Software Foundation; either version 2 of the License, or |
9 | | * (at your option) any later version. |
10 | | * |
11 | | * This program is distributed in the hope that it will be useful, |
12 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | | * GNU General Public License for more details. |
15 | | * |
16 | | * You should have received a copy of the GNU General Public License |
17 | | * along with this program; If not, see <http://www.gnu.org/licenses/>. |
18 | | */ |
19 | | |
20 | | #include <xen/sched.h> |
21 | | #include <asm/amd-iommu.h> |
22 | | #include <asm/hvm/svm/amd-iommu-proto.h> |
23 | | #include "../ats.h" |
24 | | |
25 | | static int queue_iommu_command(struct amd_iommu *iommu, u32 cmd[]) |
26 | 0 | { |
27 | 0 | u32 tail, head, *cmd_buffer; |
28 | 0 | int i; |
29 | 0 | |
30 | 0 | tail = iommu->cmd_buffer.tail; |
31 | 0 | if ( ++tail == iommu->cmd_buffer.entries ) |
32 | 0 | tail = 0; |
33 | 0 | |
34 | 0 | head = iommu_get_rb_pointer(readl(iommu->mmio_base + |
35 | 0 | IOMMU_CMD_BUFFER_HEAD_OFFSET)); |
36 | 0 | if ( head != tail ) |
37 | 0 | { |
38 | 0 | cmd_buffer = (u32 *)(iommu->cmd_buffer.buffer + |
39 | 0 | (iommu->cmd_buffer.tail * |
40 | 0 | IOMMU_CMD_BUFFER_ENTRY_SIZE)); |
41 | 0 | |
42 | 0 | for ( i = 0; i < IOMMU_CMD_BUFFER_U32_PER_ENTRY; i++ ) |
43 | 0 | cmd_buffer[i] = cmd[i]; |
44 | 0 | |
45 | 0 | iommu->cmd_buffer.tail = tail; |
46 | 0 | return 1; |
47 | 0 | } |
48 | 0 | |
49 | 0 | return 0; |
50 | 0 | } |
51 | | |
52 | | static void commit_iommu_command_buffer(struct amd_iommu *iommu) |
53 | 0 | { |
54 | 0 | u32 tail = 0; |
55 | 0 | |
56 | 0 | iommu_set_rb_pointer(&tail, iommu->cmd_buffer.tail); |
57 | 0 | writel(tail, iommu->mmio_base+IOMMU_CMD_BUFFER_TAIL_OFFSET); |
58 | 0 | } |
59 | | |
60 | | int send_iommu_command(struct amd_iommu *iommu, u32 cmd[]) |
61 | 0 | { |
62 | 0 | if ( queue_iommu_command(iommu, cmd) ) |
63 | 0 | { |
64 | 0 | commit_iommu_command_buffer(iommu); |
65 | 0 | return 1; |
66 | 0 | } |
67 | 0 | |
68 | 0 | return 0; |
69 | 0 | } |
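Note: queue_iommu_command() and commit_iommu_command_buffer() together form a single-producer ring buffer: the entry is copied at the current tail, the tail is advanced (wrapping at the end of the buffer), and the buffer is treated as full when the advanced tail would collide with the hardware-owned head pointer. A minimal, self-contained sketch of the enqueue side follows; the struct, size, and names are illustrative stand-ins, not Xen's definitions.

#include <stdint.h>
#include <string.h>

#define RING_ENTRIES 256                    /* illustrative; the real size comes from cmd_buffer.entries */

struct cmd_ring {
    uint32_t entry[RING_ENTRIES][4];        /* 16-byte commands, as in the IOMMU command buffer */
    unsigned int head;                      /* consumer index (hardware-owned in the IOMMU case) */
    unsigned int tail;                      /* producer index (software-owned) */
};

/* Returns 1 if the command was queued, 0 if the ring is full. */
static int ring_push(struct cmd_ring *r, const uint32_t cmd[4])
{
    unsigned int tail = r->tail + 1;

    if ( tail == RING_ENTRIES )
        tail = 0;                           /* wrap, as queue_iommu_command() does */

    if ( tail == r->head )
        return 0;                           /* next tail would catch up with head: full */

    memcpy(r->entry[r->tail], cmd, sizeof(r->entry[0]));
    r->tail = tail;                         /* publish only after the entry is fully written */
    return 1;
}

int main(void)
{
    struct cmd_ring ring = { .head = 0, .tail = 0 };
    const uint32_t cmd[4] = { 0, 0, 0, 0 };

    return ring_push(&ring, cmd) ? 0 : 1;   /* succeeds: the ring starts out empty */
}

In the driver, the step corresponding to publishing r->tail to the consumer is the writel() of the tail register in commit_iommu_command_buffer(), which is what actually tells the IOMMU that new commands are available.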
70 | | |
71 | | static void flush_command_buffer(struct amd_iommu *iommu) |
72 | 0 | { |
73 | 0 | u32 cmd[4], status; |
74 | 0 | int loop_count, comp_wait; |
75 | 0 | |
76 | 0 | /* RW1C 'ComWaitInt' in status register */ |
77 | 0 | writel(IOMMU_STATUS_COMP_WAIT_INT_MASK, |
78 | 0 | iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); |
79 | 0 | |
80 | 0 | /* send an empty COMPLETION_WAIT command to flush command buffer */ |
81 | 0 | cmd[3] = cmd[2] = 0; |
82 | 0 | set_field_in_reg_u32(IOMMU_CMD_COMPLETION_WAIT, 0, |
83 | 0 | IOMMU_CMD_OPCODE_MASK, |
84 | 0 | IOMMU_CMD_OPCODE_SHIFT, &cmd[1]); |
85 | 0 | set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, 0, |
86 | 0 | IOMMU_COMP_WAIT_I_FLAG_MASK, |
87 | 0 | IOMMU_COMP_WAIT_I_FLAG_SHIFT, &cmd[0]); |
88 | 0 | send_iommu_command(iommu, cmd); |
89 | 0 | |
90 | 0 | /* Make loop_count long enough for polling completion wait bit */ |
91 | 0 | loop_count = 1000; |
92 | 0 | do { |
93 | 0 | status = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); |
94 | 0 | comp_wait = get_field_from_reg_u32(status, |
95 | 0 | IOMMU_STATUS_COMP_WAIT_INT_MASK, |
96 | 0 | IOMMU_STATUS_COMP_WAIT_INT_SHIFT); |
97 | 0 | --loop_count; |
98 | 0 | } while ( !comp_wait && loop_count ); |
99 | 0 | |
100 | 0 | if ( comp_wait ) |
101 | 0 | { |
102 | 0 | /* RW1C 'ComWaitInt' in status register */ |
103 | 0 | writel(IOMMU_STATUS_COMP_WAIT_INT_MASK, |
104 | 0 | iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET); |
105 | 0 | return; |
106 | 0 | } |
107 | 0 | AMD_IOMMU_DEBUG("Warning: ComWaitInt bit did not assert!\n"); |
108 | 0 | } |
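Note: the "RW1C" comments above refer to write-1-to-clear semantics: software clears ComWaitInt by writing a 1 to that bit of the status register, and the IOMMU sets it again when the COMPLETION_WAIT command retires. A tiny simulation of that clear/poll/acknowledge sequence follows; the bit position and helper names are made up for illustration, not the hardware layout.

#include <stdint.h>
#include <stdio.h>

#define COMP_WAIT_INT 0x4                   /* illustrative bit, not the real register layout */

static uint32_t status;                     /* stands in for the MMIO status register */

/* RW1C: writing 1 to a bit clears it, writing 0 leaves it untouched. */
static void status_write(uint32_t val)
{
    status &= ~val;
}

int main(void)
{
    status_write(COMP_WAIT_INT);            /* clear any stale indication before issuing the command */

    status |= COMP_WAIT_INT;                /* pretend the IOMMU just completed the wait */

    if ( status & COMP_WAIT_INT )
    {
        status_write(COMP_WAIT_INT);        /* acknowledge it, as flush_command_buffer() does */
        puts("completion observed and acknowledged");
    }

    printf("status is now %#x\n", status);  /* prints 0: the write of 1 cleared the bit */
    return 0;
}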
109 | | |
110 | | /* Build low level iommu command messages */ |
111 | | static void invalidate_iommu_pages(struct amd_iommu *iommu, |
112 | | u64 io_addr, u16 domain_id, u16 order) |
113 | 0 | { |
114 | 0 | u64 addr_lo, addr_hi; |
115 | 0 | u32 cmd[4], entry; |
116 | 0 | int sflag = 0, pde = 0; |
117 | 0 | |
118 | 0 | ASSERT ( order == 0 || order == 9 || order == 18 ); |
119 | 0 | |
120 | 0 | /* All pages associated with the domainID are invalidated */ |
121 | 0 | if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS ) ) |
122 | 0 | { |
123 | 0 | sflag = 1; |
124 | 0 | pde = 1; |
125 | 0 | } |
126 | 0 | |
127 | 0 | /* If sflag == 1, the size of the invalidate command is determined |
128 | 0 | by the first zero bit in the address starting from Address[12] */ |
129 | 0 | if ( order ) |
130 | 0 | { |
131 | 0 | u64 mask = 1ULL << (order - 1 + PAGE_SHIFT); |
132 | 0 | io_addr &= ~mask; |
133 | 0 | io_addr |= mask - 1; |
134 | 0 | } |
135 | 0 | |
136 | 0 | addr_lo = io_addr & DMA_32BIT_MASK; |
137 | 0 | addr_hi = io_addr >> 32; |
138 | 0 | |
139 | 0 | set_field_in_reg_u32(domain_id, 0, |
140 | 0 | IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK, |
141 | 0 | IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &entry); |
142 | 0 | set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_PAGES, entry, |
143 | 0 | IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, |
144 | 0 | &entry); |
145 | 0 | cmd[1] = entry; |
146 | 0 | |
147 | 0 | set_field_in_reg_u32(sflag, 0, |
148 | 0 | IOMMU_INV_IOMMU_PAGES_S_FLAG_MASK, |
149 | 0 | IOMMU_INV_IOMMU_PAGES_S_FLAG_SHIFT, &entry); |
150 | 0 | set_field_in_reg_u32(pde, entry, |
151 | 0 | IOMMU_INV_IOMMU_PAGES_PDE_FLAG_MASK, |
152 | 0 | IOMMU_INV_IOMMU_PAGES_PDE_FLAG_SHIFT, &entry); |
153 | 0 | set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry, |
154 | 0 | IOMMU_INV_IOMMU_PAGES_ADDR_LOW_MASK, |
155 | 0 | IOMMU_INV_IOMMU_PAGES_ADDR_LOW_SHIFT, &entry); |
156 | 0 | cmd[2] = entry; |
157 | 0 | |
158 | 0 | set_field_in_reg_u32((u32)addr_hi, 0, |
159 | 0 | IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_MASK, |
160 | 0 | IOMMU_INV_IOMMU_PAGES_ADDR_HIGH_SHIFT, &entry); |
161 | 0 | cmd[3] = entry; |
162 | 0 | |
163 | 0 | cmd[0] = 0; |
164 | 0 | send_iommu_command(iommu, cmd); |
165 | 0 | } |
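Note: the mask arithmetic above implements the size encoding described in the comment: with the S flag set, the invalidation range is given by the first zero bit in the address at or above bit 12, so an order-N flush clears bit (N - 1 + 12) and sets every bit below it. A standalone illustration of that encoding, assuming 4K pages (PAGE_SHIFT == 12):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Encode a (1 << order)-page invalidation range into the address the way
 * invalidate_iommu_pages()/invalidate_iotlb_pages() do: clear the
 * size-marker bit and set all bits below it. */
static uint64_t encode_inv_address(uint64_t io_addr, unsigned int order)
{
    if ( order )
    {
        uint64_t mask = 1ULL << (order - 1 + PAGE_SHIFT);

        io_addr &= ~mask;
        io_addr |= mask - 1;
    }

    return io_addr;
}

int main(void)
{
    /* order 9 (512 pages, i.e. 2M) at 0x200000 yields 0x2fffff:
     * bits [19:12] are 1 and bit 20 is 0, so the first zero bit from
     * Address[12] upward marks a 2M invalidation. */
    printf("%#llx\n", (unsigned long long)encode_inv_address(0x200000, 9));
    return 0;
}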
166 | | |
167 | | static void invalidate_iotlb_pages(struct amd_iommu *iommu, |
168 | | u16 maxpend, u32 pasid, u16 queueid, |
169 | | u64 io_addr, u16 dev_id, u16 order) |
170 | 0 | { |
171 | 0 | u64 addr_lo, addr_hi; |
172 | 0 | u32 cmd[4], entry; |
173 | 0 | int sflag = 0; |
174 | 0 | |
175 | 0 | ASSERT ( order == 0 || order == 9 || order == 18 ); |
176 | 0 | |
177 | 0 | if ( order || (io_addr == INV_IOMMU_ALL_PAGES_ADDRESS ) ) |
178 | 0 | sflag = 1; |
179 | 0 | |
180 | 0 | /* If sflag == 1, the size of the invalidate command is determined |
181 | 0 | by the first zero bit in the address starting from Address[12] */ |
182 | 0 | if ( order ) |
183 | 0 | { |
184 | 0 | u64 mask = 1ULL << (order - 1 + PAGE_SHIFT); |
185 | 0 | io_addr &= ~mask; |
186 | 0 | io_addr |= mask - 1; |
187 | 0 | } |
188 | 0 | |
189 | 0 | addr_lo = io_addr & DMA_32BIT_MASK; |
190 | 0 | addr_hi = io_addr >> 32; |
191 | 0 | |
192 | 0 | set_field_in_reg_u32(dev_id, 0, |
193 | 0 | IOMMU_INV_IOTLB_PAGES_DEVICE_ID_MASK, |
194 | 0 | IOMMU_INV_IOTLB_PAGES_DEVICE_ID_SHIFT, &entry); |
195 | 0 | |
196 | 0 | set_field_in_reg_u32(maxpend, entry, |
197 | 0 | IOMMU_INV_IOTLB_PAGES_MAXPEND_MASK, |
198 | 0 | IOMMU_INV_IOTLB_PAGES_MAXPEND_SHIFT, &entry); |
199 | 0 | |
200 | 0 | set_field_in_reg_u32(pasid & 0xff, entry, |
201 | 0 | IOMMU_INV_IOTLB_PAGES_PASID1_MASK, |
202 | 0 | IOMMU_INV_IOTLB_PAGES_PASID1_SHIFT, &entry); |
203 | 0 | cmd[0] = entry; |
204 | 0 | |
205 | 0 | set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOTLB_PAGES, 0, |
206 | 0 | IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, |
207 | 0 | &entry); |
208 | 0 | |
209 | 0 | set_field_in_reg_u32(pasid >> 8, entry, |
210 | 0 | IOMMU_INV_IOTLB_PAGES_PASID2_MASK, |
211 | 0 | IOMMU_INV_IOTLB_PAGES_PASID2_SHIFT, |
212 | 0 | &entry); |
213 | 0 | |
214 | 0 | set_field_in_reg_u32(queueid, entry, |
215 | 0 | IOMMU_INV_IOTLB_PAGES_QUEUEID_MASK, |
216 | 0 | IOMMU_INV_IOTLB_PAGES_QUEUEID_SHIFT, |
217 | 0 | &entry); |
218 | 0 | cmd[1] = entry; |
219 | 0 | |
220 | 0 | set_field_in_reg_u32(sflag, 0, |
221 | 0 | IOMMU_INV_IOTLB_PAGES_S_FLAG_MASK, |
222 | 0 | IOMMU_INV_IOTLB_PAGES_S_FLAG_SHIFT, &entry); |
223 | 0 | |
224 | 0 | set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, entry, |
225 | 0 | IOMMU_INV_IOTLB_PAGES_ADDR_LOW_MASK, |
226 | 0 | IOMMU_INV_IOTLB_PAGES_ADDR_LOW_SHIFT, &entry); |
227 | 0 | cmd[2] = entry; |
228 | 0 | |
229 | 0 | set_field_in_reg_u32((u32)addr_hi, 0, |
230 | 0 | IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_MASK, |
231 | 0 | IOMMU_INV_IOTLB_PAGES_ADDR_HIGH_SHIFT, &entry); |
232 | 0 | cmd[3] = entry; |
233 | 0 | |
234 | 0 | send_iommu_command(iommu, cmd); |
235 | 0 | } |
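Note: one non-obvious detail above is that the PASID does not fit in a single field of the INVALIDATE_IOTLB_PAGES command: the low 8 bits go into the PASID1 field of the first dword and the remaining bits into the PASID2 field of the second. A small sketch of that split (the real bit positions live in the IOMMU_INV_IOTLB_PAGES_PASID1/2_* constants and are not reproduced here):

#include <stdint.h>
#include <assert.h>

/* Split a PASID the way invalidate_iotlb_pages() feeds it into the command. */
static void split_pasid(uint32_t pasid, uint32_t *pasid1, uint32_t *pasid2)
{
    *pasid1 = pasid & 0xff;                 /* low 8 bits, placed in cmd[0] */
    *pasid2 = pasid >> 8;                   /* remaining high bits, placed in cmd[1] */
}

int main(void)
{
    uint32_t lo, hi;

    split_pasid(0xabcde, &lo, &hi);
    assert(lo == 0xde && hi == 0xabc);      /* 0xabcde == (0xabc << 8) | 0xde */
    return 0;
}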
236 | | |
237 | | static void invalidate_dev_table_entry(struct amd_iommu *iommu, |
238 | | u16 device_id) |
239 | 0 | { |
240 | 0 | u32 cmd[4], entry; |
241 | 0 | |
242 | 0 | cmd[3] = cmd[2] = 0; |
243 | 0 | set_field_in_reg_u32(device_id, 0, |
244 | 0 | IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_MASK, |
245 | 0 | IOMMU_INV_DEVTAB_ENTRY_DEVICE_ID_SHIFT, &entry); |
246 | 0 | cmd[0] = entry; |
247 | 0 | |
248 | 0 | set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY, 0, |
249 | 0 | IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, |
250 | 0 | &entry); |
251 | 0 | cmd[1] = entry; |
252 | 0 | |
253 | 0 | send_iommu_command(iommu, cmd); |
254 | 0 | } |
255 | | |
256 | | static void invalidate_interrupt_table(struct amd_iommu *iommu, u16 device_id) |
257 | 0 | { |
258 | 0 | u32 cmd[4], entry; |
259 | 0 | |
260 | 0 | cmd[3] = cmd[2] = 0; |
261 | 0 | set_field_in_reg_u32(device_id, 0, |
262 | 0 | IOMMU_INV_INT_TABLE_DEVICE_ID_MASK, |
263 | 0 | IOMMU_INV_INT_TABLE_DEVICE_ID_SHIFT, &entry); |
264 | 0 | cmd[0] = entry; |
265 | 0 | set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_INT_TABLE, 0, |
266 | 0 | IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, |
267 | 0 | &entry); |
268 | 0 | cmd[1] = entry; |
269 | 0 | send_iommu_command(iommu, cmd); |
270 | 0 | } |
271 | | |
272 | | void invalidate_iommu_all(struct amd_iommu *iommu) |
273 | 0 | { |
274 | 0 | u32 cmd[4], entry; |
275 | 0 | |
276 | 0 | cmd[3] = cmd[2] = cmd[0] = 0; |
277 | 0 | |
278 | 0 | set_field_in_reg_u32(IOMMU_CMD_INVALIDATE_IOMMU_ALL, 0, |
279 | 0 | IOMMU_CMD_OPCODE_MASK, IOMMU_CMD_OPCODE_SHIFT, |
280 | 0 | &entry); |
281 | 0 | cmd[1] = entry; |
282 | 0 | |
283 | 0 | send_iommu_command(iommu, cmd); |
284 | 0 | } |
285 | | |
286 | | void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev, |
287 | | uint64_t gaddr, unsigned int order) |
288 | 0 | { |
289 | 0 | unsigned long flags; |
290 | 0 | struct amd_iommu *iommu; |
291 | 0 | unsigned int req_id, queueid, maxpend; |
292 | 0 | |
293 | 0 | if ( !ats_enabled ) |
294 | 0 | return; |
295 | 0 | |
296 | 0 | if ( !pci_ats_enabled(pdev->seg, pdev->bus, pdev->devfn) ) |
297 | 0 | return; |
298 | 0 | |
299 | 0 | iommu = find_iommu_for_device(pdev->seg, PCI_BDF2(pdev->bus, pdev->devfn)); |
300 | 0 | |
301 | 0 | if ( !iommu ) |
302 | 0 | { |
303 | 0 | AMD_IOMMU_DEBUG("%s: Can't find iommu for %04x:%02x:%02x.%u\n", |
304 | 0 | __func__, pdev->seg, pdev->bus, |
305 | 0 | PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); |
306 | 0 | return; |
307 | 0 | } |
308 | 0 | |
309 | 0 | if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) ) |
310 | 0 | return; |
311 | 0 | |
312 | 0 | req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(pdev->bus, devfn)); |
313 | 0 | queueid = req_id; |
314 | 0 | maxpend = pdev->ats.queue_depth & 0xff; |
315 | 0 | |
316 | 0 | /* send INVALIDATE_IOTLB_PAGES command */ |
317 | 0 | spin_lock_irqsave(&iommu->lock, flags); |
318 | 0 | invalidate_iotlb_pages(iommu, maxpend, 0, queueid, gaddr, req_id, order); |
319 | 0 | flush_command_buffer(iommu); |
320 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
321 | 0 | } |
322 | | |
323 | | static void amd_iommu_flush_all_iotlbs(struct domain *d, uint64_t gaddr, |
324 | | unsigned int order) |
325 | 0 | { |
326 | 0 | struct pci_dev *pdev; |
327 | 0 | |
328 | 0 | if ( !ats_enabled ) |
329 | 0 | return; |
330 | 0 | |
331 | 0 | for_each_pdev( d, pdev ) |
332 | 0 | { |
333 | 0 | u8 devfn = pdev->devfn; |
334 | 0 | |
335 | 0 | do { |
336 | 0 | amd_iommu_flush_iotlb(devfn, pdev, gaddr, order); |
337 | 0 | devfn += pdev->phantom_stride; |
338 | 0 | } while ( devfn != pdev->devfn && |
339 | 0 | PCI_SLOT(devfn) == PCI_SLOT(pdev->devfn) ); |
340 | 0 | } |
341 | 0 | } |
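Note: the devfn loop above also covers phantom functions: it keeps stepping devfn by phantom_stride until it either wraps back to the original function (which happens immediately when the stride is 0) or leaves the device's slot. The traversal in isolation, with illustrative values rather than real pci_dev state:

#include <stdint.h>
#include <stdio.h>

#define PCI_SLOT(devfn)  (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)  ((devfn) & 0x07)

/* Visit a device and its phantom functions the way the loop in
 * amd_iommu_flush_all_iotlbs() does. */
static void visit_phantom_fns(uint8_t devfn, uint8_t phantom_stride)
{
    uint8_t fn = devfn;

    do {
        printf("flush %02x.%u\n", PCI_SLOT(fn), PCI_FUNC(fn));
        fn += phantom_stride;
    } while ( fn != devfn && PCI_SLOT(fn) == PCI_SLOT(devfn) );
}

int main(void)
{
    visit_phantom_fns(0x10, 2);             /* slot 2 function 0 with stride 2: flushes .0, .2, .4, .6 */
    visit_phantom_fns(0x08, 0);             /* stride 0 (no phantom functions): flushes only .0 */
    return 0;
}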
342 | | |
343 | | /* Flush iommu cache after p2m changes. */ |
344 | | static void _amd_iommu_flush_pages(struct domain *d, |
345 | | uint64_t gaddr, unsigned int order) |
346 | 0 | { |
347 | 0 | unsigned long flags; |
348 | 0 | struct amd_iommu *iommu; |
349 | 0 | unsigned int dom_id = d->domain_id; |
350 | 0 | |
351 | 0 | /* send INVALIDATE_IOMMU_PAGES command */ |
352 | 0 | for_each_amd_iommu ( iommu ) |
353 | 0 | { |
354 | 0 | spin_lock_irqsave(&iommu->lock, flags); |
355 | 0 | invalidate_iommu_pages(iommu, gaddr, dom_id, order); |
356 | 0 | flush_command_buffer(iommu); |
357 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
358 | 0 | } |
359 | 0 | |
360 | 0 | if ( ats_enabled ) |
361 | 0 | amd_iommu_flush_all_iotlbs(d, gaddr, order); |
362 | 0 | } |
363 | | |
364 | | void amd_iommu_flush_all_pages(struct domain *d) |
365 | 0 | { |
366 | 0 | _amd_iommu_flush_pages(d, INV_IOMMU_ALL_PAGES_ADDRESS, 0); |
367 | 0 | } |
368 | | |
369 | | void amd_iommu_flush_pages(struct domain *d, |
370 | | unsigned long gfn, unsigned int order) |
371 | 0 | { |
372 | 0 | _amd_iommu_flush_pages(d, (uint64_t) gfn << PAGE_SHIFT, order); |
373 | 0 | } |
374 | | |
375 | | void amd_iommu_flush_device(struct amd_iommu *iommu, uint16_t bdf) |
376 | 0 | { |
377 | 0 | ASSERT( spin_is_locked(&iommu->lock) ); |
378 | 0 | |
379 | 0 | invalidate_dev_table_entry(iommu, bdf); |
380 | 0 | flush_command_buffer(iommu); |
381 | 0 | } |
382 | | |
383 | | void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf) |
384 | 0 | { |
385 | 0 | ASSERT( spin_is_locked(&iommu->lock) ); |
386 | 0 | |
387 | 0 | invalidate_interrupt_table(iommu, bdf); |
388 | 0 | flush_command_buffer(iommu); |
389 | 0 | } |
390 | | |
391 | | void amd_iommu_flush_all_caches(struct amd_iommu *iommu) |
392 | 0 | { |
393 | 0 | ASSERT( spin_is_locked(&iommu->lock) ); |
394 | 0 | |
395 | 0 | invalidate_iommu_all(iommu); |
396 | 0 | flush_command_buffer(iommu); |
397 | 0 | } |
398 | | |
399 | | void amd_iommu_send_guest_cmd(struct amd_iommu *iommu, u32 cmd[]) |
400 | 0 | { |
401 | 0 | unsigned long flags; |
402 | 0 | |
403 | 0 | spin_lock_irqsave(&iommu->lock, flags); |
404 | 0 | |
405 | 0 | send_iommu_command(iommu, cmd); |
406 | 0 | flush_command_buffer(iommu); |
407 | 0 | |
408 | 0 | spin_unlock_irqrestore(&iommu->lock, flags); |
409 | 0 | } |