/root/src/xen/xen/drivers/passthrough/amd/iommu_guest.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (C) 2011 Advanced Micro Devices, Inc. |
3 | | * Author: Wei Wang <wei.wang2@amd.com> |
4 | | * |
5 | | * This program is free software; you can redistribute it and/or modify |
6 | | * it under the terms of the GNU General Public License as published by |
7 | | * the Free Software Foundation; either version 2 of the License, or |
8 | | * (at your option) any later version. |
9 | | * |
10 | | * This program is distributed in the hope that it will be useful, |
11 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | | * GNU General Public License for more details. |
14 | | * |
15 | | * You should have received a copy of the GNU General Public License |
16 | | * along with this program; If not, see <http://www.gnu.org/licenses/>. |
17 | | */ |
18 | | |
19 | | #include <xen/sched.h> |
20 | | #include <asm/p2m.h> |
21 | | #include <asm/amd-iommu.h> |
22 | | #include <asm/hvm/svm/amd-iommu-proto.h> |
23 | | |
24 | | |
#define IOMMU_MMIO_SIZE                         0x8000
#define IOMMU_MMIO_PAGE_NR                      0x8
#define RING_BF_LENGTH_MASK                     0x0F000000
#define RING_BF_LENGTH_SHIFT                    24

#define PASMAX_9_bit                            0x8
#define GUEST_CR3_1_LEVEL                       0x0
#define GUEST_ADDRESS_SIZE_6_LEVEL              0x2
#define HOST_ADDRESS_SIZE_6_LEVEL               0x2

/* Set/clear a bit in the emulated IOMMU status register (low word). */
#define guest_iommu_set_status(iommu, bit) \
        iommu_set_bit(&((iommu)->reg_status.lo), bit)

#define guest_iommu_clear_status(iommu, bit) \
        iommu_clear_bit(&((iommu)->reg_status.lo), bit)

/*
 * Combine a {lo,hi} 32-bit register pair into a uint64_t, and split one
 * back.  The 'reg' argument is now fully parenthesized so member access
 * works for any lvalue expression (e.g. '*p') passed in — previously the
 * bare 'reg.hi'/'reg.lo' expansion broke macro hygiene.
 */
#define reg_to_u64(reg) (((uint64_t)(reg).hi << 32) | (reg).lo)
#define u64_to_reg(reg, val)        \
    do                              \
    {                               \
        (reg)->lo = (u32)(val);     \
        (reg)->hi = (val) >> 32;    \
    } while (0)
48 | | |
/*
 * Map a guest-visible BDF to the machine BDF.  Only an identity mapping
 * is implemented: the guest sees the device under its host BDF.
 */
static unsigned int machine_bdf(struct domain *d, uint16_t guest_bdf)
{
    unsigned int mbdf = guest_bdf; /* no translation table is consulted */

    return mbdf;
}
53 | | |
/*
 * Map a machine BDF back to the guest-visible BDF.  Identity mapping,
 * mirroring machine_bdf() above.
 */
static uint16_t guest_bdf(struct domain *d, uint16_t machine_bdf)
{
    uint16_t gbdf = machine_bdf; /* 1:1 — no translation performed */

    return gbdf;
}
58 | | |
/* Fetch the emulated (guest) IOMMU state hung off a domain's IOMMU info. */
static inline struct guest_iommu *domain_iommu(struct domain *d)
{
    return dom_iommu(d)->arch.g_iommu;
}
63 | | |
/* Fetch the emulated IOMMU state for the domain owning this vCPU. */
static inline struct guest_iommu *vcpu_iommu(struct vcpu *v)
{
    return dom_iommu(v->domain)->arch.g_iommu;
}
68 | | |
/* Mark the emulated IOMMU enabled; checked before command processing. */
static void guest_iommu_enable(struct guest_iommu *iommu)
{
    iommu->enabled = 1;
}
73 | | |
/* Mark the emulated IOMMU disabled (also used on guest-driven errors). */
static void guest_iommu_disable(struct guest_iommu *iommu)
{
    iommu->enabled = 0;
}
78 | | |
/*
 * Reassemble the guest CR3 table root from a device table entry, where it
 * is stored split across three fields (GCR3_1/2/3 in data[1..3]), and
 * return it as a GFN (the combined physical address shifted right by
 * PAGE_SHIFT).
 */
static uint64_t get_guest_cr3_from_dte(dev_entry_t *dte)
{
    uint64_t gcr3_1, gcr3_2, gcr3_3;

    gcr3_1 = get_field_from_reg_u32(dte->data[1],
                                    IOMMU_DEV_TABLE_GCR3_1_MASK,
                                    IOMMU_DEV_TABLE_GCR3_1_SHIFT);
    gcr3_2 = get_field_from_reg_u32(dte->data[2],
                                    IOMMU_DEV_TABLE_GCR3_2_MASK,
                                    IOMMU_DEV_TABLE_GCR3_2_SHIFT);
    gcr3_3 = get_field_from_reg_u32(dte->data[3],
                                    IOMMU_DEV_TABLE_GCR3_3_MASK,
                                    IOMMU_DEV_TABLE_GCR3_3_SHIFT);

    /* Fields cover address bits [14:12], [30:15] and [63:31] respectively. */
    return ((gcr3_3 << 31) | (gcr3_2 << 15 ) | (gcr3_1 << 12)) >> PAGE_SHIFT;
}
95 | | |
/* Extract the (guest) domain id field from a device table entry. */
static uint16_t get_domid_from_dte(dev_entry_t *dte)
{
    return get_field_from_reg_u32(dte->data[2], IOMMU_DEV_TABLE_DOMAIN_ID_MASK,
                                  IOMMU_DEV_TABLE_DOMAIN_ID_SHIFT);
}
101 | | |
/* Extract the GLX (guest CR3 table levels) field from a device table entry. */
static uint16_t get_glx_from_dte(dev_entry_t *dte)
{
    return get_field_from_reg_u32(dte->data[1], IOMMU_DEV_TABLE_GLX_MASK,
                                  IOMMU_DEV_TABLE_GLX_SHIFT);
}
107 | | |
/* Extract the GV (guest translation valid) bit from a device table entry. */
static uint16_t get_gv_from_dte(dev_entry_t *dte)
{
    return get_field_from_reg_u32(dte->data[1],IOMMU_DEV_TABLE_GV_MASK,
                                  IOMMU_DEV_TABLE_GV_SHIFT);
}
113 | | |
/*
 * Map a guest-visible IOMMU domain id to the host domain id used in the
 * real device table.  The guest-supplied id is ignored entirely.
 */
static unsigned int host_domid(struct domain *d, uint64_t g_domid)
{
    /* Only support one PPR device in guest for now */
    return d->domain_id;
}
119 | | |
/*
 * Extract the GFN from a guest-programmed table base register value.
 * Only the physical-address bits (PADDR_MASK) are considered; callers
 * must never pass a zero base (debug-build assertion only).
 */
static unsigned long get_gfn_from_base_reg(uint64_t base_raw)
{
    base_raw &= PADDR_MASK;
    ASSERT ( base_raw != 0 );
    return base_raw >> PAGE_SHIFT;
}
126 | | |
127 | | static void guest_iommu_deliver_msi(struct domain *d) |
128 | 0 | { |
129 | 0 | uint8_t vector, dest, dest_mode, delivery_mode, trig_mode; |
130 | 0 | struct guest_iommu *iommu = domain_iommu(d); |
131 | 0 |
|
132 | 0 | vector = iommu->msi.vector; |
133 | 0 | dest = iommu->msi.dest; |
134 | 0 | dest_mode = iommu->msi.dest_mode; |
135 | 0 | delivery_mode = iommu->msi.delivery_mode; |
136 | 0 | trig_mode = iommu->msi.trig_mode; |
137 | 0 |
|
138 | 0 | vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode); |
139 | 0 | } |
140 | | |
141 | | static unsigned long guest_iommu_get_table_mfn(struct domain *d, |
142 | | uint64_t base_raw, |
143 | | unsigned int entry_size, |
144 | | unsigned int pos) |
145 | 0 | { |
146 | 0 | unsigned long idx, gfn, mfn; |
147 | 0 | p2m_type_t p2mt; |
148 | 0 |
|
149 | 0 | gfn = get_gfn_from_base_reg(base_raw); |
150 | 0 | idx = (pos * entry_size) >> PAGE_SHIFT; |
151 | 0 |
|
152 | 0 | mfn = mfn_x(get_gfn(d, gfn + idx, &p2mt)); |
153 | 0 | put_gfn(d, gfn); |
154 | 0 |
|
155 | 0 | return mfn; |
156 | 0 | } |
157 | | |
/*
 * Latch the guest device table size when translation is enabled.  The
 * size field in the base register's low word encodes the number of pages
 * minus one, hence the +1 below.
 */
static void guest_iommu_enable_dev_table(struct guest_iommu *iommu)
{
    uint32_t length_raw = get_field_from_reg_u32(iommu->dev_table.reg_base.lo,
                                                 IOMMU_DEV_TABLE_SIZE_MASK,
                                                 IOMMU_DEV_TABLE_SIZE_SHIFT);
    iommu->dev_table.size = (length_raw + 1) * PAGE_SIZE;
}
165 | | |
/*
 * Latch a ring buffer's entry count from its base register.  The length
 * field in the high word encodes log2 of the number of entries.
 *
 * NOTE(review): entry_size is currently unused — only the entry count is
 * recorded here; per-entry sizing happens at access time in the callers.
 */
static void guest_iommu_enable_ring_buffer(struct guest_iommu *iommu,
                                           struct guest_buffer *buffer,
                                           uint32_t entry_size)
{
    uint32_t length_raw = get_field_from_reg_u32(buffer->reg_base.hi,
                                                 RING_BF_LENGTH_MASK,
                                                 RING_BF_LENGTH_SHIFT);
    buffer->entries = 1 << length_raw;
}
175 | | |
/*
 * Forward a hardware-produced PPR (peripheral page request) log entry
 * into the guest's emulated PPR log ring and notify the guest via the
 * virtual IOMMU MSI.
 *
 * 'entry' is rewritten in place: the machine BDF in word 0 is replaced
 * with the guest-visible BDF before the entry is copied into guest
 * memory.  Head/tail pointers outside the ring disable the vIOMMU.
 */
void guest_iommu_add_ppr_log(struct domain *d, u32 entry[])
{
    uint16_t gdev_id;
    unsigned long mfn, tail, head;
    ppr_entry_t *log, *log_base;
    struct guest_iommu *iommu;

    /* Only HVM domains with an instantiated vIOMMU get log entries. */
    if ( !is_hvm_domain(d) )
        return;

    iommu = domain_iommu(d);
    if ( !iommu )
        return;

    tail = iommu_get_rb_pointer(iommu->ppr_log.reg_tail.lo);
    head = iommu_get_rb_pointer(iommu->ppr_log.reg_head.lo);

    /* Guest-programmed pointers must lie inside the ring. */
    if ( tail >= iommu->ppr_log.entries || head >= iommu->ppr_log.entries )
    {
        AMD_IOMMU_DEBUG("Error: guest iommu ppr log overflows\n");
        guest_iommu_disable(iommu);
        return;
    }

    /* Locate the guest page holding the slot at 'tail'. */
    mfn = guest_iommu_get_table_mfn(d, reg_to_u64(iommu->ppr_log.reg_base),
                                    sizeof(ppr_entry_t), tail);
    ASSERT(mfn_valid(_mfn(mfn)));

    log_base = map_domain_page(_mfn(mfn));
    log = log_base + tail % (PAGE_SIZE / sizeof(ppr_entry_t));

    /* Convert physical device id back into virtual device id */
    gdev_id = guest_bdf(d, iommu_get_devid_from_cmd(entry[0]));
    iommu_set_devid_to_cmd(&entry[0], gdev_id);

    memcpy(log, entry, sizeof(ppr_entry_t));

    /* Now shift ppr log tail pointer */
    if ( ++tail >= iommu->ppr_log.entries )
    {
        /* Wrap-around is surfaced to the guest as a log overflow. */
        tail = 0;
        guest_iommu_set_status(iommu, IOMMU_STATUS_PPR_LOG_OVERFLOW_SHIFT);
    }
    iommu_set_rb_pointer(&iommu->ppr_log.reg_tail.lo, tail);
    unmap_domain_page(log_base);

    guest_iommu_deliver_msi(d);
}
224 | | |
/*
 * Forward a hardware-produced event log entry into the guest's emulated
 * event log ring and notify the guest via the virtual IOMMU MSI.
 * Structure mirrors guest_iommu_add_ppr_log() above.
 */
void guest_iommu_add_event_log(struct domain *d, u32 entry[])
{
    uint16_t dev_id;
    unsigned long mfn, tail, head;
    event_entry_t *log, *log_base;
    struct guest_iommu *iommu;

    if ( !is_hvm_domain(d) )
        return;

    iommu = domain_iommu(d);
    if ( !iommu )
        return;

    tail = iommu_get_rb_pointer(iommu->event_log.reg_tail.lo);
    head = iommu_get_rb_pointer(iommu->event_log.reg_head.lo);

    /* Guest-programmed pointers must lie inside the ring. */
    if ( tail >= iommu->event_log.entries || head >= iommu->event_log.entries )
    {
        AMD_IOMMU_DEBUG("Error: guest iommu event overflows\n");
        guest_iommu_disable(iommu);
        return;
    }

    mfn = guest_iommu_get_table_mfn(d, reg_to_u64(iommu->event_log.reg_base),
                                    sizeof(event_entry_t), tail);
    ASSERT(mfn_valid(_mfn(mfn)));

    log_base = map_domain_page(_mfn(mfn));
    log = log_base + tail % (PAGE_SIZE / sizeof(event_entry_t));

    /* re-write physical device id into virtual device id */
    dev_id = guest_bdf(d, iommu_get_devid_from_cmd(entry[0]));
    iommu_set_devid_to_cmd(&entry[0], dev_id);
    memcpy(log, entry, sizeof(event_entry_t));

    /* Now shift event log tail pointer */
    if ( ++tail >= iommu->event_log.entries )
    {
        /* Wrap-around is surfaced to the guest as a log overflow. */
        tail = 0;
        guest_iommu_set_status(iommu, IOMMU_STATUS_EVENT_OVERFLOW_SHIFT);
    }

    iommu_set_rb_pointer(&iommu->event_log.reg_tail.lo, tail);
    unmap_domain_page(log_base);

    guest_iommu_deliver_msi(d);
}
273 | | |
/*
 * Emulate a COMPLETE_PPR_REQUEST command: rewrite the guest BDF in the
 * command to the machine BDF and forward the command to the host IOMMU
 * owning that device.  Returns -ENODEV if no host IOMMU is found (which
 * makes the caller disable the vIOMMU).
 */
static int do_complete_ppr_request(struct domain *d, cmd_entry_t *cmd)
{
    uint16_t dev_id;
    struct amd_iommu *iommu;

    dev_id = machine_bdf(d, iommu_get_devid_from_cmd(cmd->data[0]));
    iommu = find_iommu_for_device(0, dev_id);

    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x\n",
                        __func__, dev_id);
        return -ENODEV;
    }

    /* replace virtual device id into physical */
    iommu_set_devid_to_cmd(&cmd->data[0], dev_id);
    amd_iommu_send_guest_cmd(iommu, cmd->data);

    return 0;
}
295 | | |
/*
 * Emulate an INVALIDATE_IOMMU_PAGES command: replace the guest domain id
 * embedded in the command with the host domain id, then broadcast the
 * rewritten command to every host IOMMU.
 */
static int do_invalidate_pages(struct domain *d, cmd_entry_t *cmd)
{
    uint16_t gdom_id, hdom_id;
    struct amd_iommu *iommu = NULL;

    gdom_id = get_field_from_reg_u32(cmd->data[1],
                                     IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
                                     IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT);

    hdom_id = host_domid(d, gdom_id);
    /* Rewrite the domain id field in place before forwarding. */
    set_field_in_reg_u32(hdom_id, cmd->data[1],
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_MASK,
                         IOMMU_INV_IOMMU_PAGES_DOMAIN_ID_SHIFT, &cmd->data[1]);

    for_each_amd_iommu ( iommu )
        amd_iommu_send_guest_cmd(iommu, cmd->data);

    return 0;
}
315 | | |
316 | | static int do_invalidate_all(struct domain *d, cmd_entry_t *cmd) |
317 | 0 | { |
318 | 0 | struct amd_iommu *iommu = NULL; |
319 | 0 |
|
320 | 0 | for_each_amd_iommu ( iommu ) |
321 | 0 | amd_iommu_flush_all_pages(d); |
322 | 0 |
|
323 | 0 | return 0; |
324 | 0 | } |
325 | | |
/*
 * Emulate an INVALIDATE_IOTLB_PAGES command: rewrite the guest BDF to the
 * machine BDF and forward the command to the host IOMMU owning the
 * device.  Returns -ENODEV if no host IOMMU is found.
 */
static int do_invalidate_iotlb_pages(struct domain *d, cmd_entry_t *cmd)
{
    struct amd_iommu *iommu;
    uint16_t dev_id;

    dev_id = machine_bdf(d, iommu_get_devid_from_cmd(cmd->data[0]));

    iommu = find_iommu_for_device(0, dev_id);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x\n",
                        __func__, dev_id);
        return -ENODEV;
    }

    /* Replace the virtual device id with the physical one. */
    iommu_set_devid_to_cmd(&cmd->data[0], dev_id);
    amd_iommu_send_guest_cmd(iommu, cmd->data);

    return 0;
}
346 | | |
/*
 * Emulate a COMPLETION_WAIT command.
 *
 * - 'i' flag set: latch the completion-wait interrupt status bit.
 * - 's' flag set: store the 64-bit completion value to the guest-supplied
 *   semaphore address.  The address is reassembled from the low/high
 *   fields; the low part is dword-granular, hence the << 3.
 * - Finally raise the virtual IOMMU MSI if the completion-wait interrupt
 *   is both enabled (control register) and pending (status register).
 *
 * NOTE(review): the semaphore GFN comes straight from the guest command;
 * neither p2mt nor mfn validity is checked before mapping and writing —
 * confirm this cannot be reached with an invalid/special p2m entry.
 */
static int do_completion_wait(struct domain *d, cmd_entry_t *cmd)
{
    bool_t com_wait_int_en, com_wait_int, i, s;
    struct guest_iommu *iommu;
    unsigned long gfn;
    p2m_type_t p2mt;

    iommu = domain_iommu(d);

    i = iommu_get_bit(cmd->data[0], IOMMU_COMP_WAIT_I_FLAG_SHIFT);
    s = iommu_get_bit(cmd->data[0], IOMMU_COMP_WAIT_S_FLAG_SHIFT);

    if ( i )
        guest_iommu_set_status(iommu, IOMMU_STATUS_COMP_WAIT_INT_SHIFT);

    if ( s )
    {
        uint64_t gaddr_lo, gaddr_hi, gaddr_64, data;
        void *vaddr;

        /* 64-bit completion value: data[3] is the high word. */
        data = (uint64_t)cmd->data[3] << 32 | cmd->data[2];
        gaddr_lo = get_field_from_reg_u32(cmd->data[0],
                                          IOMMU_COMP_WAIT_ADDR_LOW_MASK,
                                          IOMMU_COMP_WAIT_ADDR_LOW_SHIFT);
        gaddr_hi = get_field_from_reg_u32(cmd->data[1],
                                          IOMMU_COMP_WAIT_ADDR_HIGH_MASK,
                                          IOMMU_COMP_WAIT_ADDR_HIGH_SHIFT);

        gaddr_64 = (gaddr_hi << 32) | (gaddr_lo << 3);

        gfn = gaddr_64 >> PAGE_SHIFT;
        vaddr = map_domain_page(get_gfn(d, gfn ,&p2mt));
        put_gfn(d, gfn);

        /* Atomic store so the guest never observes a torn semaphore value. */
        write_u64_atomic((uint64_t *)(vaddr + (gaddr_64 & (PAGE_SIZE-1))),
                         data);
        unmap_domain_page(vaddr);
    }

    com_wait_int_en = iommu_get_bit(iommu->reg_ctrl.lo,
                                    IOMMU_CONTROL_COMP_WAIT_INT_SHIFT);
    com_wait_int = iommu_get_bit(iommu->reg_status.lo,
                                 IOMMU_STATUS_COMP_WAIT_INT_SHIFT);

    if ( com_wait_int_en && com_wait_int )
        guest_iommu_deliver_msi(d);

    return 0;
}
396 | | |
/*
 * Emulate an INVALIDATE_DEVTAB_ENTRY command: read the guest's device
 * table entry for the named BDF and mirror its guest-CR3 / GV / GLX
 * configuration into the corresponding host device table entry, then
 * flush that entry on the host IOMMU.
 *
 * Returns 0 (silently ignoring the command) for BDF 0, out-of-range
 * entries, or entries whose gCR3 has not been set yet; -ENODEV if no
 * host IOMMU owns the device.
 */
static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
{
    uint16_t gbdf, mbdf, req_id, gdom_id, hdom_id;
    dev_entry_t *gdte, *mdte, *dte_base;
    struct amd_iommu *iommu = NULL;
    struct guest_iommu *g_iommu;
    uint64_t gcr3_gfn, gcr3_mfn;
    uint8_t glx, gv;
    unsigned long dte_mfn, flags;
    p2m_type_t p2mt;

    g_iommu = domain_iommu(d);
    gbdf = iommu_get_devid_from_cmd(cmd->data[0]);
    mbdf = machine_bdf(d, gbdf);

    /* Guest can only update DTEs for its passthru devices */
    if ( mbdf == 0 || gbdf == 0 )
        return 0;

    /* Sometimes guest invalidates devices from non-exists dtes */
    if ( (gbdf * sizeof(dev_entry_t)) > g_iommu->dev_table.size )
        return 0;

    dte_mfn = guest_iommu_get_table_mfn(d,
                                        reg_to_u64(g_iommu->dev_table.reg_base),
                                        sizeof(dev_entry_t), gbdf);
    ASSERT(mfn_valid(_mfn(dte_mfn)));

    /* Read guest dte information */
    dte_base = map_domain_page(_mfn(dte_mfn));

    gdte = dte_base + gbdf % (PAGE_SIZE / sizeof(dev_entry_t));

    gdom_id = get_domid_from_dte(gdte);
    gcr3_gfn = get_guest_cr3_from_dte(gdte);
    glx = get_glx_from_dte(gdte);
    gv = get_gv_from_dte(gdte);

    unmap_domain_page(dte_base);

    /* Do not update host dte before gcr3 has been set */
    if ( gcr3_gfn == 0 )
        return 0;

    gcr3_mfn = mfn_x(get_gfn(d, gcr3_gfn, &p2mt));
    put_gfn(d, gcr3_gfn);

    /*
     * NOTE(review): gcr3_gfn is guest-controlled; a debug-only ASSERT is
     * the sole validity check on the translated mfn — confirm release
     * builds cannot reach iommu_dte_set_guest_cr3() with INVALID_MFN.
     */
    ASSERT(mfn_valid(_mfn(gcr3_mfn)));

    iommu = find_iommu_for_device(0, mbdf);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x!\n",
                        __func__, mbdf);
        return -ENODEV;
    }

    /* Setup host device entry */
    hdom_id = host_domid(d, gdom_id);
    req_id = get_dma_requestor_id(iommu->seg, mbdf);
    mdte = iommu->dev_table.buffer + (req_id * sizeof(dev_entry_t));

    /* Update and flush the host DTE atomically w.r.t. the IOMMU lock. */
    spin_lock_irqsave(&iommu->lock, flags);
    iommu_dte_set_guest_cr3((u32 *)mdte, hdom_id,
                            gcr3_mfn << PAGE_SHIFT, gv, glx);

    amd_iommu_flush_device(iommu, req_id);
    spin_unlock_irqrestore(&iommu->lock, flags);

    return 0;
}
468 | | |
/*
 * Tasklet body: drain the guest command buffer.
 *
 * '_d' is the domain pointer smuggled through the tasklet's unsigned
 * long parameter (see tasklet_init() in guest_iommu_init()).  Entries
 * from head up to (but excluding) tail are dispatched by opcode; the
 * head pointer register is updated once after the loop.
 *
 * NOTE(review): a failing handler disables the vIOMMU but the drain loop
 * still processes the remaining queued commands — confirm intentional.
 */
static void guest_iommu_process_command(unsigned long _d)
{
    unsigned long opcode, tail, head, entries_per_page, cmd_mfn;
    cmd_entry_t *cmd, *cmd_base;
    struct domain *d = (struct domain *)_d;
    struct guest_iommu *iommu;

    iommu = domain_iommu(d);

    if ( !iommu->enabled )
        return;

    head = iommu_get_rb_pointer(iommu->cmd_buffer.reg_head.lo);
    tail = iommu_get_rb_pointer(iommu->cmd_buffer.reg_tail.lo);

    /* Tail pointer is rolled over by guest driver, value outside
     * cmd_buffer_entries cause iommu disabled
     */

    if ( tail >= iommu->cmd_buffer.entries ||
         head >= iommu->cmd_buffer.entries )
    {
        AMD_IOMMU_DEBUG("Error: guest iommu cmd buffer overflows\n");
        guest_iommu_disable(iommu);
        return;
    }

    entries_per_page = PAGE_SIZE / sizeof(cmd_entry_t);

    while ( head != tail )
    {
        int ret = 0;

        /* Map the guest page holding the command at 'head'. */
        cmd_mfn = guest_iommu_get_table_mfn(d,
                                            reg_to_u64(iommu->cmd_buffer.reg_base),
                                            sizeof(cmd_entry_t), head);
        ASSERT(mfn_valid(_mfn(cmd_mfn)));

        cmd_base = map_domain_page(_mfn(cmd_mfn));
        cmd = cmd_base + head % entries_per_page;

        opcode = get_field_from_reg_u32(cmd->data[1],
                                        IOMMU_CMD_OPCODE_MASK,
                                        IOMMU_CMD_OPCODE_SHIFT);
        switch ( opcode )
        {
        case IOMMU_CMD_COMPLETION_WAIT:
            ret = do_completion_wait(d, cmd);
            break;
        case IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY:
            ret = do_invalidate_dte(d, cmd);
            break;
        case IOMMU_CMD_INVALIDATE_IOMMU_PAGES:
            ret = do_invalidate_pages(d, cmd);
            break;
        case IOMMU_CMD_INVALIDATE_IOTLB_PAGES:
            ret = do_invalidate_iotlb_pages(d, cmd);
            break;
        case IOMMU_CMD_INVALIDATE_INT_TABLE:
            /* Interrupt-table invalidation is accepted but a no-op here. */
            break;
        case IOMMU_CMD_COMPLETE_PPR_REQUEST:
            ret = do_complete_ppr_request(d, cmd);
            break;
        case IOMMU_CMD_INVALIDATE_IOMMU_ALL:
            ret = do_invalidate_all(d, cmd);
            break;
        default:
            AMD_IOMMU_DEBUG("CMD: Unknown command cmd_type = %lx "
                            "head = %ld\n", opcode, head);
            break;
        }

        unmap_domain_page(cmd_base);
        if ( ++head >= iommu->cmd_buffer.entries )
            head = 0;
        if ( ret )
            guest_iommu_disable(iommu);
    }

    /* Now shift cmd buffer head pointer */
    iommu_set_rb_pointer(&iommu->cmd_buffer.reg_head.lo, head);
    return;
}
552 | | |
/*
 * Handle a guest write to the vIOMMU control register.
 *
 * Compares the new value with the currently stored one and acts on the
 * transitions: enabling translation latches the device table size;
 * enabling the command buffer / event log / PPR log latches the ring
 * sizes, sets the corresponding RUN status bits and kicks the command
 * tasklet; disabling reverses the relevant state.  The new value is
 * stored last.  Always returns 0.
 *
 * NOTE(review): cmd_run is read but never used below — confirm whether a
 * check against an already-running command buffer was intended.
 */
static int guest_iommu_write_ctrl(struct guest_iommu *iommu, uint64_t newctrl)
{
    bool_t cmd_en, event_en, iommu_en, ppr_en, ppr_log_en;
    bool_t cmd_en_old, event_en_old, iommu_en_old;
    bool_t cmd_run;

    iommu_en = iommu_get_bit(newctrl,
                             IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT);
    iommu_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
                                 IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT);

    cmd_en = iommu_get_bit(newctrl,
                           IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
    cmd_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
                               IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT);
    cmd_run = iommu_get_bit(iommu->reg_status.lo,
                            IOMMU_STATUS_CMD_BUFFER_RUN_SHIFT);
    event_en = iommu_get_bit(newctrl,
                             IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);
    event_en_old = iommu_get_bit(iommu->reg_ctrl.lo,
                                 IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT);

    ppr_en = iommu_get_bit(newctrl,
                           IOMMU_CONTROL_PPR_ENABLE_SHIFT);
    ppr_log_en = iommu_get_bit(newctrl,
                               IOMMU_CONTROL_PPR_LOG_ENABLE_SHIFT);

    if ( iommu_en )
    {
        guest_iommu_enable(iommu);
        guest_iommu_enable_dev_table(iommu);
    }

    if ( iommu_en && cmd_en )
    {
        guest_iommu_enable_ring_buffer(iommu, &iommu->cmd_buffer,
                                       sizeof(cmd_entry_t));
        /* Enable iommu command processing */
        tasklet_schedule(&iommu->cmd_buffer_tasklet);
    }

    if ( iommu_en && event_en )
    {
        guest_iommu_enable_ring_buffer(iommu, &iommu->event_log,
                                       sizeof(event_entry_t));
        guest_iommu_set_status(iommu, IOMMU_STATUS_EVENT_LOG_RUN_SHIFT);
        guest_iommu_clear_status(iommu, IOMMU_STATUS_EVENT_OVERFLOW_SHIFT);
    }

    if ( iommu_en && ppr_en && ppr_log_en )
    {
        guest_iommu_enable_ring_buffer(iommu, &iommu->ppr_log,
                                       sizeof(ppr_entry_t));
        guest_iommu_set_status(iommu, IOMMU_STATUS_PPR_LOG_RUN_SHIFT);
        guest_iommu_clear_status(iommu, IOMMU_STATUS_PPR_LOG_OVERFLOW_SHIFT);
    }

    if ( iommu_en && cmd_en_old && !cmd_en )
    {
        /* Disable iommu command processing */
        tasklet_kill(&iommu->cmd_buffer_tasklet);
    }

    if ( event_en_old && !event_en )
        guest_iommu_clear_status(iommu, IOMMU_STATUS_EVENT_LOG_RUN_SHIFT);

    if ( iommu_en_old && !iommu_en )
        guest_iommu_disable(iommu);

    u64_to_reg(&iommu->reg_ctrl, newctrl);
    return 0;
}
625 | | |
/*
 * Read the 64-bit emulated register at the given (8-byte aligned) MMIO
 * offset.  Unknown offsets read as 0.  Caller holds iommu->lock.
 */
static uint64_t iommu_mmio_read64(struct guest_iommu *iommu,
                                  unsigned long offset)
{
    uint64_t val;

    switch ( offset )
    {
    case IOMMU_DEV_TABLE_BASE_LOW_OFFSET:
        val = reg_to_u64(iommu->dev_table.reg_base);
        break;
    case IOMMU_CMD_BUFFER_BASE_LOW_OFFSET:
        val = reg_to_u64(iommu->cmd_buffer.reg_base);
        break;
    case IOMMU_EVENT_LOG_BASE_LOW_OFFSET:
        val = reg_to_u64(iommu->event_log.reg_base);
        break;
    case IOMMU_PPR_LOG_BASE_LOW_OFFSET:
        val = reg_to_u64(iommu->ppr_log.reg_base);
        break;
    case IOMMU_CMD_BUFFER_HEAD_OFFSET:
        val = reg_to_u64(iommu->cmd_buffer.reg_head);
        break;
    case IOMMU_CMD_BUFFER_TAIL_OFFSET:
        val = reg_to_u64(iommu->cmd_buffer.reg_tail);
        break;
    case IOMMU_EVENT_LOG_HEAD_OFFSET:
        val = reg_to_u64(iommu->event_log.reg_head);
        break;
    case IOMMU_EVENT_LOG_TAIL_OFFSET:
        val = reg_to_u64(iommu->event_log.reg_tail);
        break;
    case IOMMU_PPR_LOG_HEAD_OFFSET:
        val = reg_to_u64(iommu->ppr_log.reg_head);
        break;
    case IOMMU_PPR_LOG_TAIL_OFFSET:
        val = reg_to_u64(iommu->ppr_log.reg_tail);
        break;
    case IOMMU_CONTROL_MMIO_OFFSET:
        val = reg_to_u64(iommu->reg_ctrl);
        break;
    case IOMMU_STATUS_MMIO_OFFSET:
        val = reg_to_u64(iommu->reg_status);
        break;
    case IOMMU_EXT_FEATURE_MMIO_OFFSET:
        val = reg_to_u64(iommu->reg_ext_feature);
        break;

    default:
        AMD_IOMMU_DEBUG("Guest reads unknown mmio offset = %lx\n", offset);
        val = 0;
        break;
    }

    return val;
}
681 | | |
/*
 * MMIO read handler for the emulated IOMMU register window.
 *
 * Accepts naturally aligned accesses of up to 8 bytes: the enclosing
 * aligned 64-bit register is read under the lock and the requested
 * sub-field is extracted by shift and mask.  Misaligned or oversized
 * accesses are rejected with X86EMUL_UNHANDLEABLE.
 */
static int guest_iommu_mmio_read(struct vcpu *v, unsigned long addr,
                                 unsigned int len, unsigned long *pval)
{
    struct guest_iommu *iommu = vcpu_iommu(v);
    unsigned long offset;
    uint64_t val;
    uint32_t mmio, shift;
    uint64_t mask = 0;

    offset = addr - iommu->mmio_base;

    /* Natural alignment check; also bounds len at 8 bytes. */
    if ( unlikely((offset & (len - 1 )) || (len > 8)) )
    {
        AMD_IOMMU_DEBUG("iommu mmio read access is not aligned:"
                        " offset = %lx, len = %x\n", offset, len);
        return X86EMUL_UNHANDLEABLE;
    }

    mask = (len == 8) ? ~0ULL : (1ULL << (len * 8)) - 1;
    shift = (offset & 7u) * 8;

    /* mmio access is always aligned on 8-byte boundary */
    mmio = offset & (~7u);

    spin_lock(&iommu->lock);
    val = iommu_mmio_read64(iommu, mmio);
    spin_unlock(&iommu->lock);

    *pval = (val >> shift ) & mask;

    return X86EMUL_OKAY;
}
714 | | |
/*
 * Write the 64-bit emulated register at the given (8-byte aligned) MMIO
 * offset.  Caller holds iommu->lock.
 *
 * A tail-pointer write on the command buffer kicks the command-processing
 * tasklet.  The status register is write-1-to-clear, restricted to the
 * masked interrupt/overflow bits.  Unknown offsets are logged and ignored.
 */
static void guest_iommu_mmio_write64(struct guest_iommu *iommu,
                                     unsigned long offset, uint64_t val)
{
    switch ( offset )
    {
    case IOMMU_DEV_TABLE_BASE_LOW_OFFSET:
        u64_to_reg(&iommu->dev_table.reg_base, val);
        break;
    case IOMMU_CMD_BUFFER_BASE_LOW_OFFSET:
        u64_to_reg(&iommu->cmd_buffer.reg_base, val);
        break;
    case IOMMU_EVENT_LOG_BASE_LOW_OFFSET:
        u64_to_reg(&iommu->event_log.reg_base, val);
        break;
    case IOMMU_PPR_LOG_BASE_LOW_OFFSET:
        u64_to_reg(&iommu->ppr_log.reg_base, val);
        break;
    case IOMMU_CONTROL_MMIO_OFFSET:
        guest_iommu_write_ctrl(iommu, val);
        break;
    case IOMMU_CMD_BUFFER_HEAD_OFFSET:
        u64_to_reg(&iommu->cmd_buffer.reg_head, val);
        break;
    case IOMMU_CMD_BUFFER_TAIL_OFFSET:
        u64_to_reg(&iommu->cmd_buffer.reg_tail, val);
        /* New commands queued: process them asynchronously. */
        tasklet_schedule(&iommu->cmd_buffer_tasklet);
        break;
    case IOMMU_EVENT_LOG_HEAD_OFFSET:
        u64_to_reg(&iommu->event_log.reg_head, val);
        break;
    case IOMMU_EVENT_LOG_TAIL_OFFSET:
        u64_to_reg(&iommu->event_log.reg_tail, val);
        break;
    case IOMMU_PPR_LOG_HEAD_OFFSET:
        u64_to_reg(&iommu->ppr_log.reg_head, val);
        break;
    case IOMMU_PPR_LOG_TAIL_OFFSET:
        u64_to_reg(&iommu->ppr_log.reg_tail, val);
        break;
    case IOMMU_STATUS_MMIO_OFFSET:
        /* Write-1-to-clear, limited to the guest-clearable bits. */
        val &= IOMMU_STATUS_EVENT_OVERFLOW_MASK |
               IOMMU_STATUS_EVENT_LOG_INT_MASK |
               IOMMU_STATUS_COMP_WAIT_INT_MASK |
               IOMMU_STATUS_PPR_LOG_OVERFLOW_MASK |
               IOMMU_STATUS_PPR_LOG_INT_MASK |
               IOMMU_STATUS_GAPIC_LOG_OVERFLOW_MASK |
               IOMMU_STATUS_GAPIC_LOG_INT_MASK;
        u64_to_reg(&iommu->reg_status, reg_to_u64(iommu->reg_status) & ~val);
        break;

    default:
        AMD_IOMMU_DEBUG("guest writes unknown mmio offset = %lx,"
                        " val = %" PRIx64 "\n", offset, val);
        break;
    }
}
771 | | |
/*
 * MMIO write handler for the emulated IOMMU register window.
 *
 * Accepts naturally aligned accesses of up to 8 bytes, implemented as a
 * read-modify-write of the enclosing aligned 64-bit register under the
 * lock.  Misaligned or oversized accesses are rejected.
 */
static int guest_iommu_mmio_write(struct vcpu *v, unsigned long addr,
                                  unsigned int len, unsigned long val)
{
    struct guest_iommu *iommu = vcpu_iommu(v);
    unsigned long offset;
    uint64_t reg_old, mmio;
    uint32_t shift;
    uint64_t mask = 0;

    offset = addr - iommu->mmio_base;

    /* Natural alignment check; also bounds len at 8 bytes. */
    if ( unlikely((offset & (len - 1)) || (len > 8)) )
    {
        AMD_IOMMU_DEBUG("iommu mmio write access is not aligned:"
                        " offset = %lx, len = %x\n", offset, len);
        return X86EMUL_UNHANDLEABLE;
    }

    mask = (len == 8) ? ~0ULL : (1ULL << (len * 8)) - 1;
    shift = (offset & 7) * 8;

    /* mmio access is always aligned on 8-byte boundary */
    mmio = offset & ~7;

    spin_lock(&iommu->lock);

    /* Merge the written bytes into the current 64-bit register value. */
    reg_old = iommu_mmio_read64(iommu, mmio);
    reg_old &= ~(mask << shift);
    val = reg_old | ((val & mask) << shift);
    guest_iommu_mmio_write64(iommu, mmio, val);

    spin_unlock(&iommu->lock);

    return X86EMUL_OKAY;
}
807 | | |
/*
 * Position the emulated IOMMU's MMIO window at guest physical 'base' and
 * convert the backing GFNs to p2m_mmio_dm so accesses trap into the MMIO
 * handlers above.  Returns -EACCES if the domain has no vIOMMU.
 *
 * NOTE(review): the return value of p2m_change_type_one() is ignored —
 * confirm that failures here are benign.
 */
int guest_iommu_set_base(struct domain *d, uint64_t base)
{
    p2m_type_t t;
    struct guest_iommu *iommu = domain_iommu(d);

    if ( !iommu )
        return -EACCES;

    iommu->mmio_base = base;
    base >>= PAGE_SHIFT;

    for ( int i = 0; i < IOMMU_MMIO_PAGE_NR; i++ )
    {
        unsigned long gfn = base + i;

        get_gfn_query(d, gfn, &t);
        p2m_change_type_one(d, gfn, t, p2m_mmio_dm);
        put_gfn(d, gfn);
    }

    return 0;
}
830 | | |
/* Initialize mmio read only bits */
/*
 * Populate the read-only extended-feature register: advertise prefetch,
 * PPR logging, guest translation and invalidate-all support, 6-level
 * host/guest address sizes, single-level gCR3 and a 9-bit PASID max.
 */
static void guest_iommu_reg_init(struct guest_iommu *iommu)
{
    uint32_t lower, upper;

    lower = upper = 0;
    /* Support prefetch */
    iommu_set_bit(&lower,IOMMU_EXT_FEATURE_PREFSUP_SHIFT);
    /* Support PPR log */
    iommu_set_bit(&lower,IOMMU_EXT_FEATURE_PPRSUP_SHIFT);
    /* Support guest translation */
    iommu_set_bit(&lower,IOMMU_EXT_FEATURE_GTSUP_SHIFT);
    /* Support invalidate all command */
    iommu_set_bit(&lower,IOMMU_EXT_FEATURE_IASUP_SHIFT);

    /* Host translation size has 6 levels */
    set_field_in_reg_u32(HOST_ADDRESS_SIZE_6_LEVEL, lower,
                         IOMMU_EXT_FEATURE_HATS_MASK,
                         IOMMU_EXT_FEATURE_HATS_SHIFT,
                         &lower);
    /* Guest translation size has 6 levels */
    set_field_in_reg_u32(GUEST_ADDRESS_SIZE_6_LEVEL, lower,
                         IOMMU_EXT_FEATURE_GATS_MASK,
                         IOMMU_EXT_FEATURE_GATS_SHIFT,
                         &lower);
    /* Single level gCR3 */
    set_field_in_reg_u32(GUEST_CR3_1_LEVEL, lower,
                         IOMMU_EXT_FEATURE_GLXSUP_MASK,
                         IOMMU_EXT_FEATURE_GLXSUP_SHIFT, &lower);
    /* 9 bit PASID */
    set_field_in_reg_u32(PASMAX_9_bit, upper,
                         IOMMU_EXT_FEATURE_PASMAX_MASK,
                         IOMMU_EXT_FEATURE_PASMAX_SHIFT, &upper);

    iommu->reg_ext_feature.lo = lower;
    iommu->reg_ext_feature.hi = upper;
}
868 | | |
869 | | static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr) |
870 | 0 | { |
871 | 0 | struct guest_iommu *iommu = vcpu_iommu(v); |
872 | 0 |
|
873 | 0 | return iommu && addr >= iommu->mmio_base && |
874 | 0 | addr < iommu->mmio_base + IOMMU_MMIO_SIZE; |
875 | 0 | } |
876 | | |
/* MMIO intercept hooks for the emulated IOMMU register window. */
static const struct hvm_mmio_ops iommu_mmio_ops = {
    .check = guest_iommu_mmio_range,
    .read = guest_iommu_mmio_read,
    .write = guest_iommu_mmio_write
};
882 | | |
/* Domain specific initialization */
/*
 * Create and register the emulated IOMMU for a domain.  A no-op (return
 * 0) unless the domain is HVM with IOMMU/IOMMUv2 enabled and vIOMMU
 * requested.
 *
 * NOTE(review): returns 1 (not a -Exx code) on allocation failure,
 * unlike the -ENODEV/-EACCES style used elsewhere in this file — confirm
 * callers only test for non-zero.
 */
int guest_iommu_init(struct domain* d)
{
    struct guest_iommu *iommu;
    struct domain_iommu *hd = dom_iommu(d);

    if ( !is_hvm_domain(d) || !iommu_enabled || !iommuv2_enabled ||
         !has_viommu(d) )
        return 0;

    iommu = xzalloc(struct guest_iommu);
    if ( !iommu )
    {
        AMD_IOMMU_DEBUG("Error allocating guest iommu structure.\n");
        return 1;
    }

    guest_iommu_reg_init(iommu);
    /* No MMIO window until the toolstack calls guest_iommu_set_base(). */
    iommu->mmio_base = ~0ULL;
    iommu->domain = d;
    hd->arch.g_iommu = iommu;

    tasklet_init(&iommu->cmd_buffer_tasklet,
                 guest_iommu_process_command, (unsigned long)d);

    spin_lock_init(&iommu->lock);

    register_mmio_handler(d, &iommu_mmio_ops);

    return 0;
}
914 | | |
/*
 * Tear down a domain's emulated IOMMU: stop the command tasklet, free
 * the state and clear the back-pointer.  Safe to call when no vIOMMU
 * was ever created.
 */
void guest_iommu_destroy(struct domain *d)
{
    struct guest_iommu *iommu;

    iommu = domain_iommu(d);
    if ( !iommu )
        return;

    /* Ensure the tasklet cannot run against freed state. */
    tasklet_kill(&iommu->cmd_buffer_tasklet);
    xfree(iommu);

    dom_iommu(d)->arch.g_iommu = NULL;
}
927 | 0 | } |