/root/src/xen/xen/drivers/passthrough/vtd/dmar.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (c) 2006, Intel Corporation. |
3 | | * |
4 | | * This program is free software; you can redistribute it and/or modify it |
5 | | * under the terms and conditions of the GNU General Public License, |
6 | | * version 2, as published by the Free Software Foundation. |
7 | | * |
8 | | * This program is distributed in the hope it will be useful, but WITHOUT |
9 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
10 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
11 | | * more details. |
12 | | * |
13 | | * You should have received a copy of the GNU General Public License along with |
14 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
15 | | * |
16 | | * Copyright (C) Ashok Raj <ashok.raj@intel.com> |
17 | | * Copyright (C) Shaohua Li <shaohua.li@intel.com> |
18 | | * Copyright (C) Allen Kay <allen.m.kay@intel.com> - adapted to xen |
19 | | */ |
20 | | |
21 | | #include <xen/init.h> |
22 | | #include <xen/bitmap.h> |
23 | | #include <xen/errno.h> |
24 | | #include <xen/kernel.h> |
25 | | #include <xen/acpi.h> |
26 | | #include <xen/mm.h> |
27 | | #include <xen/xmalloc.h> |
28 | | #include <xen/pci.h> |
29 | | #include <xen/pci_regs.h> |
30 | | #include <asm/atomic.h> |
31 | | #include <asm/string.h> |
32 | | #include "dmar.h" |
33 | | #include "iommu.h" |
34 | | #include "extern.h" |
35 | | #include "vtd.h" |
36 | | |
37 | | #undef PREFIX |
38 | | #define PREFIX VTDPREFIX "ACPI DMAR:" |
39 | | #define DEBUG |
40 | | |
41 | 8 | #define MIN_SCOPE_LEN (sizeof(struct acpi_dmar_device_scope) + \ |
42 | 8 | sizeof(struct acpi_dmar_pci_path)) |
43 | | |
44 | | LIST_HEAD_READ_MOSTLY(acpi_drhd_units); |
45 | | LIST_HEAD_READ_MOSTLY(acpi_rmrr_units); |
46 | | static LIST_HEAD_READ_MOSTLY(acpi_atsr_units); |
47 | | static LIST_HEAD_READ_MOSTLY(acpi_rhsa_units); |
48 | | |
49 | | static struct acpi_table_header *__read_mostly dmar_table; |
50 | | static int __read_mostly dmar_flags; |
51 | | static u64 __read_mostly igd_drhd_address; |
52 | | |
53 | | static void __init dmar_scope_add_buses(struct dmar_scope *scope, u16 sec_bus, |
54 | | u16 sub_bus) |
55 | 3 | { |
56 | 3 | sub_bus &= 0xff; |
57 | 3 | if (sec_bus > sub_bus) |
58 | 0 | return; |
59 | 3 | |
60 | 6 | while ( sec_bus <= sub_bus ) |
61 | 3 | set_bit(sec_bus++, scope->buses); |
62 | 3 | } |
63 | | |
64 | | static int __init acpi_register_drhd_unit(struct acpi_drhd_unit *drhd) |
65 | 1 | { |
66 | 1 | /* |
67 | 1 | * add INCLUDE_ALL at the tail, so scan the list will find it at |
68 | 1 | * the very end. |
69 | 1 | */ |
70 | 1 | if ( drhd->include_all ) |
71 | 1 | list_add_tail(&drhd->list, &acpi_drhd_units); |
72 | 1 | else |
73 | 0 | list_add(&drhd->list, &acpi_drhd_units); |
74 | 1 | return 0; |
75 | 1 | } |
76 | | |
77 | | static int __init acpi_register_rmrr_unit(struct acpi_rmrr_unit *rmrr) |
78 | 1 | { |
79 | 1 | list_add(&rmrr->list, &acpi_rmrr_units); |
80 | 1 | return 0; |
81 | 1 | } |
82 | | |
83 | | static void scope_devices_free(struct dmar_scope *scope) |
84 | 0 | { |
85 | 0 | if ( !scope ) |
86 | 0 | return; |
87 | 0 |
|
88 | 0 | scope->devices_cnt = 0; |
89 | 0 | xfree(scope->devices); |
90 | 0 | scope->devices = NULL; |
91 | 0 | } |
92 | | |
93 | | static void __init disable_all_dmar_units(void) |
94 | 0 | { |
95 | 0 | struct acpi_drhd_unit *drhd, *_drhd; |
96 | 0 | struct acpi_rmrr_unit *rmrr, *_rmrr; |
97 | 0 | struct acpi_atsr_unit *atsr, *_atsr; |
98 | 0 |
|
99 | 0 | list_for_each_entry_safe ( drhd, _drhd, &acpi_drhd_units, list ) |
100 | 0 | { |
101 | 0 | list_del(&drhd->list); |
102 | 0 | scope_devices_free(&drhd->scope); |
103 | 0 | xfree(drhd); |
104 | 0 | } |
105 | 0 | list_for_each_entry_safe ( rmrr, _rmrr, &acpi_rmrr_units, list ) |
106 | 0 | { |
107 | 0 | list_del(&rmrr->list); |
108 | 0 | scope_devices_free(&rmrr->scope); |
109 | 0 | xfree(rmrr); |
110 | 0 | } |
111 | 0 | list_for_each_entry_safe ( atsr, _atsr, &acpi_atsr_units, list ) |
112 | 0 | { |
113 | 0 | list_del(&atsr->list); |
114 | 0 | scope_devices_free(&atsr->scope); |
115 | 0 | xfree(atsr); |
116 | 0 | } |
117 | 0 | } |
118 | | |
119 | | static int acpi_ioapic_device_match( |
120 | | struct list_head *ioapic_list, unsigned int apic_id) |
121 | 372 | { |
122 | 372 | struct acpi_ioapic_unit *ioapic; |
123 | 645 | list_for_each_entry( ioapic, ioapic_list, list ) { |
124 | 645 | if (ioapic->apic_id == apic_id) |
125 | 372 | return 1; |
126 | 645 | } |
127 | 0 | return 0; |
128 | 372 | } |
129 | | |
130 | | struct acpi_drhd_unit * ioapic_to_drhd(unsigned int apic_id) |
131 | 55 | { |
132 | 55 | struct acpi_drhd_unit *drhd; |
133 | 55 | list_for_each_entry( drhd, &acpi_drhd_units, list ) |
134 | 55 | if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) |
135 | 55 | return drhd; |
136 | 0 | return NULL; |
137 | 55 | } |
138 | | |
139 | | struct acpi_drhd_unit * iommu_to_drhd(struct iommu *iommu) |
140 | 8 | { |
141 | 8 | struct acpi_drhd_unit *drhd; |
142 | 8 | |
143 | 8 | if ( iommu == NULL ) |
144 | 0 | return NULL; |
145 | 8 | |
146 | 8 | list_for_each_entry( drhd, &acpi_drhd_units, list ) |
147 | 8 | if ( drhd->iommu == iommu ) |
148 | 8 | return drhd; |
149 | 8 | |
150 | 0 | return NULL; |
151 | 8 | } |
152 | | |
153 | | struct iommu * ioapic_to_iommu(unsigned int apic_id) |
154 | 317 | { |
155 | 317 | struct acpi_drhd_unit *drhd; |
156 | 317 | |
157 | 317 | list_for_each_entry( drhd, &acpi_drhd_units, list ) |
158 | 317 | if ( acpi_ioapic_device_match(&drhd->ioapic_list, apic_id) ) |
159 | 317 | return drhd->iommu; |
160 | 0 | return NULL; |
161 | 317 | } |
162 | | |
163 | | static bool_t acpi_hpet_device_match( |
164 | | struct list_head *list, unsigned int hpet_id) |
165 | 0 | { |
166 | 0 | struct acpi_hpet_unit *hpet; |
167 | 0 |
|
168 | 0 | list_for_each_entry( hpet, list, list ) |
169 | 0 | if (hpet->id == hpet_id) |
170 | 0 | return 1; |
171 | 0 | return 0; |
172 | 0 | } |
173 | | |
174 | | struct acpi_drhd_unit *hpet_to_drhd(unsigned int hpet_id) |
175 | 0 | { |
176 | 0 | struct acpi_drhd_unit *drhd; |
177 | 0 |
|
178 | 0 | list_for_each_entry( drhd, &acpi_drhd_units, list ) |
179 | 0 | if ( acpi_hpet_device_match(&drhd->hpet_list, hpet_id) ) |
180 | 0 | return drhd; |
181 | 0 | return NULL; |
182 | 0 | } |
183 | | |
184 | | struct iommu *hpet_to_iommu(unsigned int hpet_id) |
185 | 0 | { |
186 | 0 | struct acpi_drhd_unit *drhd = hpet_to_drhd(hpet_id); |
187 | 0 |
|
188 | 0 | return drhd ? drhd->iommu : NULL; |
189 | 0 | } |
190 | | |
191 | | static int __init acpi_register_atsr_unit(struct acpi_atsr_unit *atsr) |
192 | 1 | { |
193 | 1 | /* |
194 | 1 | * add ALL_PORTS at the tail, so scan the list will find it at |
195 | 1 | * the very end. |
196 | 1 | */ |
197 | 1 | if ( atsr->all_ports ) |
198 | 0 | list_add_tail(&atsr->list, &acpi_atsr_units); |
199 | 1 | else |
200 | 1 | list_add(&atsr->list, &acpi_atsr_units); |
201 | 1 | return 0; |
202 | 1 | } |
203 | | |
/*
 * Find the DRHD unit covering @pdev.
 *
 * The device is first normalised to the BDF that the DMAR table would
 * report for it (virtual functions match through their physical function,
 * extended functions through function 0 of their bus).  Then, per unit on
 * the matching segment, in order:
 *   1) an explicit device-scope entry equal to that BDF,
 *   2) a bus covered by a bridge scope's secondary..subordinate range,
 *   3) failing both for all units, the segment's INCLUDE_ALL unit (if any).
 *
 * Returns NULL for a NULL @pdev or when nothing matches.
 */
struct acpi_drhd_unit *acpi_find_matched_drhd_unit(const struct pci_dev *pdev)
{
    u8 bus, devfn;
    struct acpi_drhd_unit *drhd;
    struct acpi_drhd_unit *include_all = NULL;
    int i;

    if ( pdev == NULL )
        return NULL;

    if ( pdev->info.is_virtfn )
    {
        /* VFs are not listed in the DMAR scope; match via the PF's BDF. */
        bus = pdev->info.physfn.bus;
        devfn = !pdev->info.is_extfn ? pdev->info.physfn.devfn : 0;
    }
    else if ( pdev->info.is_extfn )
    {
        /* Extended functions match through function 0 on their own bus. */
        bus = pdev->bus;
        devfn = 0;
    }
    else
    {
        bus = pdev->bus;
        devfn = pdev->devfn;
    }

    list_for_each_entry ( drhd, &acpi_drhd_units, list )
    {
        if ( drhd->segment != pdev->seg )
            continue;

        /* Exact device-scope match takes precedence. */
        for (i = 0; i < drhd->scope.devices_cnt; i++)
            if ( drhd->scope.devices[i] == PCI_BDF2(bus, devfn) )
                return drhd;

        /* Next, buses claimed via a bridge scope (see dmar_scope_add_buses). */
        if ( test_bit(bus, drhd->scope.buses) )
            return drhd;

        /*
         * Remember a fall-back INCLUDE_ALL unit; registration puts these
         * at the list tail, so specific units were all tried first.
         */
        if ( drhd->include_all )
            include_all = drhd;
    }
    return include_all;
}
247 | | |
248 | | struct acpi_atsr_unit *acpi_find_matched_atsr_unit(const struct pci_dev *pdev) |
249 | 0 | { |
250 | 0 | struct acpi_atsr_unit *atsr; |
251 | 0 | struct acpi_atsr_unit *all_ports = NULL; |
252 | 0 |
|
253 | 0 | list_for_each_entry ( atsr, &acpi_atsr_units, list ) |
254 | 0 | { |
255 | 0 | if ( atsr->segment != pdev->seg ) |
256 | 0 | continue; |
257 | 0 |
|
258 | 0 | if ( test_bit(pdev->bus, atsr->scope.buses) ) |
259 | 0 | return atsr; |
260 | 0 |
|
261 | 0 | if ( atsr->all_ports ) |
262 | 0 | all_ports = atsr; |
263 | 0 | } |
264 | 0 | return all_ports; |
265 | 0 | } |
266 | | |
267 | | struct acpi_rhsa_unit * drhd_to_rhsa(struct acpi_drhd_unit *drhd) |
268 | 8.09k | { |
269 | 8.09k | struct acpi_rhsa_unit *rhsa; |
270 | 8.09k | |
271 | 8.09k | if ( drhd == NULL ) |
272 | 0 | return NULL; |
273 | 8.09k | |
274 | 8.09k | list_for_each_entry ( rhsa, &acpi_rhsa_units, list ) |
275 | 8.09k | { |
276 | 8.09k | if ( rhsa->address == drhd->address ) |
277 | 8.09k | return rhsa; |
278 | 8.09k | } |
279 | 1 | return NULL; |
280 | 8.09k | } |
281 | | |
282 | | int is_igd_drhd(struct acpi_drhd_unit *drhd) |
283 | 4.56M | { |
284 | 4.56M | return drhd && (drhd->address == igd_drhd_address); |
285 | 4.56M | } |
286 | | |
287 | | /* |
288 | | * Count number of devices in device scope. Do not include PCI sub |
289 | | * hierarchies. |
290 | | */ |
291 | | static int __init scope_device_count(const void *start, const void *end) |
292 | 3 | { |
293 | 3 | const struct acpi_dmar_device_scope *scope; |
294 | 3 | int count = 0; |
295 | 3 | |
296 | 11 | while ( start < end ) |
297 | 8 | { |
298 | 8 | scope = start; |
299 | 8 | if ( scope->length < MIN_SCOPE_LEN ) |
300 | 0 | { |
301 | 0 | printk(XENLOG_WARNING VTDPREFIX "Invalid device scope\n"); |
302 | 0 | return -EINVAL; |
303 | 0 | } |
304 | 8 | |
305 | 8 | if ( scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE || |
306 | 5 | scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || |
307 | 3 | scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC || |
308 | 1 | scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET ) |
309 | 8 | count++; |
310 | 8 | |
311 | 8 | start += scope->length; |
312 | 8 | } |
313 | 3 | |
314 | 3 | return count; |
315 | 3 | } |
316 | | |
317 | | |
/*
 * Parse the device-scope entries in [start, end) into @scope.
 *
 * Allocates scope->devices (one u16 BDF per counted entry) and fills it;
 * bridge entries additionally mark their secondary..subordinate bus range
 * in scope->buses.  For DRHD scopes (@type == DMAR_TYPE) IO-APIC and HPET
 * entries are also recorded on the owning unit's ioapic/hpet lists, and
 * endpoint 0000:00:02.0 latches igd_drhd_address.
 *
 * Walking an entry's PCI path reads PCI_SECONDARY_BUS from config space,
 * so the relevant buses must be accessible at this point.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (freeing any
 * partially built device array), or the error from scope_device_count().
 */
static int __init acpi_parse_dev_scope(
    const void *start, const void *end, struct dmar_scope *scope,
    int type, u16 seg)
{
    struct acpi_ioapic_unit *acpi_ioapic_unit;
    const struct acpi_dmar_device_scope *acpi_scope;
    u16 bus, sub_bus, sec_bus;
    const struct acpi_dmar_pci_path *path;
    /* Only DRHD scopes are embedded in an acpi_drhd_unit. */
    struct acpi_drhd_unit *drhd = type == DMAR_TYPE ?
        container_of(scope, struct acpi_drhd_unit, scope) : NULL;
    int depth, cnt, didx = 0, ret;

    if ( (cnt = scope_device_count(start, end)) < 0 )
        return cnt;

    if ( cnt > 0 )
    {
        scope->devices = xzalloc_array(u16, cnt);
        if ( !scope->devices )
            return -ENOMEM;
    }
    scope->devices_cnt = cnt;

    while ( start < end )
    {
        acpi_scope = start;
        path = (const void *)(acpi_scope + 1);
        /* Number of (dev,fn) hops from the start bus to the device. */
        depth = (acpi_scope->length - sizeof(*acpi_scope)) / sizeof(*path);
        bus = acpi_scope->bus;

        /*
         * Follow all but the last hop through config space: each hop is
         * a bridge whose secondary bus leads to the next path element.
         */
        while ( --depth > 0 )
        {
            bus = pci_conf_read8(seg, bus, path->dev, path->fn,
                                 PCI_SECONDARY_BUS);
            path++;
        }

        switch ( acpi_scope->entry_type )
        {
        case ACPI_DMAR_SCOPE_TYPE_BRIDGE:
            sec_bus = pci_conf_read8(seg, bus, path->dev, path->fn,
                                     PCI_SECONDARY_BUS);
            sub_bus = pci_conf_read8(seg, bus, path->dev, path->fn,
                                     PCI_SUBORDINATE_BUS);
            if ( iommu_verbose )
                printk(VTDPREFIX
                       " bridge: %04x:%02x:%02x.%u start=%x sec=%x sub=%x\n",
                       seg, bus, path->dev, path->fn,
                       acpi_scope->bus, sec_bus, sub_bus);

            /* Claim the whole hierarchy behind the bridge. */
            dmar_scope_add_buses(scope, sec_bus, sub_bus);
            break;

        case ACPI_DMAR_SCOPE_TYPE_HPET:
            if ( iommu_verbose )
                printk(VTDPREFIX " MSI HPET: %04x:%02x:%02x.%u\n",
                       seg, bus, path->dev, path->fn);

            if ( drhd )
            {
                struct acpi_hpet_unit *acpi_hpet_unit;

                ret = -ENOMEM;
                acpi_hpet_unit = xmalloc(struct acpi_hpet_unit);
                if ( !acpi_hpet_unit )
                    goto out;
                acpi_hpet_unit->id = acpi_scope->enumeration_id;
                acpi_hpet_unit->bus = bus;
                acpi_hpet_unit->dev = path->dev;
                acpi_hpet_unit->func = path->fn;
                list_add(&acpi_hpet_unit->list, &drhd->hpet_list);
            }

            break;

        case ACPI_DMAR_SCOPE_TYPE_ENDPOINT:
            if ( iommu_verbose )
                printk(VTDPREFIX " endpoint: %04x:%02x:%02x.%u\n",
                       seg, bus, path->dev, path->fn);

            if ( drhd )
            {
                /* 0000:00:02.0 is the integrated graphics device. */
                if ( (seg == 0) && (bus == 0) && (path->dev == 2) &&
                     (path->fn == 0) )
                    igd_drhd_address = drhd->address;
            }

            break;

        case ACPI_DMAR_SCOPE_TYPE_IOAPIC:
            if ( iommu_verbose )
                printk(VTDPREFIX " IOAPIC: %04x:%02x:%02x.%u\n",
                       seg, bus, path->dev, path->fn);

            if ( drhd )
            {
                ret = -ENOMEM;
                acpi_ioapic_unit = xmalloc(struct acpi_ioapic_unit);
                if ( !acpi_ioapic_unit )
                    goto out;
                acpi_ioapic_unit->apic_id = acpi_scope->enumeration_id;
                acpi_ioapic_unit->ioapic.bdf.bus = bus;
                acpi_ioapic_unit->ioapic.bdf.dev = path->dev;
                acpi_ioapic_unit->ioapic.bdf.func = path->fn;
                list_add(&acpi_ioapic_unit->list, &drhd->ioapic_list);
            }

            break;

        default:
            /* Unknown entries were not counted, so record no BDF. */
            if ( iommu_verbose )
                printk(XENLOG_WARNING VTDPREFIX "Unknown scope type %#x\n",
                       acpi_scope->entry_type);
            start += acpi_scope->length;
            continue;
        }
        scope->devices[didx++] = PCI_BDF(bus, path->dev, path->fn);
        start += acpi_scope->length;
    }

    ret = 0;

 out:
    if ( ret )
        scope_devices_free(scope);

    return ret;
}
446 | | |
447 | | static int __init acpi_dmar_check_length( |
448 | | const struct acpi_dmar_header *h, unsigned int min_len) |
449 | 8 | { |
450 | 8 | if ( h->length >= min_len ) |
451 | 8 | return 0; |
452 | 0 | printk(XENLOG_ERR VTDPREFIX "Invalid ACPI DMAR entry length: %#x\n", |
453 | 0 | h->length); |
454 | 0 | return -EINVAL; |
455 | 8 | } |
456 | | |
/*
 * Parse one DRHD (remapping hardware unit) sub-table: allocate an
 * acpi_drhd_unit, allocate its IOMMU, parse its device scope and register
 * it on acpi_drhd_units.
 *
 * Only one INCLUDE_ALL unit is allowed on segment 0.  Unless iommu=force
 * or the unit is INCLUDE_ALL, the scoped PCI devices are probed: a unit
 * whose scoped devices are all undiscoverable is dropped when
 * iommu_workaround_bios_bug is set, and is an error otherwise.
 *
 * On any error the unit (scope, IOMMU, structure) is freed here; on
 * success ownership passes to the global list.  Returns 0 or -errno.
 */
static int __init
acpi_parse_one_drhd(struct acpi_dmar_header *header)
{
    struct acpi_dmar_hardware_unit *drhd =
        container_of(header, struct acpi_dmar_hardware_unit, header);
    void *dev_scope_start, *dev_scope_end;
    struct acpi_drhd_unit *dmaru;
    int ret;
    static int include_all = 0;

    if ( (ret = acpi_dmar_check_length(header, sizeof(*drhd))) != 0 )
        return ret;

    /* Reject all-zeroes and all-ones register base addresses. */
    if ( !drhd->address || !(drhd->address + 1) )
        return -ENODEV;

    dmaru = xzalloc(struct acpi_drhd_unit);
    if ( !dmaru )
        return -ENOMEM;

    dmaru->address = drhd->address;
    dmaru->segment = drhd->segment;
    dmaru->include_all = drhd->flags & ACPI_DMAR_INCLUDE_ALL;
    INIT_LIST_HEAD(&dmaru->ioapic_list);
    INIT_LIST_HEAD(&dmaru->hpet_list);
    if ( iommu_verbose )
        printk(VTDPREFIX " dmaru->address = %"PRIx64"\n", dmaru->address);

    ret = iommu_alloc(dmaru);
    if ( ret )
        goto out;

    /* The device-scope entries follow the fixed-size DRHD structure. */
    dev_scope_start = (void *)(drhd + 1);
    dev_scope_end = ((void *)drhd) + header->length;
    ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
                               &dmaru->scope, DMAR_TYPE, drhd->segment);

    if ( dmaru->include_all )
    {
        if ( iommu_verbose )
            printk(VTDPREFIX " flags: INCLUDE_ALL\n");
        /* Only allow one INCLUDE_ALL */
        if ( drhd->segment == 0 && include_all )
        {
            printk(XENLOG_WARNING VTDPREFIX
                   "Only one INCLUDE_ALL device scope is allowed\n");
            ret = -EINVAL;
        }
        if ( drhd->segment == 0 )
            include_all = 1;
    }

    if ( ret )
        goto out;
    else if ( force_iommu || dmaru->include_all )
        acpi_register_drhd_unit(dmaru);
    else
    {
        u8 b, d, f;
        unsigned int i = 0, invalid_cnt = 0;
        union {
            const void *raw;
            const struct acpi_dmar_device_scope *scope;
        } p;

        /* Skip checking if segment is not accessible yet. */
        if ( !pci_known_segment(drhd->segment) )
            i = UINT_MAX;

        /*
         * Walk raw scope entries and parsed BDFs in lock-step, counting
         * scoped PCI devices that do not respond to config probes.
         */
        for ( p.raw = dev_scope_start; i < dmaru->scope.devices_cnt;
              i++, p.raw += p.scope->length )
        {
            if ( p.scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC ||
                 p.scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET )
                continue;

            b = PCI_BUS(dmaru->scope.devices[i]);
            d = PCI_SLOT(dmaru->scope.devices[i]);
            f = PCI_FUNC(dmaru->scope.devices[i]);

            if ( !pci_device_detect(drhd->segment, b, d, f) )
            {
                printk(XENLOG_WARNING VTDPREFIX
                       " Non-existent device (%04x:%02x:%02x.%u) in this DRHD's scope!\n",
                       drhd->segment, b, d, f);
                invalid_cnt++;
            }
        }

        if ( invalid_cnt )
        {
            if ( iommu_workaround_bios_bug &&
                 invalid_cnt == dmaru->scope.devices_cnt )
            {
                /* Entirely bogus scope: drop the unit, but not an error. */
                printk(XENLOG_WARNING VTDPREFIX
                       " Workaround BIOS bug: ignoring DRHD (no devices in its scope are PCI discoverable)\n");

                scope_devices_free(&dmaru->scope);
                iommu_free(dmaru);
                xfree(dmaru);
            }
            else
            {
                printk(XENLOG_WARNING VTDPREFIX
                       " DRHD is invalid (some devices in its scope are not PCI discoverable)\n");
                printk(XENLOG_WARNING VTDPREFIX
                       " Try \"iommu=force\" or \"iommu=workaround_bios_bug\" if you really want VT-d\n");
                ret = -EINVAL;
            }
        }
        else
            acpi_register_drhd_unit(dmaru);
    }

out:
    /* Error path owns the unit: free everything allocated above. */
    if ( ret )
    {
        scope_devices_free(&dmaru->scope);
        iommu_free(dmaru);
        xfree(dmaru);
    }

    return ret;
}
581 | | |
/*
 * Validate and register one RMRR unit.
 *
 * Return values (callers rely on the sign):
 *   0       - unit validated and registered (ownership transferred);
 *   > 0 (1) - no scoped device is PCI-discoverable: unit freed and ignored;
 *   -EFAULT - base > end: unit freed, treated as an error.
 * In both non-zero cases @rmrru has been freed on return.
 */
static int register_one_rmrr(struct acpi_rmrr_unit *rmrru)
{
    bool ignore = false;
    unsigned int i = 0;
    int ret = 0;

    /* Skip checking if segment is not accessible yet. */
    if ( !pci_known_segment(rmrru->segment) )
        i = UINT_MAX;

    /*
     * Ignore the unit only if ALL scoped devices are undiscoverable:
     * the loop breaks (ignore = false) at the first device that exists.
     */
    for ( ; i < rmrru->scope.devices_cnt; i++ )
    {
        u8 b = PCI_BUS(rmrru->scope.devices[i]);
        u8 d = PCI_SLOT(rmrru->scope.devices[i]);
        u8 f = PCI_FUNC(rmrru->scope.devices[i]);

        if ( pci_device_detect(rmrru->segment, b, d, f) == 0 )
        {
            dprintk(XENLOG_WARNING VTDPREFIX,
                    " Non-existent device (%04x:%02x:%02x.%u) is reported"
                    " in RMRR (%"PRIx64", %"PRIx64")'s scope!\n",
                    rmrru->segment, b, d, f,
                    rmrru->base_address, rmrru->end_address);
            ignore = true;
        }
        else
        {
            ignore = false;
            break;
        }
    }

    if ( ignore )
    {
        dprintk(XENLOG_WARNING VTDPREFIX,
                " Ignore the RMRR (%"PRIx64", %"PRIx64") due to "
                "devices under its scope are not PCI discoverable!\n",
                rmrru->base_address, rmrru->end_address);
        scope_devices_free(&rmrru->scope);
        xfree(rmrru);
        return 1;
    }
    else if ( rmrru->base_address > rmrru->end_address )
    {
        dprintk(XENLOG_WARNING VTDPREFIX,
                " The RMRR (%"PRIx64", %"PRIx64") is incorrect!\n",
                rmrru->base_address, rmrru->end_address);
        scope_devices_free(&rmrru->scope);
        xfree(rmrru);
        ret = -EFAULT;
    }
    else
    {
        if ( iommu_verbose )
            dprintk(VTDPREFIX,
                    " RMRR region: base_addr %"PRIx64" end_addr %"PRIx64"\n",
                    rmrru->base_address, rmrru->end_address);
        acpi_register_rmrr_unit(rmrru);
    }

    return ret;
}
644 | | |
/*
 * Parse one RMRR (reserved memory region) sub-table: reject regions that
 * overlap an already-registered RMRR, warn if the range is not marked
 * reserved in the e820 map, parse the device scope and hand the unit to
 * register_one_rmrr().  Returns 0 on success (including the "no scoped
 * device discoverable" case, deliberately not fatal) or -errno.
 */
static int __init
acpi_parse_one_rmrr(struct acpi_dmar_header *header)
{
    struct acpi_dmar_reserved_memory *rmrr =
        container_of(header, struct acpi_dmar_reserved_memory, header);
    struct acpi_rmrr_unit *rmrru;
    void *dev_scope_start, *dev_scope_end;
    u64 base_addr = rmrr->base_address, end_addr = rmrr->end_address;
    int ret;

    if ( (ret = acpi_dmar_check_length(header, sizeof(*rmrr))) != 0 )
        return ret;

    /* Inclusive-interval overlap check against every registered RMRR. */
    list_for_each_entry(rmrru, &acpi_rmrr_units, list)
        if ( base_addr <= rmrru->end_address && rmrru->base_address <= end_addr )
        {
            printk(XENLOG_ERR VTDPREFIX
                   "Overlapping RMRRs [%"PRIx64",%"PRIx64"] and [%"PRIx64",%"PRIx64"]\n",
                   rmrru->base_address, rmrru->end_address,
                   base_addr, end_addr);
            return -EEXIST;
        }

    /* This check is here simply to detect when RMRR values are
     * not properly represented in the system memory map and
     * inform the user
     */
    if ( (!page_is_ram_type(paddr_to_pfn(base_addr), RAM_TYPE_RESERVED)) ||
         (!page_is_ram_type(paddr_to_pfn(end_addr), RAM_TYPE_RESERVED)) )
    {
        printk(XENLOG_WARNING VTDPREFIX
               " RMRR address range %"PRIx64"..%"PRIx64" not in reserved memory;"
               " need \"iommu_inclusive_mapping=1\"?\n",
               base_addr, end_addr);
    }

    rmrru = xzalloc(struct acpi_rmrr_unit);
    if ( !rmrru )
        return -ENOMEM;

    rmrru->base_address = base_addr;
    rmrru->end_address = end_addr;
    rmrru->segment = rmrr->segment;

    /* The device-scope entries follow the fixed-size RMRR structure. */
    dev_scope_start = (void *)(rmrr + 1);
    dev_scope_end = ((void *)rmrr) + header->length;
    ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
                               &rmrru->scope, RMRR_TYPE, rmrr->segment);

    if ( !ret && (rmrru->scope.devices_cnt != 0) )
    {
        ret = register_one_rmrr(rmrru);
        /*
         * register_one_rmrr() returns greater than 0 when a specified
         * PCIe device cannot be detected. To prevent VT-d from being
         * disabled in such cases, reset the return value to 0 here.
         */
        if ( ret > 0 )
            ret = 0;

    }
    else
        xfree(rmrru);

    return ret;
}
711 | | |
/*
 * Parse one ATSR (root-port ATS capability) sub-table: allocate an
 * acpi_atsr_unit, parse its device scope (unless flagged ALL_PORTS) and
 * register it.  Only one ALL_PORTS unit is allowed on segment 0.
 * On error the unit is freed here; on success ownership passes to the
 * global list.  Returns 0 or -errno.
 */
static int __init
acpi_parse_one_atsr(struct acpi_dmar_header *header)
{
    struct acpi_dmar_atsr *atsr =
        container_of(header, struct acpi_dmar_atsr, header);
    struct acpi_atsr_unit *atsru;
    int ret;
    static int all_ports;
    void *dev_scope_start, *dev_scope_end;

    if ( (ret = acpi_dmar_check_length(header, sizeof(*atsr))) != 0 )
        return ret;

    atsru = xzalloc(struct acpi_atsr_unit);
    if ( !atsru )
        return -ENOMEM;

    atsru->segment = atsr->segment;
    atsru->all_ports = atsr->flags & ACPI_DMAR_ALL_PORTS;
    if ( iommu_verbose )
        printk(VTDPREFIX " atsru->all_ports: %x\n", atsru->all_ports);
    if ( !atsru->all_ports )
    {
        /* The device-scope entries follow the fixed-size ATSR structure. */
        dev_scope_start = (void *)(atsr + 1);
        dev_scope_end = ((void *)atsr) + header->length;
        ret = acpi_parse_dev_scope(dev_scope_start, dev_scope_end,
                                   &atsru->scope, ATSR_TYPE, atsr->segment);
    }
    else
    {
        if ( iommu_verbose )
            printk(VTDPREFIX " flags: ALL_PORTS\n");
        /* Only allow one ALL_PORTS */
        if ( atsr->segment == 0 && all_ports )
        {
            printk(XENLOG_WARNING VTDPREFIX
                   "Only one ALL_PORTS device scope is allowed\n");
            ret = -EINVAL;
        }
        if ( atsr->segment == 0 )
            all_ports = 1;
    }

    if ( ret )
    {
        scope_devices_free(&atsru->scope);
        xfree(atsru);
    }
    else
        acpi_register_atsr_unit(atsru);
    return ret;
}
764 | | |
765 | | static int __init |
766 | | acpi_parse_one_rhsa(struct acpi_dmar_header *header) |
767 | 1 | { |
768 | 1 | struct acpi_dmar_rhsa *rhsa = |
769 | 1 | container_of(header, struct acpi_dmar_rhsa, header); |
770 | 1 | struct acpi_rhsa_unit *rhsau; |
771 | 1 | int ret; |
772 | 1 | |
773 | 1 | if ( (ret = acpi_dmar_check_length(header, sizeof(*rhsa))) != 0 ) |
774 | 0 | return ret; |
775 | 1 | |
776 | 1 | rhsau = xzalloc(struct acpi_rhsa_unit); |
777 | 1 | if ( !rhsau ) |
778 | 0 | return -ENOMEM; |
779 | 1 | |
780 | 1 | rhsau->address = rhsa->base_address; |
781 | 1 | rhsau->proximity_domain = rhsa->proximity_domain; |
782 | 1 | list_add_tail(&rhsau->list, &acpi_rhsa_units); |
783 | 1 | if ( iommu_verbose ) |
784 | 1 | printk(VTDPREFIX |
785 | 1 | " rhsau->address: %"PRIx64" rhsau->proximity_domain: %"PRIx32"\n", |
786 | 1 | rhsau->address, rhsau->proximity_domain); |
787 | 1 | |
788 | 1 | return ret; |
789 | 1 | } |
790 | | |
/*
 * Top-level DMAR table parser (acpi_table_parse callback): validate the
 * table, then dispatch each sub-table to its type-specific parser.  On
 * any sub-table error all units registered so far are torn down so VT-d
 * stays fully disabled.  The table signature is always zapped afterwards
 * so dom0 cannot drive the VT-d hardware itself.
 */
static int __init acpi_parse_dmar(struct acpi_table_header *table)
{
    struct acpi_table_dmar *dmar;
    struct acpi_dmar_header *entry_header;
    u8 dmar_host_address_width;
    int ret = 0;

    dmar = (struct acpi_table_dmar *)table;
    dmar_flags = dmar->flags;

    if ( !iommu_enable && !iommu_intremap )
    {
        ret = -EINVAL;
        goto out;
    }

    /* dmar->width encodes host address width minus one; 0 is bogus. */
    if ( !dmar->width )
    {
        printk(XENLOG_WARNING VTDPREFIX "Zero: Invalid DMAR width\n");
        ret = -EINVAL;
        goto out;
    }

    dmar_host_address_width = dmar->width + 1;
    if ( iommu_verbose )
        printk(VTDPREFIX "Host address width %d\n", dmar_host_address_width);

    /* Sub-tables are packed back to back after the fixed header. */
    entry_header = (void *)(dmar + 1);
    while ( ((unsigned long)entry_header) <
            (((unsigned long)dmar) + table->length) )
    {
        /* Length check also guarantees forward progress below. */
        ret = acpi_dmar_check_length(entry_header, sizeof(*entry_header));
        if ( ret )
            break;

        switch ( entry_header->type )
        {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_DRHD:\n");
            ret = acpi_parse_one_drhd(entry_header);
            break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_RMRR:\n");
            ret = acpi_parse_one_rmrr(entry_header);
            break;
        case ACPI_DMAR_TYPE_ATSR:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_ATSR:\n");
            ret = acpi_parse_one_atsr(entry_header);
            break;
        case ACPI_DMAR_HARDWARE_AFFINITY:
            if ( iommu_verbose )
                printk(VTDPREFIX "found ACPI_DMAR_RHSA:\n");
            ret = acpi_parse_one_rhsa(entry_header);
            break;
        default:
            dprintk(XENLOG_WARNING VTDPREFIX,
                    "Ignore unknown DMAR structure type (%#x)\n",
                    entry_header->type);
            break;
        }
        if ( ret )
            break;

        entry_header = ((void *)entry_header + entry_header->length);
    }

    if ( ret )
    {
        printk(XENLOG_WARNING
               "Failed to parse ACPI DMAR.  Disabling VT-d.\n");
        disable_all_dmar_units();
    }

out:
    /* Zap ACPI DMAR signature to prevent dom0 using vt-d HW. */
    acpi_dmar_zap();
    return ret;
}
872 | | |
/* Limits for command-line ("rmrr=") specified RMRR regions. */
#define MAX_USER_RMRR_PAGES 16  /* max pages per user-specified region */
#define MAX_USER_RMRR 10        /* max number of user-specified regions */

/* RMRR units derived from command line rmrr option. */
#define MAX_USER_RMRR_DEV 20    /* max devices per user-specified region */
struct user_rmrr {
    struct list_head list;
    /* Inclusive page-frame range of the reserved region. */
    unsigned long base_pfn, end_pfn;
    /* Number of valid entries in sbdf[]. */
    unsigned int dev_count;
    /* Segment/bus/device/function of each scoped device. */
    u32 sbdf[MAX_USER_RMRR_DEV];
};

static unsigned int __initdata nr_rmrr;
static struct user_rmrr __initdata user_rmrrs[MAX_USER_RMRR];

/* Macro for RMRR inclusive range formatting. */
#define ERMRRU_FMT "[%lx-%lx]"
#define ERMRRU_ARG(eru) eru.base_pfn, eru.end_pfn
891 | | |
892 | | static int __init add_user_rmrr(void) |
893 | 1 | { |
894 | 1 | struct acpi_rmrr_unit *rmrr, *rmrru; |
895 | 1 | unsigned int idx, seg, i; |
896 | 1 | unsigned long base, end; |
897 | 1 | bool overlap; |
898 | 1 | |
899 | 1 | for ( i = 0; i < nr_rmrr; i++ ) |
900 | 0 | { |
901 | 0 | base = user_rmrrs[i].base_pfn; |
902 | 0 | end = user_rmrrs[i].end_pfn; |
903 | 0 |
|
904 | 0 | if ( base > end ) |
905 | 0 | { |
906 | 0 | printk(XENLOG_ERR VTDPREFIX |
907 | 0 | "Invalid RMRR Range "ERMRRU_FMT"\n", |
908 | 0 | ERMRRU_ARG(user_rmrrs[i])); |
909 | 0 | continue; |
910 | 0 | } |
911 | 0 |
|
912 | 0 | if ( (end - base) >= MAX_USER_RMRR_PAGES ) |
913 | 0 | { |
914 | 0 | printk(XENLOG_ERR VTDPREFIX |
915 | 0 | "RMRR range "ERMRRU_FMT" exceeds "\ |
916 | 0 | __stringify(MAX_USER_RMRR_PAGES)" pages\n", |
917 | 0 | ERMRRU_ARG(user_rmrrs[i])); |
918 | 0 | continue; |
919 | 0 | } |
920 | 0 |
|
921 | 0 | overlap = false; |
922 | 0 | list_for_each_entry(rmrru, &acpi_rmrr_units, list) |
923 | 0 | { |
924 | 0 | if ( pfn_to_paddr(base) <= rmrru->end_address && |
925 | 0 | rmrru->base_address <= pfn_to_paddr(end) ) |
926 | 0 | { |
927 | 0 | printk(XENLOG_ERR VTDPREFIX |
928 | 0 | "Overlapping RMRRs: "ERMRRU_FMT" and [%lx-%lx]\n", |
929 | 0 | ERMRRU_ARG(user_rmrrs[i]), |
930 | 0 | paddr_to_pfn(rmrru->base_address), |
931 | 0 | paddr_to_pfn(rmrru->end_address)); |
932 | 0 | overlap = true; |
933 | 0 | break; |
934 | 0 | } |
935 | 0 | } |
936 | 0 | /* Don't add overlapping RMRR. */ |
937 | 0 | if ( overlap ) |
938 | 0 | continue; |
939 | 0 |
|
940 | 0 | do |
941 | 0 | { |
942 | 0 | if ( !mfn_valid(_mfn(base)) ) |
943 | 0 | { |
944 | 0 | printk(XENLOG_ERR VTDPREFIX |
945 | 0 | "Invalid pfn in RMRR range "ERMRRU_FMT"\n", |
946 | 0 | ERMRRU_ARG(user_rmrrs[i])); |
947 | 0 | break; |
948 | 0 | } |
949 | 0 | } while ( base++ < end ); |
950 | 0 |
|
951 | 0 | /* Invalid pfn in range as the loop ended before end_pfn was reached. */ |
952 | 0 | if ( base <= end ) |
953 | 0 | continue; |
954 | 0 |
|
955 | 0 | rmrr = xzalloc(struct acpi_rmrr_unit); |
956 | 0 | if ( !rmrr ) |
957 | 0 | return -ENOMEM; |
958 | 0 |
|
959 | 0 | rmrr->scope.devices = xmalloc_array(u16, user_rmrrs[i].dev_count); |
960 | 0 | if ( !rmrr->scope.devices ) |
961 | 0 | { |
962 | 0 | xfree(rmrr); |
963 | 0 | return -ENOMEM; |
964 | 0 | } |
965 | 0 |
|
966 | 0 | seg = 0; |
967 | 0 | for ( idx = 0; idx < user_rmrrs[i].dev_count; idx++ ) |
968 | 0 | { |
969 | 0 | rmrr->scope.devices[idx] = user_rmrrs[i].sbdf[idx]; |
970 | 0 | seg |= PCI_SEG(user_rmrrs[i].sbdf[idx]); |
971 | 0 | } |
972 | 0 | if ( seg != PCI_SEG(user_rmrrs[i].sbdf[0]) ) |
973 | 0 | { |
974 | 0 | printk(XENLOG_ERR VTDPREFIX |
975 | 0 | "Segments are not equal for RMRR range "ERMRRU_FMT"\n", |
976 | 0 | ERMRRU_ARG(user_rmrrs[i])); |
977 | 0 | scope_devices_free(&rmrr->scope); |
978 | 0 | xfree(rmrr); |
979 | 0 | continue; |
980 | 0 | } |
981 | 0 |
|
982 | 0 | rmrr->segment = seg; |
983 | 0 | rmrr->base_address = pfn_to_paddr(user_rmrrs[i].base_pfn); |
984 | 0 | /* Align the end_address to the end of the page */ |
985 | 0 | rmrr->end_address = pfn_to_paddr(user_rmrrs[i].end_pfn) | ~PAGE_MASK; |
986 | 0 | rmrr->scope.devices_cnt = user_rmrrs[i].dev_count; |
987 | 0 |
|
988 | 0 | if ( register_one_rmrr(rmrr) ) |
989 | 0 | printk(XENLOG_ERR VTDPREFIX |
990 | 0 | "Could not register RMMR range "ERMRRU_FMT"\n", |
991 | 0 | ERMRRU_ARG(user_rmrrs[i])); |
992 | 0 | } |
993 | 1 | |
994 | 1 | return 0; |
995 | 1 | } |
996 | | |
#include <asm/tboot.h>
/*
 * ACPI tables may not be DMA protected by tboot, so use the DMAR copy
 * SINIT saved in SinitMleData in the TXT heap (which is DMA protected).
 * The tboot wrapper falls through to the given parser when not in a
 * measured launch - NOTE(review): behavior of tboot_parse_dmar_table()
 * not visible here, confirm against asm/tboot.h.
 */
#define parse_dmar_table(h) tboot_parse_dmar_table(h)
1001 | | |
1002 | | int __init acpi_dmar_init(void) |
1003 | 1 | { |
1004 | 1 | acpi_physical_address dmar_addr; |
1005 | 1 | acpi_native_uint dmar_len; |
1006 | 1 | int ret; |
1007 | 1 | |
1008 | 1 | if ( ACPI_SUCCESS(acpi_get_table_phys(ACPI_SIG_DMAR, 0, |
1009 | 1 | &dmar_addr, &dmar_len)) ) |
1010 | 1 | { |
1011 | 1 | map_pages_to_xen((unsigned long)__va(dmar_addr), PFN_DOWN(dmar_addr), |
1012 | 1 | PFN_UP(dmar_addr + dmar_len) - PFN_DOWN(dmar_addr), |
1013 | 1 | PAGE_HYPERVISOR); |
1014 | 1 | dmar_table = __va(dmar_addr); |
1015 | 1 | } |
1016 | 1 | |
1017 | 1 | ret = parse_dmar_table(acpi_parse_dmar); |
1018 | 1 | |
1019 | 1 | if ( !ret ) |
1020 | 1 | return add_user_rmrr(); |
1021 | 1 | |
1022 | 0 | return ret; |
1023 | 1 | } |
1024 | | |
1025 | | void acpi_dmar_reinstate(void) |
1026 | 1 | { |
1027 | 1 | uint32_t sig = 0x52414d44; /* "DMAR" */ |
1028 | 1 | |
1029 | 1 | if ( dmar_table ) |
1030 | 1 | write_atomic((uint32_t*)&dmar_table->signature[0], sig); |
1031 | 1 | } |
1032 | | |
1033 | | void acpi_dmar_zap(void) |
1034 | 1 | { |
1035 | 1 | uint32_t sig = 0x44414d52; /* "RMAD" - doesn't alter table checksum */ |
1036 | 1 | |
1037 | 1 | if ( dmar_table ) |
1038 | 1 | write_atomic((uint32_t*)&dmar_table->signature[0], sig); |
1039 | 1 | } |
1040 | | |
1041 | | bool_t platform_supports_intremap(void) |
1042 | 2 | { |
1043 | 2 | const unsigned int mask = ACPI_DMAR_INTR_REMAP; |
1044 | 2 | |
1045 | 2 | return (dmar_flags & mask) == ACPI_DMAR_INTR_REMAP; |
1046 | 2 | } |
1047 | | |
1048 | | bool_t __init platform_supports_x2apic(void) |
1049 | 1 | { |
1050 | 1 | const unsigned int mask = ACPI_DMAR_INTR_REMAP | ACPI_DMAR_X2APIC_OPT_OUT; |
1051 | 1 | |
1052 | 1 | return cpu_has_x2apic && ((dmar_flags & mask) == ACPI_DMAR_INTR_REMAP); |
1053 | 1 | } |
1054 | | |
1055 | | int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt) |
1056 | 0 | { |
1057 | 0 | struct acpi_rmrr_unit *rmrr, *rmrr_cur = NULL; |
1058 | 0 | unsigned int i; |
1059 | 0 | u16 bdf; |
1060 | 0 |
|
1061 | 0 | for_each_rmrr_device ( rmrr, bdf, i ) |
1062 | 0 | { |
1063 | 0 | int rc; |
1064 | 0 |
|
1065 | 0 | if ( rmrr == rmrr_cur ) |
1066 | 0 | continue; |
1067 | 0 |
|
1068 | 0 | rc = func(PFN_DOWN(rmrr->base_address), |
1069 | 0 | PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address), |
1070 | 0 | PCI_SBDF2(rmrr->segment, bdf), ctxt); |
1071 | 0 |
|
1072 | 0 | if ( unlikely(rc < 0) ) |
1073 | 0 | return rc; |
1074 | 0 |
|
1075 | 0 | if ( rc ) |
1076 | 0 | rmrr_cur = rmrr; |
1077 | 0 | } |
1078 | 0 |
|
1079 | 0 | return 0; |
1080 | 0 | } |
1081 | | |
1082 | | /* |
1083 | | * Parse rmrr Xen command line options and add parsed devices and regions into |
1084 | | * acpi_rmrr_unit list to mapped as RMRRs parsed from ACPI. |
1085 | | * Format: |
1086 | | * rmrr=start<-end>=[s1]bdf1[,[s1]bdf2[,...]];start<-end>=[s2]bdf1[,[s2]bdf2[,...]] |
1087 | | * If the segment of the first device is not specified, |
1088 | | * segment zero will be used. |
1089 | | * If other segments are not specified, first device segment will be used. |
1090 | | * If a segment is specified for other than the first device, and it does not |
1091 | | * match the one specified for the first one, an error will be reported. |
1092 | | */ |
1093 | | static int __init parse_rmrr_param(const char *str) |
1094 | 0 | { |
1095 | 0 | const char *s = str, *cur, *stmp; |
1096 | 0 | unsigned int seg, bus, dev, func, dev_count; |
1097 | 0 | unsigned long start, end; |
1098 | 0 |
|
1099 | 0 | do { |
1100 | 0 | if ( nr_rmrr >= MAX_USER_RMRR ) |
1101 | 0 | return -E2BIG; |
1102 | 0 |
|
1103 | 0 | start = simple_strtoul(cur = s, &s, 16); |
1104 | 0 | if ( cur == s ) |
1105 | 0 | return -EINVAL; |
1106 | 0 |
|
1107 | 0 | if ( *s == '-' ) |
1108 | 0 | { |
1109 | 0 | end = simple_strtoul(cur = s + 1, &s, 16); |
1110 | 0 | if ( cur == s ) |
1111 | 0 | return -EINVAL; |
1112 | 0 | } |
1113 | 0 | else |
1114 | 0 | end = start; |
1115 | 0 |
|
1116 | 0 | user_rmrrs[nr_rmrr].base_pfn = start; |
1117 | 0 | user_rmrrs[nr_rmrr].end_pfn = end; |
1118 | 0 |
|
1119 | 0 | if ( *s != '=' ) |
1120 | 0 | continue; |
1121 | 0 |
|
1122 | 0 | do { |
1123 | 0 | bool def_seg = false; |
1124 | 0 |
|
1125 | 0 | stmp = parse_pci_seg(s + 1, &seg, &bus, &dev, &func, &def_seg); |
1126 | 0 | if ( !stmp ) |
1127 | 0 | return -EINVAL; |
1128 | 0 |
|
1129 | 0 | /* |
1130 | 0 | * Not specified. |
1131 | 0 | * Segment will be replaced with one from first device. |
1132 | 0 | */ |
1133 | 0 | if ( user_rmrrs[nr_rmrr].dev_count && def_seg ) |
1134 | 0 | seg = PCI_SEG(user_rmrrs[nr_rmrr].sbdf[0]); |
1135 | 0 |
|
1136 | 0 | /* Keep sbdf's even if they differ and later report an error. */ |
1137 | 0 | dev_count = user_rmrrs[nr_rmrr].dev_count; |
1138 | 0 | user_rmrrs[nr_rmrr].sbdf[dev_count] = PCI_SBDF(seg, bus, dev, func); |
1139 | 0 |
|
1140 | 0 | user_rmrrs[nr_rmrr].dev_count++; |
1141 | 0 | s = stmp; |
1142 | 0 | } while ( *s == ',' && |
1143 | 0 | user_rmrrs[nr_rmrr].dev_count < MAX_USER_RMRR_DEV ); |
1144 | 0 |
|
1145 | 0 | if ( user_rmrrs[nr_rmrr].dev_count ) |
1146 | 0 | nr_rmrr++; |
1147 | 0 |
|
1148 | 0 | } while ( *s++ == ';' ); |
1149 | 0 |
|
1150 | 0 | return s[-1] ? -EINVAL : 0; |
1151 | 0 | } |
1152 | | custom_param("rmrr", parse_rmrr_param); |