debuggers.hg
changeset 16425:03d6d0f96e12
vt-d: Clean up VT-d code
Signed-off-by: Weidong Han <weidong.han@intel.com>
author | Keir Fraser <keir.fraser@citrix.com> |
date | Fri Nov 16 17:01:50 2007 +0000 (2007-11-16) |
parents | 614dad9f8fdc |
children | e948f402c356 |
files | xen/arch/x86/hvm/vmx/vtd/dmar.c xen/arch/x86/hvm/vmx/vtd/intel-iommu.c xen/arch/x86/hvm/vmx/vtd/pci-direct.h |
line diff
1.1 --- a/xen/arch/x86/hvm/vmx/vtd/dmar.c Fri Nov 16 16:53:43 2007 +0000 1.2 +++ b/xen/arch/x86/hvm/vmx/vtd/dmar.c Fri Nov 16 17:01:50 2007 +0000 1.3 @@ -38,7 +38,8 @@ boolean_param("vtd", vtd_enabled); 1.4 #define PREFIX VTDPREFIX "ACPI DMAR:" 1.5 #define DEBUG 1.6 1.7 -#define MIN_SCOPE_LEN (sizeof(struct acpi_pci_path) + sizeof(struct acpi_dev_scope)) 1.8 +#define MIN_SCOPE_LEN (sizeof(struct acpi_pci_path) + \ 1.9 + sizeof(struct acpi_dev_scope)) 1.10 1.11 LIST_HEAD(acpi_drhd_units); 1.12 LIST_HEAD(acpi_rmrr_units); 1.13 @@ -53,7 +54,7 @@ static int __init acpi_register_drhd_uni 1.14 * add INCLUDE_ALL at the tail, so scan the list will find it at 1.15 * the very end. 1.16 */ 1.17 - if (drhd->include_all) 1.18 + if ( drhd->include_all ) 1.19 list_add_tail(&drhd->list, &acpi_drhd_units); 1.20 else 1.21 list_add(&drhd->list, &acpi_drhd_units); 1.22 @@ -67,13 +68,14 @@ static int __init acpi_register_rmrr_uni 1.23 } 1.24 1.25 static int acpi_pci_device_match(struct pci_dev *devices, int cnt, 1.26 - struct pci_dev *dev) 1.27 + struct pci_dev *dev) 1.28 { 1.29 int i; 1.30 1.31 - for (i = 0; i < cnt; i++) { 1.32 - if ((dev->bus == devices->bus) && 1.33 - (dev->devfn == devices->devfn)) 1.34 + for ( i = 0; i < cnt; i++ ) 1.35 + { 1.36 + if ( (dev->bus == devices->bus) && 1.37 + (dev->devfn == devices->devfn) ) 1.38 return 1; 1.39 devices++; 1.40 } 1.41 @@ -86,7 +88,7 @@ static int __init acpi_register_atsr_uni 1.42 * add ALL_PORTS at the tail, so scan the list will find it at 1.43 * the very end. 1.44 */ 1.45 - if (atsr->all_ports) 1.46 + if ( atsr->all_ports ) 1.47 list_add_tail(&atsr->list, &acpi_atsr_units); 1.48 else 1.49 list_add(&atsr->list, &acpi_atsr_units); 1.50 @@ -99,11 +101,12 @@ struct acpi_drhd_unit * acpi_find_matche 1.51 struct acpi_drhd_unit *include_all_drhd; 1.52 1.53 include_all_drhd = NULL; 1.54 - list_for_each_entry(drhd, &acpi_drhd_units, list) { 1.55 - if (drhd->include_all) 1.56 + list_for_each_entry ( drhd, &acpi_drhd_units, list ) 1.57 + { 1.58 + if ( drhd->include_all ) 1.59 include_all_drhd = drhd; 1.60 - if (acpi_pci_device_match(drhd->devices, 1.61 - drhd->devices_cnt, dev)) 1.62 + if ( acpi_pci_device_match(drhd->devices, 1.63 + drhd->devices_cnt, dev) ) 1.64 { 1.65 gdprintk(XENLOG_INFO VTDPREFIX, 1.66 "acpi_find_matched_drhd_unit: drhd->address = %lx\n", 1.67 @@ -112,28 +115,29 @@ struct acpi_drhd_unit * acpi_find_matche 1.68 } 1.69 } 1.70 1.71 - if (include_all_drhd) { 1.72 + if ( include_all_drhd ) 1.73 + { 1.74 gdprintk(XENLOG_INFO VTDPREFIX, 1.75 "acpi_find_matched_drhd_unit:include_all_drhd->addr = %lx\n", 1.76 include_all_drhd->address); 1.77 return include_all_drhd;; 1.78 } 1.79 1.80 - return(NULL); 1.81 + return NULL; 1.82 } 1.83 1.84 struct acpi_rmrr_unit * acpi_find_matched_rmrr_unit(struct pci_dev *dev) 1.85 { 1.86 struct acpi_rmrr_unit *rmrr; 1.87 1.88 - list_for_each_entry(rmrr, &acpi_rmrr_units, list) { 1.89 - if (acpi_pci_device_match(rmrr->devices, 1.90 - rmrr->devices_cnt, dev)) 1.91 - goto out; 1.92 + list_for_each_entry ( rmrr, &acpi_rmrr_units, list ) 1.93 + { 1.94 + if ( acpi_pci_device_match(rmrr->devices, 1.95 + rmrr->devices_cnt, dev) ) 1.96 + return rmrr; 1.97 } 1.98 - rmrr = NULL; 1.99 -out: 1.100 - return rmrr; 1.101 + 1.102 + return NULL; 1.103 } 1.104 1.105 struct acpi_atsr_unit * acpi_find_matched_atsr_unit(struct pci_dev *dev) 1.106 @@ -142,142 +146,186 @@ struct acpi_atsr_unit * acpi_find_matche 1.107 struct acpi_atsr_unit *all_ports_atsru; 1.108 1.109 all_ports_atsru = NULL; 1.110 - list_for_each_entry(atsru, &acpi_atsr_units, 
list) { 1.111 - if (atsru->all_ports) 1.112 + list_for_each_entry ( atsru, &acpi_atsr_units, list ) 1.113 + { 1.114 + if ( atsru->all_ports ) 1.115 all_ports_atsru = atsru; 1.116 - if (acpi_pci_device_match(atsru->devices, atsru->devices_cnt, dev)) 1.117 + if ( acpi_pci_device_match(atsru->devices, 1.118 + atsru->devices_cnt, dev) ) 1.119 return atsru; 1.120 } 1.121 - if (all_ports_atsru) { 1.122 - gdprintk(XENLOG_INFO VTDPREFIX, 1.123 + 1.124 + if ( all_ports_atsru ) 1.125 + { 1.126 + gdprintk(XENLOG_INFO VTDPREFIX, 1.127 "acpi_find_matched_atsr_unit: all_ports_atsru\n"); 1.128 return all_ports_atsru;; 1.129 } 1.130 - return(NULL); 1.131 + 1.132 + return NULL; 1.133 +} 1.134 + 1.135 +static int scope_device_count(void *start, void *end) 1.136 +{ 1.137 + struct acpi_dev_scope *scope; 1.138 + u8 bus, sub_bus, sec_bus; 1.139 + struct acpi_pci_path *path; 1.140 + int depth, count = 0; 1.141 + u8 dev, func; 1.142 + u32 l; 1.143 + 1.144 + while ( start < end ) 1.145 + { 1.146 + scope = start; 1.147 + if ( scope->length < MIN_SCOPE_LEN ) 1.148 + { 1.149 + printk(KERN_WARNING PREFIX "Invalid device scope\n"); 1.150 + return -EINVAL; 1.151 + } 1.152 + 1.153 + path = (struct acpi_pci_path *)(scope + 1); 1.154 + bus = scope->start_bus; 1.155 + depth = (scope->length - sizeof(struct acpi_dev_scope)) 1.156 + / sizeof(struct acpi_pci_path); 1.157 + while ( --depth ) 1.158 + { 1.159 + bus = read_pci_config_byte( 1.160 + bus, path->dev, path->fn, PCI_SECONDARY_BUS); 1.161 + path++; 1.162 + } 1.163 + 1.164 + if ( scope->dev_type == ACPI_DEV_ENDPOINT ) 1.165 + { 1.166 + printk(KERN_INFO PREFIX 1.167 + "found endpoint: bdf = %x:%x:%x\n", 1.168 + bus, path->dev, path->fn); 1.169 + count++; 1.170 + } 1.171 + else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE ) 1.172 + { 1.173 + printk(KERN_INFO PREFIX 1.174 + "found bridge: bdf = %x:%x:%x\n", 1.175 + bus, path->dev, path->fn); 1.176 + sec_bus = read_pci_config_byte( 1.177 + bus, path->dev, path->fn, PCI_SECONDARY_BUS); 1.178 + sub_bus = read_pci_config_byte( 1.179 + bus, path->dev, path->fn, PCI_SUBORDINATE_BUS); 1.180 + 1.181 + while ( sec_bus <= sub_bus ) 1.182 + { 1.183 + for ( dev = 0; dev < 32; dev++ ) 1.184 + { 1.185 + for ( func = 0; func < 8; func++ ) 1.186 + { 1.187 + l = read_pci_config( 1.188 + sec_bus, dev, func, PCI_VENDOR_ID); 1.189 + 1.190 + /* some broken boards return 0 or 1.191 + * ~0 if a slot is empty 1.192 + */ 1.193 + if ( l == 0xffffffff || l == 0x00000000 || 1.194 + l == 0x0000ffff || l == 0xffff0000 ) 1.195 + break; 1.196 + count++; 1.197 + } 1.198 + } 1.199 + sec_bus++; 1.200 + } 1.201 + } 1.202 + else if ( scope->dev_type == ACPI_DEV_IOAPIC ) 1.203 + { 1.204 + printk(KERN_INFO PREFIX 1.205 + "found IOAPIC: bdf = %x:%x:%x\n", 1.206 + bus, path->dev, path->fn); 1.207 + count++; 1.208 + } 1.209 + else 1.210 + { 1.211 + printk(KERN_INFO PREFIX 1.212 + "found MSI HPET: bdf = %x:%x:%x\n", 1.213 + bus, path->dev, path->fn); 1.214 + count++; 1.215 + } 1.216 + 1.217 + start += scope->length; 1.218 + } 1.219 + 1.220 + return count; 1.221 } 1.222 1.223 static int __init acpi_parse_dev_scope(void *start, void *end, int *cnt, 1.224 - struct pci_dev **devices) 1.225 + struct pci_dev **devices) 1.226 { 1.227 struct acpi_dev_scope *scope; 1.228 u8 bus, sub_bus, sec_bus; 1.229 struct acpi_pci_path *path; 1.230 struct acpi_ioapic_unit *acpi_ioapic_unit = NULL; 1.231 - int count, dev_count=0; 1.232 + int depth; 1.233 struct pci_dev *pdev; 1.234 u8 dev, func; 1.235 u32 l; 1.236 - void *tmp; 1.237 1.238 - *cnt = 0; 1.239 - tmp = start; 1.240 - while (start 
< end) { 1.241 - scope = start; 1.242 - if (scope->length < MIN_SCOPE_LEN || 1.243 - (scope->dev_type != ACPI_DEV_ENDPOINT && 1.244 - scope->dev_type != ACPI_DEV_P2PBRIDGE)) { 1.245 - printk(KERN_WARNING PREFIX "Invalid device scope\n"); 1.246 - return -EINVAL; 1.247 - } 1.248 - (*cnt)++; 1.249 - start += scope->length; 1.250 + *cnt = scope_device_count(start, end); 1.251 + if ( *cnt == 0 ) 1.252 + { 1.253 + printk(KERN_INFO PREFIX "acpi_parse_dev_scope: no device\n"); 1.254 + return 0; 1.255 } 1.256 1.257 - start = tmp; 1.258 - while (start < end) { 1.259 - scope = start; 1.260 - path = (struct acpi_pci_path *)(scope + 1); 1.261 - count = (scope->length - sizeof(struct acpi_dev_scope)) 1.262 - /sizeof(struct acpi_pci_path); 1.263 - bus = scope->start_bus; 1.264 - 1.265 - while (--count) { 1.266 - bus = read_pci_config_byte(bus, path->dev, 1.267 - path->fn, PCI_SECONDARY_BUS); 1.268 - path++; 1.269 - } 1.270 - 1.271 - if (scope->dev_type == ACPI_DEV_ENDPOINT) { 1.272 - printk(KERN_WARNING PREFIX 1.273 - "found endpoint: bdf = %x:%x:%x\n", bus, path->dev, path->fn); 1.274 - dev_count++; 1.275 - } else if (scope->dev_type == ACPI_DEV_P2PBRIDGE) { 1.276 - printk(KERN_WARNING PREFIX 1.277 - "found bridge: bdf = %x:%x:%x\n", bus, path->dev, path->fn); 1.278 - 1.279 - sec_bus = read_pci_config_byte(bus, path->dev, 1.280 - path->fn, PCI_SECONDARY_BUS); 1.281 - sub_bus = read_pci_config_byte(bus, path->dev, 1.282 - path->fn, PCI_SUBORDINATE_BUS); 1.283 - while (sec_bus <= sub_bus) { 1.284 - for (dev = 0; dev < 32; dev++) { 1.285 - for (func = 0; func < 8; func++) { 1.286 - l = read_pci_config(sec_bus, dev, func, PCI_VENDOR_ID); 1.287 - 1.288 - /* some broken boards return 0 or ~0 if a slot is empty: */ 1.289 - if (l == 0xffffffff || l == 0x00000000 || 1.290 - l == 0x0000ffff || l == 0xffff0000) 1.291 - break; 1.292 - dev_count++; 1.293 - } 1.294 - } 1.295 - sec_bus++; 1.296 - } 1.297 - } else if (scope->dev_type == ACPI_DEV_IOAPIC) { 1.298 - printk(KERN_WARNING PREFIX 1.299 - "found IOAPIC: bdf = %x:%x:%x\n", bus, path->dev, path->fn); 1.300 - dev_count++; 1.301 - } else { 1.302 - printk(KERN_WARNING PREFIX 1.303 - "found MSI HPET: bdf = %x:%x:%x\n", bus, path->dev, path->fn); 1.304 - dev_count++; 1.305 - } 1.306 - 1.307 - start += scope->length; 1.308 - } 1.309 - 1.310 - *cnt = dev_count; 1.311 *devices = xmalloc_array(struct pci_dev, *cnt); 1.312 - if (!*devices) 1.313 + if ( !*devices ) 1.314 return -ENOMEM; 1.315 memset(*devices, 0, sizeof(struct pci_dev) * (*cnt)); 1.316 1.317 pdev = *devices; 1.318 - start = tmp; 1.319 - while (start < end) { 1.320 + while ( start < end ) 1.321 + { 1.322 scope = start; 1.323 path = (struct acpi_pci_path *)(scope + 1); 1.324 - count = (scope->length - sizeof(struct acpi_dev_scope)) 1.325 - /sizeof(struct acpi_pci_path); 1.326 + depth = (scope->length - sizeof(struct acpi_dev_scope)) 1.327 + / sizeof(struct acpi_pci_path); 1.328 bus = scope->start_bus; 1.329 1.330 - while (--count) { 1.331 - bus = read_pci_config_byte(bus, path->dev, path->fn, PCI_SECONDARY_BUS); 1.332 + while ( --depth ) 1.333 + { 1.334 + bus = read_pci_config_byte( 1.335 + bus, path->dev, path->fn, PCI_SECONDARY_BUS); 1.336 path++; 1.337 } 1.338 1.339 - if (scope->dev_type == ACPI_DEV_ENDPOINT) { 1.340 - printk(KERN_WARNING PREFIX 1.341 - "found endpoint: bdf = %x:%x:%x\n", bus, path->dev, path->fn); 1.342 - 1.343 + if ( scope->dev_type == ACPI_DEV_ENDPOINT ) 1.344 + { 1.345 + printk(KERN_INFO PREFIX 1.346 + "found endpoint: bdf = %x:%x:%x\n", 1.347 + bus, path->dev, path->fn); 1.348 
pdev->bus = bus; 1.349 pdev->devfn = PCI_DEVFN(path->dev, path->fn); 1.350 pdev++; 1.351 - } else if (scope->dev_type == ACPI_DEV_P2PBRIDGE) { 1.352 - printk(KERN_WARNING PREFIX 1.353 - "found bridge: bus = %x dev = %x func = %x\n", bus, path->dev, path->fn); 1.354 - 1.355 - sec_bus = read_pci_config_byte(bus, path->dev, path->fn, PCI_SECONDARY_BUS); 1.356 - sub_bus = read_pci_config_byte(bus, path->dev, path->fn, PCI_SUBORDINATE_BUS); 1.357 + } 1.358 + else if ( scope->dev_type == ACPI_DEV_P2PBRIDGE ) 1.359 + { 1.360 + printk(KERN_INFO PREFIX 1.361 + "found bridge: bus = %x dev = %x func = %x\n", 1.362 + bus, path->dev, path->fn); 1.363 + sec_bus = read_pci_config_byte( 1.364 + bus, path->dev, path->fn, PCI_SECONDARY_BUS); 1.365 + sub_bus = read_pci_config_byte( 1.366 + bus, path->dev, path->fn, PCI_SUBORDINATE_BUS); 1.367 1.368 - while (sec_bus <= sub_bus) { 1.369 - for (dev = 0; dev < 32; dev++) { 1.370 - for (func = 0; func < 8; func++) { 1.371 - l = read_pci_config(sec_bus, dev, func, PCI_VENDOR_ID); 1.372 + while ( sec_bus <= sub_bus ) 1.373 + { 1.374 + for ( dev = 0; dev < 32; dev++ ) 1.375 + { 1.376 + for ( func = 0; func < 8; func++ ) 1.377 + { 1.378 + l = read_pci_config( 1.379 + sec_bus, dev, func, PCI_VENDOR_ID); 1.380 1.381 - /* some broken boards return 0 or ~0 if a slot is empty: */ 1.382 - if (l == 0xffffffff || l == 0x00000000 || 1.383 - l == 0x0000ffff || l == 0xffff0000) 1.384 + /* some broken boards return 0 or 1.385 + * ~0 if a slot is empty 1.386 + */ 1.387 + if ( l == 0xffffffff || l == 0x00000000 || 1.388 + l == 0x0000ffff || l == 0xffff0000 ) 1.389 break; 1.390 1.391 pdev->bus = sec_bus; 1.392 @@ -287,19 +335,25 @@ static int __init acpi_parse_dev_scope(v 1.393 } 1.394 sec_bus++; 1.395 } 1.396 - } else if (scope->dev_type == ACPI_DEV_IOAPIC) { 1.397 + } 1.398 + else if ( scope->dev_type == ACPI_DEV_IOAPIC ) 1.399 + { 1.400 acpi_ioapic_unit = xmalloc(struct acpi_ioapic_unit); 1.401 + if ( !acpi_ioapic_unit ) 1.402 + return -ENOMEM; 1.403 acpi_ioapic_unit->apic_id = scope->enum_id; 1.404 acpi_ioapic_unit->ioapic.bdf.bus = bus; 1.405 acpi_ioapic_unit->ioapic.bdf.dev = path->dev; 1.406 acpi_ioapic_unit->ioapic.bdf.func = path->fn; 1.407 list_add(&acpi_ioapic_unit->list, &acpi_ioapic_units); 1.408 - printk(KERN_WARNING PREFIX 1.409 - "found IOAPIC: bus = %x dev = %x func = %x\n", bus, path->dev, path->fn); 1.410 - } else { 1.411 - printk(KERN_WARNING PREFIX 1.412 - "found MSI HPET: bus = %x dev = %x func = %x\n", bus, path->dev, path->fn); 1.413 + printk(KERN_INFO PREFIX 1.414 + "found IOAPIC: bus = %x dev = %x func = %x\n", 1.415 + bus, path->dev, path->fn); 1.416 } 1.417 + else 1.418 + printk(KERN_INFO PREFIX 1.419 + "found MSI HPET: bus = %x dev = %x func = %x\n", 1.420 + bus, path->dev, path->fn); 1.421 1.422 start += scope->length; 1.423 } 1.424 @@ -316,31 +370,33 @@ acpi_parse_one_drhd(struct acpi_dmar_ent 1.425 static int include_all; 1.426 1.427 dmaru = xmalloc(struct acpi_drhd_unit); 1.428 - if (!dmaru) 1.429 + if ( !dmaru ) 1.430 return -ENOMEM; 1.431 memset(dmaru, 0, sizeof(struct acpi_drhd_unit)); 1.432 1.433 dmaru->address = drhd->address; 1.434 dmaru->include_all = drhd->flags & 1; /* BIT0: INCLUDE_ALL */ 1.435 - printk(KERN_WARNING PREFIX "dmaru->address = %lx\n", dmaru->address); 1.436 + printk(KERN_INFO PREFIX "dmaru->address = %lx\n", dmaru->address); 1.437 1.438 - if (!dmaru->include_all) { 1.439 - ret = acpi_parse_dev_scope((void *)(drhd + 1), 1.440 - ((void *)drhd) + header->length, 1.441 - &dmaru->devices_cnt, &dmaru->devices); 1.442 - } 1.443 - 
else { 1.444 - printk(KERN_WARNING PREFIX "found INCLUDE_ALL\n"); 1.445 + if ( !dmaru->include_all ) 1.446 + ret = acpi_parse_dev_scope( 1.447 + (void *)(drhd + 1), 1.448 + ((void *)drhd) + header->length, 1.449 + &dmaru->devices_cnt, &dmaru->devices); 1.450 + else 1.451 + { 1.452 + printk(KERN_INFO PREFIX "found INCLUDE_ALL\n"); 1.453 /* Only allow one INCLUDE_ALL */ 1.454 - if (include_all) { 1.455 + if ( include_all ) 1.456 + { 1.457 printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL " 1.458 - "device scope is allowed\n"); 1.459 + "device scope is allowed\n"); 1.460 ret = -EINVAL; 1.461 } 1.462 include_all = 1; 1.463 } 1.464 1.465 - if (ret) 1.466 + if ( ret ) 1.467 xfree(dmaru); 1.468 else 1.469 acpi_register_drhd_unit(dmaru); 1.470 @@ -355,23 +411,22 @@ acpi_parse_one_rmrr(struct acpi_dmar_ent 1.471 int ret = 0; 1.472 1.473 rmrru = xmalloc(struct acpi_rmrr_unit); 1.474 - if (!rmrru) 1.475 + if ( !rmrru ) 1.476 return -ENOMEM; 1.477 memset(rmrru, 0, sizeof(struct acpi_rmrr_unit)); 1.478 1.479 -#ifdef VTD_DEBUG 1.480 - gdprintk(XENLOG_INFO VTDPREFIX, 1.481 - "acpi_parse_one_rmrr: base = %lx end = %lx\n", 1.482 - rmrr->base_address, rmrr->end_address); 1.483 -#endif 1.484 - 1.485 rmrru->base_address = rmrr->base_address; 1.486 rmrru->end_address = rmrr->end_address; 1.487 - ret = acpi_parse_dev_scope((void *)(rmrr + 1), 1.488 - ((void*)rmrr) + header->length, 1.489 - &rmrru->devices_cnt, &rmrru->devices); 1.490 + printk(KERN_INFO PREFIX 1.491 + "acpi_parse_one_rmrr: base=%"PRIx64" end=%"PRIx64"\n", 1.492 + rmrr->base_address, rmrr->end_address); 1.493 1.494 - if (ret || (rmrru->devices_cnt == 0)) 1.495 + ret = acpi_parse_dev_scope( 1.496 + (void *)(rmrr + 1), 1.497 + ((void*)rmrr) + header->length, 1.498 + &rmrru->devices_cnt, &rmrru->devices); 1.499 + 1.500 + if ( ret || (rmrru->devices_cnt == 0) ) 1.501 xfree(rmrru); 1.502 else 1.503 acpi_register_rmrr_unit(rmrru); 1.504 @@ -387,90 +442,79 @@ acpi_parse_one_atsr(struct acpi_dmar_ent 1.505 static int all_ports; 1.506 1.507 atsru = xmalloc(struct acpi_atsr_unit); 1.508 - if (!atsru) 1.509 + if ( !atsru ) 1.510 return -ENOMEM; 1.511 memset(atsru, 0, sizeof(struct acpi_atsr_unit)); 1.512 1.513 atsru->all_ports = atsr->flags & 1; /* BIT0: ALL_PORTS */ 1.514 - if (!atsru->all_ports) { 1.515 - ret = acpi_parse_dev_scope((void *)(atsr + 1), 1.516 - ((void *)atsr) + header->length, 1.517 - &atsru->devices_cnt, &atsru->devices); 1.518 - } 1.519 - else { 1.520 - printk(KERN_WARNING PREFIX "found ALL_PORTS\n"); 1.521 + if ( !atsru->all_ports ) 1.522 + ret = acpi_parse_dev_scope( 1.523 + (void *)(atsr + 1), 1.524 + ((void *)atsr) + header->length, 1.525 + &atsru->devices_cnt, &atsru->devices); 1.526 + else 1.527 + { 1.528 + printk(KERN_INFO PREFIX "found ALL_PORTS\n"); 1.529 /* Only allow one ALL_PORTS */ 1.530 - if (all_ports) { 1.531 + if ( all_ports ) 1.532 + { 1.533 printk(KERN_WARNING PREFIX "Only one ALL_PORTS " 1.534 - "device scope is allowed\n"); 1.535 + "device scope is allowed\n"); 1.536 ret = -EINVAL; 1.537 } 1.538 all_ports = 1; 1.539 } 1.540 1.541 - if (ret) 1.542 + if ( ret ) 1.543 xfree(atsr); 1.544 else 1.545 acpi_register_atsr_unit(atsru); 1.546 return ret; 1.547 } 1.548 1.549 -static void __init 1.550 -acpi_table_print_dmar_entry(struct acpi_dmar_entry_header *header) 1.551 -{ 1.552 - struct acpi_table_drhd *drhd; 1.553 - struct acpi_table_rmrr *rmrr; 1.554 - 1.555 - switch (header->type) { 1.556 - case ACPI_DMAR_DRHD: 1.557 - drhd = (struct acpi_table_drhd *)header; 1.558 - break; 1.559 - case ACPI_DMAR_RMRR: 1.560 - rmrr = 
(struct acpi_table_rmrr *)header; 1.561 - break; 1.562 - } 1.563 -} 1.564 - 1.565 -static int __init 1.566 -acpi_parse_dmar(unsigned long phys_addr, unsigned long size) 1.567 +static int __init acpi_parse_dmar(unsigned long phys_addr, 1.568 + unsigned long size) 1.569 { 1.570 struct acpi_table_dmar *dmar = NULL; 1.571 struct acpi_dmar_entry_header *entry_header; 1.572 int ret = 0; 1.573 1.574 - if (!phys_addr || !size) 1.575 + if ( !phys_addr || !size ) 1.576 return -EINVAL; 1.577 1.578 dmar = (struct acpi_table_dmar *)__acpi_map_table(phys_addr, size); 1.579 - if (!dmar) { 1.580 - printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); 1.581 + if ( !dmar ) 1.582 + { 1.583 + printk(KERN_WARNING PREFIX "Unable to map DMAR\n"); 1.584 return -ENODEV; 1.585 } 1.586 1.587 - if (!dmar->haw) { 1.588 - printk (KERN_WARNING PREFIX "Zero: Invalid DMAR haw\n"); 1.589 + if ( !dmar->haw ) 1.590 + { 1.591 + printk(KERN_WARNING PREFIX "Zero: Invalid DMAR haw\n"); 1.592 return -EINVAL; 1.593 } 1.594 1.595 dmar_host_address_width = dmar->haw; 1.596 - printk (KERN_INFO PREFIX "Host address width %d\n", 1.597 - dmar_host_address_width); 1.598 + printk(KERN_INFO PREFIX "Host address width %d\n", 1.599 + dmar_host_address_width); 1.600 1.601 entry_header = (struct acpi_dmar_entry_header *)(dmar + 1); 1.602 - while (((unsigned long)entry_header) < (((unsigned long)dmar) + size)) { 1.603 - acpi_table_print_dmar_entry(entry_header); 1.604 - 1.605 - switch (entry_header->type) { 1.606 + while ( ((unsigned long)entry_header) < 1.607 + (((unsigned long)dmar) + size) ) 1.608 + { 1.609 + switch ( entry_header->type ) 1.610 + { 1.611 case ACPI_DMAR_DRHD: 1.612 - printk (KERN_INFO PREFIX "found ACPI_DMAR_DRHD\n"); 1.613 + printk(KERN_INFO PREFIX "found ACPI_DMAR_DRHD\n"); 1.614 ret = acpi_parse_one_drhd(entry_header); 1.615 break; 1.616 case ACPI_DMAR_RMRR: 1.617 - printk (KERN_INFO PREFIX "found ACPI_DMAR_RMRR\n"); 1.618 + printk(KERN_INFO PREFIX "found ACPI_DMAR_RMRR\n"); 1.619 ret = acpi_parse_one_rmrr(entry_header); 1.620 break; 1.621 case ACPI_DMAR_ATSR: 1.622 - printk (KERN_INFO PREFIX "found ACPI_DMAR_RMRR\n"); 1.623 + printk(KERN_INFO PREFIX "found ACPI_DMAR_RMRR\n"); 1.624 ret = acpi_parse_one_atsr(entry_header); 1.625 break; 1.626 default: 1.627 @@ -478,7 +522,7 @@ acpi_parse_dmar(unsigned long phys_addr, 1.628 ret = -EINVAL; 1.629 break; 1.630 } 1.631 - if (ret) 1.632 + if ( ret ) 1.633 break; 1.634 1.635 entry_header = ((void *)entry_header + entry_header->length); 1.636 @@ -494,15 +538,16 @@ int acpi_dmar_init(void) 1.637 { 1.638 int rc; 1.639 1.640 - if (!vtd_enabled) 1.641 + if ( !vtd_enabled ) 1.642 return -ENODEV; 1.643 1.644 - if ((rc = vtd_hw_check()) != 0) 1.645 + if ( (rc = vtd_hw_check()) != 0 ) 1.646 return rc; 1.647 1.648 acpi_table_parse(ACPI_DMAR, acpi_parse_dmar); 1.649 1.650 - if (list_empty(&acpi_drhd_units)) { 1.651 + if ( list_empty(&acpi_drhd_units) ) 1.652 + { 1.653 printk(KERN_ERR PREFIX "No DMAR devices found\n"); 1.654 vtd_enabled = 0; 1.655 return -ENODEV;
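
Note on the dmar.c change above: the device counting is pulled out of acpi_parse_dev_scope() into a separate scope_device_count() pass, so the variable-length ACPI device-scope list is walked once to size the array, xmalloc_array() allocates it, and a second walk fills it in. The sketch below shows that count-allocate-fill shape over a length-prefixed entry list. It is deliberately simplified to one device per entry (the real helper also enumerates devices behind P2P bridges), and the type and function names, plus the use of calloc in place of xmalloc_array, are illustrative assumptions rather than the Xen definitions.

    #include <stdlib.h>

    /* A length-prefixed entry, loosely modelled on struct acpi_dev_scope:
     * the first byte gives the total size of the entry in bytes.
     * (Illustrative layout, not the ACPI structure.) */
    struct entry_hdr {
        unsigned char length;
        unsigned char bus, dev, fn;
    };

    struct device_sketch {
        unsigned char bus, devfn;
    };

    /* First pass: count entries, rejecting malformed ones early. */
    static int count_entries(void *start, void *end)
    {
        int count = 0;

        while ( start < end )
        {
            struct entry_hdr *e = start;
            if ( e->length < sizeof(*e) )
                return -1;                  /* invalid entry, bail out */
            count++;
            start += e->length;
        }
        return count;
    }

    /* Second pass: allocate exactly once, then fill. */
    static int parse_entries(void *start, void *end,
                             int *cnt, struct device_sketch **devs)
    {
        struct device_sketch *d;
        int n = count_entries(start, end);

        *cnt = 0;
        *devs = NULL;
        if ( n <= 0 )
            return n;                       /* error, or nothing to do */

        *devs = calloc(n, sizeof(**devs));
        if ( !*devs )
            return -1;
        *cnt = n;

        d = *devs;
        while ( start < end )
        {
            struct entry_hdr *e = start;
            d->bus   = e->bus;
            d->devfn = (unsigned char)((e->dev << 3) | e->fn);
            d++;
            start += e->length;
        }
        return 0;
    }

Counting first keeps the allocation exact and lets the parse reject a malformed entry before any memory is committed, which is the shape the refactored acpi_parse_dev_scope() follows.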
2.1 --- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c Fri Nov 16 16:53:43 2007 +0000 2.2 +++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c Fri Nov 16 17:01:50 2007 +0000 2.3 @@ -533,6 +533,7 @@ static void dma_pte_clear_one(struct dom 2.4 iommu_flush_write_buffer(iommu); 2.5 } 2.6 } 2.7 + unmap_domain_page(pte); 2.8 } 2.9 2.10 /* clear last level pte, a tlb flush should be followed */ 2.11 @@ -899,7 +900,7 @@ struct iommu *iommu_alloc(void *hw_data) 2.12 { 2.13 struct acpi_drhd_unit *drhd = (struct acpi_drhd_unit *) hw_data; 2.14 struct iommu *iommu; 2.15 - 2.16 + 2.17 if ( nr_iommus > MAX_IOMMUS ) 2.18 { 2.19 gdprintk(XENLOG_ERR VTDPREFIX, 2.20 @@ -914,7 +915,7 @@ struct iommu *iommu_alloc(void *hw_data) 2.21 2.22 set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, drhd->address); 2.23 iommu->reg = (void *) fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus); 2.24 - dprintk(XENLOG_INFO VTDPREFIX, 2.25 + dprintk(XENLOG_ERR VTDPREFIX, 2.26 "iommu_alloc: iommu->reg = %p drhd->address = %lx\n", 2.27 iommu->reg, drhd->address); 2.28 nr_iommus++; 2.29 @@ -1009,28 +1010,22 @@ static int domain_context_mapping_one( 2.30 context = device_to_context_entry(iommu, bus, devfn); 2.31 if ( !context ) 2.32 { 2.33 - gdprintk(XENLOG_INFO VTDPREFIX, 2.34 + gdprintk(XENLOG_ERR VTDPREFIX, 2.35 "domain_context_mapping_one:context == NULL:" 2.36 "bdf = %x:%x:%x\n", 2.37 bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 2.38 return -ENOMEM; 2.39 } 2.40 - spin_lock_irqsave(&iommu->lock, flags); 2.41 + 2.42 if ( context_present(*context) ) 2.43 { 2.44 - spin_unlock_irqrestore(&iommu->lock, flags); 2.45 gdprintk(XENLOG_INFO VTDPREFIX, 2.46 "domain_context_mapping_one:context present:bdf=%x:%x:%x\n", 2.47 bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 2.48 return 0; 2.49 } 2.50 2.51 -#ifdef VTD_DEBUG 2.52 - dprintk(XENLOG_INFO VTDPREFIX, 2.53 - "context_mapping_one_1-%x:%x:%x-*context = %lx %lx\n", 2.54 - bus, PCI_SLOT(devfn), PCI_FUNC(devfn), context->hi, context->lo); 2.55 -#endif 2.56 - 2.57 + spin_lock_irqsave(&iommu->lock, flags); 2.58 /* 2.59 * domain_id 0 is not valid on Intel's IOMMU, force domain_id to 2.60 * be 1 based as required by intel's iommu hw. 
2.61 @@ -1039,9 +1034,7 @@ static int domain_context_mapping_one( 2.62 context_set_address_width(*context, hd->agaw); 2.63 2.64 if ( ecap_pass_thru(iommu->ecap) ) 2.65 - { 2.66 context_set_translation_type(*context, CONTEXT_TT_PASS_THRU); 2.67 - } 2.68 else 2.69 { 2.70 context_set_address_root(*context, virt_to_maddr(hd->pgd)); 2.71 @@ -1052,15 +1045,14 @@ static int domain_context_mapping_one( 2.72 context_set_present(*context); 2.73 iommu_flush_cache_entry(iommu, context); 2.74 2.75 -#ifdef VTD_DEBUG 2.76 - dprintk(XENLOG_INFO VTDPREFIX, 2.77 - "context_mapping_one_2-%x:%x:%x-*context=%lx %lx hd->pgd = %p\n", 2.78 - bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 2.79 - context->hi, context->lo, hd->pgd); 2.80 -#endif 2.81 + gdprintk(XENLOG_INFO VTDPREFIX, 2.82 + "context_mapping_one-%x:%x:%x-*context=%"PRIx64":%"PRIx64 2.83 + " hd->pgd=%p\n", 2.84 + bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 2.85 + context->hi, context->lo, hd->pgd); 2.86 2.87 if ( iommu_flush_context_device(iommu, domain->domain_id, 2.88 - (((u16)bus) << 8) | devfn, 2.89 + (((u16)bus) << 8) | devfn, 2.90 DMA_CCMD_MASK_NOBIT, 1) ) 2.91 iommu_flush_write_buffer(iommu); 2.92 else 2.93 @@ -1177,14 +1169,12 @@ static int domain_context_mapping( 2.94 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 2.95 2.96 if ( pdev->bus == 0 ) 2.97 - { 2.98 ret = domain_context_mapping_one( 2.99 domain, iommu, (u8)(pdev->bus), (u8)(pdev->devfn)); 2.100 - } 2.101 else 2.102 { 2.103 if ( bus2bridge[pdev->bus].bus != 0 ) 2.104 - gdprintk(XENLOG_ERR VTDPREFIX, 2.105 + gdprintk(XENLOG_WARNING VTDPREFIX, 2.106 "domain_context_mapping:bus2bridge" 2.107 "[pdev->bus].bus != 0\n"); 2.108 2.109 @@ -1229,17 +1219,15 @@ static int domain_context_unmap_one( 2.110 context = device_to_context_entry(iommu, bus, devfn); 2.111 if ( !context ) 2.112 { 2.113 - gdprintk(XENLOG_INFO VTDPREFIX, 2.114 + gdprintk(XENLOG_ERR VTDPREFIX, 2.115 "domain_context_unmap_one-%x:%x:%x- context == NULL:return\n", 2.116 bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 2.117 return -ENOMEM; 2.118 } 2.119 2.120 - spin_lock_irqsave(&iommu->lock, flags); 2.121 if ( !context_present(*context) ) 2.122 { 2.123 - spin_unlock_irqrestore(&iommu->lock, flags); 2.124 - gdprintk(XENLOG_INFO VTDPREFIX, 2.125 + gdprintk(XENLOG_WARNING VTDPREFIX, 2.126 "domain_context_unmap_one-%x:%x:%x- " 2.127 "context NOT present:return\n", 2.128 bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 2.129 @@ -1247,9 +1235,10 @@ static int domain_context_unmap_one( 2.130 } 2.131 2.132 gdprintk(XENLOG_INFO VTDPREFIX, 2.133 - "domain_context_unmap_one_1:bdf = %x:%x:%x\n", 2.134 + "domain_context_unmap_one: bdf = %x:%x:%x\n", 2.135 bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 2.136 2.137 + spin_lock_irqsave(&iommu->lock, flags); 2.138 context_clear_present(*context); 2.139 context_clear_entry(*context); 2.140 iommu_flush_cache_entry(iommu, context); 2.141 @@ -1257,10 +1246,6 @@ static int domain_context_unmap_one( 2.142 iommu_flush_iotlb_global(iommu, 0); 2.143 spin_unlock_irqrestore(&iommu->lock, flags); 2.144 2.145 - gdprintk(XENLOG_INFO VTDPREFIX, 2.146 - "domain_context_unmap_one_2:bdf = %x:%x:%x\n", 2.147 - bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 2.148 - 2.149 return 0; 2.150 } 2.151 2.152 @@ -1302,15 +1287,13 @@ static int domain_context_unmap( 2.153 "domain_context_unmap:PCI: bdf = %x:%x:%x\n", 2.154 pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); 2.155 if ( pdev->bus == 0 ) 2.156 - { 2.157 ret = domain_context_unmap_one( 2.158 domain, iommu, 2.159 (u8)(pdev->bus), (u8)(pdev->devfn)); 2.160 - } 2.161 else 2.162 { 2.163 if ( 
bus2bridge[pdev->bus].bus != 0 ) 2.164 - gdprintk(XENLOG_INFO VTDPREFIX, 2.165 + gdprintk(XENLOG_WARNING VTDPREFIX, 2.166 "domain_context_mapping:" 2.167 "bus2bridge[pdev->bus].bus != 0\n"); 2.168 2.169 @@ -1318,7 +1301,7 @@ static int domain_context_unmap( 2.170 (u8)(bus2bridge[pdev->bus].bus), 2.171 (u8)(bus2bridge[pdev->bus].devfn)); 2.172 2.173 - /* now map everything behind the PCI bridge */ 2.174 + /* Unmap everything behind the PCI bridge */ 2.175 for ( dev = 0; dev < 32; dev++ ) 2.176 { 2.177 for ( func = 0; func < 8; func++ ) 2.178 @@ -1356,7 +1339,7 @@ void reassign_device_ownership( 2.179 int status; 2.180 unsigned long flags; 2.181 2.182 - gdprintk(XENLOG_ERR VTDPREFIX, 2.183 + gdprintk(XENLOG_INFO VTDPREFIX, 2.184 "reassign_device-%x:%x:%x- source = %d target = %d\n", 2.185 bus, PCI_SLOT(devfn), PCI_FUNC(devfn), 2.186 source->domain_id, target->domain_id); 2.187 @@ -1366,8 +1349,6 @@ void reassign_device_ownership( 2.188 if ( (pdev->bus != bus) || (pdev->devfn != devfn) ) 2.189 continue; 2.190 2.191 - pdev->bus = bus; 2.192 - pdev->devfn = devfn; 2.193 drhd = acpi_find_matched_drhd_unit(pdev); 2.194 iommu = drhd->iommu; 2.195 domain_context_unmap(source, iommu, pdev); 2.196 @@ -1825,9 +1806,9 @@ int assign_device(struct domain *d, u8 b 2.197 if ( list_empty(&acpi_drhd_units) ) 2.198 return ret; 2.199 2.200 - dprintk(XENLOG_INFO VTDPREFIX, 2.201 - "assign_device: bus = %x dev = %x func = %x\n", 2.202 - bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 2.203 + gdprintk(XENLOG_INFO VTDPREFIX, 2.204 + "assign_device: bus = %x dev = %x func = %x\n", 2.205 + bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 2.206 2.207 reassign_device_ownership(dom0, d, bus, devfn); 2.208 2.209 @@ -1870,12 +1851,16 @@ void iommu_set_pgd(struct domain *d) 2.210 if ( !hd->pgd ) 2.211 { 2.212 pgd = (struct dma_pte *)alloc_xenheap_page(); 2.213 - memset((u8*)pgd, 0, PAGE_SIZE); 2.214 - if ( !hd->pgd ) 2.215 - hd->pgd = pgd; 2.216 - else /* somebody is fast */ 2.217 - free_xenheap_page((void *) pgd); 2.218 - } 2.219 + if ( !pgd ) 2.220 + { 2.221 + spin_unlock_irqrestore(&hd->mapping_lock, flags); 2.222 + gdprintk(XENLOG_ERR VTDPREFIX, 2.223 + "Allocate pgd memory failed!\n"); 2.224 + return; 2.225 + } 2.226 + memset(pgd, 0, PAGE_SIZE); 2.227 + hd->pgd = pgd; 2.228 + } 2.229 2.230 l3e = map_domain_page(p2m_table); 2.231 switch ( level ) 2.232 @@ -1892,6 +1877,14 @@ void iommu_set_pgd(struct domain *d) 2.233 case VTD_PAGE_TABLE_LEVEL_4: /* Stoakley */ 2.234 /* We allocate one more page for the top vtd page table. */ 2.235 pmd = (struct dma_pte *)alloc_xenheap_page(); 2.236 + if ( !pmd ) 2.237 + { 2.238 + unmap_domain_page(l3e); 2.239 + spin_unlock_irqrestore(&hd->mapping_lock, flags); 2.240 + gdprintk(XENLOG_ERR VTDPREFIX, 2.241 + "Allocate pmd memory failed!\n"); 2.242 + return; 2.243 + } 2.244 memset((u8*)pmd, 0, PAGE_SIZE); 2.245 pte = &pgd[0]; 2.246 dma_set_pte_addr(*pte, virt_to_maddr(pmd));
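
Note on the intel-iommu.c changes above: two of the fixes share one discipline. dma_pte_clear_one() now calls unmap_domain_page(pte) so the mapping taken earlier in the function is released on every path, and iommu_set_pgd() checks each page allocation and unwinds (unmap_domain_page(), spin_unlock_irqrestore()) before returning instead of touching a NULL page. Below is a minimal sketch of that check-and-unwind pattern under stated assumptions: the struct, its field names, and the use of calloc/free in place of alloc_xenheap_page()/free_xenheap_page() are stand-ins for illustration, not the Xen interfaces.

    #include <stdlib.h>

    #define PAGE_SIZE_SKETCH 4096

    struct pgd_sketch {
        void *pgd;      /* top-level translation table                 */
        void *pmd;      /* extra top-level page used for 4-level mode  */
    };

    /* Returns 0 on success; on failure returns -1 with nothing leaked
     * and *hd left untouched. */
    static int set_pgd_sketch(struct pgd_sketch *hd)
    {
        void *pgd, *pmd;

        if ( hd->pgd )
            return 0;                       /* already initialised */

        pgd = calloc(1, PAGE_SIZE_SKETCH);  /* zeroed, like the memset'd page */
        if ( !pgd )
            return -1;                      /* nothing to undo yet */

        pmd = calloc(1, PAGE_SIZE_SKETCH);
        if ( !pmd )
        {
            free(pgd);                      /* undo the earlier allocation */
            return -1;
        }

        hd->pgd = pgd;                      /* publish only once complete */
        hd->pmd = pmd;
        return 0;
    }

The rule mirrored from the diff is simply that any exit taken after a successful allocation, mapping, or lock acquisition must release exactly what was acquired before it.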
3.1 --- a/xen/arch/x86/hvm/vmx/vtd/pci-direct.h Fri Nov 16 16:53:43 2007 +0000 3.2 +++ b/xen/arch/x86/hvm/vmx/vtd/pci-direct.h Fri Nov 16 17:01:50 2007 +0000 3.3 @@ -5,15 +5,15 @@ 3.4 #include <asm/io.h> 3.5 3.6 /* Direct PCI access. This is used for PCI accesses in early boot before 3.7 - the PCI subsystem works. */ 3.8 + the PCI subsystem works. */ 3.9 3.10 #define PDprintk(x...) 3.11 3.12 static inline u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset) 3.13 { 3.14 - u32 v; 3.15 + u32 v; 3.16 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 3.17 - v = inl(0xcfc); 3.18 + v = inl(0xcfc); 3.19 if (v != 0xffffffff) 3.20 PDprintk("%x reading 4 from %x: %x\n", slot, offset, v); 3.21 return v; 3.22 @@ -21,28 +21,28 @@ static inline u32 read_pci_config(u8 bus 3.23 3.24 static inline u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset) 3.25 { 3.26 - u8 v; 3.27 + u8 v; 3.28 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 3.29 - v = inb(0xcfc + (offset&3)); 3.30 + v = inb(0xcfc + (offset&3)); 3.31 PDprintk("%x reading 1 from %x: %x\n", slot, offset, v); 3.32 return v; 3.33 } 3.34 3.35 static inline u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset) 3.36 { 3.37 - u16 v; 3.38 + u16 v; 3.39 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 3.40 - v = inw(0xcfc + (offset&2)); 3.41 + v = inw(0xcfc + (offset&2)); 3.42 PDprintk("%x reading 2 from %x: %x\n", slot, offset, v); 3.43 return v; 3.44 } 3.45 3.46 -static inline void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, 3.47 - u32 val) 3.48 +static inline void write_pci_config( 3.49 + u8 bus, u8 slot, u8 func, u8 offset, u32 val) 3.50 { 3.51 - PDprintk("%x writing to %x: %x\n", slot, offset, val); 3.52 + PDprintk("%x writing to %x: %x\n", slot, offset, val); 3.53 outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8); 3.54 - outl(val, 0xcfc); 3.55 + outl(val, 0xcfc); 3.56 } 3.57 3.58 #endif
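
Note on pci-direct.h above: it implements PCI configuration mechanism #1. A 32-bit address word carrying an enable bit, bus, device, function, and register offset is written to I/O port 0xCF8, and the data is then moved through port 0xCFC, which is why read_pci_config_byte() and read_pci_config_16() index the data port with (offset & 3) and (offset & 2). The sketch below only assembles that address word so the bit layout is explicit; pci_conf1_addr() is an illustrative name, not a helper defined in the header.

    #include <stdint.h>

    /* Build the CONFIG_ADDRESS word written to port 0xCF8. */
    static inline uint32_t pci_conf1_addr(uint8_t bus, uint8_t slot,
                                          uint8_t func, uint8_t offset)
    {
        return 0x80000000u             /* bit 31: enable configuration cycle  */
             | ((uint32_t)bus  << 16)  /* bits 23-16: bus number              */
             | ((uint32_t)slot << 11)  /* bits 15-11: device (slot) number    */
             | ((uint32_t)func << 8)   /* bits 10-8:  function number         */
             | offset;                 /* bits 7-0:   register offset         */
    }

With that word in hand, the helpers above reduce to outl(addr, 0xcf8) followed by inl(0xcfc), or inb(0xcfc + (offset & 3)) when only a single byte is wanted.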