debuggers.hg
changeset 16398:a1247c2df2b4
merge with xen-unstable.hg (staging)
| author   | Alex Williamson <alex.williamson@hp.com>    |
|----------|---------------------------------------------|
| date     | Wed Nov 07 14:48:48 2007 -0700 (2007-11-07) |
| parents  | ef6415fdaf8a d4c5a1cdcf2e                   |
| children | d0cd18d78074                                |
| files    | tools/ioemu/hw/xen_machine_fv.c             |
line diff
--- a/tools/examples/block Wed Nov 07 11:01:23 2007 -0700
+++ b/tools/examples/block Wed Nov 07 14:48:48 2007 -0700
@@ -326,7 +326,10 @@ mount it read-write in a guest domain."
 fatal 'Failed to find an unused loop device'
 fi

- do_or_die losetup "$loopdev" "$file"
+ status=$(losetup "$loopdev" "$file" || echo "failed")
+ if [ -n "$status" ]; then
+ do_or_die losetup -r "$loopdev" "$file"
+ fi
 xenstore_write "$XENBUS_PATH/node" "$loopdev"
 write_dev "$loopdev"
 release_lock "block"
--- a/tools/examples/network-bridge Wed Nov 07 11:01:23 2007 -0700
+++ b/tools/examples/network-bridge Wed Nov 07 14:48:48 2007 -0700
@@ -72,8 +72,8 @@ find_alt_device () {
 echo "$ifs"
 }

-netdev=${netdev:-$(ip route list | awk '/^default / { print $NF }' |
- sed 's/.* dev //')}
+netdev=${netdev:-$(ip route list 0.0.0.0/0 | \
+ sed 's/.*dev \([a-z]\+[0-9]\+\).*$/\1/')}
 if is_network_root ; then
 altdevs=$(find_alt_device $netdev)
 for netdev in $altdevs; do break; done
--- a/tools/ioemu/hw/pass-through.c Wed Nov 07 11:01:23 2007 -0700
+++ b/tools/ioemu/hw/pass-through.c Wed Nov 07 14:48:48 2007 -0700
@@ -20,8 +20,8 @@
 * Guy Zana <guy@neocleus.com>
 *
 * This file implements direct PCI assignment to a HVM guest
- *
 */
+
 #include "vl.h"
 #include "pass-through.h"
 #include "pci/header.h"
@@ -127,9 +127,10 @@ void pt_iomem_map(PCIDevice *d, int i, u
 if ( !first_map )
 {
 /* Remove old mapping */
- ret = xc_domain_memory_mapping(xc_handle, domid, old_ebase >> 12,
- assigned_device->bases[i].access.maddr >> 12,
- (e_size+0xFFF) >> 12,
+ ret = xc_domain_memory_mapping(xc_handle, domid,
+ old_ebase >> XC_PAGE_SHIFT,
+ assigned_device->bases[i].access.maddr >> XC_PAGE_SHIFT,
+ (e_size+XC_PAGE_MASK) >> XC_PAGE_SHIFT,
 DPCI_REMOVE_MAPPING);
 if ( ret != 0 )
 {
@@ -140,9 +141,9 @@ void pt_iomem_map(PCIDevice *d, int i, u

 /* Create new mapping */
 ret = xc_domain_memory_mapping(xc_handle, domid,
- assigned_device->bases[i].e_physbase >> 12,
- assigned_device->bases[i].access.maddr >> 12,
- (e_size+0xFFF) >> 12,
+ assigned_device->bases[i].e_physbase >> XC_PAGE_SHIFT,
+ assigned_device->bases[i].access.maddr >> XC_PAGE_SHIFT,
+ (e_size+XC_PAGE_MASK) >> XC_PAGE_SHIFT,
 DPCI_ADD_MAPPING);
 if ( ret != 0 )
 PT_LOG("Error: create new mapping failed!\n");
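The hunks above replace the hard-coded 4 KiB constants (`0xFFF`, `>> 12`) with the libxc page macros. Below is a minimal sketch of the rounding idiom, not part of the changeset, using stand-in constants; it assumes a 4 KiB page with the mask defined as page size minus one.

```c
#include <assert.h>

/* Stand-in constants for the sketch; a 4 KiB page and a mask of
 * PAGE_SIZE - 1 (0xFFF) are assumptions here. */
#define EX_PAGE_SHIFT 12
#define EX_PAGE_MASK  ((1UL << EX_PAGE_SHIFT) - 1)

/* Number of whole pages needed to cover a region of 'size' bytes. */
static unsigned long pages_covering(unsigned long size)
{
    return (size + EX_PAGE_MASK) >> EX_PAGE_SHIFT;
}

int main(void)
{
    assert(pages_covering(0x1000) == 1);  /* exactly one page       */
    assert(pages_covering(0x1001) == 2);  /* spills into a second   */
    return 0;
}
```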
--- a/tools/ioemu/hw/pass-through.h Wed Nov 07 11:01:23 2007 -0700
+++ b/tools/ioemu/hw/pass-through.h Wed Nov 07 14:48:48 2007 -0700
@@ -40,7 +40,7 @@
 /* Misc PCI constants that should be moved to a separate library :) */
 #define PCI_CONFIG_SIZE (256)
 #define PCI_EXP_DEVCAP_FLR (1 << 28)
-#define PCI_EXP_DEVCTL_FLR (0x1b)
+#define PCI_EXP_DEVCTL_FLR (1 << 15)
 #define PCI_BAR_ENTRIES (6)

 struct pt_region {
--- a/tools/ioemu/hw/xen_machine_fv.c Wed Nov 07 11:01:23 2007 -0700
+++ b/tools/ioemu/hw/xen_machine_fv.c Wed Nov 07 14:48:48 2007 -0700
@@ -27,13 +27,6 @@
 #include <xen/hvm/params.h>
 #include <sys/mman.h>

-#ifndef PAGE_SIZE
-#define PAGE_SIZE XC_PAGE_SIZE
-#endif
-#ifndef PAGE_SHIFT
-#define PAGE_SHIFT XC_PAGE_SHIFT
-#endif
-
 #if defined(MAPCACHE)

 #if defined(__i386__)
@@ -57,7 +50,7 @@
 struct map_cache {
 unsigned long paddr_index;
 uint8_t *vaddr_base;
- DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>PAGE_SHIFT);
+ DECLARE_BITMAP(valid_mapping, MCACHE_BUCKET_SIZE>>XC_PAGE_SHIFT);
 };

 static struct map_cache *mapcache_entry;
@@ -71,9 +64,9 @@ static int qemu_map_cache_init(void)
 {
 unsigned long size;

- nr_buckets = (((MAX_MCACHE_SIZE >> PAGE_SHIFT) +
- (1UL << (MCACHE_BUCKET_SHIFT - PAGE_SHIFT)) - 1) >>
- (MCACHE_BUCKET_SHIFT - PAGE_SHIFT));
+ nr_buckets = (((MAX_MCACHE_SIZE >> XC_PAGE_SHIFT) +
+ (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
+ (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

 /*
 * Use mmap() directly: lets us allocate a big hash table with no up-front
@@ -81,7 +74,7 @@ static int qemu_map_cache_init(void)
 * that we actually use. All others will contain all zeroes.
 */
 size = nr_buckets * sizeof(struct map_cache);
- size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+ size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
 fprintf(logfile, "qemu_map_cache_init nr_buckets = %lx size %lu\n", nr_buckets, size);
 mapcache_entry = mmap(NULL, size, PROT_READ|PROT_WRITE,
 MAP_SHARED|MAP_ANON, -1, 0);
@@ -97,7 +90,7 @@ static void qemu_remap_bucket(struct map
 unsigned long address_index)
 {
 uint8_t *vaddr_base;
- unsigned long pfns[MCACHE_BUCKET_SIZE >> PAGE_SHIFT];
+ unsigned long pfns[MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT];
 unsigned int i, j;

 if (entry->vaddr_base != NULL) {
@@ -108,11 +101,11 @@ static void qemu_remap_bucket(struct map
 }
 }

- for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i++)
- pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-PAGE_SHIFT)) + i;
+ for (i = 0; i < MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT; i++)
+ pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;

 vaddr_base = xc_map_foreign_batch(xc_handle, domid, PROT_READ|PROT_WRITE,
- pfns, MCACHE_BUCKET_SIZE >> PAGE_SHIFT);
+ pfns, MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT);
 if (vaddr_base == NULL) {
 fprintf(logfile, "xc_map_foreign_batch error %d\n", errno);
 exit(-1);
@@ -121,10 +114,10 @@ static void qemu_remap_bucket(struct map
 entry->vaddr_base = vaddr_base;
 entry->paddr_index = address_index;

- for (i = 0; i < MCACHE_BUCKET_SIZE >> PAGE_SHIFT; i += BITS_PER_LONG) {
+ for (i = 0; i < MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT; i += BITS_PER_LONG) {
 unsigned long word = 0;
- j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> PAGE_SHIFT)) ?
- (MCACHE_BUCKET_SIZE >> PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
+ j = ((i + BITS_PER_LONG) > (MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT)) ?
+ (MCACHE_BUCKET_SIZE >> XC_PAGE_SHIFT) % BITS_PER_LONG : BITS_PER_LONG;
 while (j > 0)
 word = (word << 1) | (((pfns[i + --j] >> 28) & 0xf) != 0xf);
 entry->valid_mapping[i / BITS_PER_LONG] = word;
@@ -143,10 +136,10 @@ uint8_t *qemu_map_cache(target_phys_addr
 entry = &mapcache_entry[address_index % nr_buckets];

 if (entry->vaddr_base == NULL || entry->paddr_index != address_index ||
- !test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
+ !test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
 qemu_remap_bucket(entry, address_index);

- if (!test_bit(address_offset>>PAGE_SHIFT, entry->valid_mapping))
+ if (!test_bit(address_offset>>XC_PAGE_SHIFT, entry->valid_mapping))
 return NULL;

 last_address_index = address_index;
@@ -213,7 +206,7 @@ static void xen_init_fv(uint64_t ram_siz

 xc_get_hvm_param(xc_handle, domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
 fprintf(logfile, "shared page at pfn %lx\n", ioreq_pfn);
- shared_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+ shared_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
 PROT_READ|PROT_WRITE, ioreq_pfn);
 if (shared_page == NULL) {
 fprintf(logfile, "map shared IO page returned error %d\n", errno);
@@ -222,7 +215,7 @@ static void xen_init_fv(uint64_t ram_siz

 xc_get_hvm_param(xc_handle, domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
 fprintf(logfile, "buffered io page at pfn %lx\n", ioreq_pfn);
- buffered_io_page = xc_map_foreign_range(xc_handle, domid, PAGE_SIZE,
+ buffered_io_page = xc_map_foreign_range(xc_handle, domid, XC_PAGE_SIZE,
 PROT_READ|PROT_WRITE, ioreq_pfn);
 if (buffered_io_page == NULL) {
 fprintf(logfile, "map buffered IO page returned error %d\n", errno);
@@ -272,9 +265,9 @@ static void xen_init_fv(uint64_t ram_siz
 /* VTI will not use memory between 3G~4G, so we just pass a legal pfn
 to make QEMU map continuous virtual memory space */
 if (ram_size > MMIO_START) {
- for (i = 0 ; i < (MEM_G >> PAGE_SHIFT); i++)
- page_array[(MMIO_START >> PAGE_SHIFT) + i] =
- (STORE_PAGE_START >> PAGE_SHIFT);
+ for (i = 0 ; i < (MEM_G >> XC_PAGE_SHIFT); i++)
+ page_array[(MMIO_START >> XC_PAGE_SHIFT) + i] =
+ (STORE_XC_PAGE_START >> XC_PAGE_SHIFT);
 }

 phys_ram_base = xc_map_foreign_batch(xc_handle, domid,
--- a/xen/arch/ia64/xen/mm.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/ia64/xen/mm.c Wed Nov 07 14:48:48 2007 -0700
@@ -2894,11 +2894,9 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(
 return 0;
 }

-int
-iomem_page_test(unsigned long mfn, struct page_info *page)
+int is_iomem_page(unsigned long mfn)
 {
- return unlikely(!mfn_valid(mfn)) ||
- unlikely(page_get_owner(page) == dom_io);
+ return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));
 }

 /*
--- a/xen/arch/x86/domain.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/domain.c Wed Nov 07 14:48:48 2007 -0700
@@ -415,7 +415,8 @@ int vcpu_initialise(struct vcpu *v)
 v->arch.cr3 = __pa(idle_pg_table);
 }

- v->arch.guest_context.ctrlreg[4] = mmu_cr4_features;
+ v->arch.guest_context.ctrlreg[4] =
+ real_cr4_to_pv_guest_cr4(mmu_cr4_features);
 }

 v->arch.perdomain_ptes =
@@ -573,17 +574,18 @@ void arch_domain_destroy(struct domain *

 unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4)
 {
- unsigned long hv_cr4 = read_cr4(), hv_cr4_mask = ~X86_CR4_TSD;
+ unsigned long hv_cr4_mask, hv_cr4 = real_cr4_to_pv_guest_cr4(read_cr4());
+
+ hv_cr4_mask = ~X86_CR4_TSD;
 if ( cpu_has_de )
 hv_cr4_mask &= ~X86_CR4_DE;

- if ( (guest_cr4 & hv_cr4_mask) !=
- (hv_cr4 & hv_cr4_mask & ~(X86_CR4_PGE|X86_CR4_PSE)) )
+ if ( (guest_cr4 & hv_cr4_mask) != (hv_cr4 & hv_cr4_mask) )
 gdprintk(XENLOG_WARNING,
 "Attempt to change CR4 flags %08lx -> %08lx\n",
 hv_cr4 & ~(X86_CR4_PGE|X86_CR4_PSE), guest_cr4);

- return (hv_cr4 & hv_cr4_mask) | (guest_cr4 & ~hv_cr4_mask);
+ return (hv_cr4 & hv_cr4_mask) | (guest_cr4 & ~hv_cr4_mask);
 }

 /* This is called by arch_final_setup_guest and do_boot_vcpu */
@@ -684,8 +686,8 @@ int arch_set_info_guest(
 v->arch.guest_context.user_regs.eflags |= EF_IE;

 cr4 = v->arch.guest_context.ctrlreg[4];
- v->arch.guest_context.ctrlreg[4] =
- (cr4 == 0) ? mmu_cr4_features : pv_guest_cr4_fixup(cr4);
+ v->arch.guest_context.ctrlreg[4] = cr4 ? pv_guest_cr4_fixup(cr4) :
+ real_cr4_to_pv_guest_cr4(mmu_cr4_features);

 memset(v->arch.guest_context.debugreg, 0,
 sizeof(v->arch.guest_context.debugreg));
@@ -1223,11 +1225,14 @@ static void paravirt_ctxt_switch_from(st

 static void paravirt_ctxt_switch_to(struct vcpu *v)
 {
+ unsigned long cr4;
+
 set_int80_direct_trap(v);
 switch_kernel_stack(v);

- if ( unlikely(read_cr4() != v->arch.guest_context.ctrlreg[4]) )
- write_cr4(v->arch.guest_context.ctrlreg[4]);
+ cr4 = pv_guest_cr4_to_real_cr4(v->arch.guest_context.ctrlreg[4]);
+ if ( unlikely(cr4 != read_cr4()) )
+ write_cr4(cr4);

 if ( unlikely(v->arch.guest_context.debugreg[7]) )
 {
--- a/xen/arch/x86/hvm/irq.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/irq.c Wed Nov 07 14:48:48 2007 -0700
@@ -192,15 +192,12 @@ void hvm_set_pci_link_route(struct domai
 hvm_irq->pci_link.route[link] = isa_irq;

 /* PCI pass-through fixup. */
- if ( hvm_irq->dpci && hvm_irq->dpci->girq[old_isa_irq].valid )
+ if ( hvm_irq->dpci && hvm_irq->dpci->link[link].valid )
 {
- uint32_t device = hvm_irq->dpci->girq[old_isa_irq].device;
- uint32_t intx = hvm_irq->dpci->girq[old_isa_irq].intx;
- if ( link == hvm_pci_intx_link(device, intx) )
- {
- hvm_irq->dpci->girq[isa_irq] = hvm_irq->dpci->girq[old_isa_irq];
+ hvm_irq->dpci->girq[isa_irq] = hvm_irq->dpci->link[link];
+ if ( hvm_irq->dpci->girq[old_isa_irq].device ==
+ hvm_irq->dpci->link[link].device )
 hvm_irq->dpci->girq[old_isa_irq].valid = 0;
- }
 }

 if ( hvm_irq->pci_link_assert_count[link] == 0 )
--- a/xen/arch/x86/hvm/vmx/intr.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/vmx/intr.c Wed Nov 07 14:48:48 2007 -0700
@@ -113,6 +113,7 @@ static void vmx_dirq_assist(struct vcpu
 uint32_t device, intx;
 struct domain *d = v->domain;
 struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+ struct dev_intx_gsi *dig;

 if ( !vtd_enabled || (v->vcpu_id != 0) || (hvm_irq_dpci == NULL) )
 return;
@@ -122,11 +123,17 @@ static void vmx_dirq_assist(struct vcpu
 irq = find_next_bit(hvm_irq_dpci->dirq_mask, NR_IRQS, irq + 1) )
 {
 stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]);
+ clear_bit(irq, &hvm_irq_dpci->dirq_mask);

- test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask);
- device = hvm_irq_dpci->mirq[irq].device;
- intx = hvm_irq_dpci->mirq[irq].intx;
- hvm_pci_intx_assert(d, device, intx);
+ list_for_each_entry ( dig, &hvm_irq_dpci->mirq[irq].dig_list, list )
+ {
+ device = dig->device;
+ intx = dig->intx;
+ hvm_pci_intx_assert(d, device, intx);
+ spin_lock(&hvm_irq_dpci->dirq_lock);
+ hvm_irq_dpci->mirq[irq].pending++;
+ spin_unlock(&hvm_irq_dpci->dirq_lock);
+ }

 /*
 * Set a timer to see if the guest can finish the interrupt or not. For
--- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c Wed Nov 07 14:48:48 2007 -0700
@@ -688,6 +688,9 @@ static int iommu_enable_translation(stru
 break;
 cpu_relax();
 }
+
+ /* Disable PMRs when VT-d engine takes effect per spec definition */
+ disable_pmr(iommu);
 spin_unlock_irqrestore(&iommu->register_lock, flags);
 return 0;
 }
@@ -1767,7 +1770,7 @@ int iommu_setup(void)
 struct hvm_iommu *hd = domain_hvm_iommu(dom0);
 struct acpi_drhd_unit *drhd;
 struct iommu *iommu;
- unsigned long i, status;
+ unsigned long i;

 if ( !vtd_enabled )
 return 0;
@@ -1797,10 +1800,6 @@ int iommu_setup(void)
 if ( enable_vtd_translation() )
 goto error;

- status = dmar_readl(iommu->reg, DMAR_PMEN_REG);
- if (status & DMA_PMEN_PRS)
- disable_pmr(iommu);
-
 return 0;

 error:
--- a/xen/arch/x86/hvm/vmx/vtd/io.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/vmx/vtd/io.c Wed Nov 07 14:48:48 2007 -0700
@@ -47,14 +47,27 @@

 static void pt_irq_time_out(void *data)
 {
- struct hvm_irq_dpci_mapping *irq_map = data;
- unsigned int guest_gsi, machine_gsi;
- struct domain *d = irq_map->dom;
+ struct hvm_mirq_dpci_mapping *irq_map = data;
+ unsigned int guest_gsi, machine_gsi = 0;
+ struct hvm_irq_dpci *dpci = irq_map->dom->arch.hvm_domain.irq.dpci;
+ struct dev_intx_gsi *dig;
+ uint32_t device, intx;

- guest_gsi = irq_map->guest_gsi;
- machine_gsi = d->arch.hvm_domain.irq.dpci->girq[guest_gsi].machine_gsi;
- clear_bit(machine_gsi, d->arch.hvm_domain.irq.dpci->dirq_mask);
- hvm_dpci_eoi(irq_map->dom, guest_gsi, NULL);
+ list_for_each_entry ( dig, &irq_map->dig_list, list )
+ {
+ guest_gsi = dig->gsi;
+ machine_gsi = dpci->girq[guest_gsi].machine_gsi;
+ device = dig->device;
+ intx = dig->intx;
+ hvm_pci_intx_deassert(irq_map->dom, device, intx);
+ }
+
+ clear_bit(machine_gsi, dpci->dirq_mask);
+ stop_timer(&dpci->hvm_timer[irq_to_vector(machine_gsi)]);
+ spin_lock(&dpci->dirq_lock);
+ dpci->mirq[machine_gsi].pending = 0;
+ spin_unlock(&dpci->dirq_lock);
+ pirq_guest_eoi(irq_map->dom, machine_gsi);
 }

 int pt_irq_create_bind_vtd(
@@ -62,8 +75,8 @@ int pt_irq_create_bind_vtd(
 {
 struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
 uint32_t machine_gsi, guest_gsi;
- uint32_t device, intx;
- uint32_t link, isa_irq;
+ uint32_t device, intx, link;
+ struct dev_intx_gsi *dig;

 if ( hvm_irq_dpci == NULL )
 {
@@ -72,6 +85,9 @@ int pt_irq_create_bind_vtd(
 return -ENOMEM;

 memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
+ spin_lock_init(&hvm_irq_dpci->dirq_lock);
+ for ( int i = 0; i < NR_IRQS; i++ )
+ INIT_LIST_HEAD(&hvm_irq_dpci->mirq[i].dig_list);

 if ( cmpxchg((unsigned long *)&d->arch.hvm_domain.irq.dpci,
 0, (unsigned long)hvm_irq_dpci) != 0 )
@@ -85,35 +101,42 @@ int pt_irq_create_bind_vtd(
 intx = pt_irq_bind->u.pci.intx;
 guest_gsi = hvm_pci_intx_gsi(device, intx);
 link = hvm_pci_intx_link(device, intx);
- isa_irq = d->arch.hvm_domain.irq.pci_link.route[link];
+
+ dig = xmalloc(struct dev_intx_gsi);
+ if ( !dig )
+ return -ENOMEM;

- hvm_irq_dpci->mirq[machine_gsi].valid = 1;
- hvm_irq_dpci->mirq[machine_gsi].device = device;
- hvm_irq_dpci->mirq[machine_gsi].intx = intx;
- hvm_irq_dpci->mirq[machine_gsi].guest_gsi = guest_gsi;
- hvm_irq_dpci->mirq[machine_gsi].dom = d;
-
+ dig->device = device;
+ dig->intx = intx;
+ dig->gsi = guest_gsi;
+ list_add_tail(&dig->list,
+ &hvm_irq_dpci->mirq[machine_gsi].dig_list);
+
 hvm_irq_dpci->girq[guest_gsi].valid = 1;
 hvm_irq_dpci->girq[guest_gsi].device = device;
 hvm_irq_dpci->girq[guest_gsi].intx = intx;
 hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
- hvm_irq_dpci->girq[guest_gsi].dom = d;

- hvm_irq_dpci->girq[isa_irq].valid = 1;
- hvm_irq_dpci->girq[isa_irq].device = device;
- hvm_irq_dpci->girq[isa_irq].intx = intx;
- hvm_irq_dpci->girq[isa_irq].machine_gsi = machine_gsi;
- hvm_irq_dpci->girq[isa_irq].dom = d;
+ hvm_irq_dpci->link[link].valid = 1;
+ hvm_irq_dpci->link[link].device = device;
+ hvm_irq_dpci->link[link].intx = intx;
+ hvm_irq_dpci->link[link].machine_gsi = machine_gsi;

- init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
- pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
+ /* Bind the same mirq once in the same domain */
+ if ( !hvm_irq_dpci->mirq[machine_gsi].valid )
+ {
+ hvm_irq_dpci->mirq[machine_gsi].valid = 1;
+ hvm_irq_dpci->mirq[machine_gsi].dom = d;

- /* Deal with GSI for legacy devices. */
- pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
- gdprintk(XENLOG_ERR,
- "XEN_DOMCTL_irq_mapping: m_irq = %x device = %x intx = %x\n",
+ init_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)],
+ pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
+ /* Deal with gsi for legacy devices */
+ pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
+ }
+
+ gdprintk(XENLOG_INFO,
+ "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
 machine_gsi, device, intx);
-
 return 0;
 }

@@ -150,14 +173,22 @@ void hvm_dpci_eoi(struct domain *d, unsi
 return;

 machine_gsi = hvm_irq_dpci->girq[guest_gsi].machine_gsi;
- stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
 device = hvm_irq_dpci->girq[guest_gsi].device;
 intx = hvm_irq_dpci->girq[guest_gsi].intx;
- gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: device %x intx %x\n",
- device, intx);
 hvm_pci_intx_deassert(d, device, intx);
- if ( (ent == NULL) || !ent->fields.mask )
- pirq_guest_eoi(d, machine_gsi);
+
+ spin_lock(&hvm_irq_dpci->dirq_lock);
+ if ( --hvm_irq_dpci->mirq[machine_gsi].pending == 0 )
+ {
+ spin_unlock(&hvm_irq_dpci->dirq_lock);
+
+ gdprintk(XENLOG_INFO, "hvm_dpci_eoi:: mirq = %x\n", machine_gsi);
+ stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(machine_gsi)]);
+ if ( (ent == NULL) || !ent->fields.mask )
+ pirq_guest_eoi(d, machine_gsi);
+ }
+ else
+ spin_unlock(&hvm_irq_dpci->dirq_lock);
 }

 void iommu_domain_destroy(struct domain *d)
@@ -165,8 +196,9 @@ void iommu_domain_destroy(struct domain
 struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
 uint32_t i;
 struct hvm_iommu *hd = domain_hvm_iommu(d);
- struct list_head *ioport_list, *tmp;
+ struct list_head *ioport_list, *dig_list, *tmp;
 struct g2m_ioport *ioport;
+ struct dev_intx_gsi *dig;

 if ( !vtd_enabled )
 return;
@@ -178,7 +210,16 @@ void iommu_domain_destroy(struct domain
 {
 pirq_guest_unbind(d, i);
 kill_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(i)]);
+
+ list_for_each_safe ( dig_list, tmp,
+ &hvm_irq_dpci->mirq[i].dig_list )
+ {
+ dig = list_entry(dig_list, struct dev_intx_gsi, list);
+ list_del(&dig->list);
+ xfree(dig);
+ }
 }
+
 d->arch.hvm_domain.irq.dpci = NULL;
 xfree(hvm_irq_dpci);
 }
--- a/xen/arch/x86/hvm/vmx/vtd/utils.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/vmx/vtd/utils.c Wed Nov 07 14:48:48 2007 -0700
@@ -67,25 +67,30 @@ int vtd_hw_check(void)
 /* Disable vt-d protected memory registers. */
 void disable_pmr(struct iommu *iommu)
 {
- unsigned long start_time, status;
+ unsigned long start_time;
 unsigned int val;

 val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
+ if ( !(val & DMA_PMEN_PRS) )
+ return;
+
 dmar_writel(iommu->reg, DMAR_PMEN_REG, val & ~DMA_PMEN_EPM);
 start_time = jiffies;

 for ( ; ; )
 {
- status = dmar_readl(iommu->reg, DMAR_PMEN_REG);
- if ( (status & DMA_PMEN_PRS) == 0 )
+ val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
+ if ( (val & DMA_PMEN_PRS) == 0 )
 break;
+
 if ( time_after(jiffies, start_time + DMAR_OPERATION_TIMEOUT) )
- panic("Cannot set QIE field for queue invalidation\n");
+ panic("Disable PMRs timeout\n");
+
 cpu_relax();
 }

 dprintk(XENLOG_INFO VTDPREFIX,
- "disabled protected memory registers\n");
+ "Disabled protected memory registers\n");
 }

 #if defined(__x86_64__)
--- a/xen/arch/x86/hvm/vpt.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/hvm/vpt.c Wed Nov 07 14:48:48 2007 -0700
@@ -59,7 +59,7 @@ static void pt_process_missed_ticks(stru
 if ( mode_is(pt->vcpu->domain, no_missed_tick_accounting) )
 {
 pt->pending_intr_nr = 1;
- pt->scheduled = now + pt->scheduled;
+ pt->scheduled = now + pt->period;
 }
 else
 {
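The one-line vpt.c fix reschedules a missed periodic tick relative to the current time plus the fixed period, instead of adding the stale absolute deadline. A small sketch of the difference, not part of the changeset; field names mirror the diff and the values are made up:

```c
#include <assert.h>
#include <stdint.h>

struct periodic_time { uint64_t scheduled; uint64_t period; };

/* Fixed behaviour: next deadline = now + period. */
static void reschedule(struct periodic_time *pt, uint64_t now)
{
    pt->scheduled = now + pt->period;
}

int main(void)
{
    struct periodic_time pt = { .scheduled = 1000, .period = 10 };
    reschedule(&pt, 2000);
    assert(pt.scheduled == 2010);   /* old code (now + scheduled) gave 3000 */
    return 0;
}
```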
--- a/xen/arch/x86/mm.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/mm.c Wed Nov 07 14:48:48 2007 -0700
@@ -607,10 +607,9 @@ get_##level##_linear_pagetable(
 }


-int iomem_page_test(unsigned long mfn, struct page_info *page)
+int is_iomem_page(unsigned long mfn)
 {
- return unlikely(!mfn_valid(mfn)) ||
- unlikely(page_get_owner(page) == dom_io);
+ return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));
 }


@@ -620,19 +619,19 @@ get_page_from_l1e(
 {
 unsigned long mfn = l1e_get_pfn(l1e);
 struct page_info *page = mfn_to_page(mfn);
+ uint32_t l1f = l1e_get_flags(l1e);
 int okay;

- if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
+ if ( !(l1f & _PAGE_PRESENT) )
 return 1;

- if ( unlikely(l1e_get_flags(l1e) & l1_disallow_mask(d)) )
+ if ( unlikely(l1f & l1_disallow_mask(d)) )
 {
- MEM_LOG("Bad L1 flags %x",
- l1e_get_flags(l1e) & l1_disallow_mask(d));
+ MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(d));
 return 0;
 }

- if ( iomem_page_test(mfn, page) )
+ if ( is_iomem_page(mfn) )
 {
 /* DOMID_IO reverts to caller for privilege checks. */
 if ( d == dom_io )
@@ -657,7 +656,7 @@ get_page_from_l1e(
 * contribute to writeable mapping refcounts. (This allows the
 * qemu-dm helper process in dom0 to map the domain's memory without
 * messing up the count of "real" writable mappings.) */
- okay = (((l1e_get_flags(l1e) & _PAGE_RW) &&
+ okay = (((l1f & _PAGE_RW) &&
 !(unlikely(paging_mode_external(d) && (d != current->domain))))
 ? get_page_and_type(page, d, PGT_writable_page)
 : get_page(page, d));
@@ -668,6 +667,36 @@ get_page_from_l1e(
 mfn, get_gpfn_from_mfn(mfn),
 l1e_get_intpte(l1e), d->domain_id);
 }
+ else if ( (pte_flags_to_cacheattr(l1f) !=
+ ((page->count_info >> PGC_cacheattr_base) & 7)) &&
+ !is_iomem_page(mfn) )
+ {
+ uint32_t x, nx, y = page->count_info;
+ uint32_t cacheattr = pte_flags_to_cacheattr(l1f);
+
+ if ( is_xen_heap_frame(page) )
+ {
+ if ( (l1f & _PAGE_RW) &&
+ !(unlikely(paging_mode_external(d) &&
+ (d != current->domain))) )
+ put_page_type(page);
+ put_page(page);
+ MEM_LOG("Attempt to change cache attributes of Xen heap page");
+ return 0;
+ }
+
+ while ( ((y >> PGC_cacheattr_base) & 7) != cacheattr )
+ {
+ x = y;
+ nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
+ y = cmpxchg(&page->count_info, x, nx);
+ }
+
+#ifdef __x86_64__
+ map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+ PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
+#endif
+ }

 return okay;
 }
@@ -1828,6 +1857,24 @@ int get_page_type(struct page_info *page
 }


+void cleanup_page_cacheattr(struct page_info *page)
+{
+ uint32_t cacheattr = (page->count_info >> PGC_cacheattr_base) & 7;
+
+ if ( likely(cacheattr == 0) )
+ return;
+
+ page->count_info &= ~PGC_cacheattr_mask;
+
+ BUG_ON(is_xen_heap_frame(page));
+
+#ifdef __x86_64__
+ map_pages_to_xen((unsigned long)page_to_virt(page), page_to_mfn(page),
+ 1, PAGE_HYPERVISOR);
+#endif
+}
+
+
 int new_guest_cr3(unsigned long mfn)
 {
 struct vcpu *v = current;
@@ -3803,7 +3850,7 @@ static void __memguard_change_range(void
 {
 unsigned long _p = (unsigned long)p;
 unsigned long _l = (unsigned long)l;
- unsigned long flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
+ unsigned int flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;

 /* Ensure we are dealing with a page-aligned whole number of pages. */
 ASSERT((_p&~PAGE_MASK) == 0);
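The new cache-attribute hunk in get_page_from_l1e() updates a 3-bit field packed into `count_info` with a cmpxchg retry loop so the reference count bits are never clobbered. The sketch below reproduces that read-modify-write pattern with C11 atomics instead of Xen's cmpxchg helper; the field layout is restated from the diff and everything else is an assumption for illustration only.

```c
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

/* Stand-in layout: 3-bit cache attribute at bits 26-28, as in the diff. */
#define ATTR_BASE 26
#define ATTR_MASK (7u << ATTR_BASE)

/* Retry until our update of the attribute field lands without losing
 * concurrent changes to the other count_info bits. */
static void set_cacheattr(_Atomic uint32_t *count_info, uint32_t attr)
{
    uint32_t x = atomic_load(count_info), nx;

    do {
        nx = (x & ~ATTR_MASK) | (attr << ATTR_BASE);
    } while (!atomic_compare_exchange_weak(count_info, &x, nx));
}

int main(void)
{
    _Atomic uint32_t ci = 5;                     /* refcount 5, attribute 0 */
    set_cacheattr(&ci, 3);
    assert((ci & ~ATTR_MASK) == 5);              /* refcount untouched      */
    assert(((ci & ATTR_MASK) >> ATTR_BASE) == 3);
    return 0;
}
```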
--- a/xen/arch/x86/mm/shadow/multi.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/mm/shadow/multi.c Wed Nov 07 14:48:48 2007 -0700
@@ -716,12 +716,14 @@ static always_inline void
 goto done;
 }

- // Must have a valid target_mfn unless this is a prefetch. In the
- // case of a prefetch, an invalid mfn means that we can not usefully
- // shadow anything, and so we return early.
+ // Must have a valid target_mfn unless this is a prefetch or an l1
+ // pointing at MMIO space. In the case of a prefetch, an invalid
+ // mfn means that we can not usefully shadow anything, and so we
+ // return early.
 //
- if ( shadow_mode_refcounts(d) &&
- !mfn_valid(target_mfn) && (p2mt != p2m_mmio_direct) )
+ if ( !mfn_valid(target_mfn)
+ && !(level == 1 && (!shadow_mode_refcounts(d)
+ || p2mt == p2m_mmio_direct)) )
 {
 ASSERT((ft == ft_prefetch));
 *sp = shadow_l1e_empty();
--- a/xen/arch/x86/traps.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/arch/x86/traps.c Wed Nov 07 14:48:48 2007 -0700
@@ -1797,7 +1797,8 @@ static int emulate_privileged_op(struct

 case 4: /* Write CR4 */
 v->arch.guest_context.ctrlreg[4] = pv_guest_cr4_fixup(*reg);
- write_cr4(v->arch.guest_context.ctrlreg[4]);
+ write_cr4(pv_guest_cr4_to_real_cr4(
+ v->arch.guest_context.ctrlreg[4]));
 break;

 default:
--- a/xen/common/grant_table.c Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/common/grant_table.c Wed Nov 07 14:48:48 2007 -0700
@@ -332,7 +332,7 @@ static void
 if ( op->flags & GNTMAP_host_map )
 {
 /* Could be an iomem page for setting up permission */
- if ( iomem_page_test(frame, mfn_to_page(frame)) )
+ if ( is_iomem_page(frame) )
 {
 is_iomem = 1;
 if ( iomem_permit_access(ld, frame, frame) )
@@ -527,7 +527,7 @@ static void
 op->flags)) < 0 )
 goto unmap_out;
 }
- else if ( iomem_page_test(op->frame, mfn_to_page(op->frame)) &&
+ else if ( is_iomem_page(op->frame) &&
 iomem_access_permitted(ld, op->frame, op->frame) )
 {
 if ( (rc = iomem_deny_access(ld, op->frame, op->frame)) < 0 )
@@ -1651,7 +1651,7 @@ gnttab_release_mappings(
 BUG_ON(!(act->pin & GNTPIN_hstw_mask));
 act->pin -= GNTPIN_hstw_inc;

- if ( iomem_page_test(act->frame, mfn_to_page(act->frame)) &&
+ if ( is_iomem_page(act->frame) &&
 iomem_access_permitted(rd, act->frame, act->frame) )
 rc = iomem_deny_access(rd, act->frame, act->frame);
 else
--- a/xen/include/asm-ia64/mm.h Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/include/asm-ia64/mm.h Wed Nov 07 14:48:48 2007 -0700
@@ -185,8 +185,7 @@ static inline int get_page(struct page_i
 return 1;
 }

-/* Decide whether this page looks like iomem or real memory */
-int iomem_page_test(unsigned long mfn, struct page_info *page);
+int is_iomem_page(unsigned long mfn);

 extern void put_page_type(struct page_info *page);
 extern int get_page_type(struct page_info *page, u32 type);
--- a/xen/include/asm-x86/domain.h Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/include/asm-x86/domain.h Wed Nov 07 14:48:48 2007 -0700
@@ -350,8 +350,15 @@ struct arch_vcpu
 /* Continue the current hypercall via func(data) on specified cpu. */
 int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data);

+/* Clean up CR4 bits that are not under guest control. */
 unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4);

+/* Convert between guest-visible and real CR4 values. */
+#define pv_guest_cr4_to_real_cr4(c) \
+ ((c) | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE)))
+#define real_cr4_to_pv_guest_cr4(c) \
+ ((c) & ~(X86_CR4_PGE | X86_CR4_PSE))
+
 #endif /* __ASM_DOMAIN_H__ */

 /*
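These two macros hide PGE/PSE from PV guests and restore them from `mmu_cr4_features` before a value reaches hardware. A round-trip sketch, not part of the changeset, with a stand-in `mmu_cr4_features` value (the real one is hypervisor state):

```c
#include <assert.h>

/* Architectural CR4 bit positions: PSE = bit 4, PGE = bit 7, OSFXSR = bit 9. */
#define X86_CR4_PSE    0x0010
#define X86_CR4_PGE    0x0080
#define X86_CR4_OSFXSR 0x0200

/* Stand-in for the hypervisor's mmu_cr4_features; the value is assumed. */
static const unsigned long mmu_cr4_features =
    X86_CR4_PSE | X86_CR4_PGE | X86_CR4_OSFXSR;

#define pv_guest_cr4_to_real_cr4(c) \
    ((c) | (mmu_cr4_features & (X86_CR4_PGE | X86_CR4_PSE)))
#define real_cr4_to_pv_guest_cr4(c) \
    ((c) & ~(X86_CR4_PGE | X86_CR4_PSE))

int main(void)
{
    /* The guest-visible value never carries PGE/PSE... */
    unsigned long guest = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
    assert(guest == X86_CR4_OSFXSR);

    /* ...but they are put back before the value is written to hardware. */
    assert(pv_guest_cr4_to_real_cr4(guest) == mmu_cr4_features);
    return 0;
}
```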
--- a/xen/include/asm-x86/hvm/irq.h Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/include/asm-x86/hvm/irq.h Wed Nov 07 14:48:48 2007 -0700
@@ -30,22 +30,35 @@
 #include <asm/hvm/vioapic.h>
 #include <public/hvm/save.h>

-struct hvm_irq_dpci_mapping {
+struct dev_intx_gsi {
+ struct list_head list;
+ uint8_t device;
+ uint8_t intx;
+ uint8_t gsi;
+};
+
+struct hvm_mirq_dpci_mapping {
+ uint8_t valid;
+ int pending;
+ struct list_head dig_list;
+ struct domain *dom;
+};
+
+struct hvm_girq_dpci_mapping {
 uint8_t valid;
 uint8_t device;
 uint8_t intx;
- struct domain *dom;
- union {
- uint8_t guest_gsi;
- uint8_t machine_gsi;
- };
+ uint8_t machine_gsi;
 };

 struct hvm_irq_dpci {
+ spinlock_t dirq_lock;
 /* Machine IRQ to guest device/intx mapping. */
- struct hvm_irq_dpci_mapping mirq[NR_IRQS];
+ struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
 /* Guest IRQ to guest device/intx mapping. */
- struct hvm_irq_dpci_mapping girq[NR_IRQS];
+ struct hvm_girq_dpci_mapping girq[NR_IRQS];
+ /* Link to guest device/intx mapping. */
+ struct hvm_girq_dpci_mapping link[4];
 DECLARE_BITMAP(dirq_mask, NR_IRQS);
 struct timer hvm_timer[NR_IRQS];
 };
--- a/xen/include/asm-x86/mm.h Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/include/asm-x86/mm.h Wed Nov 07 14:48:48 2007 -0700
@@ -84,25 +84,23 @@ struct page_info
 #define _PGT_pae_xen_l2 26
 #define PGT_pae_xen_l2 (1U<<_PGT_pae_xen_l2)

- /* 16-bit count of uses of this frame as its current type. */
-#define PGT_count_mask ((1U<<16)-1)
+ /* 26-bit count of uses of this frame as its current type. */
+#define PGT_count_mask ((1U<<26)-1)

 /* Cleared when the owning guest 'frees' this page. */
 #define _PGC_allocated 31
 #define PGC_allocated (1U<<_PGC_allocated)
 /* Set on a *guest* page to mark it out-of-sync with its shadow */
-#define _PGC_out_of_sync 30
+#define _PGC_out_of_sync 30
 #define PGC_out_of_sync (1U<<_PGC_out_of_sync)
 /* Set when is using a page as a page table */
-#define _PGC_page_table 29
+#define _PGC_page_table 29
 #define PGC_page_table (1U<<_PGC_page_table)
- /* 29-bit count of references to this frame. */
-#define PGC_count_mask ((1U<<29)-1)
-
-/* We trust the slab allocator in slab.c, and our use of it. */
-#define PageSlab(page) (1)
-#define PageSetSlab(page) ((void)0)
-#define PageClearSlab(page) ((void)0)
+ /* 3-bit PAT/PCD/PWT cache-attribute hint. */
+#define PGC_cacheattr_base 26
+#define PGC_cacheattr_mask (7U<<PGC_cacheattr_base)
+ /* 26-bit count of references to this frame. */
+#define PGC_count_mask ((1U<<26)-1)

 #define is_xen_heap_frame(pfn) ({ \
 paddr_t maddr = page_to_maddr(pfn); \
@@ -147,6 +145,8 @@ void init_frametable(void);
 void free_page_type(struct page_info *page, unsigned long type);
 int _shadow_mode_refcounts(struct domain *d);

+void cleanup_page_cacheattr(struct page_info *page);
+
 static inline void put_page(struct page_info *page)
 {
 u32 nx, x, y = page->count_info;
@@ -158,7 +158,10 @@ static inline void put_page(struct page_
 while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );

 if ( unlikely((nx & PGC_count_mask) == 0) )
+ {
+ cleanup_page_cacheattr(page);
 free_domheap_page(page);
+ }
 }


@@ -196,8 +199,7 @@ static inline int get_page(struct page_i
 return 1;
 }

-/* Decide whether this page looks like iomem or real memory */
-int iomem_page_test(unsigned long mfn, struct page_info *page);
+int is_iomem_page(unsigned long mfn);

 void put_page_type(struct page_info *page);
 int get_page_type(struct page_info *page, unsigned long type);
--- a/xen/include/asm-x86/page.h Wed Nov 07 11:01:23 2007 -0700
+++ b/xen/include/asm-x86/page.h Wed Nov 07 14:48:48 2007 -0700
@@ -360,6 +360,16 @@ int map_pages_to_xen(
 unsigned int flags);
 void destroy_xen_mappings(unsigned long v, unsigned long e);

+/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
+static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
+{
+ return ((flags >> 5) & 4) | ((flags >> 3) & 3);
+}
+static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
+{
+ return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
+}
+
 #endif /* !__ASSEMBLY__ */

 #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
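The two helpers added to page.h pack the PTE cache-control bits (PWT = bit 3, PCD = bit 4, PAT = bit 7) into a 3-bit attribute and back. A quick self-contained check of the mapping, not part of the changeset; the bit positions are the standard x86 ones and are assumed here:

```c
#include <assert.h>
#include <stdint.h>

/* Same conversions as in the hunk above. */
static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
{
    return ((flags >> 5) & 4) | ((flags >> 3) & 3);
}
static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
{
    return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
}

int main(void)
{
    /* Every 3-bit cache attribute must survive a round trip. */
    for (uint32_t attr = 0; attr < 8; attr++)
        assert(pte_flags_to_cacheattr(cacheattr_to_pte_flags(attr)) == attr);

    /* Attribute 3 (PCD|PWT) corresponds to PTE flag bits 3 and 4, i.e. 0x18. */
    assert(cacheattr_to_pte_flags(3) == 0x18);
    return 0;
}
```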