xen-vtx-unstable
changeset 6355:d760699356fd
merge?
line diff
1.1 --- a/.hgignore Tue Aug 23 17:32:44 2005 +0000 1.2 +++ b/.hgignore Tue Aug 23 17:33:11 2005 +0000 1.3 @@ -147,6 +147,7 @@ 1.4 ^tools/xcs/xcsdump$ 1.5 ^tools/xcutils/xc_restore$ 1.6 ^tools/xcutils/xc_save$ 1.7 +^tools/xenstat/xentop/xentop$ 1.8 ^tools/xenstore/testsuite/tmp/.*$ 1.9 ^tools/xenstore/xen$ 1.10 ^tools/xenstore/xenstored$
11.1 --- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 Tue Aug 23 17:32:44 2005 +0000 11.2 +++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 Tue Aug 23 17:33:11 2005 +0000 11.3 @@ -807,7 +807,107 @@ CONFIG_DUMMY_CONSOLE=y 11.4 # 11.5 CONFIG_USB_ARCH_HAS_HCD=y 11.6 CONFIG_USB_ARCH_HAS_OHCI=y 11.7 -# CONFIG_USB is not set 11.8 +CONFIG_USB=y 11.9 +# CONFIG_USB_DEBUG is not set 11.10 + 11.11 +# 11.12 +# Miscellaneous USB options 11.13 +# 11.14 +# CONFIG_USB_DEVICEFS is not set 11.15 +# CONFIG_USB_BANDWIDTH is not set 11.16 +# CONFIG_USB_DYNAMIC_MINORS is not set 11.17 +# CONFIG_USB_OTG is not set 11.18 + 11.19 +# 11.20 +# USB Host Controller Drivers 11.21 +# 11.22 +# CONFIG_USB_EHCI_HCD is not set 11.23 +CONFIG_USB_OHCI_HCD=y 11.24 +# CONFIG_USB_OHCI_BIG_ENDIAN is not set 11.25 +CONFIG_USB_OHCI_LITTLE_ENDIAN=y 11.26 +CONFIG_USB_UHCI_HCD=y 11.27 +# CONFIG_USB_SL811_HCD is not set 11.28 + 11.29 +# 11.30 +# USB Device Class drivers 11.31 +# 11.32 +# CONFIG_USB_BLUETOOTH_TTY is not set 11.33 +# CONFIG_USB_ACM is not set 11.34 +# CONFIG_USB_PRINTER is not set 11.35 + 11.36 +# 11.37 +# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information 11.38 +# 11.39 +# CONFIG_USB_STORAGE is not set 11.40 + 11.41 +# 11.42 +# USB Input Devices 11.43 +# 11.44 +CONFIG_USB_HID=y 11.45 +CONFIG_USB_HIDINPUT=y 11.46 +# CONFIG_HID_FF is not set 11.47 +# CONFIG_USB_HIDDEV is not set 11.48 +# CONFIG_USB_AIPTEK is not set 11.49 +# CONFIG_USB_WACOM is not set 11.50 +# CONFIG_USB_KBTAB is not set 11.51 +# CONFIG_USB_POWERMATE is not set 11.52 +# CONFIG_USB_MTOUCH is not set 11.53 +# CONFIG_USB_EGALAX is not set 11.54 +# CONFIG_USB_XPAD is not set 11.55 +# CONFIG_USB_ATI_REMOTE is not set 11.56 + 11.57 +# 11.58 +# USB Imaging devices 11.59 +# 11.60 +# CONFIG_USB_MDC800 is not set 11.61 +# CONFIG_USB_MICROTEK is not set 11.62 + 11.63 +# 11.64 +# USB Multimedia devices 11.65 +# 11.66 +# CONFIG_USB_DABUSB is 
not set 11.67 + 11.68 +# 11.69 +# Video4Linux support is needed for USB Multimedia device support 11.70 +# 11.71 + 11.72 +# 11.73 +# USB Network Adapters 11.74 +# 11.75 +# CONFIG_USB_CATC is not set 11.76 +# CONFIG_USB_KAWETH is not set 11.77 +# CONFIG_USB_PEGASUS is not set 11.78 +# CONFIG_USB_RTL8150 is not set 11.79 +# CONFIG_USB_USBNET is not set 11.80 +CONFIG_USB_MON=y 11.81 + 11.82 +# 11.83 +# USB port drivers 11.84 +# 11.85 + 11.86 +# 11.87 +# USB Serial Converter support 11.88 +# 11.89 +# CONFIG_USB_SERIAL is not set 11.90 + 11.91 +# 11.92 +# USB Miscellaneous drivers 11.93 +# 11.94 +# CONFIG_USB_EMI62 is not set 11.95 +# CONFIG_USB_EMI26 is not set 11.96 +# CONFIG_USB_AUERSWALD is not set 11.97 +# CONFIG_USB_RIO500 is not set 11.98 +# CONFIG_USB_LEGOTOWER is not set 11.99 +# CONFIG_USB_LCD is not set 11.100 +# CONFIG_USB_LED is not set 11.101 +# CONFIG_USB_CYTHERM is not set 11.102 +# CONFIG_USB_PHIDGETKIT is not set 11.103 +# CONFIG_USB_PHIDGETSERVO is not set 11.104 +# CONFIG_USB_IDMOUSE is not set 11.105 + 11.106 +# 11.107 +# USB ATM/DSL drivers 11.108 +# 11.109 11.110 # 11.111 # USB Gadget Support
26.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Tue Aug 23 17:32:44 2005 +0000 26.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Tue Aug 23 17:33:11 2005 +0000 26.3 @@ -1575,19 +1575,20 @@ void __init setup_arch(char **cmdline_p) 26.4 /* Make sure we have a correctly sized P->M table. */ 26.5 if (max_pfn != xen_start_info.nr_pages) { 26.6 phys_to_machine_mapping = alloc_bootmem_low_pages( 26.7 - max_pfn * sizeof(unsigned long)); 26.8 + max_pfn * sizeof(unsigned int)); 26.9 26.10 if (max_pfn > xen_start_info.nr_pages) { 26.11 /* set to INVALID_P2M_ENTRY */ 26.12 memset(phys_to_machine_mapping, ~0, 26.13 - max_pfn * sizeof(unsigned long)); 26.14 + max_pfn * sizeof(unsigned int)); 26.15 memcpy(phys_to_machine_mapping, 26.16 - (unsigned long *)xen_start_info.mfn_list, 26.17 - xen_start_info.nr_pages * sizeof(unsigned long)); 26.18 + (unsigned int *)xen_start_info.mfn_list, 26.19 + xen_start_info.nr_pages * sizeof(unsigned int)); 26.20 } else { 26.21 memcpy(phys_to_machine_mapping, 26.22 - (unsigned long *)xen_start_info.mfn_list, 26.23 - max_pfn * sizeof(unsigned long)); 26.24 + (unsigned int *)xen_start_info.mfn_list, 26.25 + max_pfn * sizeof(unsigned int)); 26.26 + /* N.B. below relies on sizeof(int) == sizeof(long). */ 26.27 if (HYPERVISOR_dom_mem_op( 26.28 MEMOP_decrease_reservation, 26.29 (unsigned long *)xen_start_info.mfn_list + max_pfn, 26.30 @@ -1597,11 +1598,11 @@ void __init setup_arch(char **cmdline_p) 26.31 free_bootmem( 26.32 __pa(xen_start_info.mfn_list), 26.33 PFN_PHYS(PFN_UP(xen_start_info.nr_pages * 26.34 - sizeof(unsigned long)))); 26.35 + sizeof(unsigned int)))); 26.36 } 26.37 26.38 pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE); 26.39 - for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ ) 26.40 + for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned int)), j++ ) 26.41 { 26.42 pfn_to_mfn_frame_list[j] = 26.43 virt_to_mfn(&phys_to_machine_mapping[i]);
30.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c Tue Aug 23 17:32:44 2005 +0000 30.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/fault.c Tue Aug 23 17:33:11 2005 +0000 30.3 @@ -281,7 +281,7 @@ fastcall void do_page_fault(struct pt_re 30.4 siginfo_t info; 30.5 30.6 /* Set the "privileged fault" bit to something sane. */ 30.7 - error_code &= 3; 30.8 + error_code &= ~4; 30.9 error_code |= (regs->xcs & 2) << 1; 30.10 if (regs->eflags & X86_EFLAGS_VM) 30.11 error_code |= 4;
33.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c Tue Aug 23 17:32:44 2005 +0000 33.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c Tue Aug 23 17:33:11 2005 +0000 33.3 @@ -348,9 +348,12 @@ static void __init pagetable_init (void) 33.4 { 33.5 unsigned long vaddr; 33.6 pgd_t *pgd_base = (pgd_t *)xen_start_info.pt_base; 33.7 + int i; 33.8 33.9 swapper_pg_dir = pgd_base; 33.10 init_mm.pgd = pgd_base; 33.11 + for (i = 0; i < NR_CPUS; i++) 33.12 + per_cpu(cur_pgd, i) = pgd_base; 33.13 33.14 /* Enable PSE if available */ 33.15 if (cpu_has_pse) {
34.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c Tue Aug 23 17:32:44 2005 +0000 34.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c Tue Aug 23 17:33:11 2005 +0000 34.3 @@ -36,6 +36,8 @@ void iounmap(volatile void __iomem *addr 34.4 { 34.5 } 34.6 34.7 +#ifdef __i386__ 34.8 + 34.9 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) 34.10 { 34.11 return NULL; 34.12 @@ -45,6 +47,8 @@ void __init bt_iounmap(void *addr, unsig 34.13 { 34.14 } 34.15 34.16 +#endif /* __i386__ */ 34.17 + 34.18 #else 34.19 34.20 /* 34.21 @@ -58,7 +62,7 @@ static inline int is_local_lowmem(unsign 34.22 extern unsigned long max_low_pfn; 34.23 unsigned long mfn = address >> PAGE_SHIFT; 34.24 unsigned long pfn = mfn_to_pfn(mfn); 34.25 - return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn)); 34.26 + return ((pfn < max_low_pfn) && (phys_to_machine_mapping[pfn] == mfn)); 34.27 } 34.28 34.29 /* 34.30 @@ -126,10 +130,12 @@ void __iomem * __ioremap(unsigned long p 34.31 return NULL; 34.32 area->phys_addr = phys_addr; 34.33 addr = (void __iomem *) area->addr; 34.34 + flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED; 34.35 +#ifdef __x86_64__ 34.36 + flags |= _PAGE_USER; 34.37 +#endif 34.38 if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr, 34.39 - size, __pgprot(_PAGE_PRESENT | _PAGE_RW | 34.40 - _PAGE_DIRTY | _PAGE_ACCESSED 34.41 - | flags), domid)) { 34.42 + size, __pgprot(flags), domid)) { 34.43 vunmap((void __force *) addr); 34.44 return NULL; 34.45 } 34.46 @@ -218,6 +224,8 @@ void iounmap(volatile void __iomem *addr 34.47 kfree(p); 34.48 } 34.49 34.50 +#ifdef __i386__ 34.51 + 34.52 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) 34.53 { 34.54 unsigned long offset, last_addr; 34.55 @@ -289,6 +297,8 @@ void __init bt_iounmap(void *addr, unsig 34.56 } 34.57 } 34.58 34.59 +#endif /* __i386__ */ 34.60 + 34.61 #endif /* CONFIG_XEN_PHYSDEV_ACCESS */ 34.62 34.63 /* These hacky macros avoid phys->machine 
translations. */ 34.64 @@ -346,7 +356,7 @@ int direct_remap_area_pages(struct mm_st 34.65 * Fill in the machine address: PTE ptr is done later by 34.66 * __direct_remap_area_pages(). 34.67 */ 34.68 - v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot); 34.69 + v->val = pte_val_ma(pfn_pte_ma(machine_addr >> PAGE_SHIFT, prot)); 34.70 34.71 machine_addr += PAGE_SIZE; 34.72 address += PAGE_SIZE;
44.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile Tue Aug 23 17:32:44 2005 +0000 44.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/Makefile Tue Aug 23 17:33:11 2005 +0000 44.3 @@ -44,7 +44,7 @@ obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o 44.4 44.5 c-obj-$(CONFIG_MODULES) += module.o 44.6 44.7 -#obj-y += topology.o 44.8 +obj-y += topology.o 44.9 c-obj-y += intel_cacheinfo.o 44.10 44.11 bootflag-y += ../../../i386/kernel/bootflag.o
50.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c Tue Aug 23 17:32:44 2005 +0000 50.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c Tue Aug 23 17:33:11 2005 +0000 50.3 @@ -778,21 +778,21 @@ void __init setup_arch(char **cmdline_p) 50.4 /* Make sure we have a large enough P->M table. */ 50.5 if (end_pfn > xen_start_info.nr_pages) { 50.6 phys_to_machine_mapping = alloc_bootmem( 50.7 - max_pfn * sizeof(unsigned long)); 50.8 + max_pfn * sizeof(u32)); 50.9 memset(phys_to_machine_mapping, ~0, 50.10 - max_pfn * sizeof(unsigned long)); 50.11 + max_pfn * sizeof(u32)); 50.12 memcpy(phys_to_machine_mapping, 50.13 - (unsigned long *)xen_start_info.mfn_list, 50.14 - xen_start_info.nr_pages * sizeof(unsigned long)); 50.15 + (u32 *)xen_start_info.mfn_list, 50.16 + xen_start_info.nr_pages * sizeof(u32)); 50.17 free_bootmem( 50.18 __pa(xen_start_info.mfn_list), 50.19 PFN_PHYS(PFN_UP(xen_start_info.nr_pages * 50.20 - sizeof(unsigned long)))); 50.21 + sizeof(u32)))); 50.22 } 50.23 50.24 pfn_to_mfn_frame_list = alloc_bootmem(PAGE_SIZE); 50.25 50.26 - for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ ) 50.27 + for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(u32)), j++ ) 50.28 { 50.29 pfn_to_mfn_frame_list[j] = 50.30 virt_to_mfn(&phys_to_machine_mapping[i]);
54.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile Tue Aug 23 17:32:44 2005 +0000 54.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/Makefile Tue Aug 23 17:33:11 2005 +0000 54.3 @@ -6,10 +6,10 @@ XENARCH := $(subst ",,$(CONFIG_XENARCH)) 54.4 54.5 CFLAGS += -Iarch/$(XENARCH)/mm 54.6 54.7 -obj-y := init.o fault.o ioremap.o pageattr.o 54.8 +obj-y := init.o fault.o pageattr.o 54.9 c-obj-y := extable.o 54.10 54.11 -i386-obj-y := hypervisor.o 54.12 +i386-obj-y := hypervisor.o ioremap.o 54.13 54.14 #obj-y := init.o fault.o ioremap.o extable.o pageattr.o 54.15 #c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
56.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c Tue Aug 23 17:32:44 2005 +0000 56.2 +++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c Tue Aug 23 17:33:11 2005 +0000 56.3 @@ -559,6 +559,11 @@ static void xen_copy_pt(void) 56.4 56.5 void __init xen_init_pt(void) 56.6 { 56.7 + int i; 56.8 + 56.9 + for (i = 0; i < NR_CPUS; i++) 56.10 + per_cpu(cur_pgd, i) = init_mm.pgd; 56.11 + 56.12 memcpy((void *)init_level4_pgt, 56.13 (void *)xen_start_info.pt_base, PAGE_SIZE); 56.14
57.1 --- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/ioremap.c Tue Aug 23 17:32:44 2005 +0000 57.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 57.3 @@ -1,499 +0,0 @@ 57.4 -/* 57.5 - * arch/x86_64/mm/ioremap.c 57.6 - * 57.7 - * Re-map IO memory to kernel address space so that we can access it. 57.8 - * This is needed for high PCI addresses that aren't mapped in the 57.9 - * 640k-1MB IO memory area on PC's 57.10 - * 57.11 - * (C) Copyright 1995 1996 Linus Torvalds 57.12 - */ 57.13 - 57.14 -#include <linux/vmalloc.h> 57.15 -#include <linux/init.h> 57.16 -#include <linux/slab.h> 57.17 -#include <linux/module.h> 57.18 -#include <asm/io.h> 57.19 -#include <asm/fixmap.h> 57.20 -#include <asm/cacheflush.h> 57.21 -#include <asm/tlbflush.h> 57.22 -#include <asm/pgtable.h> 57.23 -#include <asm/pgalloc.h> 57.24 - 57.25 -/* 57.26 - * Reuse arch/xen/i396/mm/ioremap.c. Need to merge later 57.27 - */ 57.28 -#ifndef CONFIG_XEN_PHYSDEV_ACCESS 57.29 - 57.30 -void * __ioremap(unsigned long phys_addr, unsigned long size, 57.31 - unsigned long flags) 57.32 -{ 57.33 - return NULL; 57.34 -} 57.35 - 57.36 -void *ioremap_nocache (unsigned long phys_addr, unsigned long size) 57.37 -{ 57.38 - return NULL; 57.39 -} 57.40 - 57.41 -void iounmap(volatile void __iomem *addr) 57.42 -{ 57.43 -} 57.44 - 57.45 -void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) 57.46 -{ 57.47 - return NULL; 57.48 -} 57.49 - 57.50 -void __init bt_iounmap(void *addr, unsigned long size) 57.51 -{ 57.52 -} 57.53 - 57.54 -#else 57.55 - 57.56 -#if defined(__i386__) 57.57 -/* 57.58 - * Does @address reside within a non-highmem page that is local to this virtual 57.59 - * machine (i.e., not an I/O page, nor a memory page belonging to another VM). 57.60 - * See the comment that accompanies pte_pfn() in pgtable-2level.h to understand 57.61 - * why this works. 
57.62 - */ 57.63 -static inline int is_local_lowmem(unsigned long address) 57.64 -{ 57.65 - extern unsigned long max_low_pfn; 57.66 - unsigned long mfn = address >> PAGE_SHIFT; 57.67 - unsigned long pfn = mfn_to_pfn(mfn); 57.68 - return ((pfn < max_low_pfn) && (pfn_to_mfn(pfn) == mfn)); 57.69 -} 57.70 -#elif defined(__x86_64__) 57.71 -/* 57.72 - * 57.73 - */ 57.74 -static inline int is_local_lowmem(unsigned long address) 57.75 -{ 57.76 - return 0; 57.77 -} 57.78 -#endif 57.79 - 57.80 -/* 57.81 - * Generic mapping function (not visible outside): 57.82 - */ 57.83 - 57.84 -/* 57.85 - * Remap an arbitrary physical address space into the kernel virtual 57.86 - * address space. Needed when the kernel wants to access high addresses 57.87 - * directly. 57.88 - * 57.89 - * NOTE! We need to allow non-page-aligned mappings too: we will obviously 57.90 - * have to convert them into an offset in a page-aligned mapping, but the 57.91 - * caller shouldn't need to know that small detail. 57.92 - */ 57.93 -void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) 57.94 -{ 57.95 - void __iomem * addr; 57.96 - struct vm_struct * area; 57.97 - unsigned long offset, last_addr; 57.98 - domid_t domid = DOMID_IO; 57.99 - 57.100 - /* Don't allow wraparound or zero size */ 57.101 - last_addr = phys_addr + size - 1; 57.102 - if (!size || last_addr < phys_addr) 57.103 - return NULL; 57.104 - 57.105 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST 57.106 - /* 57.107 - * Don't remap the low PCI/ISA area, it's always mapped.. 57.108 - */ 57.109 - if (phys_addr >= 0x0 && last_addr < 0x100000) 57.110 - return isa_bus_to_virt(phys_addr); 57.111 -#endif 57.112 - 57.113 - /* 57.114 - * Don't allow anybody to remap normal RAM that we're using.. 
57.115 - */ 57.116 - if (is_local_lowmem(phys_addr)) { 57.117 - char *t_addr, *t_end; 57.118 - struct page *page; 57.119 - 57.120 - t_addr = bus_to_virt(phys_addr); 57.121 - t_end = t_addr + (size - 1); 57.122 - 57.123 - for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) 57.124 - if(!PageReserved(page)) 57.125 - return NULL; 57.126 - 57.127 - domid = DOMID_LOCAL; 57.128 - } 57.129 - 57.130 - /* 57.131 - * Mappings have to be page-aligned 57.132 - */ 57.133 - offset = phys_addr & ~PAGE_MASK; 57.134 - phys_addr &= PAGE_MASK; 57.135 - size = PAGE_ALIGN(last_addr+1) - phys_addr; 57.136 - 57.137 - /* 57.138 - * Ok, go for it.. 57.139 - */ 57.140 - area = get_vm_area(size, VM_IOREMAP | (flags << 20)); 57.141 - if (!area) 57.142 - return NULL; 57.143 - area->phys_addr = phys_addr; 57.144 - addr = (void __iomem *) area->addr; 57.145 - if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr, 57.146 - size, __pgprot(_PAGE_PRESENT | _PAGE_RW | 57.147 - _PAGE_DIRTY | _PAGE_ACCESSED 57.148 -#if defined(__x86_64__) 57.149 - | _PAGE_USER 57.150 -#endif 57.151 - | flags), domid)) { 57.152 - vunmap((void __force *) addr); 57.153 - return NULL; 57.154 - } 57.155 - return (void __iomem *) (offset + (char __iomem *)addr); 57.156 -} 57.157 - 57.158 - 57.159 -/** 57.160 - * ioremap_nocache - map bus memory into CPU space 57.161 - * @offset: bus address of the memory 57.162 - * @size: size of the resource to map 57.163 - * 57.164 - * ioremap_nocache performs a platform specific sequence of operations to 57.165 - * make bus memory CPU accessible via the readb/readw/readl/writeb/ 57.166 - * writew/writel functions and the other mmio helpers. The returned 57.167 - * address is not guaranteed to be usable directly as a virtual 57.168 - * address. 57.169 - * 57.170 - * This version of ioremap ensures that the memory is marked uncachable 57.171 - * on the CPU as well as honouring existing caching rules from things like 57.172 - * the PCI bus. 
Note that there are other caches and buffers on many 57.173 - * busses. In particular driver authors should read up on PCI writes 57.174 - * 57.175 - * It's useful if some control registers are in such an area and 57.176 - * write combining or read caching is not desirable: 57.177 - * 57.178 - * Must be freed with iounmap. 57.179 - */ 57.180 - 57.181 -void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size) 57.182 -{ 57.183 - unsigned long last_addr; 57.184 - void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD); 57.185 - if (!p) 57.186 - return p; 57.187 - 57.188 - /* Guaranteed to be > phys_addr, as per __ioremap() */ 57.189 - last_addr = phys_addr + size - 1; 57.190 - 57.191 - if (is_local_lowmem(last_addr)) { 57.192 - struct page *ppage = virt_to_page(bus_to_virt(phys_addr)); 57.193 - unsigned long npages; 57.194 - 57.195 - phys_addr &= PAGE_MASK; 57.196 - 57.197 - /* This might overflow and become zero.. */ 57.198 - last_addr = PAGE_ALIGN(last_addr); 57.199 - 57.200 - /* .. but that's ok, because modulo-2**n arithmetic will make 57.201 - * the page-aligned "last - first" come out right. 
57.202 - */ 57.203 - npages = (last_addr - phys_addr) >> PAGE_SHIFT; 57.204 - 57.205 - if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 57.206 - iounmap(p); 57.207 - p = NULL; 57.208 - } 57.209 - global_flush_tlb(); 57.210 - } 57.211 - 57.212 - return p; 57.213 -} 57.214 - 57.215 -void iounmap(volatile void __iomem *addr) 57.216 -{ 57.217 - struct vm_struct *p; 57.218 - if ((void __force *) addr <= high_memory) 57.219 - return; 57.220 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST 57.221 - if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN)) 57.222 - return; 57.223 -#endif 57.224 - p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr)); 57.225 - if (!p) { 57.226 - printk("__iounmap: bad address %p\n", addr); 57.227 - return; 57.228 - } 57.229 - 57.230 - if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) { 57.231 - /* p->size includes the guard page, but cpa doesn't like that */ 57.232 - change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)), 57.233 - (p->size - PAGE_SIZE) >> PAGE_SHIFT, 57.234 - PAGE_KERNEL); 57.235 - global_flush_tlb(); 57.236 - } 57.237 - kfree(p); 57.238 -} 57.239 - 57.240 -#if defined(__i386__) 57.241 -void __init *bt_ioremap(unsigned long phys_addr, unsigned long size) 57.242 -{ 57.243 - unsigned long offset, last_addr; 57.244 - unsigned int nrpages; 57.245 - enum fixed_addresses idx; 57.246 - 57.247 - /* Don't allow wraparound or zero size */ 57.248 - last_addr = phys_addr + size - 1; 57.249 - if (!size || last_addr < phys_addr) 57.250 - return NULL; 57.251 - 57.252 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST 57.253 - /* 57.254 - * Don't remap the low PCI/ISA area, it's always mapped.. 
57.255 - */ 57.256 - if (phys_addr >= 0x0 && last_addr < 0x100000) 57.257 - return isa_bus_to_virt(phys_addr); 57.258 -#endif 57.259 - 57.260 - /* 57.261 - * Mappings have to be page-aligned 57.262 - */ 57.263 - offset = phys_addr & ~PAGE_MASK; 57.264 - phys_addr &= PAGE_MASK; 57.265 - size = PAGE_ALIGN(last_addr) - phys_addr; 57.266 - 57.267 - /* 57.268 - * Mappings have to fit in the FIX_BTMAP area. 57.269 - */ 57.270 - nrpages = size >> PAGE_SHIFT; 57.271 - if (nrpages > NR_FIX_BTMAPS) 57.272 - return NULL; 57.273 - 57.274 - /* 57.275 - * Ok, go for it.. 57.276 - */ 57.277 - idx = FIX_BTMAP_BEGIN; 57.278 - while (nrpages > 0) { 57.279 - set_fixmap(idx, phys_addr); 57.280 - phys_addr += PAGE_SIZE; 57.281 - --idx; 57.282 - --nrpages; 57.283 - } 57.284 - return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN)); 57.285 -} 57.286 - 57.287 -void __init bt_iounmap(void *addr, unsigned long size) 57.288 -{ 57.289 - unsigned long virt_addr; 57.290 - unsigned long offset; 57.291 - unsigned int nrpages; 57.292 - enum fixed_addresses idx; 57.293 - 57.294 - virt_addr = (unsigned long)addr; 57.295 - if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) 57.296 - return; 57.297 -#ifdef CONFIG_XEN_PRIVILEGED_GUEST 57.298 - if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN)) 57.299 - return; 57.300 -#endif 57.301 - offset = virt_addr & ~PAGE_MASK; 57.302 - nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT; 57.303 - 57.304 - idx = FIX_BTMAP_BEGIN; 57.305 - while (nrpages > 0) { 57.306 - clear_fixmap(idx); 57.307 - --idx; 57.308 - --nrpages; 57.309 - } 57.310 -} 57.311 -#endif /* defined(__i386__) */ 57.312 - 57.313 -#endif /* CONFIG_XEN_PHYSDEV_ACCESS */ 57.314 - 57.315 -/* These hacky macros avoid phys->machine translations. 
*/ 57.316 -#define __direct_pte(x) ((pte_t) { (x) } ) 57.317 -#define __direct_mk_pte(page_nr,pgprot) \ 57.318 - __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot)) 57.319 -#define direct_mk_pte_phys(physpage, pgprot) \ 57.320 - __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot) 57.321 - 57.322 -static inline void direct_remap_area_pte(pte_t *pte, 57.323 - unsigned long address, 57.324 - unsigned long size, 57.325 - mmu_update_t **v) 57.326 -{ 57.327 - unsigned long end; 57.328 - 57.329 - address &= ~PMD_MASK; 57.330 - end = address + size; 57.331 - if (end > PMD_SIZE) 57.332 - end = PMD_SIZE; 57.333 - if (address >= end) 57.334 - BUG(); 57.335 - 57.336 - do { 57.337 - (*v)->ptr = virt_to_machine(pte); 57.338 - (*v)++; 57.339 - address += PAGE_SIZE; 57.340 - pte++; 57.341 - } while (address && (address < end)); 57.342 -} 57.343 - 57.344 -static inline int direct_remap_area_pmd(struct mm_struct *mm, 57.345 - pmd_t *pmd, 57.346 - unsigned long address, 57.347 - unsigned long size, 57.348 - mmu_update_t **v) 57.349 -{ 57.350 - unsigned long end; 57.351 - 57.352 - address &= ~PGDIR_MASK; 57.353 - end = address + size; 57.354 - if (end > PGDIR_SIZE) 57.355 - end = PGDIR_SIZE; 57.356 - if (address >= end) 57.357 - BUG(); 57.358 - do { 57.359 - pte_t *pte = (mm == &init_mm) ? 
57.360 - pte_alloc_kernel(mm, pmd, address) : 57.361 - pte_alloc_map(mm, pmd, address); 57.362 - if (!pte) 57.363 - return -ENOMEM; 57.364 - direct_remap_area_pte(pte, address, end - address, v); 57.365 - pte_unmap(pte); 57.366 - address = (address + PMD_SIZE) & PMD_MASK; 57.367 - pmd++; 57.368 - } while (address && (address < end)); 57.369 - return 0; 57.370 -} 57.371 - 57.372 -int __direct_remap_area_pages(struct mm_struct *mm, 57.373 - unsigned long address, 57.374 - unsigned long size, 57.375 - mmu_update_t *v) 57.376 -{ 57.377 - pgd_t * dir; 57.378 - unsigned long end = address + size; 57.379 - int error; 57.380 - 57.381 -#if defined(__i386__) 57.382 - dir = pgd_offset(mm, address); 57.383 -#elif defined (__x86_64) 57.384 - dir = (mm == &init_mm) ? 57.385 - pgd_offset_k(address): 57.386 - pgd_offset(mm, address); 57.387 -#endif 57.388 - if (address >= end) 57.389 - BUG(); 57.390 - spin_lock(&mm->page_table_lock); 57.391 - do { 57.392 - pud_t *pud; 57.393 - pmd_t *pmd; 57.394 - 57.395 - error = -ENOMEM; 57.396 - pud = pud_alloc(mm, dir, address); 57.397 - if (!pud) 57.398 - break; 57.399 - pmd = pmd_alloc(mm, pud, address); 57.400 - if (!pmd) 57.401 - break; 57.402 - error = 0; 57.403 - direct_remap_area_pmd(mm, pmd, address, end - address, &v); 57.404 - address = (address + PGDIR_SIZE) & PGDIR_MASK; 57.405 - dir++; 57.406 - 57.407 - } while (address && (address < end)); 57.408 - spin_unlock(&mm->page_table_lock); 57.409 - return error; 57.410 -} 57.411 - 57.412 - 57.413 -int direct_remap_area_pages(struct mm_struct *mm, 57.414 - unsigned long address, 57.415 - unsigned long machine_addr, 57.416 - unsigned long size, 57.417 - pgprot_t prot, 57.418 - domid_t domid) 57.419 -{ 57.420 - int i; 57.421 - unsigned long start_address; 57.422 -#define MAX_DIRECTMAP_MMU_QUEUE 130 57.423 - mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u; 57.424 - 57.425 - start_address = address; 57.426 - 57.427 - flush_cache_all(); 57.428 - 57.429 - for (i = 0; i < size; i += PAGE_SIZE) 
{ 57.430 - if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) { 57.431 - /* Fill in the PTE pointers. */ 57.432 - __direct_remap_area_pages(mm, 57.433 - start_address, 57.434 - address-start_address, 57.435 - u); 57.436 - 57.437 - if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0) 57.438 - return -EFAULT; 57.439 - v = u; 57.440 - start_address = address; 57.441 - } 57.442 - 57.443 - /* 57.444 - * Fill in the machine address: PTE ptr is done later by 57.445 - * __direct_remap_area_pages(). 57.446 - */ 57.447 - v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot); 57.448 - 57.449 - machine_addr += PAGE_SIZE; 57.450 - address += PAGE_SIZE; 57.451 - v++; 57.452 - } 57.453 - 57.454 - if (v != u) { 57.455 - /* get the ptep's filled in */ 57.456 - __direct_remap_area_pages(mm, 57.457 - start_address, 57.458 - address-start_address, 57.459 - u); 57.460 - if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)) 57.461 - return -EFAULT; 57.462 - } 57.463 - 57.464 - flush_tlb_all(); 57.465 - 57.466 - return 0; 57.467 -} 57.468 - 57.469 -EXPORT_SYMBOL(direct_remap_area_pages); 57.470 - 57.471 -static int lookup_pte_fn( 57.472 - pte_t *pte, struct page *pte_page, unsigned long addr, void *data) 57.473 -{ 57.474 - unsigned long *ptep = (unsigned long *)data; 57.475 - if (ptep) *ptep = (pfn_to_mfn(page_to_pfn(pte_page)) << PAGE_SHIFT) 57.476 - | ((unsigned long)pte & ~PAGE_MASK); 57.477 - return 0; 57.478 -} 57.479 - 57.480 -int create_lookup_pte_addr(struct mm_struct *mm, 57.481 - unsigned long address, 57.482 - unsigned long *ptep) 57.483 -{ 57.484 - return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep); 57.485 -} 57.486 - 57.487 -EXPORT_SYMBOL(create_lookup_pte_addr); 57.488 - 57.489 -static int noop_fn( 57.490 - pte_t *pte, struct page *pte_page, unsigned long addr, void *data) 57.491 -{ 57.492 - return 0; 57.493 -} 57.494 - 57.495 -int touch_pte_range(struct mm_struct *mm, 57.496 - unsigned long address, 57.497 - unsigned long size) 57.498 -{ 57.499 - 
return generic_page_range(mm, address, size, noop_fn, NULL); 57.500 -} 57.501 - 57.502 -EXPORT_SYMBOL(touch_pte_range);
78.1 --- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Tue Aug 23 17:32:44 2005 +0000 78.2 +++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Tue Aug 23 17:33:11 2005 +0000 78.3 @@ -167,7 +167,7 @@ static int privcmd_ioctl(struct inode *i 78.4 if (ret) 78.5 goto batch_err; 78.6 78.7 - u.val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot); 78.8 + u.val = pte_val_ma(pfn_pte_ma(mfn, vma->vm_page_prot)); 78.9 u.ptr = ptep; 78.10 78.11 if ( unlikely(HYPERVISOR_mmu_update(&u, 1, NULL, m.dom) < 0) )
91.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h Tue Aug 23 17:32:44 2005 +0000 91.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/page.h Tue Aug 23 17:33:11 2005 +0000 91.3 @@ -60,9 +60,13 @@ 91.4 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 91.5 91.6 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ 91.7 +#define INVALID_P2M_ENTRY (~0U) 91.8 +#define FOREIGN_FRAME(m) ((m) | 0x80000000U) 91.9 extern unsigned int *phys_to_machine_mapping; 91.10 -#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)])) 91.11 -#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)])) 91.12 +#define pfn_to_mfn(pfn) \ 91.13 +((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL) 91.14 +#define mfn_to_pfn(mfn) \ 91.15 +((unsigned long)machine_to_phys_mapping[(unsigned int)(mfn)]) 91.16 91.17 /* Definitions for machine and pseudophysical addresses. */ 91.18 #ifdef CONFIG_X86_PAE
94.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h Tue Aug 23 17:32:44 2005 +0000 94.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h Tue Aug 23 17:33:11 2005 +0000 94.3 @@ -63,17 +63,15 @@ inline static void set_pte_at_sync(struc 94.4 * 94.5 * NB2. When deliberately mapping foreign pages into the p2m table, you *must* 94.6 * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we 94.7 - * require. In all the cases we care about, the high bit gets shifted out 94.8 - * (e.g., phys_to_machine()) so behaviour there is correct. 94.9 + * require. In all the cases we care about, the FOREIGN_FRAME bit is 94.10 + * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 94.11 */ 94.12 -#define INVALID_P2M_ENTRY (~0U) 94.13 -#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1))) 94.14 #define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT) 94.15 #define pte_pfn(_pte) \ 94.16 ({ \ 94.17 unsigned long mfn = pte_mfn(_pte); \ 94.18 unsigned long pfn = mfn_to_pfn(mfn); \ 94.19 - if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \ 94.20 + if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\ 94.21 pfn = max_mapnr; /* special: force !pfn_valid() */ \ 94.22 pfn; \ 94.23 })
95.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-3level.h Tue Aug 23 17:32:44 2005 +0000 95.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable-3level.h Tue Aug 23 17:33:11 2005 +0000 95.3 @@ -150,15 +150,13 @@ static inline int pte_none(pte_t pte) 95.4 return !pte.pte_low && !pte.pte_high; 95.5 } 95.6 95.7 -#define INVALID_P2M_ENTRY (~0U) 95.8 -#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1))) 95.9 #define pte_mfn(_pte) ( ((_pte).pte_low >> PAGE_SHIFT) |\ 95.10 (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)) ) 95.11 #define pte_pfn(_pte) \ 95.12 ({ \ 95.13 unsigned long mfn = pte_mfn(_pte); \ 95.14 unsigned long pfn = mfn_to_pfn(mfn); \ 95.15 - if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \ 95.16 + if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\ 95.17 pfn = max_mapnr; /* special: force !pfn_valid() */ \ 95.18 pfn; \ 95.19 })
100.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h Tue Aug 23 17:32:44 2005 +0000 100.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/page.h Tue Aug 23 17:33:11 2005 +0000 100.3 @@ -62,9 +62,13 @@ void copy_page(void *, void *); 100.4 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 100.5 100.6 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ 100.7 +#define INVALID_P2M_ENTRY (~0U) 100.8 +#define FOREIGN_FRAME(m) ((m) | 0x80000000U) 100.9 extern u32 *phys_to_machine_mapping; 100.10 -#define pfn_to_mfn(_pfn) ((unsigned long) phys_to_machine_mapping[(unsigned int)(_pfn)]) 100.11 -#define mfn_to_pfn(_mfn) ((unsigned long) machine_to_phys_mapping[(unsigned int)(_mfn)]) 100.12 +#define pfn_to_mfn(pfn) \ 100.13 +((unsigned long)phys_to_machine_mapping[(unsigned int)(pfn)] & 0x7FFFFFFFUL) 100.14 +#define mfn_to_pfn(mfn) \ 100.15 +((unsigned long)machine_to_phys_mapping[(unsigned int)(mfn)]) 100.16 100.17 /* Definitions for machine and pseudophysical addresses. */ 100.18 typedef unsigned long paddr_t;
102.1 --- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Tue Aug 23 17:32:44 2005 +0000 102.2 +++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Tue Aug 23 17:33:11 2005 +0000 102.3 @@ -300,17 +300,15 @@ inline static void set_pte_at(struct mm_ 102.4 * 102.5 * NB2. When deliberately mapping foreign pages into the p2m table, you *must* 102.6 * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we 102.7 - * require. In all the cases we care about, the high bit gets shifted out 102.8 - * (e.g., phys_to_machine()) so behaviour there is correct. 102.9 + * require. In all the cases we care about, the FOREIGN_FRAME bit is 102.10 + * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 102.11 */ 102.12 -#define INVALID_P2M_ENTRY (~0U) 102.13 -#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1))) 102.14 #define pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT) 102.15 #define pte_pfn(_pte) \ 102.16 ({ \ 102.17 unsigned long mfn = pte_mfn(_pte); \ 102.18 unsigned pfn = mfn_to_pfn(mfn); \ 102.19 - if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \ 102.20 + if ((pfn >= max_mapnr) || (phys_to_machine_mapping[pfn] != mfn))\ 102.21 pfn = max_mapnr; /* special: force !pfn_valid() */ \ 102.22 pfn; \ 102.23 })
172.1 --- a/tools/xenstat/xentop/Makefile Tue Aug 23 17:32:44 2005 +0000 172.2 +++ b/tools/xenstat/xentop/Makefile Tue Aug 23 17:33:11 2005 +0000 172.3 @@ -28,7 +28,7 @@ sbindir=$(prefix)/sbin 172.4 172.5 CFLAGS += -DGCC_PRINTF -Wall -Werror -I$(XEN_LIBXENSTAT) 172.6 LDFLAGS += -L$(XEN_LIBXENSTAT) 172.7 -LDLIBS += -lxenstat -lcurses 172.8 +LDLIBS += -lxenstat -lncurses 172.9 172.10 all: xentop 172.11
185.1 --- a/xen/arch/x86/io_apic.c Tue Aug 23 17:32:44 2005 +0000 185.2 +++ b/xen/arch/x86/io_apic.c Tue Aug 23 17:33:11 2005 +0000 185.3 @@ -1751,8 +1751,30 @@ int ioapic_guest_write(int apicid, int a 185.4 185.5 pin = (address - 0x10) >> 1; 185.6 185.7 + *(u32 *)&rte = val; 185.8 rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); 185.9 - *(int *)&rte = val; 185.10 + 185.11 + /* 185.12 + * What about weird destination types? 185.13 + * SMI: Ignore? Ought to be set up by the BIOS. 185.14 + * NMI: Ignore? Watchdog functionality is Xen's concern. 185.15 + * INIT: Definitely ignore: probably a guest OS bug. 185.16 + * ExtINT: Ignore? Linux only asserts this at start of day. 185.17 + * For now, print a message and return an error. We can fix up on demand. 185.18 + */ 185.19 + if ( rte.delivery_mode > dest_LowestPrio ) 185.20 + { 185.21 + printk("ERROR: Attempt to write weird IOAPIC destination mode!\n"); 185.22 + printk(" APIC=%d/%d, lo-reg=%x\n", apicid, pin, val); 185.23 + return -EINVAL; 185.24 + } 185.25 + 185.26 + /* 185.27 + * The guest does not know physical APIC arrangement (flat vs. cluster). 185.28 + * Apply genapic conventions for this platform. 185.29 + */ 185.30 + rte.delivery_mode = INT_DELIVERY_MODE; 185.31 + rte.dest_mode = INT_DEST_MODE; 185.32 185.33 if ( rte.vector >= FIRST_DEVICE_VECTOR ) 185.34 {
186.1 --- a/xen/arch/x86/mm.c Tue Aug 23 17:32:44 2005 +0000 186.2 +++ b/xen/arch/x86/mm.c Tue Aug 23 17:33:11 2005 +0000 186.3 @@ -444,7 +444,7 @@ get_page_from_l1e( 186.4 186.5 if ( unlikely(l1e_get_flags(l1e) & L1_DISALLOW_MASK) ) 186.6 { 186.7 - MEM_LOG("Bad L1 flags %x\n", l1e_get_flags(l1e) & L1_DISALLOW_MASK); 186.8 + MEM_LOG("Bad L1 flags %x", l1e_get_flags(l1e) & L1_DISALLOW_MASK); 186.9 return 0; 186.10 } 186.11 186.12 @@ -490,7 +490,7 @@ get_page_from_l2e( 186.13 186.14 if ( unlikely((l2e_get_flags(l2e) & L2_DISALLOW_MASK)) ) 186.15 { 186.16 - MEM_LOG("Bad L2 flags %x\n", l2e_get_flags(l2e) & L2_DISALLOW_MASK); 186.17 + MEM_LOG("Bad L2 flags %x", l2e_get_flags(l2e) & L2_DISALLOW_MASK); 186.18 return 0; 186.19 } 186.20 186.21 @@ -523,7 +523,7 @@ get_page_from_l3e( 186.22 186.23 if ( unlikely((l3e_get_flags(l3e) & L3_DISALLOW_MASK)) ) 186.24 { 186.25 - MEM_LOG("Bad L3 flags %x\n", l3e_get_flags(l3e) & L3_DISALLOW_MASK); 186.26 + MEM_LOG("Bad L3 flags %x", l3e_get_flags(l3e) & L3_DISALLOW_MASK); 186.27 return 0; 186.28 } 186.29 186.30 @@ -557,7 +557,7 @@ get_page_from_l4e( 186.31 186.32 if ( unlikely((l4e_get_flags(l4e) & L4_DISALLOW_MASK)) ) 186.33 { 186.34 - MEM_LOG("Bad L4 flags %x\n", l4e_get_flags(l4e) & L4_DISALLOW_MASK); 186.35 + MEM_LOG("Bad L4 flags %x", l4e_get_flags(l4e) & L4_DISALLOW_MASK); 186.36 return 0; 186.37 } 186.38 186.39 @@ -1025,7 +1025,7 @@ static inline int update_l1e(l1_pgentry_ 186.40 unlikely(o != l1e_get_intpte(ol1e)) ) 186.41 { 186.42 MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte 186.43 - ": saw %" PRIpte "\n", 186.44 + ": saw %" PRIpte, 186.45 l1e_get_intpte(ol1e), 186.46 l1e_get_intpte(nl1e), 186.47 o); 186.48 @@ -1051,7 +1051,7 @@ static int mod_l1_entry(l1_pgentry_t *pl 186.49 { 186.50 if ( unlikely(l1e_get_flags(nl1e) & L1_DISALLOW_MASK) ) 186.51 { 186.52 - MEM_LOG("Bad L1 flags %x\n", 186.53 + MEM_LOG("Bad L1 flags %x", 186.54 l1e_get_flags(nl1e) & L1_DISALLOW_MASK); 186.55 return 0; 186.56 } 186.57 @@ -1113,7 +1113,7 
@@ static int mod_l2_entry(l2_pgentry_t *pl 186.58 { 186.59 if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) ) 186.60 { 186.61 - MEM_LOG("Bad L2 flags %x\n", 186.62 + MEM_LOG("Bad L2 flags %x", 186.63 l2e_get_flags(nl2e) & L2_DISALLOW_MASK); 186.64 return 0; 186.65 } 186.66 @@ -1175,7 +1175,7 @@ static int mod_l3_entry(l3_pgentry_t *pl 186.67 { 186.68 if ( unlikely(l3e_get_flags(nl3e) & L3_DISALLOW_MASK) ) 186.69 { 186.70 - MEM_LOG("Bad L3 flags %x\n", 186.71 + MEM_LOG("Bad L3 flags %x", 186.72 l3e_get_flags(nl3e) & L3_DISALLOW_MASK); 186.73 return 0; 186.74 } 186.75 @@ -1237,7 +1237,7 @@ static int mod_l4_entry(l4_pgentry_t *pl 186.76 { 186.77 if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) ) 186.78 { 186.79 - MEM_LOG("Bad L4 flags %x\n", 186.80 + MEM_LOG("Bad L4 flags %x", 186.81 l4e_get_flags(nl4e) & L4_DISALLOW_MASK); 186.82 return 0; 186.83 } 186.84 @@ -1598,7 +1598,7 @@ static int set_foreigndom(unsigned int c 186.85 percpu_info[cpu].foreign = dom_io; 186.86 break; 186.87 default: 186.88 - MEM_LOG("Dom %u cannot set foreign dom\n", d->domain_id); 186.89 + MEM_LOG("Dom %u cannot set foreign dom", d->domain_id); 186.90 okay = 0; 186.91 break; 186.92 } 186.93 @@ -1831,7 +1831,7 @@ int do_mmuext_op( 186.94 case MMUEXT_FLUSH_CACHE: 186.95 if ( unlikely(!IS_CAPABLE_PHYSDEV(d)) ) 186.96 { 186.97 - MEM_LOG("Non-physdev domain tried to FLUSH_CACHE.\n"); 186.98 + MEM_LOG("Non-physdev domain tried to FLUSH_CACHE."); 186.99 okay = 0; 186.100 } 186.101 else 186.102 @@ -1845,7 +1845,7 @@ int do_mmuext_op( 186.103 if ( shadow_mode_external(d) ) 186.104 { 186.105 MEM_LOG("ignoring SET_LDT hypercall from external " 186.106 - "domain %u\n", d->domain_id); 186.107 + "domain %u", d->domain_id); 186.108 okay = 0; 186.109 break; 186.110 } 186.111 @@ -1916,7 +1916,7 @@ int do_mmuext_op( 186.112 unlikely(IS_XEN_HEAP_FRAME(page)) ) 186.113 { 186.114 MEM_LOG("Transferee has no reservation headroom (%d,%d), or " 186.115 - "page is in Xen heap (%lx), or dom is dying 
(%ld).\n", 186.116 + "page is in Xen heap (%lx), or dom is dying (%ld).", 186.117 e->tot_pages, e->max_pages, op.mfn, e->domain_flags); 186.118 okay = 0; 186.119 goto reassign_fail; 186.120 @@ -1937,7 +1937,7 @@ int do_mmuext_op( 186.121 unlikely(_nd != _d) ) 186.122 { 186.123 MEM_LOG("Bad page values %lx: ed=%p(%u), sd=%p," 186.124 - " caf=%08x, taf=%" PRtype_info "\n", 186.125 + " caf=%08x, taf=%" PRtype_info, 186.126 page_to_pfn(page), d, d->domain_id, 186.127 unpickle_domptr(_nd), x, page->u.inuse.type_info); 186.128 okay = 0; 186.129 @@ -2301,7 +2301,7 @@ int update_grant_pte_mapping( 186.130 if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) || 186.131 !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) ) 186.132 { 186.133 - DPRINTK("Grant map attempted to update a non-L1 page\n"); 186.134 + MEM_LOG("Grant map attempted to update a non-L1 page"); 186.135 rc = GNTST_general_error; 186.136 goto failed; 186.137 } 186.138 @@ -2363,7 +2363,7 @@ int clear_grant_pte_mapping( 186.139 if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) || 186.140 !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) ) 186.141 { 186.142 - DPRINTK("Grant map attempted to update a non-L1 page\n"); 186.143 + MEM_LOG("Grant map attempted to update a non-L1 page"); 186.144 rc = GNTST_general_error; 186.145 goto failed; 186.146 } 186.147 @@ -2378,7 +2378,7 @@ int clear_grant_pte_mapping( 186.148 /* Check that the virtual address supplied is actually mapped to frame. */ 186.149 if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame) ) 186.150 { 186.151 - DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n", 186.152 + MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx", 186.153 (unsigned long)l1e_get_intpte(ol1e), addr, frame); 186.154 put_page_type(page); 186.155 rc = GNTST_general_error; 186.156 @@ -2388,7 +2388,7 @@ int clear_grant_pte_mapping( 186.157 /* Delete pagetable entry. 
*/ 186.158 if ( unlikely(__put_user(0, (intpte_t *)va))) 186.159 { 186.160 - DPRINTK("Cannot delete PTE entry at %p.\n", va); 186.161 + MEM_LOG("Cannot delete PTE entry at %p", va); 186.162 put_page_type(page); 186.163 rc = GNTST_general_error; 186.164 goto failed; 186.165 @@ -2452,7 +2452,7 @@ int clear_grant_va_mapping(unsigned long 186.166 186.167 if ( unlikely(__get_user(ol1e.l1, &pl1e->l1) != 0) ) 186.168 { 186.169 - DPRINTK("Could not find PTE entry for address %lx\n", addr); 186.170 + MEM_LOG("Could not find PTE entry for address %lx", addr); 186.171 return GNTST_general_error; 186.172 } 186.173 186.174 @@ -2462,7 +2462,7 @@ int clear_grant_va_mapping(unsigned long 186.175 */ 186.176 if ( unlikely(l1e_get_pfn(ol1e) != frame) ) 186.177 { 186.178 - DPRINTK("PTE entry %lx for address %lx doesn't match frame %lx\n", 186.179 + MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx", 186.180 l1e_get_pfn(ol1e), addr, frame); 186.181 return GNTST_general_error; 186.182 } 186.183 @@ -2470,7 +2470,7 @@ int clear_grant_va_mapping(unsigned long 186.184 /* Delete pagetable entry. */ 186.185 if ( unlikely(__put_user(0, &pl1e->l1)) ) 186.186 { 186.187 - DPRINTK("Cannot delete PTE entry at %p.\n", (unsigned long *)pl1e); 186.188 + MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e); 186.189 return GNTST_general_error; 186.190 } 186.191 186.192 @@ -2930,7 +2930,7 @@ int revalidate_l1( 186.193 186.194 if ( unlikely(!get_page_from_l1e(nl1e, d)) ) 186.195 { 186.196 - MEM_LOG("ptwr: Could not re-validate l1 page\n"); 186.197 + MEM_LOG("ptwr: Could not re-validate l1 page"); 186.198 /* 186.199 * Make the remaining p.t's consistent before crashing, so the 186.200 * reference counts are correct. 186.201 @@ -3056,7 +3056,7 @@ static int ptwr_emulated_update( 186.202 /* Aligned access only, thank you. 
*/ 186.203 if ( !access_ok(addr, bytes) || ((addr & (bytes-1)) != 0) ) 186.204 { 186.205 - MEM_LOG("ptwr_emulate: Unaligned or bad size ptwr access (%d, %lx)\n", 186.206 + MEM_LOG("ptwr_emulate: Unaligned or bad size ptwr access (%d, %lx)", 186.207 bytes, addr); 186.208 return X86EMUL_UNHANDLEABLE; 186.209 } 186.210 @@ -3089,7 +3089,7 @@ static int ptwr_emulated_update( 186.211 if (__copy_from_user(&pte, &linear_pg_table[l1_linear_offset(addr)], 186.212 sizeof(pte))) 186.213 { 186.214 - MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table\n"); 186.215 + MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table"); 186.216 return X86EMUL_UNHANDLEABLE; 186.217 } 186.218 186.219 @@ -3102,7 +3102,7 @@ static int ptwr_emulated_update( 186.220 (page_get_owner(page) != d) ) 186.221 { 186.222 MEM_LOG("ptwr_emulate: Page is mistyped or bad pte " 186.223 - "(%lx, %" PRtype_info ")\n", 186.224 + "(%lx, %" PRtype_info ")", 186.225 l1e_get_pfn(pte), page->u.inuse.type_info); 186.226 return X86EMUL_UNHANDLEABLE; 186.227 }
193.1 --- a/xen/arch/x86/vmx.c Tue Aug 23 17:32:44 2005 +0000 193.2 +++ b/xen/arch/x86/vmx.c Tue Aug 23 17:33:11 2005 +0000 193.3 @@ -1712,9 +1712,6 @@ asmlinkage void vmx_vmexit_handler(struc 193.4 default: 193.5 __vmx_bug(®s); /* should not happen */ 193.6 } 193.7 - 193.8 - vmx_intr_assist(v); 193.9 - return; 193.10 } 193.11 193.12 asmlinkage void load_cr2(void)
194.1 --- a/xen/arch/x86/vmx_io.c Tue Aug 23 17:32:44 2005 +0000 194.2 +++ b/xen/arch/x86/vmx_io.c Tue Aug 23 17:33:11 2005 +0000 194.3 @@ -631,12 +631,14 @@ static inline int irq_masked(unsigned lo 194.4 return ((eflags & X86_EFLAGS_IF) == 0); 194.5 } 194.6 194.7 -void vmx_intr_assist(struct vcpu *v) 194.8 +asmlinkage void vmx_intr_assist(void) 194.9 { 194.10 int intr_type = 0; 194.11 - int highest_vector = find_highest_pending_irq(v, &intr_type); 194.12 + int highest_vector; 194.13 unsigned long intr_fields, eflags, interruptibility, cpu_exec_control; 194.14 + struct vcpu *v = current; 194.15 194.16 + highest_vector = find_highest_pending_irq(v, &intr_type); 194.17 __vmread(CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control); 194.18 194.19 if (highest_vector == -1) { 194.20 @@ -712,9 +714,6 @@ void vmx_do_resume(struct vcpu *d) 194.21 194.22 /* We can't resume the guest if we're waiting on I/O */ 194.23 ASSERT(!test_bit(ARCH_VMX_IO_WAIT, &d->arch.arch_vmx.flags)); 194.24 - 194.25 - /* We always check for interrupts before resuming guest */ 194.26 - vmx_intr_assist(d); 194.27 } 194.28 194.29 #endif /* CONFIG_VMX */
195.1 --- a/xen/arch/x86/x86_32/entry.S Tue Aug 23 17:32:44 2005 +0000 195.2 +++ b/xen/arch/x86/x86_32/entry.S Tue Aug 23 17:33:11 2005 +0000 195.3 @@ -140,6 +140,7 @@ 1: 195.4 jnz 2f 195.5 195.6 /* vmx_restore_all_guest */ 195.7 + call vmx_intr_assist 195.8 call load_cr2 195.9 .endif 195.10 VMX_RESTORE_ALL_NOSEGREGS
196.1 --- a/xen/arch/x86/x86_32/traps.c Tue Aug 23 17:32:44 2005 +0000 196.2 +++ b/xen/arch/x86/x86_32/traps.c Tue Aug 23 17:33:11 2005 +0000 196.3 @@ -1,5 +1,6 @@ 196.4 196.5 #include <xen/config.h> 196.6 +#include <xen/domain_page.h> 196.7 #include <xen/init.h> 196.8 #include <xen/sched.h> 196.9 #include <xen/lib.h> 196.10 @@ -86,24 +87,33 @@ void show_registers(struct cpu_user_regs 196.11 196.12 void show_page_walk(unsigned long addr) 196.13 { 196.14 - l2_pgentry_t pmd; 196.15 - l1_pgentry_t *pte; 196.16 - 196.17 - if ( addr < PAGE_OFFSET ) 196.18 - return; 196.19 + unsigned long pfn = read_cr3() >> PAGE_SHIFT; 196.20 + intpte_t *ptab, ent; 196.21 196.22 printk("Pagetable walk from %08lx:\n", addr); 196.23 - 196.24 - pmd = idle_pg_table_l2[l2_linear_offset(addr)]; 196.25 - printk(" L2 = %"PRIpte" %s\n", l2e_get_intpte(pmd), 196.26 - (l2e_get_flags(pmd) & _PAGE_PSE) ? "(2/4MB)" : ""); 196.27 - if ( !(l2e_get_flags(pmd) & _PAGE_PRESENT) || 196.28 - (l2e_get_flags(pmd) & _PAGE_PSE) ) 196.29 + 196.30 +#ifdef CONFIG_X86_PAE 196.31 + ptab = map_domain_page(pfn); 196.32 + ent = ptab[l3_table_offset(addr)]; 196.33 + printk(" L3 = %"PRIpte"\n", ent); 196.34 + unmap_domain_page(ptab); 196.35 + if ( !(ent & _PAGE_PRESENT) ) 196.36 return; 196.37 + pfn = ent >> PAGE_SHIFT; 196.38 +#endif 196.39 196.40 - pte = __va(l2e_get_paddr(pmd)); 196.41 - pte += l1_table_offset(addr); 196.42 - printk(" L1 = %"PRIpte"\n", l1e_get_intpte(*pte)); 196.43 + ptab = map_domain_page(pfn); 196.44 + ent = ptab[l2_table_offset(addr)]; 196.45 + printk(" L2 = %"PRIpte" %s\n", ent, (ent & _PAGE_PSE) ? "(PSE)" : ""); 196.46 + unmap_domain_page(ptab); 196.47 + if ( !(ent & _PAGE_PRESENT) || (ent & _PAGE_PSE) ) 196.48 + return; 196.49 + pfn = ent >> PAGE_SHIFT; 196.50 + 196.51 + ptab = map_domain_page(pfn); 196.52 + ent = ptab[l1_table_offset(addr)]; 196.53 + printk(" L1 = %"PRIpte"\n", ent); 196.54 + unmap_domain_page(ptab); 196.55 } 196.56 196.57 #define DOUBLEFAULT_STACK_SIZE 1024
197.1 --- a/xen/arch/x86/x86_64/entry.S Tue Aug 23 17:32:44 2005 +0000 197.2 +++ b/xen/arch/x86/x86_64/entry.S Tue Aug 23 17:33:11 2005 +0000 197.3 @@ -233,6 +233,7 @@ 1: 197.4 jnz 2f 197.5 197.6 /* vmx_restore_all_guest */ 197.7 + call vmx_intr_assist 197.8 call load_cr2 197.9 .endif 197.10 /*
212.1 --- a/xen/include/asm-x86/vmx.h Tue Aug 23 17:32:44 2005 +0000 212.2 +++ b/xen/include/asm-x86/vmx.h Tue Aug 23 17:33:11 2005 +0000 212.3 @@ -31,7 +31,7 @@ 212.4 extern void vmx_asm_vmexit_handler(struct cpu_user_regs); 212.5 extern void vmx_asm_do_resume(void); 212.6 extern void vmx_asm_do_launch(void); 212.7 -extern void vmx_intr_assist(struct vcpu *d); 212.8 +extern void vmx_intr_assist(void); 212.9 212.10 extern void arch_vmx_do_launch(struct vcpu *); 212.11 extern void arch_vmx_do_resume(struct vcpu *); 212.12 @@ -355,7 +355,7 @@ static inline int __vmxon (u64 addr) 212.13 } 212.14 212.15 /* Make sure that xen intercepts any FP accesses from current */ 212.16 -static inline void vmx_stts() 212.17 +static inline void vmx_stts(void) 212.18 { 212.19 unsigned long cr0; 212.20