xen-vtx-unstable
changeset 6542:287d36b46fa3
Merge.
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 Tue Aug 30 13:36:49 2005 -0700
@@ -195,6 +195,7 @@ CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=16384
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/pci-dma.c Tue Aug 30 13:36:49 2005 -0700
@@ -115,9 +115,9 @@ dma_supported(struct device *dev, u64 ma
if (swiotlb)
return swiotlb_dma_supported(dev, mask);
/*
- * By default we'll BUG when an infeasible DMA is requested, and
- * request swiotlb=force (see IOMMU_BUG_ON).
- */
+ * By default we'll BUG when an infeasible DMA is requested, and
+ * request swiotlb=force (see IOMMU_BUG_ON).
+ */
return 1;
}
EXPORT_SYMBOL(dma_supported);
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Tue Aug 30 13:36:49 2005 -0700
@@ -55,6 +55,7 @@
#include <asm/io.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/xen-public/physdev.h>
+#include <asm-xen/xen-public/memory.h>
#include "setup_arch_pre.h"
#include <bios_ebda.h>

@@ -1585,15 +1586,21 @@ void __init setup_arch(char **cmdline_p)
(unsigned int *)xen_start_info.mfn_list,
xen_start_info.nr_pages * sizeof(unsigned int));
} else {
+ struct xen_memory_reservation reservation = {
+ .extent_start = (unsigned long *)xen_start_info.mfn_list + max_pfn,
+ .nr_extents = xen_start_info.nr_pages - max_pfn,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };
+
memcpy(phys_to_machine_mapping,
(unsigned int *)xen_start_info.mfn_list,
max_pfn * sizeof(unsigned int));
/* N.B. below relies on sizeof(int) == sizeof(long). */
- if (HYPERVISOR_dom_mem_op(
- MEMOP_decrease_reservation,
- (unsigned long *)xen_start_info.mfn_list + max_pfn,
- xen_start_info.nr_pages - max_pfn, 0) !=
- (xen_start_info.nr_pages - max_pfn)) BUG();
+ BUG_ON(HYPERVISOR_memory_op(
+ XENMEM_decrease_reservation,
+ &reservation) !=
+ (xen_start_info.nr_pages - max_pfn));
}
free_bootmem(
__pa(xen_start_info.mfn_list),
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c Tue Aug 30 13:36:49 2005 -0700
@@ -35,6 +35,7 @@
#include <asm/pgtable.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/balloon.h>
+#include <asm-xen/xen-public/memory.h>
#include <linux/module.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#include <linux/percpu.h>
@@ -320,6 +321,12 @@ void xen_create_contiguous_region(unsign
pmd_t *pmd;
pte_t *pte;
unsigned long mfn, i, flags;
+ struct xen_memory_reservation reservation = {
+ .extent_start = &mfn,
+ .nr_extents = 1,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };

scrub_pages(vstart, 1 << order);

@@ -336,13 +343,15 @@ void xen_create_contiguous_region(unsign
vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
INVALID_P2M_ENTRY;
- BUG_ON(HYPERVISOR_dom_mem_op(
- MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
+ BUG_ON(HYPERVISOR_memory_op(
+ XENMEM_decrease_reservation, &reservation) != 1);
}

/* 2. Get a new contiguous memory extent. */
- BUG_ON(HYPERVISOR_dom_mem_op(
- MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
+ reservation.extent_order = order;
+ reservation.address_bits = 31; /* aacraid limitation */
+ BUG_ON(HYPERVISOR_memory_op(
+ XENMEM_increase_reservation, &reservation) != 1);

/* 3. Map the new extent in place of old pages. */
for (i = 0; i < (1<<order); i++) {
@@ -367,6 +376,12 @@ void xen_destroy_contiguous_region(unsig
pmd_t *pmd;
pte_t *pte;
unsigned long mfn, i, flags;
+ struct xen_memory_reservation reservation = {
+ .extent_start = &mfn,
+ .nr_extents = 1,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };

scrub_pages(vstart, 1 << order);

@@ -385,14 +400,14 @@ void xen_destroy_contiguous_region(unsig
vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
INVALID_P2M_ENTRY;
- BUG_ON(HYPERVISOR_dom_mem_op(
- MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
+ BUG_ON(HYPERVISOR_memory_op(
+ XENMEM_decrease_reservation, &reservation) != 1);
}

/* 2. Map new pages in place of old pages. */
for (i = 0; i < (1<<order); i++) {
- BUG_ON(HYPERVISOR_dom_mem_op(
- MEMOP_increase_reservation, &mfn, 1, 0) != 1);
+ BUG_ON(HYPERVISOR_memory_op(
+ XENMEM_increase_reservation, &reservation) != 1);
BUG_ON(HYPERVISOR_update_va_mapping(
vstart + (i*PAGE_SIZE),
pfn_pte_ma(mfn, PAGE_KERNEL), 0));
--- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c Tue Aug 30 13:36:49 2005 -0700
@@ -44,13 +44,6 @@
#include <asm-xen/hypervisor.h>
#include <asm-xen/evtchn.h>

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-EXPORT_SYMBOL(force_evtchn_callback);
-EXPORT_SYMBOL(evtchn_do_upcall);
-EXPORT_SYMBOL(bind_evtchn_to_irq);
-EXPORT_SYMBOL(unbind_evtchn_from_irq);
-#endif
-
/*
* This lock protects updates to the following mapping and reference-count
* arrays. The lock does not need to be acquired to read the mapping tables.
@@ -133,6 +126,7 @@ void force_evtchn_callback(void)
{
(void)HYPERVISOR_xen_version(0);
}
+EXPORT_SYMBOL(force_evtchn_callback);

/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
@@ -165,6 +159,7 @@ asmlinkage void evtchn_do_upcall(struct
}
}
}
+EXPORT_SYMBOL(evtchn_do_upcall);

static int find_unbound_irq(void)
{
@@ -211,6 +206,7 @@ int bind_virq_to_irq(int virq)

return irq;
}
+EXPORT_SYMBOL(bind_virq_to_irq);

void unbind_virq_from_irq(int virq)
{
@@ -244,6 +240,7 @@ void unbind_virq_from_irq(int virq)

spin_unlock(&irq_mapping_update_lock);
}
+EXPORT_SYMBOL(unbind_virq_from_irq);

int bind_ipi_to_irq(int ipi)
{
@@ -279,6 +276,7 @@ int bind_ipi_to_irq(int ipi)

return irq;
}
+EXPORT_SYMBOL(bind_ipi_to_irq);

void unbind_ipi_from_irq(int ipi)
{
@@ -306,6 +304,7 @@ void unbind_ipi_from_irq(int ipi)

spin_unlock(&irq_mapping_update_lock);
}
+EXPORT_SYMBOL(unbind_ipi_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
@@ -326,6 +325,7 @@ int bind_evtchn_to_irq(unsigned int evtc

return irq;
}
+EXPORT_SYMBOL(bind_evtchn_to_irq);

void unbind_evtchn_from_irq(unsigned int evtchn)
{
@@ -341,6 +341,7 @@ void unbind_evtchn_from_irq(unsigned int

spin_unlock(&irq_mapping_update_lock);
}
+EXPORT_SYMBOL(unbind_evtchn_from_irq);

int bind_evtchn_to_irqhandler(
unsigned int evtchn,
@@ -359,6 +360,7 @@ int bind_evtchn_to_irqhandler(

return retval;
}
+EXPORT_SYMBOL(bind_evtchn_to_irqhandler);

void unbind_evtchn_from_irqhandler(unsigned int evtchn, void *dev_id)
{
@@ -366,6 +368,7 @@ void unbind_evtchn_from_irqhandler(unsig
free_irq(irq, dev_id);
unbind_evtchn_from_irq(evtchn);
}
+EXPORT_SYMBOL(unbind_evtchn_from_irqhandler);

#ifdef CONFIG_SMP
static void do_nothing_function(void *ign)
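The hunks above only move the EXPORT_SYMBOL() declarations next to the functions they export (and add exports for the virq/ipi/irqhandler variants); the interface itself is unchanged. As a hedged illustration, a loadable driver would use the exported helpers roughly as sketched below; the signatures are inferred from their callers elsewhere in this changeset (xencons_ring.c, blkfront.c), and the my_* names are illustrative, not part of the tree.

static irqreturn_t my_evtchn_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/* dev_id is whatever was passed at bind time */
	return IRQ_HANDLED;
}

static int my_connect(unsigned int evtchn, void *my_dev)
{
	/* a nonzero return is treated as an error by the callers in this changeset */
	return bind_evtchn_to_irqhandler(evtchn, my_evtchn_handler, 0,
					 "my-driver", my_dev);
}

static void my_disconnect(unsigned int evtchn, void *my_dev)
{
	/*
	 * dev_id must match the bind-time value: the helper calls
	 * free_irq(irq, dev_id), which is exactly what the blkfront hunk
	 * below fixes by passing info instead of NULL.
	 */
	unbind_evtchn_from_irqhandler(evtchn, my_dev);
}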
--- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c Tue Aug 30 13:36:49 2005 -0700
@@ -65,17 +65,12 @@ static int shutting_down = SHUTDOWN_INVA
#define cpu_up(x) (-EOPNOTSUPP)
#endif

-#ifdef CONFIG_SMP
-#endif

static int __do_suspend(void *ignore)
{
int i, j;
suspend_record_t *suspend_record;

- /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
- /* XXX SMH: yes it would :-( */
-
#ifdef CONFIG_XEN_USB_FRONTEND
extern void usbif_resume();
#else
@@ -108,7 +103,8 @@ static int __do_suspend(void *ignore)

#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
if (num_online_cpus() > 1) {
- printk(KERN_WARNING "Can't suspend SMP guests without CONFIG_HOTPLUG_CPU\n");
+ printk(KERN_WARNING
+ "Can't suspend SMP guests without CONFIG_HOTPLUG_CPU\n");
return -EOPNOTSUPP;
}
#endif
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/kernel/setup.c Tue Aug 30 13:36:49 2005 -0700
@@ -734,9 +734,9 @@ void __init setup_arch(char **cmdline_p)
/* Make sure we have a large enough P->M table. */
if (end_pfn > xen_start_info.nr_pages) {
phys_to_machine_mapping = alloc_bootmem(
- max_pfn * sizeof(u32));
+ end_pfn * sizeof(u32));
memset(phys_to_machine_mapping, ~0,
- max_pfn * sizeof(u32));
+ end_pfn * sizeof(u32));
memcpy(phys_to_machine_mapping,
(u32 *)xen_start_info.mfn_list,
xen_start_info.nr_pages * sizeof(u32));
@@ -749,11 +749,8 @@ void __init setup_arch(char **cmdline_p)
pfn_to_mfn_frame_list = alloc_bootmem(PAGE_SIZE);

for ( i=0, j=0; i < end_pfn; i+=(PAGE_SIZE/sizeof(u32)), j++ )
- {
pfn_to_mfn_frame_list[j] =
virt_to_mfn(&phys_to_machine_mapping[i]);
- }
-
}
#endif

--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/fault.c Tue Aug 30 13:36:49 2005 -0700
@@ -149,7 +149,7 @@ void dump_pagetable(unsigned long addres
pmd_t *pmd;
pte_t *pte;

- pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
+ pgd = (pgd_t *)per_cpu(cur_pgd, smp_processor_id());
pgd += pgd_index(address);

printk("PGD %lx ", pgd_val(*pgd));
@@ -296,9 +296,9 @@ int exception_trace = 1;
#define MEM_VERBOSE 1

#ifdef MEM_VERBOSE
-#define MEM_LOG(_f, _a...) \
- printk("fault.c:[%d]-> " _f "\n", \
- __LINE__ , ## _a )
+#define MEM_LOG(_f, _a...) \
+ printk("fault.c:[%d]-> " _f "\n", \
+ __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif
@@ -325,7 +325,7 @@ asmlinkage void do_page_fault(struct pt_
siginfo_t info;

if (!user_mode(regs))
- error_code &= ~4; /* means kernel */
+ error_code &= ~4; /* means kernel */

#ifdef CONFIG_CHECKING
{
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c Tue Aug 30 13:36:49 2005 -0700
@@ -62,14 +62,16 @@ static int init_mapping_done;
* avaialble in init_memory_mapping().
*/

-#define addr_to_page(addr, page) \
- (addr) &= PHYSICAL_PAGE_MASK; \
- (page) = ((unsigned long *) ((unsigned long)(((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + __START_KERNEL_map)))
+#define addr_to_page(addr, page) \
+ (addr) &= PHYSICAL_PAGE_MASK; \
+ (page) = ((unsigned long *) ((unsigned long) \
+ (((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + \
+ __START_KERNEL_map)))

static void __make_page_readonly(unsigned long va)
{
- unsigned long addr;
- pte_t pte, *ptep;
+ unsigned long addr;
+ pte_t pte, *ptep;
unsigned long *page = (unsigned long *) init_level4_pgt;

addr = (unsigned long) page[pgd_index(va)];
@@ -89,22 +91,22 @@ static void __make_page_readonly(unsigne

static void __make_page_writable(unsigned long va)
{
- unsigned long addr;
- pte_t pte, *ptep;
- unsigned long *page = (unsigned long *) init_level4_pgt;
+ unsigned long addr;
+ pte_t pte, *ptep;
+ unsigned long *page = (unsigned long *) init_level4_pgt;

- addr = (unsigned long) page[pgd_index(va)];
- addr_to_page(addr, page);
+ addr = (unsigned long) page[pgd_index(va)];
+ addr_to_page(addr, page);

- addr = page[pud_index(va)];
- addr_to_page(addr, page);
-
- addr = page[pmd_index(va)];
- addr_to_page(addr, page);
+ addr = page[pud_index(va)];
+ addr_to_page(addr, page);
+
+ addr = page[pmd_index(va)];
+ addr_to_page(addr, page);

- ptep = (pte_t *) &page[pte_index(va)];
+ ptep = (pte_t *) &page[pte_index(va)];
pte.pte = (ptep->pte | _PAGE_RW);
- xen_l1_entry_update(ptep, pte);
+ xen_l1_entry_update(ptep, pte);
__flush_tlb_one(addr);
}

@@ -115,55 +117,55 @@ static void __make_page_writable(unsigne
void make_page_readonly(void *va)
{
pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t pte, *ptep;
- unsigned long addr = (unsigned long) va;
+ unsigned long addr = (unsigned long) va;

- if (!init_mapping_done) {
- __make_page_readonly(addr);
- return;
- }
-
- pgd = pgd_offset_k(addr);
- pud = pud_offset(pgd, addr);
- pmd = pmd_offset(pud, addr);
- ptep = pte_offset_kernel(pmd, addr);
+ if (!init_mapping_done) {
+ __make_page_readonly(addr);
+ return;
+ }
+
+ pgd = pgd_offset_k(addr);
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
+ ptep = pte_offset_kernel(pmd, addr);
pte.pte = (ptep->pte & ~_PAGE_RW);
- xen_l1_entry_update(ptep, pte);
+ xen_l1_entry_update(ptep, pte);
__flush_tlb_one(addr);
}

void make_page_writable(void *va)
{
- pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t pte, *ptep;
- unsigned long addr = (unsigned long) va;
+ pgd_t* pgd; pud_t *pud; pmd_t* pmd; pte_t pte, *ptep;
+ unsigned long addr = (unsigned long) va;

- if (!init_mapping_done) {
- __make_page_writable(addr);
- return;
- }
+ if (!init_mapping_done) {
+ __make_page_writable(addr);
+ return;
+ }

- pgd = pgd_offset_k(addr);
- pud = pud_offset(pgd, addr);
- pmd = pmd_offset(pud, addr);
- ptep = pte_offset_kernel(pmd, addr);
+ pgd = pgd_offset_k(addr);
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
+ ptep = pte_offset_kernel(pmd, addr);
pte.pte = (ptep->pte | _PAGE_RW);
- xen_l1_entry_update(ptep, pte);
+ xen_l1_entry_update(ptep, pte);
__flush_tlb_one(addr);
}

void make_pages_readonly(void* va, unsigned nr)
{
- while ( nr-- != 0 ) {
- make_page_readonly(va);
- va = (void*)((unsigned long)va + PAGE_SIZE);
- }
+ while (nr-- != 0) {
+ make_page_readonly(va);
+ va = (void*)((unsigned long)va + PAGE_SIZE);
+ }
}

void make_pages_writable(void* va, unsigned nr)
{
- while ( nr-- != 0 ) {
- make_page_writable(va);
- va = (void*)((unsigned long)va + PAGE_SIZE);
- }
+ while (nr-- != 0) {
+ make_page_writable(va);
+ va = (void*)((unsigned long)va + PAGE_SIZE);
+ }
}

/*
@@ -389,7 +391,7 @@ void __set_fixmap_user (enum fixed_addre
set_pte_phys(address, phys, prot, SET_FIXMAP_USER);
}

-unsigned long __initdata table_start, table_end, tables_space;
+unsigned long __initdata table_start, tables_space;

unsigned long get_machine_pfn(unsigned long addr)
{
@@ -400,38 +402,13 @@ unsigned long get_machine_pfn(unsigned l
return pte_mfn(*pte);
}

-#define ALIGN_TO_4K __attribute__((section(".data.page_aligned")))
-#define MAX_LOW_PAGES 0x20
-static unsigned long __init_pgt[MAX_LOW_PAGES][512] ALIGN_TO_4K;
-static int __init_pgt_index;
-
-/*
- * We start using from start_pfn
- */
static __init void *alloc_static_page(unsigned long *phys)
{
- int i = __init_pgt_index++;
-
- if (__init_pgt_index >= MAX_LOW_PAGES) {
- printk("Need to increase MAX_LOW_PAGES");
- BUG();
- }
-
- *phys = __pa(__init_pgt[i]);
-
- return (void *) __init_pgt[i];
-}
-
-/*
- * Get RO page
- */
-static void __init *alloc_low_page(unsigned long *phys)
-{
- unsigned long pfn = table_end++;
-
- *phys = (pfn << PAGE_SHIFT);
- memset((void *) ((pfn << PAGE_SHIFT) + __START_KERNEL_map), 0, PAGE_SIZE);
- return (void *)((pfn << PAGE_SHIFT) + __START_KERNEL_map);
+ unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
+ *phys = start_pfn << PAGE_SHIFT;
+ start_pfn++;
+ memset((void *)va, 0, PAGE_SIZE);
+ return (void *)va;
}

#define PTE_SIZE PAGE_SIZE
@@ -443,30 +420,24 @@ static inline void __set_pte(pte_t *dst,

static inline int make_readonly(unsigned long paddr)
{
- int readonly = 0;
-
- /* Make new page tables read-only. */
- if ((paddr < ((table_start << PAGE_SHIFT) + tables_space)) &&
- (paddr >= (table_start << PAGE_SHIFT)))
- readonly = 1;
+ int readonly = 0;

- /* Make old page tables read-only. */
- if ((paddr < ((xen_start_info.pt_base - __START_KERNEL_map) +
- (xen_start_info.nr_pt_frames << PAGE_SHIFT))) &&
- (paddr >= (xen_start_info.pt_base - __START_KERNEL_map)))
- readonly = 1;
+ /* Make old and new page tables read-only. */
+ if ((paddr >= (xen_start_info.pt_base - __START_KERNEL_map))
+ && (paddr < ((table_start << PAGE_SHIFT) + tables_space)))
+ readonly = 1;
+ /*
+ * No need for writable mapping of kernel image. This also ensures that
+ * page and descriptor tables embedded inside don't have writable
+ * mappings.
+ */
+ if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end)))
+ readonly = 1;

- /*
- * No need for writable mapping of kernel image. This also ensures that
- * page and descriptor tables embedded inside don't have writable mappings.
- */
- if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end)))
- readonly = 1;
-
- return readonly;
+ return readonly;
}

-void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
long i, j, k;
unsigned long paddr;
@@ -485,7 +456,7 @@ void __init phys_pud_init(pud_t *pud, un
break;
}

- pmd = alloc_low_page(&pmd_phys);
+ pmd = alloc_static_page(&pmd_phys);
make_page_readonly(pmd);
xen_pmd_pin(pmd_phys);
set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
@@ -499,19 +470,20 @@ void __init phys_pud_init(pud_t *pud, un
set_pmd(pmd, __pmd(0));
break;
}
- pte = alloc_low_page(&pte_phys);
+ pte = alloc_static_page(&pte_phys);
pte_save = pte;
for (k = 0; k < PTRS_PER_PTE; pte++, k++, paddr += PTE_SIZE) {
+ if ((paddr >= end) ||
+ ((paddr >> PAGE_SHIFT)
+ >= xen_start_info.nr_pages)) {
+ __set_pte(pte, __pte(0));
+ continue;
+ }
if (make_readonly(paddr)) {
__set_pte(pte,
__pte(paddr | (_KERNPG_TABLE & ~_PAGE_RW)));
continue;
}
- if (paddr >= end) {
- for (; k < PTRS_PER_PTE; k++, pte++)
- __set_pte(pte, __pte(0));
- break;
- }
__set_pte(pte, __pte(paddr | _KERNPG_TABLE));
}
pte = pte_save;
@@ -525,15 +497,16 @@ void __init phys_pud_init(pud_t *pud, un

static void __init find_early_table_space(unsigned long end)
{
- unsigned long puds, pmds, ptes;
+ unsigned long puds, pmds, ptes;

puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
- ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
+ ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;

- tables_space = round_up(puds * 8, PAGE_SIZE) +
- round_up(pmds * 8, PAGE_SIZE) +
- round_up(ptes * 8, PAGE_SIZE);
+ tables_space =
+ round_up(puds * 8, PAGE_SIZE) +
+ round_up(pmds * 8, PAGE_SIZE) +
+ round_up(ptes * 8, PAGE_SIZE);
}

void __init xen_init_pt(void)
@@ -579,66 +552,59 @@ void __init xen_init_pt(void)
mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
}

-/*
- * Extend kernel mapping to access pages for page tables. The initial
- * mapping done by Xen is minimal (e.g. 8MB) and we need to extend the
- * mapping for early initialization.
- */
-static unsigned long current_size, extended_size;
-
void __init extend_init_mapping(void)
{
unsigned long va = __START_KERNEL_map;
unsigned long phys, addr, *pte_page;
- pmd_t *pmd;
+ pmd_t *pmd;
pte_t *pte, new_pte;
- unsigned long *page = (unsigned long *) init_level4_pgt;
- int i;
+ unsigned long *page = (unsigned long *)init_level4_pgt;

addr = page[pgd_index(va)];
addr_to_page(addr, page);
addr = page[pud_index(va)];
addr_to_page(addr, page);

- for (;;) {
- pmd = (pmd_t *)&page[pmd_index(va)];
- if (!pmd_present(*pmd))
- break;
- addr = page[pmd_index(va)];
- addr_to_page(addr, pte_page);
- for (i = 0; i < PTRS_PER_PTE; i++) {
- pte = (pte_t *) &pte_page[pte_index(va)];
- if (!pte_present(*pte))
- break;
- va += PAGE_SIZE;
- current_size += PAGE_SIZE;
- }
+ /* Kill mapping of low 1MB. */
+ while (va < (unsigned long)&_text) {
+ HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
+ va += PAGE_SIZE;
}

- while (va < __START_KERNEL_map + current_size + tables_space) {
- pmd = (pmd_t *) &page[pmd_index(va)];
- if (!pmd_none(*pmd))
- continue;
- pte_page = (unsigned long *) alloc_static_page(&phys);
- make_page_readonly(pte_page);
- xen_pte_pin(phys);
- set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER));
- for (i = 0; i < PTRS_PER_PTE; i++, va += PAGE_SIZE) {
+ /* Ensure init mappings cover kernel text/data and initial tables. */
+ while (va < (__START_KERNEL_map
+ + (start_pfn << PAGE_SHIFT)
+ + tables_space)) {
+ pmd = (pmd_t *)&page[pmd_index(va)];
+ if (pmd_none(*pmd)) {
+ pte_page = alloc_static_page(&phys);
+ make_page_readonly(pte_page);
+ xen_pte_pin(phys);
+ set_pmd(pmd, __pmd(phys | _KERNPG_TABLE | _PAGE_USER));
+ } else {
+ addr = page[pmd_index(va)];
+ addr_to_page(addr, pte_page);
+ }
+ pte = (pte_t *)&pte_page[pte_index(va)];
+ if (pte_none(*pte)) {
new_pte = pfn_pte(
(va - __START_KERNEL_map) >> PAGE_SHIFT,
__pgprot(_KERNPG_TABLE | _PAGE_USER));
- pte = (pte_t *)&pte_page[pte_index(va)];
xen_l1_entry_update(pte, new_pte);
- extended_size += PAGE_SIZE;
}
+ va += PAGE_SIZE;
}

- /* Kill mapping of low 1MB. */
- for (va = __START_KERNEL_map; va < (unsigned long)&_text; va += PAGE_SIZE)
+ /* Finally, blow away any spurious initial mappings. */
+ while (1) {
+ pmd = (pmd_t *)&page[pmd_index(va)];
+ if (pmd_none(*pmd))
+ break;
HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0);
+ va += PAGE_SIZE;
+ }
}

-
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
This runs before bootmem is initialized and gets pages directly from the
physical memory. To access them they are temporarily mapped. */
@@ -650,34 +616,31 @@ void __init init_memory_mapping(unsigned

find_early_table_space(end);
extend_init_mapping();
- start_pfn = current_size >> PAGE_SHIFT;

table_start = start_pfn;
- table_end = table_start;

start = (unsigned long)__va(start);
end = (unsigned long)__va(end);

for (; start < end; start = next) {
unsigned long pud_phys;
- pud_t *pud = alloc_low_page(&pud_phys);
- make_page_readonly(pud);
- xen_pud_pin(pud_phys);
+ pud_t *pud = alloc_static_page(&pud_phys);
+ make_page_readonly(pud);
+ xen_pud_pin(pud_phys);
next = start + PGDIR_SIZE;
if (next > end)
next = end;
phys_pud_init(pud, __pa(start), __pa(next));
set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
- }
+ }

- printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
- table_start<<PAGE_SHIFT,
- table_end<<PAGE_SHIFT);
+ printk("kernel direct mapping tables upto %lx @ %lx-%lx\n",
+ __pa(end), table_start<<PAGE_SHIFT, start_pfn<<PAGE_SHIFT);

- start_pfn = ((current_size + extended_size) >> PAGE_SHIFT);
+ BUG_ON(start_pfn != (table_start + (tables_space >> PAGE_SHIFT)));

__flush_tlb_all();
- init_mapping_done = 1;
+ init_mapping_done = 1;
}

extern struct x8664_pda cpu_pda[NR_CPUS];
@@ -1002,3 +965,13 @@ int in_gate_area_no_task(unsigned long a
{
return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}
+
+/*
+ * Local variables:
+ * c-file-style: "linux"
+ * indent-tabs-mode: t
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * tab-width: 8
+ * End:
+ */
--- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c Tue Aug 30 13:36:49 2005 -0700
@@ -44,6 +44,7 @@
#include <asm-xen/xen_proc.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/balloon.h>
+#include <asm-xen/xen-public/memory.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -168,6 +169,11 @@ static void balloon_process(void *unused
struct page *page;
long credit, debt, rc;
void *v;
+ struct xen_memory_reservation reservation = {
+ .address_bits = 0,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };

down(&balloon_mutex);

@@ -180,14 +186,18 @@ static void balloon_process(void *unused
goto out;

balloon_lock(flags);
- rc = HYPERVISOR_dom_mem_op(
- MEMOP_increase_reservation, mfn_list, credit, 0);
+ reservation.extent_start = mfn_list;
+ reservation.nr_extents = credit;
+ rc = HYPERVISOR_memory_op(
+ XENMEM_increase_reservation, &reservation);
balloon_unlock(flags);
if (rc < credit) {
/* We hit the Xen hard limit: reprobe. */
- BUG_ON(HYPERVISOR_dom_mem_op(
- MEMOP_decrease_reservation,
- mfn_list, rc, 0) != rc);
+ reservation.extent_start = mfn_list;
+ reservation.nr_extents = rc;
+ BUG_ON(HYPERVISOR_memory_op(
+ XENMEM_decrease_reservation,
+ &reservation) != rc);
hard_limit = current_pages + rc - driver_pages;
vfree(mfn_list);
goto retry;
@@ -261,8 +271,10 @@ static void balloon_process(void *unused
balloon_append(pfn_to_page(pfn));
}

- BUG_ON(HYPERVISOR_dom_mem_op(
- MEMOP_decrease_reservation,mfn_list, debt, 0) != debt);
+ reservation.extent_start = mfn_list;
+ reservation.nr_extents = debt;
+ BUG_ON(HYPERVISOR_memory_op(
+ XENMEM_decrease_reservation, &reservation) != debt);

current_pages -= debt;
}
@@ -438,11 +450,17 @@ static int dealloc_pte_fn(
pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
unsigned long mfn = pte_mfn(*pte);
+ struct xen_memory_reservation reservation = {
+ .extent_start = &mfn,
+ .nr_extents = 1,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };
set_pte(pte, __pte_ma(0));
phys_to_machine_mapping[__pa(addr) >> PAGE_SHIFT] =
INVALID_P2M_ENTRY;
- BUG_ON(HYPERVISOR_dom_mem_op(
- MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
+ BUG_ON(HYPERVISOR_memory_op(
+ XENMEM_decrease_reservation, &reservation) != 1);
return 0;
}
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c Tue Aug 30 13:36:49 2005 -0700
@@ -368,7 +368,7 @@ static void blkif_free(struct blkfront_i
free_page((unsigned long)info->ring.sring);
info->ring.sring = NULL;
}
- unbind_evtchn_from_irqhandler(info->evtchn, NULL);
+ unbind_evtchn_from_irqhandler(info->evtchn, info);
info->evtchn = 0;
}
--- a/linux-2.6-xen-sparse/drivers/xen/console/Makefile Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/drivers/xen/console/Makefile Tue Aug 30 13:36:49 2005 -0700
@@ -1,2 +1,2 @@

-obj-y := console.o
+obj-y := console.o xencons_ring.o
--- a/linux-2.6-xen-sparse/drivers/xen/console/console.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/drivers/xen/console/console.c Tue Aug 30 13:36:49 2005 -0700
@@ -51,8 +51,8 @@
#include <asm-xen/xen-public/event_channel.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/evtchn.h>
-#include <asm-xen/ctrl_if.h>

+#include "xencons_ring.h"
/*
* Modes:
* 'xencons=off' [XC_OFF]: Console is disabled.
@@ -118,13 +118,6 @@ static spinlock_t xencons_lock = SPIN_LO
/* Common transmit-kick routine. */
static void __xencons_tx_flush(void);

-/* This task is used to defer sending console data until there is space. */
-static void xencons_tx_flush_task_routine(void *data);
-
-static DECLARE_TQUEUE(xencons_tx_flush_task,
- xencons_tx_flush_task_routine,
- NULL);
-
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
static struct tty_driver *xencons_driver;
#else
@@ -264,39 +257,22 @@ asmlinkage int xprintk(const char *fmt,
/*** Forcibly flush console data before dying. ***/
void xencons_force_flush(void)
{
- ctrl_msg_t msg;
int sz;

/* Emergency console is synchronous, so there's nothing to flush. */
if ( xen_start_info.flags & SIF_INITDOMAIN )
return;

- /*
- * We use dangerous control-interface functions that require a quiescent
- * system and no interrupts. Try to ensure this with a global cli().
- */
- local_irq_disable(); /* XXXsmp */

/* Spin until console data is flushed through to the domain controller. */
- while ( (wc != wp) && !ctrl_if_transmitter_empty() )
+ while ( (wc != wp) )
{
- /* Interrupts are disabled -- we must manually reap responses. */
- ctrl_if_discard_responses();
-
+ int sent = 0;
if ( (sz = wp - wc) == 0 )
continue;
- if ( sz > sizeof(msg.msg) )
- sz = sizeof(msg.msg);
- if ( sz > (wbuf_size - WBUF_MASK(wc)) )
- sz = wbuf_size - WBUF_MASK(wc);
-
- msg.type = CMSG_CONSOLE;
- msg.subtype = CMSG_CONSOLE_DATA;
- msg.length = sz;
- memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
-
- if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
- wc += sz;
+ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
+ if (sent > 0)
+ wc += sent;
}
}

@@ -320,7 +296,7 @@ static int xencons_priv_irq;
static char x_char;

/* Non-privileged receive callback. */
-static void xencons_rx(ctrl_msg_t *msg, unsigned long id)
+static void xencons_rx(char *buf, unsigned len)
{
int i;
unsigned long flags;
@@ -328,21 +304,18 @@ static void xencons_rx(ctrl_msg_t *msg,
spin_lock_irqsave(&xencons_lock, flags);
if ( xencons_tty != NULL )
{
- for ( i = 0; i < msg->length; i++ )
- tty_insert_flip_char(xencons_tty, msg->msg[i], 0);
+ for ( i = 0; i < len; i++ )
+ tty_insert_flip_char(xencons_tty, buf[i], 0);
tty_flip_buffer_push(xencons_tty);
}
spin_unlock_irqrestore(&xencons_lock, flags);

- msg->length = 0;
- ctrl_if_send_response(msg);
}

/* Privileged and non-privileged transmit worker. */
static void __xencons_tx_flush(void)
{
int sz, work_done = 0;
- ctrl_msg_t msg;

if ( xen_start_info.flags & SIF_INITDOMAIN )
{
@@ -367,38 +340,23 @@ static void __xencons_tx_flush(void)
{
while ( x_char )
{
- msg.type = CMSG_CONSOLE;
- msg.subtype = CMSG_CONSOLE_DATA;
- msg.length = 1;
- msg.msg[0] = x_char;
-
- if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
- x_char = 0;
- else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) )
- break;
-
- work_done = 1;
+ if (xencons_ring_send(&x_char, 1) == 1) {
+ x_char = 0;
+ work_done = 1;
+ }
}

while ( wc != wp )
{
+ int sent;
sz = wp - wc;
- if ( sz > sizeof(msg.msg) )
- sz = sizeof(msg.msg);
- if ( sz > (wbuf_size - WBUF_MASK(wc)) )
- sz = wbuf_size - WBUF_MASK(wc);
-
- msg.type = CMSG_CONSOLE;
- msg.subtype = CMSG_CONSOLE_DATA;
- msg.length = sz;
- memcpy(msg.msg, &wbuf[WBUF_MASK(wc)], sz);
-
- if ( ctrl_if_send_message_noblock(&msg, NULL, 0) == 0 )
- wc += sz;
- else if ( ctrl_if_enqueue_space_callback(&xencons_tx_flush_task) )
- break;
-
- work_done = 1;
+ if ( sz > (wbuf_size - WBUF_MASK(wc)) )
+ sz = wbuf_size - WBUF_MASK(wc);
+ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
+ if ( sent > 0 ) {
+ wc += sent;
+ work_done = 1;
+ }
}
}

@@ -411,15 +369,6 @@ static void __xencons_tx_flush(void)
}
}

-/* Non-privileged transmit kicker. */
-static void xencons_tx_flush_task_routine(void *data)
-{
- unsigned long flags;
- spin_lock_irqsave(&xencons_lock, flags);
- __xencons_tx_flush();
- spin_unlock_irqrestore(&xencons_lock, flags);
-}
-
/* Privileged receive callback and transmit kicker. */
static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
struct pt_regs *regs)
@@ -726,6 +675,8 @@ static int __init xencons_init(void)
if ( xc_mode == XC_OFF )
return 0;

+ xencons_ring_init();
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
xencons_driver = alloc_tty_driver((xc_mode == XC_SERIAL) ?
1 : MAX_NR_CONSOLES);
@@ -802,7 +753,8 @@ static int __init xencons_init(void)
}
else
{
- (void)ctrl_if_register_receiver(CMSG_CONSOLE, xencons_rx, 0);
+
+ xencons_ring_register_receiver(xencons_rx);
}

printk("Xen virtual console successfully installed as %s%d\n",
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.c Tue Aug 30 13:36:49 2005 -0700
@@ -0,0 +1,124 @@
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/major.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include <asm-xen/hypervisor.h>
+#include <asm-xen/evtchn.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include "xencons_ring.h"
+
+
+struct ring_head
+{
+ u32 cons;
+ u32 prod;
+ char buf[0];
+} __attribute__((packed));
+
+
+#define XENCONS_RING_SIZE (PAGE_SIZE/2 - sizeof (struct ring_head))
+#define XENCONS_IDX(cnt) ((cnt) % XENCONS_RING_SIZE)
+#define XENCONS_FULL(ring) (((ring)->prod - (ring)->cons) == XENCONS_RING_SIZE)
+
+static inline struct ring_head *outring(void)
+{
+ return machine_to_virt(xen_start_info.console_mfn << PAGE_SHIFT);
+}
+
+static inline struct ring_head *inring(void)
+{
+ return machine_to_virt(xen_start_info.console_mfn << PAGE_SHIFT)
+ + PAGE_SIZE/2;
+}
+
+
+/* don't block - write as much as possible and return */
+static int __xencons_ring_send(struct ring_head *ring, const char *data, unsigned len)
+{
+ int copied = 0;
+
+ mb();
+ while (copied < len && !XENCONS_FULL(ring)) {
+ ring->buf[XENCONS_IDX(ring->prod)] = data[copied];
+ ring->prod++;
+ copied++;
+ }
+ mb();
+
+ return copied;
+}
+
+int xencons_ring_send(const char *data, unsigned len)
+{
+ struct ring_head *out = outring();
+ int sent = 0;
+
+ sent = __xencons_ring_send(out, data, len);
+ notify_via_evtchn(xen_start_info.console_evtchn);
+ return sent;
+
+}
+
+
+static xencons_receiver_func *xencons_receiver;
+
+static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
+{
+ struct ring_head *ring = inring();
+ while (ring->cons < ring->prod) {
+ if (xencons_receiver != NULL) {
+ xencons_receiver(ring->buf + XENCONS_IDX(ring->cons),
+ 1);
+ }
+ ring->cons++;
+ }
+ return IRQ_HANDLED;
+}
+
+void xencons_ring_register_receiver(xencons_receiver_func *f)
+{
+ xencons_receiver = f;
+}
+
+int xencons_ring_init(void)
+{
+ int err;
+
+ if (!xen_start_info.console_evtchn)
+ return 0;
+
+ err = bind_evtchn_to_irqhandler(
+ xen_start_info.console_evtchn, handle_input,
+ 0, "xencons", inring());
+ if (err) {
+ xprintk(KERN_ERR "XEN console request irq failed %i\n", err);
+ unbind_evtchn_from_irq(xen_start_info.console_evtchn);
+ return err;
+ }
+
+ return 0;
+}
+
+void xencons_suspend_comms(void)
+{
+
+ if (!xen_start_info.console_evtchn)
+ return;
+
+ unbind_evtchn_from_irqhandler(xen_start_info.console_evtchn, inring());
+}
+
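The new ring keeps free-running 32-bit cons/prod counters and reduces them modulo XENCONS_RING_SIZE only when indexing the buffer: the ring is full when prod - cons equals the ring size and empty when the counters are equal. A minimal standalone sketch of that arithmetic, using a demo-sized ring rather than the real PAGE_SIZE/2 layout:

#include <stdio.h>

#define DEMO_RING_SIZE 8u
#define DEMO_IDX(cnt)  ((cnt) % DEMO_RING_SIZE)

int main(void)
{
	char buf[DEMO_RING_SIZE];
	unsigned int cons = 0, prod = 0;
	const char *msg = "hello, ring";

	/* producer: stop when the ring is full (counters differ by the size) */
	for (const char *p = msg; *p && (prod - cons) != DEMO_RING_SIZE; p++)
		buf[DEMO_IDX(prod++)] = *p;

	/* consumer: drain whatever the producer wrote */
	while (cons != prod)
		putchar(buf[DEMO_IDX(cons++)]);
	putchar('\n');
	return 0;	/* prints "hello, r": only 8 slots fit */
}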
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/console/xencons_ring.h Tue Aug 30 13:36:49 2005 -0700
@@ -0,0 +1,13 @@
+#ifndef _XENCONS_RING_H
+#define _XENCONS_RING_H
+
+asmlinkage int xprintk(const char *fmt, ...);
+
+
+int xencons_ring_init(void);
+int xencons_ring_send(const char *data, unsigned len);
+
+typedef void (xencons_receiver_func)(char *buf, unsigned len);
+void xencons_ring_register_receiver(xencons_receiver_func *f);
+
+#endif /* _XENCONS_RING_H */
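Taken together with the console.c changes above, the intended call pattern for this header is roughly the following sketch; my_console_rx is an illustrative name and error handling is simplified:

static void my_console_rx(char *buf, unsigned len)
{
	/* hand received bytes to the tty layer, as xencons_rx() does above */
}

static int my_console_setup(void)
{
	int err = xencons_ring_init();	/* binds the console event channel */
	if (err)
		return err;
	xencons_ring_register_receiver(my_console_rx);
	/* may write fewer bytes than asked for if the ring is full */
	xencons_ring_send("xencons up\n", 11);
	return 0;
}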
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Tue Aug 30 13:36:49 2005 -0700
@@ -12,6 +12,7 @@

#include "common.h"
#include <asm-xen/balloon.h>
+#include <asm-xen/xen-public/memory.h>

#if defined(CONFIG_XEN_NETDEV_GRANT_TX) || defined(CONFIG_XEN_NETDEV_GRANT_RX)
#include <asm-xen/xen-public/grant_table.h>
@@ -110,10 +111,16 @@ static spinlock_t mfn_lock = SPIN_LOCK_U
static unsigned long alloc_mfn(void)
{
unsigned long mfn = 0, flags;
+ struct xen_memory_reservation reservation = {
+ .extent_start = mfn_list,
+ .nr_extents = MAX_MFN_ALLOC,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };
spin_lock_irqsave(&mfn_lock, flags);
if ( unlikely(alloc_index == 0) )
- alloc_index = HYPERVISOR_dom_mem_op(
- MEMOP_increase_reservation, mfn_list, MAX_MFN_ALLOC, 0);
+ alloc_index = HYPERVISOR_memory_op(
+ XENMEM_increase_reservation, &reservation);
if ( alloc_index != 0 )
mfn = mfn_list[--alloc_index];
spin_unlock_irqrestore(&mfn_lock, flags);
@@ -124,11 +131,17 @@ static unsigned long alloc_mfn(void)
static void free_mfn(unsigned long mfn)
{
unsigned long flags;
+ struct xen_memory_reservation reservation = {
+ .extent_start = &mfn,
+ .nr_extents = 1,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };
spin_lock_irqsave(&mfn_lock, flags);
if ( alloc_index != MAX_MFN_ALLOC )
mfn_list[alloc_index++] = mfn;
- else if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
- &mfn, 1, 0) != 1 )
+ else if ( HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation)
+ != 1 )
BUG();
spin_unlock_irqrestore(&mfn_lock, flags);
}
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Tue Aug 30 13:36:49 2005 -0700
@@ -50,6 +50,7 @@
#include <asm-xen/evtchn.h>
#include <asm-xen/xenbus.h>
#include <asm-xen/xen-public/io/netif.h>
+#include <asm-xen/xen-public/memory.h>
#include <asm-xen/balloon.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -328,6 +329,7 @@ static void network_alloc_rx_buffers(str
struct sk_buff *skb;
int i, batch_target;
NETIF_RING_IDX req_prod = np->rx->req_prod;
+ struct xen_memory_reservation reservation;
#ifdef CONFIG_XEN_NETDEV_GRANT_RX
int ref;
#endif
@@ -388,12 +390,15 @@ static void network_alloc_rx_buffers(str
rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;

/* Give away a batch of pages. */
- rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
- rx_mcl[i].args[0] = MEMOP_decrease_reservation;
- rx_mcl[i].args[1] = (unsigned long)rx_pfn_array;
- rx_mcl[i].args[2] = (unsigned long)i;
- rx_mcl[i].args[3] = 0;
- rx_mcl[i].args[4] = DOMID_SELF;
+ rx_mcl[i].op = __HYPERVISOR_memory_op;
+ rx_mcl[i].args[0] = XENMEM_decrease_reservation;
+ rx_mcl[i].args[1] = (unsigned long)&reservation;
+
+ reservation.extent_start = rx_pfn_array;
+ reservation.nr_extents = i;
+ reservation.extent_order = 0;
+ reservation.address_bits = 0;
+ reservation.domid = DOMID_SELF;

/* Tell the ballon driver what is going on. */
balloon_update_driver_allowance(i);
@@ -401,7 +406,7 @@ static void network_alloc_rx_buffers(str
/* Zap PTEs and give away pages in one big multicall. */
(void)HYPERVISOR_multicall(rx_mcl, i+1);

- /* Check return status of HYPERVISOR_dom_mem_op(). */
+ /* Check return status of HYPERVISOR_memory_op(). */
if (unlikely(rx_mcl[i].result != i))
panic("Unable to reduce memory reservation\n");

--- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c Tue Aug 30 13:36:49 2005 -0700
@@ -66,7 +66,7 @@ static int privcmd_ioctl(struct inode *i
{
long ign1, ign2, ign3;
__asm__ __volatile__ (
- "movq %5,%%r10; movq %6,%%r8;" TRAP_INSTR
+ "movq %8,%%r10; movq %9,%%r8;" TRAP_INSTR
: "=a" (ret), "=D" (ign1), "=S" (ign2), "=d" (ign3)
: "0" ((unsigned long)hypercall.op),
"1" ((unsigned long)hypercall.arg[0]),
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_probe.c Tue Aug 30 13:36:49 2005 -0700
@@ -209,6 +209,7 @@ int xenbus_register_device(struct xenbus
{
return xenbus_register_driver(drv, &xenbus_frontend);
}
+EXPORT_SYMBOL(xenbus_register_device);

int xenbus_register_backend(struct xenbus_driver *drv)
{
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c Tue Aug 30 13:36:49 2005 -0700
@@ -45,7 +45,9 @@

static char printf_buffer[4096];
static LIST_HEAD(watches);
+
DECLARE_MUTEX(xenbus_lock);
+EXPORT_SYMBOL(xenbus_lock);

static int get_error(const char *errorstring)
{
@@ -224,6 +226,7 @@ char **xenbus_directory(const char *dir,
ret[(*num)++] = p;
return ret;
}
+EXPORT_SYMBOL(xenbus_directory);

/* Check if a path exists. Return 1 if it does. */
int xenbus_exists(const char *dir, const char *node)
@@ -237,6 +240,7 @@ int xenbus_exists(const char *dir, const
kfree(d);
return 1;
}
+EXPORT_SYMBOL(xenbus_exists);

/* Get the value of a single file.
* Returns a kmalloced value: call free() on it after use.
@@ -246,6 +250,7 @@ void *xenbus_read(const char *dir, const
{
return xs_single(XS_READ, join(dir, node), len);
}
+EXPORT_SYMBOL(xenbus_read);

/* Write the value of a single file.
* Returns -err on failure. createflags can be 0, O_CREAT, or O_CREAT|O_EXCL.
@@ -276,18 +281,21 @@ int xenbus_write(const char *dir, const

return xs_error(xs_talkv(XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
}
+EXPORT_SYMBOL(xenbus_write);

/* Create a new directory. */
int xenbus_mkdir(const char *dir, const char *node)
{
return xs_error(xs_single(XS_MKDIR, join(dir, node), NULL));
}
+EXPORT_SYMBOL(xenbus_mkdir);

/* Destroy a file or directory (directories must be empty). */
int xenbus_rm(const char *dir, const char *node)
{
return xs_error(xs_single(XS_RM, join(dir, node), NULL));
}
+EXPORT_SYMBOL(xenbus_rm);

/* Start a transaction: changes by others will not be seen during this
* transaction, and changes will not be visible to others until end.
@@ -298,6 +306,7 @@ int xenbus_transaction_start(const char
{
return xs_error(xs_single(XS_TRANSACTION_START, subtree, NULL));
}
+EXPORT_SYMBOL(xenbus_transaction_start);

/* End a transaction.
* If abandon is true, transaction is discarded instead of committed.
@@ -312,6 +321,7 @@ int xenbus_transaction_end(int abort)
strcpy(abortstr, "T");
return xs_error(xs_single(XS_TRANSACTION_END, abortstr, NULL));
}
+EXPORT_SYMBOL(xenbus_transaction_end);

/* Single read and scanf: returns -errno or num scanned. */
int xenbus_scanf(const char *dir, const char *node, const char *fmt, ...)
@@ -333,6 +343,7 @@ int xenbus_scanf(const char *dir, const
return -ERANGE;
return ret;
}
+EXPORT_SYMBOL(xenbus_scanf);

/* Single printf and write: returns -errno or 0. */
int xenbus_printf(const char *dir, const char *node, const char *fmt, ...)
@@ -348,6 +359,7 @@ int xenbus_printf(const char *dir, const
BUG_ON(ret > sizeof(printf_buffer)-1);
return xenbus_write(dir, node, printf_buffer, O_CREAT);
}
+EXPORT_SYMBOL(xenbus_printf);

/* Report a (negative) errno into the store, with explanation. */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
@@ -369,6 +381,7 @@ void xenbus_dev_error(struct xenbus_devi
printk("xenbus: failed to write error node for %s (%s)\n",
dev->nodename, printf_buffer);
}
+EXPORT_SYMBOL(xenbus_dev_error);

/* Clear any error. */
void xenbus_dev_ok(struct xenbus_device *dev)
@@ -381,6 +394,7 @@ void xenbus_dev_ok(struct xenbus_device
dev->has_error = 0;
}
}
+EXPORT_SYMBOL(xenbus_dev_ok);

/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
int xenbus_gather(const char *dir, ...)
@@ -410,6 +424,7 @@ int xenbus_gather(const char *dir, ...)
va_end(ap);
return ret;
}
+EXPORT_SYMBOL(xenbus_gather);

static int xs_watch(const char *path, const char *token)
{
@@ -482,6 +497,7 @@ int register_xenbus_watch(struct xenbus_
list_add(&watch->list, &watches);
return err;
}
+EXPORT_SYMBOL(register_xenbus_watch);

void unregister_xenbus_watch(struct xenbus_watch *watch)
{
@@ -499,6 +515,7 @@ void unregister_xenbus_watch(struct xenb
"XENBUS Failed to release watch %s: %i\n",
watch->node, err);
}
+EXPORT_SYMBOL(unregister_xenbus_watch);

/* Re-register callbacks to all watches. */
void reregister_xenbus_watches(void)
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/hypercall.h Tue Aug 30 13:36:49 2005 -0700
@@ -236,12 +236,10 @@ HYPERVISOR_update_descriptor(
}

static inline int
-HYPERVISOR_dom_mem_op(
- unsigned int op, unsigned long *extent_list,
- unsigned long nr_extents, unsigned int extent_order)
+HYPERVISOR_memory_op(
+ unsigned int cmd, void *arg)
{
- return _hypercall5(int, dom_mem_op, op, extent_list,
- nr_extents, extent_order, DOMID_SELF);
+ return _hypercall2(int, memory_op, cmd, arg);
}

static inline int
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h Mon Aug 29 16:05:29 2005 -0700
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h Tue Aug 30 13:36:49 2005 -0700
@@ -231,12 +231,10 @@ HYPERVISOR_update_descriptor(
}

static inline int
-HYPERVISOR_dom_mem_op(
- unsigned int op, unsigned long *extent_list,
- unsigned long nr_extents, unsigned int extent_order)
+HYPERVISOR_memory_op(
+ unsigned int cmd, void *arg)
{
- return _hypercall5(int, dom_mem_op, op, extent_list,
- nr_extents, extent_order, DOMID_SELF);
+ return _hypercall2(int, memory_op, cmd, arg);
}

static inline int
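Both hypercall.h hunks replace the five-argument dom_mem_op wrapper with a two-argument memory_op that takes a command and a pointer to an argument struct, and every converted caller in this changeset builds a struct xen_memory_reservation in the same way. A hedged sketch of the resulting calling convention, with field names as used in the hunks above and illustrative error handling:

static int give_back_one_page(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.extent_start = &mfn,		/* array of machine frame numbers */
		.nr_extents   = 1,		/* one extent ... */
		.extent_order = 0,		/* ... of 2^0 == 1 page */
		.domid        = DOMID_SELF	/* .address_bits left at 0: no restriction */
	};

	/* the hypercall returns how many extents were actually processed */
	if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation) != 1)
		return -1;
	return 0;
}

Callers that need a differently shaped request, such as xen_create_contiguous_region() above, simply adjust extent_order and address_bits on the same struct and reissue the call.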
--- a/tools/Makefile Mon Aug 29 16:05:29 2005 -0700
+++ b/tools/Makefile Tue Aug 30 13:36:49 2005 -0700
@@ -7,15 +7,19 @@ SUBDIRS += xenstore
SUBDIRS += misc
SUBDIRS += examples
SUBDIRS += xentrace
-SUBDIRS += python
SUBDIRS += xcs
SUBDIRS += xcutils
-#SUBDIRS += pygrub
SUBDIRS += firmware
SUBDIRS += security
SUBDIRS += console
SUBDIRS += xenstat

+# These don't cross-compile
+ifeq ($(XEN_COMPILE_ARCH),$(XEN_TARGET_ARCH))
+SUBDIRS += python
+#SUBDIRS += pygrub
+endif
+
.PHONY: all install clean check check_clean ioemu eioemuinstall ioemuclean

all: check
--- a/tools/console/Makefile Mon Aug 29 16:05:29 2005 -0700
+++ b/tools/console/Makefile Tue Aug 30 13:36:49 2005 -0700
@@ -9,8 +9,7 @@ INSTALL = install
INSTALL_PROG = $(INSTALL) -m0755
INSTALL_DIR = $(INSTALL) -d -m0755

-CC = gcc
-CFLAGS = -Wall -Werror -g3
+CFLAGS += -Wall -Werror -g3

CFLAGS += -I $(XEN_XCS)
CFLAGS += -I $(XEN_LIBXC)
--- a/tools/console/daemon/io.c Mon Aug 29 16:05:29 2005 -0700
+++ b/tools/console/daemon/io.c Tue Aug 30 13:36:49 2005 -0700
@@ -36,6 +36,9 @@
#include <fcntl.h>
#include <unistd.h>
#include <termios.h>
+#include <stdarg.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>

#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
@@ -48,26 +51,67 @@ struct buffer
size_t max_capacity;
};

-static void buffer_append(struct buffer *buffer, const void *data, size_t size)
+struct domain
+{
+ int domid;
+ int tty_fd;
+ bool is_dead;
+ struct buffer buffer;
+ struct domain *next;
+ unsigned long mfn;
+ int local_port;
+ int remote_port;
+ char *page;
+ int evtchn_fd;
+};
+
+static struct domain *dom_head;
+
+struct ring_head
{
- if ((buffer->capacity - buffer->size) < size) {
- buffer->capacity += (size + 1024);
- buffer->data = realloc(buffer->data, buffer->capacity);
- if (buffer->data == NULL) {
- dolog(LOG_ERR, "Memory allocation failed");
- exit(ENOMEM);
+ u32 cons;
+ u32 prod;
+ char buf[0];
+} __attribute__((packed));
+
+#define PAGE_SIZE (getpagesize())
+#define XENCONS_RING_SIZE (PAGE_SIZE/2 - sizeof (struct ring_head))
+#define XENCONS_IDX(cnt) ((cnt) % XENCONS_RING_SIZE)
+#define XENCONS_FULL(ring) (((ring)->prod - (ring)->cons) == XENCONS_RING_SIZE)
+#define XENCONS_SPACE(ring) (XENCONS_RING_SIZE - ((ring)->prod - (ring)->cons))
+
+static void buffer_append(struct domain *dom)
+{
+ struct buffer *buffer = &dom->buffer;
+ struct ring_head *ring = (struct ring_head *)dom->page;
+ size_t size;
+
+ while ((size = ring->prod - ring->cons) != 0) {
+ if ((buffer->capacity - buffer->size) < size) {
+ buffer->capacity += (size + 1024);
+ buffer->data = realloc(buffer->data, buffer->capacity);
+ if (buffer->data == NULL) {
+ dolog(LOG_ERR, "Memory allocation failed");
+ exit(ENOMEM);
+ }
}
- }

- memcpy(buffer->data + buffer->size, data, size);
- buffer->size += size;
+ while (ring->cons < ring->prod) {
+ buffer->data[buffer->size] =
+ ring->buf[XENCONS_IDX(ring->cons)];
+ buffer->size++;
+ ring->cons++;
+ }

- if (buffer->max_capacity &&
- buffer->size > buffer->max_capacity) {
- memmove(buffer->data + (buffer->size - buffer->max_capacity),
- buffer->data, buffer->max_capacity);
- buffer->data = realloc(buffer->data, buffer->max_capacity);
- buffer->capacity = buffer->max_capacity;
+ if (buffer->max_capacity &&
+ buffer->size > buffer->max_capacity) {
+ memmove(buffer->data + (buffer->size -
+ buffer->max_capacity),
+ buffer->data, buffer->max_capacity);
+ buffer->data = realloc(buffer->data,
+ buffer->max_capacity);
+ buffer->capacity = buffer->max_capacity;
+ }
}
}

@@ -83,17 +127,6 @@ static void buffer_advance(struct buffer
buffer->size -= size;
}

-struct domain
-{
- int domid;
- int tty_fd;
- bool is_dead;
- struct buffer buffer;
- struct domain *next;
-};
-
-static struct domain *dom_head;
-
static bool domain_is_valid(int domid)
{
bool ret;
@@ -107,7 +140,7 @@ static bool domain_is_valid(int domid)

static int domain_create_tty(struct domain *dom)
{
- char path[1024];
+ char *path;
int master;

if ((master = getpt()) == -1 ||
@@ -126,24 +159,108 @@ static int domain_create_tty(struct doma
tcsetattr(master, TCSAFLUSH, &term);
}

- xs_mkdir(xs, "/console");
- snprintf(path, sizeof(path), "/console/%d", dom->domid);
- xs_mkdir(xs, path);
- strcat(path, "/tty");
+ asprintf(&path, "/console/%d/tty", dom->domid);
+ xs_write(xs, path, slave, strlen(slave), O_CREAT);
+ free(path);

- xs_write(xs, path, slave, strlen(slave), O_CREAT);
-
- snprintf(path, sizeof(path), "/console/%d/limit", dom->domid);
+ asprintf(&path, "/console/%d/limit", dom->domid);
data = xs_read(xs, path, &len);
if (data) {
dom->buffer.max_capacity = strtoul(data, 0, 0);
free(data);
}
+ free(path);
}

return master;
}

+/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
+int xs_gather(struct xs_handle *xs, const char *dir, ...)
+{
+ va_list ap;
+ const char *name;
+ char *path;
+ int ret = 0;
+
+ va_start(ap, dir);
+ while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
+ const char *fmt = va_arg(ap, char *);
+ void *result = va_arg(ap, void *);
+ char *p;
+
+ asprintf(&path, "%s/%s", dir, name);
+ p = xs_read(xs, path, NULL);
+ free(path);
+ if (p == NULL) {
+ ret = ENOENT;
+ break;
+ }
+ if (fmt) {
+ if (sscanf(p, fmt, result) == 0)
+ ret = EINVAL;
+ free(p);
+ } else
+ *(char **)result = p;
+ }
+ va_end(ap);
+ return ret;
+}
+
+#define EVENTCHN_BIND _IO('E', 2)
+#define EVENTCHN_UNBIND _IO('E', 3)
+
+static int domain_create_ring(struct domain *dom)
+{
+ char *dompath, *path;
+ int err;
+
+ dom->page = NULL;
+ dom->evtchn_fd = -1;
+
+ asprintf(&path, "/console/%d/domain", dom->domid);
+ dompath = xs_read(xs, path, NULL);
+ free(path);
+ if (!dompath)
+ return ENOENT;
+
+ err = xs_gather(xs, dompath,
+ "console_mfn", "%li", &dom->mfn,
+ "console_channel/port1", "%i", &dom->local_port,
+ "console_channel/port2", "%i", &dom->remote_port,
+ NULL);
+ if (err)
+ goto out;
+
+ dom->page = xc_map_foreign_range(xc, dom->domid, getpagesize(),
+ PROT_READ|PROT_WRITE, dom->mfn);
+ if (dom->page == NULL) {
+ err = EINVAL;
+ goto out;
+ }
+
+ /* Opening evtchn independently for each console is a bit
+ * wastefule, but that's how the code is structured... */
+ err = open("/dev/xen/evtchn", O_RDWR);
+ if (err == -1) {
+ err = errno;
+ goto out;
+ }
+ dom->evtchn_fd = err;
+
+ if (ioctl(dom->evtchn_fd, EVENTCHN_BIND, dom->local_port) == -1) {
+ err = errno;
+ munmap(dom->page, getpagesize());
+ close(dom->evtchn_fd);
+ dom->evtchn_fd = -1;
+ goto out;
+ }
+
+ out:
+ free(dompath);
+ return err;
+}
+
static struct domain *create_domain(int domid)
{
struct domain *dom;
@@ -162,7 +279,9 @@ static struct domain *create_domain(int
dom->buffer.size = 0;
dom->buffer.capacity = 0;
dom->buffer.max_capacity = 0;
- dom->next = 0;
+ dom->next = NULL;
+
+ domain_create_ring(dom);

dolog(LOG_DEBUG, "New domain %d", domid);

@@ -200,9 +319,14 @@ static void remove_domain(struct domain

if (dom->domid == d->domid) {
*pp = d->next;
- if (d->buffer.data) {
+ if (d->buffer.data)
free(d->buffer.data);
- }
+ if (d->page)
+ munmap(d->page, getpagesize());
+ if (d->evtchn_fd != -1)
+ close(d->evtchn_fd);
+ if (d->tty_fd != -1)
+ close(d->tty_fd);
free(d);
break;
}
@@ -211,28 +335,28 @@ static void remove_domain(struct domain

static void remove_dead_domains(struct domain *dom)
{
- if (dom == NULL) return;
- remove_dead_domains(dom->next);
+ struct domain *n;

- if (dom->is_dead) {
- remove_domain(dom);
+ while (dom != NULL) {
+ n = dom->next;
+ if (dom->is_dead)
+ remove_domain(dom);
+ dom = n;
}
}

static void handle_tty_read(struct domain *dom)
{
ssize_t len;
- xcs_msg_t msg;
+ char msg[80];
+ struct ring_head *inring =
+ (struct ring_head *)(dom->page + PAGE_SIZE/2);
+ int i;

- msg.type = XCS_REQUEST;
- msg.u.control.remote_dom = dom->domid;
- msg.u.control.msg.type = CMSG_CONSOLE;
- msg.u.control.msg.subtype = CMSG_CONSOLE_DATA;
- msg.u.control.msg.id = 1;
-
- len = read(dom->tty_fd, msg.u.control.msg.msg, 60);
+ len = read(dom->tty_fd, msg, MAX(XENCONS_SPACE(inring), sizeof(msg)));
if (len < 1) {
close(dom->tty_fd);
+ dom->tty_fd = -1;

if (domain_is_valid(dom->domid)) {
dom->tty_fd = domain_create_tty(dom);
@@ -240,14 +364,14 @@ static void handle_tty_read(struct domai
dom->is_dead = true;
}
} else if (domain_is_valid(dom->domid)) {
- msg.u.control.msg.length = len;
-
- if (!write_sync(xcs_data_fd, &msg, sizeof(msg))) {
- dolog(LOG_ERR, "Write to xcs failed: %m");
- exit(1);
+ for (i = 0; i < len; i++) {
+ inring->buf[XENCONS_IDX(inring->prod)] = msg[i];
+ inring->prod++;
}
+ xc_evtchn_send(xc, dom->local_port);
} else {
close(dom->tty_fd);
+ dom->tty_fd = -1;
dom->is_dead = true;
}
}
@@ -259,6 +383,7 @@ static void handle_tty_write(struct doma

len = write(dom->tty_fd, dom->buffer.data, dom->buffer.size);
if (len < 1) {
close(dom->tty_fd);
+
dom->tty_fd = -1; 355.334 355.335 if (domain_is_valid(dom->domid)) { 355.336 dom->tty_fd = domain_create_tty(dom); 355.337 @@ -270,6 +395,18 @@ static void handle_tty_write(struct doma 355.338 } 355.339 } 355.340 355.341 +static void handle_ring_read(struct domain *dom) 355.342 +{ 355.343 + u16 v; 355.344 + 355.345 + if (!read_sync(dom->evtchn_fd, &v, sizeof(v))) 355.346 + return; 355.347 + 355.348 + buffer_append(dom); 355.349 + 355.350 + (void)write_sync(dom->evtchn_fd, &v, sizeof(v)); 355.351 +} 355.352 + 355.353 static void handle_xcs_msg(int fd) 355.354 { 355.355 xcs_msg_t msg; 355.356 @@ -277,13 +414,6 @@ static void handle_xcs_msg(int fd) 355.357 if (!read_sync(fd, &msg, sizeof(msg))) { 355.358 dolog(LOG_ERR, "read from xcs failed! %m"); 355.359 exit(1); 355.360 - } else if (msg.type == XCS_REQUEST) { 355.361 - struct domain *dom; 355.362 - 355.363 - dom = lookup_domain(msg.u.control.remote_dom); 355.364 - buffer_append(&dom->buffer, 355.365 - msg.u.control.msg.msg, 355.366 - msg.u.control.msg.length); 355.367 } 355.368 } 355.369 355.370 @@ -291,9 +421,12 @@ static void enum_domains(void) 355.371 { 355.372 int domid = 0; 355.373 xc_dominfo_t dominfo; 355.374 + struct domain *dom; 355.375 355.376 while (xc_domain_getinfo(xc, domid, 1, &dominfo) == 1) { 355.377 - lookup_domain(dominfo.domid); 355.378 + dom = lookup_domain(dominfo.domid); 355.379 + if (dominfo.dying || dominfo.crashed || dominfo.shutdown) 355.380 + dom->is_dead = true; 355.381 domid = dominfo.domid + 1; 355.382 } 355.383 } 355.384 @@ -302,12 +435,11 @@ void handle_io(void) 355.385 { 355.386 fd_set readfds, writefds; 355.387 int ret; 355.388 - int max_fd = -1; 355.389 - int num_of_writes = 0; 355.390 355.391 do { 355.392 struct domain *d; 355.393 struct timeval tv = { 1, 0 }; 355.394 + int max_fd = -1; 355.395 355.396 FD_ZERO(&readfds); 355.397 FD_ZERO(&writefds); 355.398 @@ -319,42 +451,36 @@ void handle_io(void) 355.399 if (d->tty_fd != -1) { 355.400 FD_SET(d->tty_fd, &readfds); 355.401 } 355.402 + if (d->evtchn_fd != -1) 355.403 + FD_SET(d->evtchn_fd, &readfds); 355.404 355.405 if (d->tty_fd != -1 && !buffer_empty(&d->buffer)) { 355.406 FD_SET(d->tty_fd, &writefds); 355.407 } 355.408 355.409 max_fd = MAX(d->tty_fd, max_fd); 355.410 + max_fd = MAX(d->evtchn_fd, max_fd); 355.411 } 355.412 355.413 ret = select(max_fd + 1, &readfds, &writefds, 0, &tv); 355.414 - if (tv.tv_sec == 1 && (++num_of_writes % 100) == 0) { 355.415 -#if 0 355.416 - /* FIXME */ 355.417 - /* This is a nasty hack. xcs does not handle the 355.418 - control channels filling up well at all. We'll 355.419 - throttle ourselves here since we do proper 355.420 - queueing to give the domains a shot at pulling out 355.421 - the data. 
Fixing xcs is not worth it as it's 355.422 - going away */ 355.423 - tv.tv_usec = 1000; 355.424 - select(0, 0, 0, 0, &tv); 355.425 -#endif 355.426 - } 355.427 enum_domains(); 355.428 355.429 - if (FD_ISSET(xcs_data_fd, &readfds)) { 355.430 + if (FD_ISSET(xcs_data_fd, &readfds)) 355.431 handle_xcs_msg(xcs_data_fd); 355.432 - } 355.433 355.434 for (d = dom_head; d; d = d->next) { 355.435 - if (!d->is_dead && FD_ISSET(d->tty_fd, &readfds)) { 355.436 - handle_tty_read(d); 355.437 - } 355.438 + if (d->is_dead || d->tty_fd == -1 || 355.439 + d->evtchn_fd == -1) 355.440 + continue; 355.441 355.442 - if (!d->is_dead && FD_ISSET(d->tty_fd, &writefds)) { 355.443 + if (FD_ISSET(d->tty_fd, &readfds)) 355.444 + handle_tty_read(d); 355.445 + 355.446 + if (FD_ISSET(d->evtchn_fd, &readfds)) 355.447 + handle_ring_read(d); 355.448 + 355.449 + if (FD_ISSET(d->tty_fd, &writefds)) 355.450 handle_tty_write(d); 355.451 - } 355.452 } 355.453 355.454 remove_dead_domains(dom_head);
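The console daemon now pulls guest output straight from a shared ring page instead of routing console traffic through xcs. The sketch below is a minimal, stand-alone illustration of the free-running-counter convention that the new buffer_append() and handle_tty_read() rely on; the ring layout and index macro mirror the patch, but the fixed ring size, the drain_ring() helper and main() are invented for the example.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define RING_SIZE 2048          /* illustrative; the daemon uses PAGE_SIZE/2 minus the header */
    #define RING_IDX(cnt)  ((cnt) % RING_SIZE)

    struct ring_head {
        uint32_t cons;              /* free-running consumer counter */
        uint32_t prod;              /* free-running producer counter */
        char buf[RING_SIZE];
    };

    /* Consumer side: copy everything currently queued in the ring into 'out'.
     * The counters are never reduced modulo RING_SIZE, so prod - cons is the
     * pending byte count even after the u32 counters wrap. */
    static size_t drain_ring(struct ring_head *ring, char *out, size_t out_len)
    {
        size_t copied = 0;
        while (ring->cons != ring->prod && copied < out_len) {
            out[copied++] = ring->buf[RING_IDX(ring->cons)];
            ring->cons++;
        }
        return copied;
    }

    int main(void)
    {
        static struct ring_head ring;
        char out[RING_SIZE];

        /* Producer side, as handle_tty_read() does for keyboard input. */
        const char *msg = "hello from dom0\n";
        for (size_t i = 0; i < strlen(msg); i++) {
            ring.buf[RING_IDX(ring.prod)] = msg[i];
            ring.prod++;
        }

        size_t n = drain_ring(&ring, out, sizeof(out));
        fwrite(out, 1, n, stdout);
        return 0;
    }

The same arithmetic gives the writer its free-space check: XENCONS_SPACE() in the patch is the ring size minus the pending count, and XENCONS_FULL() tests that difference against the ring size.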
358.1 --- a/tools/console/daemon/utils.c Mon Aug 29 16:05:29 2005 -0700 358.2 +++ b/tools/console/daemon/utils.c Tue Aug 30 13:36:49 2005 -0700 358.3 @@ -226,14 +226,10 @@ bool xen_setup(void) 358.4 goto out_close_data; 358.5 } 358.6 358.7 - /* Since the vast majority of control messages are console messages 358.8 - it's just easier to ignore other messages that try to bind to 358.9 - a specific type. */ 358.10 - msg.type = XCS_MSG_BIND; 358.11 - msg.u.bind.port = PORT_WILDCARD; 358.12 - msg.u.bind.type = TYPE_WILDCARD; 358.13 + msg.type = XCS_VIRQ_BIND; 358.14 + msg.u.virq.virq = VIRQ_DOM_EXC; 358.15 if (!xcs_send_recv(xcs_ctrl_fd, &msg) || msg.result != XCS_RSLT_OK) { 358.16 - dolog(LOG_ERR, "xcs vind failed. Possible bug."); 358.17 + dolog(LOG_ERR, "xcs virq bind failed. Possible bug."); 358.18 goto out_close_data; 358.19 } 358.20
400.1 --- a/tools/examples/Makefile Mon Aug 29 16:05:29 2005 -0700 400.2 +++ b/tools/examples/Makefile Tue Aug 30 13:36:49 2005 -0700 400.3 @@ -1,3 +1,6 @@ 400.4 +XEN_ROOT = ../../ 400.5 +include $(XEN_ROOT)/tools/Rules.mk 400.6 + 400.7 INSTALL = install 400.8 INSTALL_DIR = $(INSTALL) -d -m0755 400.9 INSTALL_PROG = $(INSTALL) -m0755
428.1 --- a/tools/libxc/xc_core.c Mon Aug 29 16:05:29 2005 -0700 428.2 +++ b/tools/libxc/xc_core.c Tue Aug 30 13:36:49 2005 -0700 428.3 @@ -2,6 +2,7 @@ 428.4 #define ELFSIZE 32 428.5 #include "xc_elf.h" 428.6 #include <stdlib.h> 428.7 +#include <unistd.h> 428.8 #include <zlib.h> 428.9 428.10 /* number of pages to write at a time */
429.1 --- a/tools/libxc/xc_domain.c Mon Aug 29 16:05:29 2005 -0700 429.2 +++ b/tools/libxc/xc_domain.c Tue Aug 30 13:36:49 2005 -0700 429.3 @@ -7,6 +7,7 @@ 429.4 */ 429.5 429.6 #include "xc_private.h" 429.7 +#include <xen/memory.h> 429.8 429.9 int xc_domain_create(int xc_handle, 429.10 u32 ssidref, 429.11 @@ -265,9 +266,13 @@ int xc_domain_memory_increase_reservatio 429.12 { 429.13 int err; 429.14 unsigned int npages = mem_kb / (PAGE_SIZE/1024); 429.15 + struct xen_memory_reservation reservation = { 429.16 + .nr_extents = npages, 429.17 + .extent_order = 0, 429.18 + .domid = domid 429.19 + }; 429.20 429.21 - err = xc_dom_mem_op(xc_handle, MEMOP_increase_reservation, NULL, 429.22 - npages, 0, domid); 429.23 + err = xc_memory_op(xc_handle, XENMEM_increase_reservation, &reservation); 429.24 if (err == npages) 429.25 return 0; 429.26
431.1 --- a/tools/libxc/xc_linux_build.c Mon Aug 29 16:05:29 2005 -0700 431.2 +++ b/tools/libxc/xc_linux_build.c Tue Aug 30 13:36:49 2005 -0700 431.3 @@ -17,6 +17,7 @@ 431.4 #include "xc_elf.h" 431.5 #include "xc_aout9.h" 431.6 #include <stdlib.h> 431.7 +#include <unistd.h> 431.8 #include <zlib.h> 431.9 431.10 #if defined(__i386__) 431.11 @@ -335,7 +336,8 @@ static int setup_guest(int xc_handle, 431.12 unsigned int control_evtchn, 431.13 unsigned long flags, 431.14 unsigned int vcpus, 431.15 - unsigned int store_evtchn, unsigned long *store_mfn) 431.16 + unsigned int store_evtchn, unsigned long *store_mfn, 431.17 + unsigned int console_evtchn, unsigned long *console_mfn) 431.18 { 431.19 unsigned long *page_array = NULL; 431.20 unsigned long count, i; 431.21 @@ -358,6 +360,8 @@ static int setup_guest(int xc_handle, 431.22 unsigned long vstartinfo_end; 431.23 unsigned long vstoreinfo_start; 431.24 unsigned long vstoreinfo_end; 431.25 + unsigned long vconsole_start; 431.26 + unsigned long vconsole_end; 431.27 unsigned long vstack_start; 431.28 unsigned long vstack_end; 431.29 unsigned long vpt_start; 431.30 @@ -393,7 +397,9 @@ static int setup_guest(int xc_handle, 431.31 vphysmap_end = vphysmap_start + (nr_pages * sizeof(unsigned long)); 431.32 vstoreinfo_start = round_pgup(vphysmap_end); 431.33 vstoreinfo_end = vstoreinfo_start + PAGE_SIZE; 431.34 - vpt_start = vstoreinfo_end; 431.35 + vconsole_start = vstoreinfo_end; 431.36 + vconsole_end = vstoreinfo_end + PAGE_SIZE; 431.37 + vpt_start = vconsole_end; 431.38 431.39 for ( nr_pt_pages = 2; ; nr_pt_pages++ ) 431.40 { 431.41 @@ -437,6 +443,7 @@ static int setup_guest(int xc_handle, 431.42 " Init. ramdisk: %p->%p\n" 431.43 " Phys-Mach map: %p->%p\n" 431.44 " Store page: %p->%p\n" 431.45 + " Console page: %p->%p\n" 431.46 " Page tables: %p->%p\n" 431.47 " Start info: %p->%p\n" 431.48 " Boot stack: %p->%p\n" 431.49 @@ -445,6 +452,7 @@ static int setup_guest(int xc_handle, 431.50 _p(vinitrd_start), _p(vinitrd_end), 431.51 _p(vphysmap_start), _p(vphysmap_end), 431.52 _p(vstoreinfo_start), _p(vstoreinfo_end), 431.53 + _p(vconsole_start), _p(vconsole_end), 431.54 _p(vpt_start), _p(vpt_end), 431.55 _p(vstartinfo_start), _p(vstartinfo_end), 431.56 _p(vstack_start), _p(vstack_end), 431.57 @@ -566,6 +574,8 @@ static int setup_guest(int xc_handle, 431.58 #endif 431.59 431.60 *store_mfn = page_array[(vstoreinfo_start-dsi.v_start) >> PAGE_SHIFT]; 431.61 + *console_mfn = page_array[(vconsole_start-dsi.v_start) >> PAGE_SHIFT]; 431.62 + 431.63 431.64 start_info = xc_map_foreign_range( 431.65 xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, 431.66 @@ -580,6 +590,8 @@ static int setup_guest(int xc_handle, 431.67 start_info->domain_controller_evtchn = control_evtchn; 431.68 start_info->store_mfn = *store_mfn; 431.69 start_info->store_evtchn = store_evtchn; 431.70 + start_info->console_mfn = *console_mfn; 431.71 + start_info->console_evtchn = console_evtchn; 431.72 if ( initrd_len != 0 ) 431.73 { 431.74 start_info->mod_start = vinitrd_start; 431.75 @@ -631,7 +643,9 @@ int xc_linux_build(int xc_handle, 431.76 unsigned long flags, 431.77 unsigned int vcpus, 431.78 unsigned int store_evtchn, 431.79 - unsigned long *store_mfn) 431.80 + unsigned long *store_mfn, 431.81 + unsigned int console_evtchn, 431.82 + unsigned long *console_mfn) 431.83 { 431.84 dom0_op_t launch_op, op; 431.85 int initrd_fd = -1; 431.86 @@ -707,7 +721,8 @@ int xc_linux_build(int xc_handle, 431.87 &vstack_start, ctxt, cmdline, 431.88 op.u.getdomaininfo.shared_info_frame, 431.89 control_evtchn, 
flags, vcpus, 431.90 - store_evtchn, store_mfn) < 0 ) 431.91 + store_evtchn, store_mfn, 431.92 + console_evtchn, console_mfn) < 0 ) 431.93 { 431.94 ERROR("Error constructing guest OS"); 431.95 goto error_out;
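xc_linux_build() now reserves one extra guest page for the console ring, slotted directly after the xenstore page and before the page tables, and reports its MFN back alongside store_mfn. A hedged sketch of the layout arithmetic this hunk adds (the base address is made up; the real code derives every address from the guest's link address and image sizes):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long vstoreinfo_start = 0xc0800000UL;            /* illustrative base only */
        unsigned long vstoreinfo_end   = vstoreinfo_start + PAGE_SIZE;
        unsigned long vconsole_start   = vstoreinfo_end;           /* new console page */
        unsigned long vconsole_end     = vconsole_start + PAGE_SIZE;
        unsigned long vpt_start        = vconsole_end;             /* page tables shift up by one page */

        printf(" Store page:   %#lx->%#lx\n", vstoreinfo_start, vstoreinfo_end);
        printf(" Console page: %#lx->%#lx\n", vconsole_start, vconsole_end);
        printf(" Page tables:  %#lx->...\n", vpt_start);
        return 0;
    }

The console page's MFN is then looked up in page_array[] and exported through start_info, so the in-guest console driver and the console daemon agree on which frame holds the shared ring.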
432.1 --- a/tools/libxc/xc_linux_restore.c Mon Aug 29 16:05:29 2005 -0700 432.2 +++ b/tools/libxc/xc_linux_restore.c Tue Aug 30 13:36:49 2005 -0700 432.3 @@ -8,24 +8,23 @@ 432.4 432.5 #include <stdlib.h> 432.6 #include <unistd.h> 432.7 - 432.8 #include "xg_private.h" 432.9 #include <xenctrl.h> 432.10 - 432.11 #include <xen/linux/suspend.h> 432.12 +#include <xen/memory.h> 432.13 432.14 #define MAX_BATCH_SIZE 1024 432.15 432.16 #define DEBUG 0 432.17 432.18 #if 1 432.19 -#define ERR(_f, _a...) fprintf ( stderr, _f , ## _a ); fflush(stderr) 432.20 +#define ERR(_f, _a...) do { fprintf ( stderr, _f , ## _a ); fflush(stderr); } while(0) 432.21 #else 432.22 #define ERR(_f, _a...) ((void)0) 432.23 #endif 432.24 432.25 #if DEBUG 432.26 -#define DPRINTF(_f, _a...) fprintf ( stdout, _f , ## _a ); fflush(stdout) 432.27 +#define DPRINTF(_f, _a...) do { fprintf ( stdout, _f , ## _a ); fflush(stdout); } while (0) 432.28 #else 432.29 #define DPRINTF(_f, _a...) ((void)0) 432.30 #endif 432.31 @@ -103,7 +102,7 @@ int xc_linux_restore(int xc_handle, int 432.32 struct mmuext_op pin[MAX_PIN_BATCH]; 432.33 unsigned int nr_pins = 0; 432.34 432.35 - DPRINTF("xc_linux_restore start\n"); 432.36 + DPRINTF("xc_linux_restore start: nr_pfns = %lx\n", nr_pfns); 432.37 432.38 if (mlock(&ctxt, sizeof(ctxt))) { 432.39 /* needed for when we do the build dom0 op, 432.40 @@ -152,6 +151,8 @@ int xc_linux_restore(int xc_handle, int 432.41 err = xc_domain_memory_increase_reservation(xc_handle, dom, 432.42 nr_pfns * PAGE_SIZE / 1024); 432.43 if (err != 0) { 432.44 + ERR("Failed to increate reservation by %lx\n", 432.45 + nr_pfns * PAGE_SIZE / 1024); 432.46 errno = ENOMEM; 432.47 goto out; 432.48 } 432.49 @@ -409,7 +410,8 @@ int xc_linux_restore(int xc_handle, int 432.50 432.51 /* Get the list of PFNs that are not in the psuedo-phys map */ 432.52 { 432.53 - unsigned int count, *pfntab; 432.54 + unsigned int count; 432.55 + unsigned long *pfntab; 432.56 int rc; 432.57 432.58 if ( read_exact(io_fd, &count, sizeof(count)) != sizeof(count) ) 432.59 @@ -441,9 +443,15 @@ int xc_linux_restore(int xc_handle, int 432.60 432.61 if ( count > 0 ) 432.62 { 432.63 - if ( (rc = xc_dom_mem_op( xc_handle, 432.64 - MEMOP_decrease_reservation, 432.65 - pfntab, count, 0, dom )) <0 ) 432.66 + struct xen_memory_reservation reservation = { 432.67 + .extent_start = pfntab, 432.68 + .nr_extents = count, 432.69 + .extent_order = 0, 432.70 + .domid = dom 432.71 + }; 432.72 + if ( (rc = xc_memory_op(xc_handle, 432.73 + XENMEM_decrease_reservation, 432.74 + &reservation)) != count ) 432.75 { 432.76 ERR("Could not decrease reservation : %d",rc); 432.77 goto out;
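Besides the pfntab type fix and the switch to XENMEM_decrease_reservation, the restore path's ERR()/DPRINTF() macros are rewrapped in the usual do { ... } while (0) idiom. A small stand-alone illustration (not code from the tree) of the failure mode the old two-statement expansion invites:

    #include <stdio.h>

    /* Old style: expands to two statements, so only the fprintf() is guarded
     * by an enclosing if, and adding an else breaks compilation outright. */
    #define ERR_OLD(msg)  fprintf(stderr, "%s\n", msg); fflush(stderr)

    /* New style: behaves as a single statement wherever it is used. */
    #define ERR_NEW(msg)  do { fprintf(stderr, "%s\n", msg); fflush(stderr); } while (0)

    int main(void)
    {
        int failed = 0;

        if (failed)
            ERR_NEW("restore failed");   /* fine with or without an else */
        else
            puts("restore ok");

        return 0;
    }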
437.1 --- a/tools/libxc/xc_private.c Mon Aug 29 16:05:29 2005 -0700 437.2 +++ b/tools/libxc/xc_private.c Tue Aug 30 13:36:49 2005 -0700 437.3 @@ -6,6 +6,7 @@ 437.4 437.5 #include <zlib.h> 437.6 #include "xc_private.h" 437.7 +#include <xen/memory.h> 437.8 437.9 void *xc_map_foreign_batch(int xc_handle, u32 dom, int prot, 437.10 unsigned long *arr, int num ) 437.11 @@ -187,28 +188,43 @@ int xc_finish_mmu_updates(int xc_handle, 437.12 return flush_mmu_updates(xc_handle, mmu); 437.13 } 437.14 437.15 -int xc_dom_mem_op(int xc_handle, 437.16 - unsigned int memop, 437.17 - unsigned int *extent_list, 437.18 - unsigned int nr_extents, 437.19 - unsigned int extent_order, 437.20 - domid_t domid) 437.21 +int xc_memory_op(int xc_handle, 437.22 + int cmd, 437.23 + void *arg) 437.24 { 437.25 privcmd_hypercall_t hypercall; 437.26 + struct xen_memory_reservation *reservation = arg; 437.27 long ret = -EINVAL; 437.28 437.29 - hypercall.op = __HYPERVISOR_dom_mem_op; 437.30 - hypercall.arg[0] = (unsigned long)memop; 437.31 - hypercall.arg[1] = (unsigned long)extent_list; 437.32 - hypercall.arg[2] = (unsigned long)nr_extents; 437.33 - hypercall.arg[3] = (unsigned long)extent_order; 437.34 - hypercall.arg[4] = (unsigned long)domid; 437.35 + hypercall.op = __HYPERVISOR_memory_op; 437.36 + hypercall.arg[0] = (unsigned long)cmd; 437.37 + hypercall.arg[1] = (unsigned long)arg; 437.38 437.39 - if ( (extent_list != NULL) && 437.40 - (mlock(extent_list, nr_extents*sizeof(unsigned long)) != 0) ) 437.41 + switch ( cmd ) 437.42 { 437.43 - PERROR("Could not lock memory for Xen hypercall"); 437.44 - goto out1; 437.45 + case XENMEM_increase_reservation: 437.46 + case XENMEM_decrease_reservation: 437.47 + if ( mlock(reservation, sizeof(*reservation)) != 0 ) 437.48 + { 437.49 + PERROR("Could not mlock"); 437.50 + goto out1; 437.51 + } 437.52 + if ( (reservation->extent_start != NULL) && 437.53 + (mlock(reservation->extent_start, 437.54 + reservation->nr_extents * sizeof(unsigned long)) != 0) ) 437.55 + { 437.56 + PERROR("Could not mlock"); 437.57 + safe_munlock(reservation, sizeof(*reservation)); 437.58 + goto out1; 437.59 + } 437.60 + break; 437.61 + case XENMEM_maximum_ram_page: 437.62 + if ( mlock(arg, sizeof(unsigned long)) != 0 ) 437.63 + { 437.64 + PERROR("Could not mlock"); 437.65 + goto out1; 437.66 + } 437.67 + break; 437.68 } 437.69 437.70 if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 ) 437.71 @@ -217,8 +233,19 @@ int xc_dom_mem_op(int xc_handle, 437.72 " rebuild the user-space tool set?\n",ret,errno); 437.73 } 437.74 437.75 - if ( extent_list != NULL ) 437.76 - safe_munlock(extent_list, nr_extents*sizeof(unsigned long)); 437.77 + switch ( cmd ) 437.78 + { 437.79 + case XENMEM_increase_reservation: 437.80 + case XENMEM_decrease_reservation: 437.81 + safe_munlock(reservation, sizeof(*reservation)); 437.82 + if ( reservation->extent_start != NULL ) 437.83 + safe_munlock(reservation->extent_start, 437.84 + reservation->nr_extents * sizeof(unsigned long)); 437.85 + break; 437.86 + case XENMEM_maximum_ram_page: 437.87 + safe_munlock(arg, sizeof(unsigned long)); 437.88 + break; 437.89 + } 437.90 437.91 out1: 437.92 return ret;
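With xc_dom_mem_op() replaced by xc_memory_op(), callers fill in a xen_memory_reservation and pass a pointer to it; the wrapper mlock()s both the struct and, when present, the extent array. A hedged caller-side sketch (the helper name, domain id and pfn list are invented for illustration; the struct fields and XENMEM_* commands are the ones used throughout this changeset):

    #include <string.h>
    #include "xc_private.h"
    #include <xen/memory.h>

    /* Hand 'count' order-0 pages back to Xen on behalf of domain 'domid'. */
    int give_back_pages(int xc_handle, unsigned long *pfns,
                        unsigned long count, domid_t domid)
    {
        struct xen_memory_reservation reservation;

        memset(&reservation, 0, sizeof(reservation));
        reservation.extent_start = pfns;    /* machine frames to release */
        reservation.nr_extents   = count;
        reservation.extent_order = 0;       /* single 4kB pages */
        reservation.domid        = domid;

        /* The hypercall returns the number of extents actually processed. */
        if (xc_memory_op(xc_handle, XENMEM_decrease_reservation, &reservation)
            != (int)count)
            return -1;
        return 0;
    }

xc_domain_memory_increase_reservation() and the restore path in this changeset follow the same pattern, leaving extent_start NULL when Xen should choose the frames itself.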
440.1 --- a/tools/libxc/xc_vmx_build.c Mon Aug 29 16:05:29 2005 -0700 440.2 +++ b/tools/libxc/xc_vmx_build.c Tue Aug 30 13:36:49 2005 -0700 440.3 @@ -7,6 +7,7 @@ 440.4 #define ELFSIZE 32 440.5 #include "xc_elf.h" 440.6 #include <stdlib.h> 440.7 +#include <unistd.h> 440.8 #include <zlib.h> 440.9 #include <xen/io/ioreq.h> 440.10 #include "linux_boot_params.h"
441.1 --- a/tools/libxc/xenctrl.h Mon Aug 29 16:05:29 2005 -0700 441.2 +++ b/tools/libxc/xenctrl.h Tue Aug 30 13:36:49 2005 -0700 441.3 @@ -430,9 +430,7 @@ int xc_ia64_get_pfn_list(int xc_handle, 441.4 int xc_mmuext_op(int xc_handle, struct mmuext_op *op, unsigned int nr_ops, 441.5 domid_t dom); 441.6 441.7 -int xc_dom_mem_op(int xc_handle, unsigned int memop, unsigned int *extent_list, 441.8 - unsigned int nr_extents, unsigned int extent_order, 441.9 - domid_t domid); 441.10 +int xc_memory_op(int xc_handle, int cmd, void *arg); 441.11 441.12 int xc_get_pfn_type_batch(int xc_handle, u32 dom, int num, unsigned long *arr); 441.13
442.1 --- a/tools/libxc/xenguest.h Mon Aug 29 16:05:29 2005 -0700 442.2 +++ b/tools/libxc/xenguest.h Tue Aug 30 13:36:49 2005 -0700 442.3 @@ -47,7 +47,9 @@ int xc_linux_build(int xc_handle, 442.4 unsigned long flags, 442.5 unsigned int vcpus, 442.6 unsigned int store_evtchn, 442.7 - unsigned long *store_mfn); 442.8 + unsigned long *store_mfn, 442.9 + unsigned int console_evtchn, 442.10 + unsigned long *console_mfn); 442.11 442.12 struct mem_map; 442.13 int xc_vmx_build(int xc_handle,
443.1 --- a/tools/libxc/xg_private.c Mon Aug 29 16:05:29 2005 -0700 443.2 +++ b/tools/libxc/xg_private.c Tue Aug 30 13:36:49 2005 -0700 443.3 @@ -5,6 +5,7 @@ 443.4 */ 443.5 443.6 #include <stdlib.h> 443.7 +#include <unistd.h> 443.8 #include <zlib.h> 443.9 443.10 #include "xg_private.h"
447.1 --- a/tools/misc/cpuperf/cpuperf.c Mon Aug 29 16:05:29 2005 -0700 447.2 +++ b/tools/misc/cpuperf/cpuperf.c Tue Aug 30 13:36:49 2005 -0700 447.3 @@ -243,16 +243,12 @@ int main(int argc, char **argv) 447.4 } 447.5 447.6 if (read) { 447.7 - while((cpu_mask&1)) { 447.8 - int i; 447.9 - for (i=0x300;i<0x312;i++) { 447.10 - printf("%010llu ",cpus_rdmsr( cpu_mask, i ) ); 447.11 - } 447.12 - printf("\n"); 447.13 - cpu_mask>>=1; 447.14 - } 447.15 + int i; 447.16 + for (i=0x300;i<0x312;i++) 447.17 + printf("%010llu ",cpus_rdmsr( cpu_mask, i ) ); 447.18 + printf("\n"); 447.19 exit(1); 447.20 - } 447.21 + } 447.22 447.23 if (!escr) { 447.24 fprintf(stderr, "Need an ESCR.\n");
449.1 --- a/tools/misc/mbootpack/Makefile Mon Aug 29 16:05:29 2005 -0700 449.2 +++ b/tools/misc/mbootpack/Makefile Tue Aug 30 13:36:49 2005 -0700 449.3 @@ -20,8 +20,7 @@ GDB := gdb 449.4 INCS := -I. -I- 449.5 DEFS := 449.6 LDFLAGS := 449.7 -CC := gcc 449.8 -CFLAGS := -Wall -Wpointer-arith -Wcast-qual -Wno-unused -Wno-format 449.9 +CFLAGS := -Wall -Wpointer-arith -Wcast-qual -Wno-unused -Wno-format 449.10 CFLAGS += -Wmissing-prototypes 449.11 #CFLAGS += -pipe -g -O0 -Wcast-align 449.12 CFLAGS += -pipe -O3 449.13 @@ -34,7 +33,7 @@ DEPFLAGS = -Wp,-MD,.$(@F).d 449.14 DEPS = .*.d 449.15 449.16 mbootpack: $(OBJS) 449.17 - $(CC) -o $@ $(filter-out %.a, $^) $(LDFLAGS) 449.18 + $(HOSTCC) -o $@ $(filter-out %.a, $^) $(LDFLAGS) 449.19 449.20 clean: 449.21 $(RM) mbootpack *.o $(DEPS) bootsect setup bzimage_header.c bin2c 449.22 @@ -48,7 +47,7 @@ setup: setup.S 449.23 $(LD) -m elf_i386 -Ttext 0x0 -s --oformat binary setup.o -o $@ 449.24 449.25 bin2c: bin2c.o 449.26 - $(CC) -o $@ $^ 449.27 + $(HOSTCC) -o $@ $^ 449.28 449.29 bzimage_header.c: bootsect setup bin2c 449.30 ./bin2c -n 8 -b1 -a bzimage_bootsect bootsect > bzimage_header.c 449.31 @@ -58,10 +57,10 @@ buildimage.c: bzimage_header.c 449.32 @ 449.33 449.34 %.o: %.S 449.35 - $(CC) $(DEPFLAGS) $(CFLAGS) $(INCS) $(DEFS) -c $< -o $@ 449.36 + $(HOSTCC) $(DEPFLAGS) $(CFLAGS) $(INCS) $(DEFS) -c $< -o $@ 449.37 449.38 %.o: %.c 449.39 - $(CC) $(DEPFLAGS) $(CFLAGS) $(INCS) $(DEFS) -c $< -o $@ 449.40 + $(HOSTCC) $(DEPFLAGS) $(CFLAGS) $(INCS) $(DEFS) -c $< -o $@ 449.41 449.42 .PHONY: all clean gdb 449.43 .PRECIOUS: $(OBJS) $(OBJS:.o=.c) $(DEPS)
450.1 --- a/tools/misc/mbootpack/buildimage.c Mon Aug 29 16:05:29 2005 -0700 450.2 +++ b/tools/misc/mbootpack/buildimage.c Tue Aug 30 13:36:49 2005 -0700 450.3 @@ -43,6 +43,7 @@ 450.4 #include "mbootpack.h" 450.5 #include "mb_header.h" 450.6 450.7 + 450.8 /* We will build an image that a bzImage-capable bootloader will load like 450.9 * this: 450.10 * 450.11 @@ -105,8 +106,8 @@ void make_bzImage(section_t *sections, 450.12 section_t *s; 450.13 450.14 /* Patch the kernel and mbi addresses into the setup code */ 450.15 - *(address_t *)(bzimage_setup + BZ_ENTRY_OFFSET) = entry; 450.16 - *(address_t *)(bzimage_setup + BZ_MBI_OFFSET) = mbi; 450.17 + *(address_t *)(bzimage_setup + BZ_ENTRY_OFFSET) = eswap(entry); 450.18 + *(address_t *)(bzimage_setup + BZ_MBI_OFFSET) = eswap(mbi); 450.19 if (!quiet) printf("Kernel entry is %p, MBI is %p.\n", entry, mbi); 450.20 450.21 /* Write out header and trampoline */
451.1 --- a/tools/misc/mbootpack/mbootpack.c Mon Aug 29 16:05:29 2005 -0700 451.2 +++ b/tools/misc/mbootpack/mbootpack.c Tue Aug 30 13:36:49 2005 -0700 451.3 @@ -252,20 +252,21 @@ static address_t load_kernel(const char 451.4 for (i = 0; i <= MIN(len - 12, MULTIBOOT_SEARCH - 12); i += 4) 451.5 { 451.6 mbh = (struct multiboot_header *)(headerbuf + i); 451.7 - if (mbh->magic != MULTIBOOT_MAGIC 451.8 - || ((mbh->magic+mbh->flags+mbh->checksum) & 0xffffffff)) 451.9 + if (eswap(mbh->magic) != MULTIBOOT_MAGIC 451.10 + || ((eswap(mbh->magic)+eswap(mbh->flags)+eswap(mbh->checksum)) 451.11 + & 0xffffffff)) 451.12 { 451.13 /* Not a multiboot header */ 451.14 continue; 451.15 } 451.16 - if (mbh->flags & MULTIBOOT_UNSUPPORTED) { 451.17 + if (eswap(mbh->flags) & MULTIBOOT_UNSUPPORTED) { 451.18 /* Requires options we don't support */ 451.19 printf("Fatal: found a multiboot header, but it " 451.20 "requires multiboot options that I\n" 451.21 "don't understand. Sorry.\n"); 451.22 exit(1); 451.23 } 451.24 - if (mbh->flags & MULTIBOOT_VIDEO_MODE) { 451.25 + if (eswap(mbh->flags) & MULTIBOOT_VIDEO_MODE) { 451.26 /* Asked for screen mode information */ 451.27 /* XXX carry on regardless */ 451.28 printf("Warning: found a multiboot header which asks " 451.29 @@ -275,22 +276,22 @@ static address_t load_kernel(const char 451.30 } 451.31 /* This kernel will do: place and load it */ 451.32 451.33 - if (mbh->flags & MULTIBOOT_AOUT_KLUDGE) { 451.34 + if (eswap(mbh->flags) & MULTIBOOT_AOUT_KLUDGE) { 451.35 451.36 /* Load using the offsets in the multiboot header */ 451.37 if(!quiet) 451.38 printf("Loading %s using multiboot header.\n", filename); 451.39 451.40 /* How much is there? */ 451.41 - start = mbh->load_addr; 451.42 - if (mbh->load_end_addr != 0) 451.43 - loadsize = mbh->load_end_addr - mbh->load_addr; 451.44 + start = eswap(mbh->load_addr); 451.45 + if (eswap(mbh->load_end_addr) != 0) 451.46 + loadsize = eswap(mbh->load_end_addr) - eswap(mbh->load_addr); 451.47 else 451.48 loadsize = sb.st_size; 451.49 451.50 /* How much memory will it take up? */ 451.51 - if (mbh->bss_end_addr != 0) 451.52 - size = mbh->bss_end_addr - mbh->load_addr; 451.53 + if (eswap(mbh->bss_end_addr) != 0) 451.54 + size = eswap(mbh->bss_end_addr) - eswap(mbh->load_addr); 451.55 else 451.56 size = loadsize; 451.57 451.58 @@ -335,32 +336,34 @@ static address_t load_kernel(const char 451.59 451.60 /* Done. */ 451.61 if (!quiet) printf("Loaded kernel from %s\n", filename); 451.62 - return mbh->entry_addr; 451.63 + return eswap(mbh->entry_addr); 451.64 451.65 } else { 451.66 451.67 /* Now look for an ELF32 header */ 451.68 ehdr = (Elf32_Ehdr *)headerbuf; 451.69 - if (*(unsigned long *)ehdr != 0x464c457f 451.70 + if (*(unsigned long *)ehdr != eswap(0x464c457f) 451.71 || ehdr->e_ident[EI_DATA] != ELFDATA2LSB 451.72 || ehdr->e_ident[EI_CLASS] != ELFCLASS32 451.73 - || ehdr->e_machine != EM_386) 451.74 + || eswap(ehdr->e_machine) != EM_386) 451.75 { 451.76 printf("Fatal: kernel has neither ELF32/x86 nor multiboot load" 451.77 " headers.\n"); 451.78 exit(1); 451.79 } 451.80 - if (ehdr->e_phoff + ehdr->e_phnum*sizeof(*phdr) > HEADERBUF_SIZE) { 451.81 + if (eswap(ehdr->e_phoff) + eswap(ehdr->e_phnum)*sizeof(*phdr) 451.82 + > HEADERBUF_SIZE) { 451.83 /* Don't expect this will happen with sane kernels */ 451.84 printf("Fatal: too much ELF for me. 
Try increasing " 451.85 "HEADERBUF_SIZE in mbootpack.\n"); 451.86 exit(1); 451.87 } 451.88 - if (ehdr->e_phoff + ehdr->e_phnum*sizeof (*phdr) > len) { 451.89 + if (eswap(ehdr->e_phoff) + eswap(ehdr->e_phnum)*sizeof (*phdr) 451.90 + > len) { 451.91 printf("Fatal: malformed ELF header overruns EOF.\n"); 451.92 exit(1); 451.93 } 451.94 - if (ehdr->e_phnum <= 0) { 451.95 + if (eswap(ehdr->e_phnum) <= 0) { 451.96 printf("Fatal: ELF kernel has no program headers.\n"); 451.97 exit(1); 451.98 } 451.99 @@ -368,22 +371,22 @@ static address_t load_kernel(const char 451.100 if(!quiet) 451.101 printf("Loading %s using ELF header.\n", filename); 451.102 451.103 - if (ehdr->e_type != ET_EXEC 451.104 - || ehdr->e_version != EV_CURRENT 451.105 - || ehdr->e_phentsize != sizeof (Elf32_Phdr)) { 451.106 + if (eswap(ehdr->e_type) != ET_EXEC 451.107 + || eswap(ehdr->e_version) != EV_CURRENT 451.108 + || eswap(ehdr->e_phentsize) != sizeof (Elf32_Phdr)) { 451.109 printf("Warning: funny-looking ELF header.\n"); 451.110 } 451.111 - phdr = (Elf32_Phdr *)(headerbuf + ehdr->e_phoff); 451.112 + phdr = (Elf32_Phdr *)(headerbuf + eswap(ehdr->e_phoff)); 451.113 451.114 /* Obey the program headers to load the kernel */ 451.115 - for(i = 0; i < ehdr->e_phnum; i++) { 451.116 + for(i = 0; i < eswap(ehdr->e_phnum); i++) { 451.117 451.118 - start = phdr[i].p_paddr; 451.119 - size = phdr[i].p_memsz; 451.120 - if (phdr[i].p_type != PT_LOAD) 451.121 + start = eswap(phdr[i].p_paddr); 451.122 + size = eswap(phdr[i].p_memsz); 451.123 + if (eswap(phdr[i].p_type) != PT_LOAD) 451.124 loadsize = 0; 451.125 else 451.126 - loadsize = MIN((long int)phdr[i].p_filesz, size); 451.127 + loadsize = MIN((long int)eswap(phdr[i].p_filesz), size); 451.128 451.129 if ((buffer = malloc(size)) == NULL) { 451.130 printf("Fatal: malloc() for kernel load failed: %s\n", 451.131 @@ -396,7 +399,7 @@ static address_t load_kernel(const char 451.132 451.133 /* Load section from file */ 451.134 if (loadsize > 0) { 451.135 - if (fseek(fp, phdr[i].p_offset, SEEK_SET) != 0) { 451.136 + if (fseek(fp, eswap(phdr[i].p_offset), SEEK_SET) != 0) { 451.137 printf("Fatal: seek failed in %s\n", 451.138 strerror(errno)); 451.139 exit(1); 451.140 @@ -452,7 +455,7 @@ static address_t load_kernel(const char 451.141 451.142 /* Done! 
*/ 451.143 if (!quiet) printf("Loaded kernel from %s\n", filename); 451.144 - return ehdr->e_entry; 451.145 + return eswap(ehdr->e_entry); 451.146 } 451.147 451.148 } 451.149 @@ -568,12 +571,12 @@ int main(int argc, char **argv) 451.150 /* Command line */ 451.151 p = (char *)(mbi + 1); 451.152 sprintf(p, "%s %s", imagename, command_line); 451.153 - mbi->cmdline = ((address_t)p) + mbi_reloc_offset; 451.154 + mbi->cmdline = eswap(((address_t)p) + mbi_reloc_offset); 451.155 p += command_line_len; 451.156 451.157 /* Bootloader ID */ 451.158 sprintf(p, version_string); 451.159 - mbi->boot_loader_name = ((address_t)p) + mbi_reloc_offset; 451.160 + mbi->boot_loader_name = eswap(((address_t)p) + mbi_reloc_offset); 451.161 p += strlen(version_string) + 1; 451.162 451.163 /* Next is space for the module command lines */ 451.164 @@ -582,17 +585,17 @@ int main(int argc, char **argv) 451.165 /* Last come the module info structs */ 451.166 modp = (struct mod_list *) 451.167 ((((address_t)p + mod_command_line_space) + 3) & ~3); 451.168 - mbi->mods_count = modules; 451.169 - mbi->mods_addr = ((address_t)modp) + mbi_reloc_offset; 451.170 + mbi->mods_count = eswap(modules); 451.171 + mbi->mods_addr = eswap(((address_t)modp) + mbi_reloc_offset); 451.172 451.173 /* Memory information will be added at boot time, by setup.S 451.174 * or trampoline.S. */ 451.175 - mbi->flags = MB_INFO_CMDLINE | MB_INFO_BOOT_LOADER_NAME; 451.176 + mbi->flags = eswap(MB_INFO_CMDLINE | MB_INFO_BOOT_LOADER_NAME); 451.177 451.178 451.179 /* Load the modules */ 451.180 if (modules) { 451.181 - mbi->flags |= MB_INFO_MODS; 451.182 + mbi->flags = eswap(eswap(mbi->flags) | MB_INFO_MODS); 451.183 451.184 /* Go back and parse the module command lines */ 451.185 optind = opterr = 1; 451.186 @@ -652,10 +655,10 @@ int main(int argc, char **argv) 451.187 if (p != NULL) *p = ' '; 451.188 451.189 /* Fill in the module info struct */ 451.190 - modp->mod_start = start; 451.191 - modp->mod_end = start + size; 451.192 - modp->cmdline = (address_t)mod_clp + mbi_reloc_offset; 451.193 - modp->pad = 0; 451.194 + modp->mod_start = eswap(start); 451.195 + modp->mod_end = eswap(start + size); 451.196 + modp->cmdline = eswap((address_t)mod_clp + mbi_reloc_offset); 451.197 + modp->pad = eswap(0); 451.198 modp++; 451.199 451.200 /* Store the module command line */
452.1 --- a/tools/misc/mbootpack/mbootpack.h Mon Aug 29 16:05:29 2005 -0700 452.2 +++ b/tools/misc/mbootpack/mbootpack.h Tue Aug 30 13:36:49 2005 -0700 452.3 @@ -32,6 +32,24 @@ 452.4 #undef NDEBUG 452.5 #include <stdio.h> 452.6 452.7 +#include <endian.h> 452.8 +#include <byteswap.h> 452.9 +#if __BYTE_ORDER == __LITTLE_ENDIAN 452.10 +#define eswap(x) (x) 452.11 +#else 452.12 +#define eswap(x) \ 452.13 + ({ \ 452.14 + typeof(x) y = (x); \ 452.15 + switch(sizeof(y)) \ 452.16 + { \ 452.17 + case 2: y = __bswap_16(y); break; \ 452.18 + case 4: y = __bswap_32(y); break; \ 452.19 + case 8: y = __bswap_64(y); break; \ 452.20 + } \ 452.21 + y; \ 452.22 + }) 452.23 +#endif 452.24 + 452.25 /* Flags */ 452.26 extern int quiet; 452.27
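eswap() makes mbootpack host-endian-clean: every field read from or written into the little-endian boot image is passed through it, which is a no-op on little-endian hosts and a sized byte swap on big-endian ones. A tiny stand-alone illustration of the round-trip property the call sites above depend on (assumes it is built in-tree so mbootpack.h is on the include path):

    #include <assert.h>
    #include <stdint.h>
    #include "mbootpack.h"      /* provides eswap() */

    int main(void)
    {
        uint32_t host_val = 0x1badb002;         /* multiboot magic, in host byte order */
        uint32_t img_val  = eswap(host_val);    /* as it must appear in the image */

        /* eswap() is its own inverse, so wrapping both reads and writes keeps
         * the code correct regardless of the build host's endianness. */
        assert(eswap(img_val) == host_val);
        return 0;
    }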
457.1 --- a/tools/python/xen/lowlevel/xc/xc.c Mon Aug 29 16:05:29 2005 -0700 457.2 +++ b/tools/python/xen/lowlevel/xc/xc.c Tue Aug 30 13:36:49 2005 -0700 457.3 @@ -268,25 +268,33 @@ static PyObject *pyxc_linux_build(PyObje 457.4 u32 dom; 457.5 char *image, *ramdisk = NULL, *cmdline = ""; 457.6 int flags = 0, vcpus = 1; 457.7 - int control_evtchn, store_evtchn; 457.8 + int control_evtchn, store_evtchn, console_evtchn; 457.9 unsigned long store_mfn = 0; 457.10 + unsigned long console_mfn = 0; 457.11 457.12 static char *kwd_list[] = { "dom", "control_evtchn", "store_evtchn", 457.13 - "image", "ramdisk", "cmdline", "flags", 457.14 + "console_evtchn", "image", 457.15 + /* optional */ 457.16 + "ramdisk", "cmdline", "flags", 457.17 "vcpus", NULL }; 457.18 457.19 - if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiis|ssii", kwd_list, 457.20 + if ( !PyArg_ParseTupleAndKeywords(args, kwds, "iiiis|ssii", kwd_list, 457.21 &dom, &control_evtchn, &store_evtchn, 457.22 - &image, &ramdisk, &cmdline, &flags, 457.23 + &console_evtchn, &image, 457.24 + /* optional */ 457.25 + &ramdisk, &cmdline, &flags, 457.26 &vcpus) ) 457.27 return NULL; 457.28 457.29 if ( xc_linux_build(xc->xc_handle, dom, image, 457.30 ramdisk, cmdline, control_evtchn, flags, vcpus, 457.31 - store_evtchn, &store_mfn) != 0 ) 457.32 + store_evtchn, &store_mfn, 457.33 + console_evtchn, &console_mfn) != 0 ) 457.34 return PyErr_SetFromErrno(xc_error); 457.35 457.36 - return Py_BuildValue("{s:i}", "store_mfn", store_mfn); 457.37 + return Py_BuildValue("{s:i,s:i}", 457.38 + "store_mfn", store_mfn, 457.39 + "console_mfn", console_mfn); 457.40 } 457.41 457.42 static PyObject *pyxc_vmx_build(PyObject *self,
491.1 --- a/tools/python/xen/xend/XendDomainInfo.py Mon Aug 29 16:05:29 2005 -0700 491.2 +++ b/tools/python/xen/xend/XendDomainInfo.py Tue Aug 30 13:36:49 2005 -0700 491.3 @@ -47,7 +47,7 @@ from xen.xend.XendError import XendError 491.4 from xen.xend.XendRoot import get_component 491.5 491.6 from xen.xend.uuid import getUuid 491.7 -from xen.xend.xenstore import DBVar 491.8 +from xen.xend.xenstore import DBVar, XenNode, DBMap 491.9 491.10 """Shutdown code for poweroff.""" 491.11 DOMAIN_POWEROFF = 0 491.12 @@ -231,6 +231,7 @@ class XendDomainInfo: 491.13 DBVar('start_time', ty='float'), 491.14 DBVar('state', ty='str'), 491.15 DBVar('store_mfn', ty='long'), 491.16 + DBVar('console_mfn', ty='long'), 491.17 DBVar('restart_mode', ty='str'), 491.18 DBVar('restart_state', ty='str'), 491.19 DBVar('restart_time', ty='float'), 491.20 @@ -260,6 +261,8 @@ class XendDomainInfo: 491.21 self.channel = None 491.22 self.store_channel = None 491.23 self.store_mfn = None 491.24 + self.console_channel = None 491.25 + self.console_mfn = None 491.26 self.controllers = {} 491.27 491.28 self.info = None 491.29 @@ -297,6 +300,9 @@ class XendDomainInfo: 491.30 if self.store_channel: 491.31 self.store_channel.saveToDB(self.db.addChild("store_channel"), 491.32 save=save) 491.33 + if self.console_channel: 491.34 + self.console_channel.saveToDB(self.db.addChild("console_channel"), 491.35 + save=save) 491.36 if self.image: 491.37 self.image.exportToDB(save=save, sync=sync) 491.38 self.db.exportToDB(self, fields=self.__exports__, save=save, sync=sync) 491.39 @@ -329,6 +335,9 @@ class XendDomainInfo: 491.40 def getStoreChannel(self): 491.41 return self.store_channel 491.42 491.43 + def getConsoleChannel(self): 491.44 + return self.console_channel 491.45 + 491.46 def update(self, info): 491.47 """Update with info from xc.domain_getinfo(). 491.48 """ 491.49 @@ -518,6 +527,14 @@ class XendDomainInfo: 491.50 sxpr.append(self.store_channel.sxpr()) 491.51 if self.store_mfn: 491.52 sxpr.append(['store_mfn', self.store_mfn]) 491.53 + if self.console_channel: 491.54 + sxpr.append(['console_channel', self.console_channel.sxpr()]) 491.55 + if self.console_mfn: 491.56 + sxpr.append(['console_mfn', self.console_mfn]) 491.57 +# already in (devices) 491.58 +# console = self.getConsole() 491.59 +# if console: 491.60 +# sxpr.append(console.sxpr()) 491.61 491.62 if self.restart_count: 491.63 sxpr.append(['restart_count', self.restart_count]) 491.64 @@ -712,6 +729,13 @@ class XendDomainInfo: 491.65 except Exception, ex: 491.66 log.warning("error in domain release on xenstore: %s", ex) 491.67 pass 491.68 + if self.console_channel: 491.69 + # notify processes using this cosole? 491.70 + try: 491.71 + self.console_channel.close() 491.72 + self.console_channel = None 491.73 + except: 491.74 + pass 491.75 if self.image: 491.76 try: 491.77 self.device_model_pid = 0 491.78 @@ -808,6 +832,7 @@ class XendDomainInfo: 491.79 """ 491.80 self.channel = self.openChannel("channel", 0, 1) 491.81 self.store_channel = self.eventChannel("store_channel") 491.82 + self.console_channel = self.eventChannel("console_channel") 491.83 491.84 def create_configured_devices(self): 491.85 devices = sxp.children(self.config, 'device') 491.86 @@ -1003,6 +1028,7 @@ class XendDomainInfo: 491.87 self.configure_fields() 491.88 self.create_devices() 491.89 self.create_blkif() 491.90 + self.publish_console() 491.91 491.92 def create_blkif(self): 491.93 """Create the block device interface (blkif) for the vm. 
491.94 @@ -1017,6 +1043,12 @@ class XendDomainInfo: 491.95 backend = blkif.getBackend(0) 491.96 backend.connect(recreate=self.recreate) 491.97 491.98 + def publish_console(self): 491.99 + db = DBMap(db=XenNode("/console/%d" % self.id)) 491.100 + db.clear() 491.101 + db['domain'] = self.db.getPath() 491.102 + db.saveDB(save=True) 491.103 + 491.104 def configure_fields(self): 491.105 """Process the vm configuration fields using the registered handlers. 491.106 """
499.1 --- a/tools/python/xen/xend/image.py Mon Aug 29 16:05:29 2005 -0700 499.2 +++ b/tools/python/xen/xend/image.py Tue Aug 30 13:36:49 2005 -0700 499.3 @@ -238,16 +238,33 @@ class LinuxImageHandler(ImageHandler): 499.4 store_evtchn = self.vm.store_channel.port2 499.5 else: 499.6 store_evtchn = 0 499.7 + if self.vm.console_channel: 499.8 + console_evtchn = self.vm.console_channel.port2 499.9 + else: 499.10 + console_evtchn = 0 499.11 + 499.12 + log.debug("dom = %d", self.vm.getDomain()) 499.13 + log.debug("image = %s", self.kernel) 499.14 + log.debug("control_evtchn = %s", self.vm.channel.getRemotePort()) 499.15 + log.debug("store_evtchn = %d", store_evtchn) 499.16 + log.debug("console_evtchn = %d", console_evtchn) 499.17 + log.debug("cmdline = %s", self.cmdline) 499.18 + log.debug("ramdisk = %s", self.ramdisk) 499.19 + log.debug("flags = %d", self.flags) 499.20 + log.debug("vcpus = %d", self.vm.vcpus) 499.21 + 499.22 ret = xc.linux_build(dom = self.vm.getDomain(), 499.23 image = self.kernel, 499.24 control_evtchn = self.vm.channel.getRemotePort(), 499.25 store_evtchn = store_evtchn, 499.26 + console_evtchn = console_evtchn, 499.27 cmdline = self.cmdline, 499.28 ramdisk = self.ramdisk, 499.29 flags = self.flags, 499.30 vcpus = self.vm.vcpus) 499.31 if isinstance(ret, dict): 499.32 self.vm.store_mfn = ret.get('store_mfn') 499.33 + self.vm.console_mfn = ret.get('console_mfn') 499.34 return 0 499.35 return ret 499.36
613.1 --- a/tools/xcs/Makefile Mon Aug 29 16:05:29 2005 -0700 613.2 +++ b/tools/xcs/Makefile Tue Aug 30 13:36:49 2005 -0700 613.3 @@ -10,8 +10,7 @@ INSTALL = install 613.4 INSTALL_PROG = $(INSTALL) -m0755 613.5 INSTALL_DIR = $(INSTALL) -d -m0755 613.6 613.7 -CC = gcc 613.8 -CFLAGS = -Wall -Werror -g3 -D _XOPEN_SOURCE=600 613.9 +CFLAGS += -Wall -Werror -g3 -D _XOPEN_SOURCE=600 613.10 613.11 CFLAGS += -I $(XEN_XC) 613.12 CFLAGS += -I $(XEN_LIBXC)
617.1 --- a/tools/xcutils/Makefile Mon Aug 29 16:05:29 2005 -0700 617.2 +++ b/tools/xcutils/Makefile Tue Aug 30 13:36:49 2005 -0700 617.3 @@ -19,8 +19,6 @@ PROGRAMS_INSTALL_DIR = /usr/libexec/xen 617.4 617.5 INCLUDES += -I $(XEN_LIBXC) 617.6 617.7 -CC := gcc 617.8 - 617.9 CFLAGS += -Wall -Werror -O3 -fno-strict-aliasing 617.10 CFLAGS += $(INCLUDES) 617.11
620.1 --- a/tools/xenstat/Makefile Mon Aug 29 16:05:29 2005 -0700 620.2 +++ b/tools/xenstat/Makefile Tue Aug 30 13:36:49 2005 -0700 620.3 @@ -3,7 +3,11 @@ include $(XEN_ROOT)/tools/Rules.mk 620.4 620.5 SUBDIRS := 620.6 SUBDIRS += libxenstat 620.7 + 620.8 +# This doesn't cross-compile (cross-compile environments rarely have curses) 620.9 +ifeq ($(XEN_COMPILE_ARCH),$(XEN_TARGET_ARCH)) 620.10 SUBDIRS += xentop 620.11 +endif 620.12 620.13 .PHONY: all install clean 620.14
727.1 --- a/xen/arch/x86/domain.c Mon Aug 29 16:05:29 2005 -0700 727.2 +++ b/xen/arch/x86/domain.c Tue Aug 30 13:36:49 2005 -0700 727.3 @@ -255,13 +255,13 @@ void arch_do_createdomain(struct vcpu *v 727.4 v->vcpu_info = &d->shared_info->vcpu_data[v->vcpu_id]; 727.5 v->cpumap = CPUMAP_RUNANYWHERE; 727.6 SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d); 727.7 - machine_to_phys_mapping[virt_to_phys(d->shared_info) >> 727.8 - PAGE_SHIFT] = INVALID_M2P_ENTRY; 727.9 + set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT, 727.10 + INVALID_M2P_ENTRY); 727.11 727.12 d->arch.mm_perdomain_pt = alloc_xenheap_page(); 727.13 memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE); 727.14 - machine_to_phys_mapping[virt_to_phys(d->arch.mm_perdomain_pt) >> 727.15 - PAGE_SHIFT] = INVALID_M2P_ENTRY; 727.16 + set_pfn_from_mfn(virt_to_phys(d->arch.mm_perdomain_pt) >> PAGE_SHIFT, 727.17 + INVALID_M2P_ENTRY); 727.18 v->arch.perdomain_ptes = d->arch.mm_perdomain_pt; 727.19 v->arch.perdomain_ptes[FIRST_RESERVED_GDT_PAGE] = 727.20 l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
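The open-coded stores into machine_to_phys_mapping[] are replaced by set_pfn_from_mfn()/get_pfn_from_mfn() accessors. Their real definitions live in the arch headers and are not part of these hunks; the sketch below only illustrates the semantics the call sites assume, using a small local array in place of the global M2P table:

    #include <stdio.h>

    /* Stand-in for the global machine-to-physical table (illustration only). */
    static unsigned long machine_to_phys_mapping[16];

    #define INVALID_M2P_ENTRY  (~0UL)
    #define set_pfn_from_mfn(mfn, pfn)  (machine_to_phys_mapping[(mfn)] = (pfn))
    #define get_pfn_from_mfn(mfn)       (machine_to_phys_mapping[(mfn)])

    int main(void)
    {
        set_pfn_from_mfn(3, INVALID_M2P_ENTRY);  /* e.g. a xenheap page with no guest pfn */
        set_pfn_from_mfn(5, 7);                  /* machine frame 5 backs guest pfn 7 */

        printf("m2p[5] = %lu, m2p[3] invalid = %d\n",
               get_pfn_from_mfn(5),
               get_pfn_from_mfn(3) == INVALID_M2P_ENTRY);
        return 0;
    }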
728.1 --- a/xen/arch/x86/domain_build.c Mon Aug 29 16:05:29 2005 -0700 728.2 +++ b/xen/arch/x86/domain_build.c Tue Aug 30 13:36:49 2005 -0700 728.3 @@ -592,8 +592,7 @@ int construct_dom0(struct domain *d, 728.4 if ( opt_dom0_translate ) 728.5 { 728.6 si->shared_info = d->next_io_page << PAGE_SHIFT; 728.7 - set_machinetophys(virt_to_phys(d->shared_info) >> PAGE_SHIFT, 728.8 - d->next_io_page); 728.9 + set_pfn_from_mfn(virt_to_phys(d->shared_info) >> PAGE_SHIFT, d->next_io_page); 728.10 d->next_io_page++; 728.11 } 728.12 else 728.13 @@ -614,7 +613,7 @@ int construct_dom0(struct domain *d, 728.14 mfn = alloc_epfn - (pfn - REVERSE_START); 728.15 #endif 728.16 ((u32 *)vphysmap_start)[pfn] = mfn; 728.17 - machine_to_phys_mapping[mfn] = pfn; 728.18 + set_pfn_from_mfn(mfn, pfn); 728.19 } 728.20 while ( pfn < nr_pages ) 728.21 { 728.22 @@ -627,7 +626,7 @@ int construct_dom0(struct domain *d, 728.23 #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn))) 728.24 #endif 728.25 ((u32 *)vphysmap_start)[pfn] = mfn; 728.26 - machine_to_phys_mapping[mfn] = pfn; 728.27 + set_pfn_from_mfn(mfn, pfn); 728.28 #undef pfn 728.29 page++; pfn++; 728.30 }
732.1 --- a/xen/arch/x86/mm.c Mon Aug 29 16:05:29 2005 -0700 732.2 +++ b/xen/arch/x86/mm.c Tue Aug 30 13:36:49 2005 -0700 732.3 @@ -1452,7 +1452,7 @@ int get_page_type(struct pfn_info *page, 732.4 "!= exp %" PRtype_info ") " 732.5 "for mfn %lx (pfn %x)", 732.6 x, type, page_to_pfn(page), 732.7 - machine_to_phys_mapping[page_to_pfn(page)]); 732.8 + get_pfn_from_mfn(page_to_pfn(page))); 732.9 return 0; 732.10 } 732.11 else if ( (x & PGT_va_mask) == PGT_va_mutable ) 732.12 @@ -2206,7 +2206,7 @@ int do_mmu_update( 732.13 printk("privileged guest dom%d requests pfn=%lx to " 732.14 "map mfn=%lx for dom%d\n", 732.15 d->domain_id, gpfn, mfn, FOREIGNDOM->domain_id); 732.16 - set_machinetophys(mfn, gpfn); 732.17 + set_pfn_from_mfn(mfn, gpfn); 732.18 set_p2m_entry(FOREIGNDOM, gpfn, mfn, &sh_mapcache, &mapcache); 732.19 okay = 1; 732.20 shadow_unlock(FOREIGNDOM); 732.21 @@ -2225,7 +2225,7 @@ int do_mmu_update( 732.22 break; 732.23 } 732.24 732.25 - set_machinetophys(mfn, gpfn); 732.26 + set_pfn_from_mfn(mfn, gpfn); 732.27 okay = 1; 732.28 732.29 /*
737.1 --- a/xen/arch/x86/shadow32.c Mon Aug 29 16:05:29 2005 -0700 737.2 +++ b/xen/arch/x86/shadow32.c Tue Aug 30 13:36:49 2005 -0700 737.3 @@ -827,7 +827,7 @@ alloc_p2m_table(struct domain *d) 737.4 { 737.5 page = list_entry(list_ent, struct pfn_info, list); 737.6 mfn = page_to_pfn(page); 737.7 - pfn = machine_to_phys_mapping[mfn]; 737.8 + pfn = get_pfn_from_mfn(mfn); 737.9 ASSERT(pfn != INVALID_M2P_ENTRY); 737.10 ASSERT(pfn < (1u<<20)); 737.11 737.12 @@ -841,7 +841,7 @@ alloc_p2m_table(struct domain *d) 737.13 { 737.14 page = list_entry(list_ent, struct pfn_info, list); 737.15 mfn = page_to_pfn(page); 737.16 - pfn = machine_to_phys_mapping[mfn]; 737.17 + pfn = get_pfn_from_mfn(mfn); 737.18 if ( (pfn != INVALID_M2P_ENTRY) && 737.19 (pfn < (1u<<20)) ) 737.20 {
738.1 --- a/xen/arch/x86/shadow_public.c Mon Aug 29 16:05:29 2005 -0700 738.2 +++ b/xen/arch/x86/shadow_public.c Tue Aug 30 13:36:49 2005 -0700 738.3 @@ -1311,7 +1311,7 @@ alloc_p2m_table(struct domain *d) 738.4 { 738.5 page = list_entry(list_ent, struct pfn_info, list); 738.6 mfn = page_to_pfn(page); 738.7 - pfn = machine_to_phys_mapping[mfn]; 738.8 + pfn = get_pfn_from_mfn(mfn); 738.9 ASSERT(pfn != INVALID_M2P_ENTRY); 738.10 ASSERT(pfn < (1u<<20)); 738.11 738.12 @@ -1325,7 +1325,7 @@ alloc_p2m_table(struct domain *d) 738.13 { 738.14 page = list_entry(list_ent, struct pfn_info, list); 738.15 mfn = page_to_pfn(page); 738.16 - pfn = machine_to_phys_mapping[mfn]; 738.17 + pfn = get_pfn_from_mfn(mfn); 738.18 if ( (pfn != INVALID_M2P_ENTRY) && 738.19 (pfn < (1u<<20)) ) 738.20 {
741.1 --- a/xen/arch/x86/traps.c Mon Aug 29 16:05:29 2005 -0700 741.2 +++ b/xen/arch/x86/traps.c Tue Aug 30 13:36:49 2005 -0700 741.3 @@ -100,6 +100,7 @@ unsigned long do_get_debugreg(int reg); 741.4 741.5 static int debug_stack_lines = 20; 741.6 integer_param("debug_stack_lines", debug_stack_lines); 741.7 +#define stack_words_per_line (32 / BYTES_PER_LONG) 741.8 741.9 int is_kernel_text(unsigned long addr) 741.10 { 741.11 @@ -125,7 +126,7 @@ void show_guest_stack(void) 741.12 741.13 printk("Guest stack trace from "__OP"sp=%p:\n ", stack); 741.14 741.15 - for ( i = 0; i < (debug_stack_lines*8); i++ ) 741.16 + for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ ) 741.17 { 741.18 if ( ((long)stack & (STACK_SIZE-1)) == 0 ) 741.19 break; 741.20 @@ -137,7 +138,7 @@ void show_guest_stack(void) 741.21 i = 1; 741.22 break; 741.23 } 741.24 - if ( (i != 0) && ((i % 8) == 0) ) 741.25 + if ( (i != 0) && ((i % stack_words_per_line) == 0) ) 741.26 printk("\n "); 741.27 printk("%p ", _p(addr)); 741.28 stack++; 741.29 @@ -176,11 +177,11 @@ void show_stack(unsigned long *esp) 741.30 741.31 printk("Xen stack trace from "__OP"sp=%p:\n ", stack); 741.32 741.33 - for ( i = 0; i < (debug_stack_lines*8); i++ ) 741.34 + for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ ) 741.35 { 741.36 if ( ((long)stack & (STACK_SIZE-1)) == 0 ) 741.37 break; 741.38 - if ( (i != 0) && ((i % 8) == 0) ) 741.39 + if ( (i != 0) && ((i % stack_words_per_line) == 0) ) 741.40 printk("\n "); 741.41 addr = *stack++; 741.42 printk("%p ", _p(addr));
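The stack dumpers previously hard-coded eight words per line, which was tuned for 32-bit longs; stack_words_per_line keeps each printed line at 32 bytes of stack on both builds. The arithmetic, shown stand-alone:

    #include <stdio.h>

    int main(void)
    {
        /* BYTES_PER_LONG is 4 on x86-32 and 8 on x86-64. */
        for (unsigned bytes_per_long = 4; bytes_per_long <= 8; bytes_per_long += 4) {
            unsigned words = 32 / bytes_per_long;
            printf("%u-byte longs: %u words per line = %u bytes of stack\n",
                   bytes_per_long, words, words * bytes_per_long);
        }
        return 0;
    }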
742.1 --- a/xen/arch/x86/vmx.c Mon Aug 29 16:05:29 2005 -0700 742.2 +++ b/xen/arch/x86/vmx.c Tue Aug 30 13:36:49 2005 -0700 742.3 @@ -704,7 +704,7 @@ vmx_copy(void *buf, unsigned long laddr, 742.4 return 0; 742.5 } 742.6 742.7 - mfn = phys_to_machine_mapping(laddr >> PAGE_SHIFT); 742.8 + mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT); 742.9 addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK); 742.10 742.11 if (dir == COPY_IN) 742.12 @@ -805,7 +805,7 @@ vmx_world_restore(struct vcpu *d, struct 742.13 * removed some translation or changed page attributes. 742.14 * We simply invalidate the shadow. 742.15 */ 742.16 - mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT); 742.17 + mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT); 742.18 if (mfn != pagetable_get_pfn(d->arch.guest_table)) { 742.19 printk("Invalid CR3 value=%x", c->cr3); 742.20 domain_crash_synchronous(); 742.21 @@ -823,7 +823,7 @@ vmx_world_restore(struct vcpu *d, struct 742.22 domain_crash_synchronous(); 742.23 return 0; 742.24 } 742.25 - mfn = phys_to_machine_mapping(c->cr3 >> PAGE_SHIFT); 742.26 + mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT); 742.27 d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT); 742.28 update_pagetables(d); 742.29 /* 742.30 @@ -978,7 +978,7 @@ static int vmx_set_cr0(unsigned long val 742.31 /* 742.32 * The guest CR3 must be pointing to the guest physical. 742.33 */ 742.34 - if ( !VALID_MFN(mfn = phys_to_machine_mapping( 742.35 + if ( !VALID_MFN(mfn = get_mfn_from_pfn( 742.36 d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) || 742.37 !get_page(pfn_to_page(mfn), d->domain) ) 742.38 { 742.39 @@ -1174,7 +1174,7 @@ static int mov_to_cr(int gp, int cr, str 742.40 * removed some translation or changed page attributes. 742.41 * We simply invalidate the shadow. 742.42 */ 742.43 - mfn = phys_to_machine_mapping(value >> PAGE_SHIFT); 742.44 + mfn = get_mfn_from_pfn(value >> PAGE_SHIFT); 742.45 if (mfn != pagetable_get_pfn(d->arch.guest_table)) 742.46 __vmx_bug(regs); 742.47 shadow_sync_all(d->domain); 742.48 @@ -1185,7 +1185,7 @@ static int mov_to_cr(int gp, int cr, str 742.49 */ 742.50 VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value); 742.51 if ( ((value >> PAGE_SHIFT) > d->domain->max_pages ) || 742.52 - !VALID_MFN(mfn = phys_to_machine_mapping(value >> PAGE_SHIFT)) || 742.53 + !VALID_MFN(mfn = get_mfn_from_pfn(value >> PAGE_SHIFT)) || 742.54 !get_page(pfn_to_page(mfn), d->domain) ) 742.55 { 742.56 printk("Invalid CR3 value=%lx", value);
745.1 --- a/xen/arch/x86/vmx_platform.c Mon Aug 29 16:05:29 2005 -0700 745.2 +++ b/xen/arch/x86/vmx_platform.c Tue Aug 30 13:36:49 2005 -0700 745.3 @@ -521,7 +521,7 @@ int inst_copy_from_guest(unsigned char * 745.4 if ( vmx_paging_enabled(current) ) 745.5 { 745.6 gpa = gva_to_gpa(guest_eip); 745.7 - mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT); 745.8 + mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT); 745.9 745.10 /* Does this cross a page boundary ? */ 745.11 if ( (guest_eip & PAGE_MASK) != ((guest_eip + inst_len) & PAGE_MASK) ) 745.12 @@ -532,7 +532,7 @@ int inst_copy_from_guest(unsigned char * 745.13 } 745.14 else 745.15 { 745.16 - mfn = phys_to_machine_mapping(guest_eip >> PAGE_SHIFT); 745.17 + mfn = get_mfn_from_pfn(guest_eip >> PAGE_SHIFT); 745.18 } 745.19 745.20 inst_start = map_domain_page(mfn); 745.21 @@ -542,7 +542,7 @@ int inst_copy_from_guest(unsigned char * 745.22 if ( remaining ) 745.23 { 745.24 gpa = gva_to_gpa(guest_eip+inst_len+remaining); 745.25 - mfn = phys_to_machine_mapping(gpa >> PAGE_SHIFT); 745.26 + mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT); 745.27 745.28 inst_start = map_domain_page(mfn); 745.29 memcpy((char *)buf+inst_len, inst_start, remaining);
746.1 --- a/xen/arch/x86/vmx_vmcs.c Mon Aug 29 16:05:29 2005 -0700 746.2 +++ b/xen/arch/x86/vmx_vmcs.c Tue Aug 30 13:36:49 2005 -0700 746.3 @@ -148,7 +148,7 @@ int vmx_setup_platform(struct vcpu *d, s 746.4 offset = (addr & ~PAGE_MASK); 746.5 addr = round_pgdown(addr); 746.6 746.7 - mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT); 746.8 + mpfn = get_mfn_from_pfn(addr >> PAGE_SHIFT); 746.9 p = map_domain_page(mpfn); 746.10 746.11 e820p = (struct e820entry *) ((unsigned long) p + offset); 746.12 @@ -175,7 +175,7 @@ int vmx_setup_platform(struct vcpu *d, s 746.13 unmap_domain_page(p); 746.14 746.15 /* Initialise shared page */ 746.16 - mpfn = phys_to_machine_mapping(gpfn); 746.17 + mpfn = get_mfn_from_pfn(gpfn); 746.18 p = map_domain_page(mpfn); 746.19 d->domain->arch.vmx_platform.shared_page_va = (unsigned long)p; 746.20
748.1 --- a/xen/arch/x86/x86_32/entry.S Mon Aug 29 16:05:29 2005 -0700 748.2 +++ b/xen/arch/x86/x86_32/entry.S Tue Aug 30 13:36:49 2005 -0700 748.3 @@ -796,7 +796,7 @@ ENTRY(hypercall_table) 748.4 .long do_get_debugreg 748.5 .long do_update_descriptor /* 10 */ 748.6 .long do_ni_hypercall 748.7 - .long do_dom_mem_op 748.8 + .long do_memory_op 748.9 .long do_multicall 748.10 .long do_update_va_mapping 748.11 .long do_set_timer_op /* 15 */ 748.12 @@ -829,7 +829,7 @@ ENTRY(hypercall_args_table) 748.13 .byte 1 /* do_get_debugreg */ 748.14 .byte 4 /* do_update_descriptor */ /* 10 */ 748.15 .byte 0 /* do_ni_hypercall */ 748.16 - .byte 5 /* do_dom_mem_op */ 748.17 + .byte 2 /* do_memory_op */ 748.18 .byte 2 /* do_multicall */ 748.19 .byte 4 /* do_update_va_mapping */ 748.20 .byte 2 /* do_set_timer_op */ /* 15 */
752.1 --- a/xen/arch/x86/x86_64/entry.S Mon Aug 29 16:05:29 2005 -0700 752.2 +++ b/xen/arch/x86/x86_64/entry.S Tue Aug 30 13:36:49 2005 -0700 752.3 @@ -339,7 +339,8 @@ create_bounce_frame: 752.4 1: /* In kernel context already: push new frame at existing %rsp. */ 752.5 movq UREGS_rsp+8(%rsp),%rsi 752.6 andb $0xfc,UREGS_cs+8(%rsp) # Indicate kernel context to guest. 752.7 -2: movq $HYPERVISOR_VIRT_START,%rax 752.8 +2: andq $~0xf,%rsi # Stack frames are 16-byte aligned. 752.9 + movq $HYPERVISOR_VIRT_START,%rax 752.10 cmpq %rax,%rsi 752.11 jb 1f # In +ve address space? Then okay. 752.12 movq $HYPERVISOR_VIRT_END+60,%rax 752.13 @@ -616,7 +617,7 @@ ENTRY(hypercall_table) 752.14 .quad do_get_debugreg 752.15 .quad do_update_descriptor /* 10 */ 752.16 .quad do_ni_hypercall 752.17 - .quad do_dom_mem_op 752.18 + .quad do_memory_op 752.19 .quad do_multicall 752.20 .quad do_update_va_mapping 752.21 .quad do_set_timer_op /* 15 */ 752.22 @@ -649,7 +650,7 @@ ENTRY(hypercall_args_table) 752.23 .byte 1 /* do_get_debugreg */ 752.24 .byte 2 /* do_update_descriptor */ /* 10 */ 752.25 .byte 0 /* do_ni_hypercall */ 752.26 - .byte 5 /* do_dom_mem_op */ 752.27 + .byte 2 /* do_memory_op */ 752.28 .byte 2 /* do_multicall */ 752.29 .byte 3 /* do_update_va_mapping */ 752.30 .byte 1 /* do_set_timer_op */ /* 15 */
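In both entry.S hunks the argument-count tables drop from 5 to 2 because the new hypercall takes a command plus a single pointer to a guest-supplied struct, instead of five scalar arguments. A hedged sketch of the before/after prototypes as this changeset implies them (declarations only; the exact signatures live in the Xen headers, not in these hunks):

    typedef unsigned short domid_t;     /* stand-in for the real typedef */

    /* Old interface: five scalar arguments, resumed via START_EXTENT_SHIFT. */
    long do_dom_mem_op(unsigned long op, unsigned long *extent_list,
                       unsigned long nr_extents, unsigned int extent_order,
                       domid_t domid);

    /* New interface: everything but the command travels in one struct. */
    long do_memory_op(int cmd, void *arg);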
754.1 --- a/xen/arch/x86/x86_64/traps.c Mon Aug 29 16:05:29 2005 -0700 754.2 +++ b/xen/arch/x86/x86_64/traps.c Tue Aug 30 13:36:49 2005 -0700 754.3 @@ -15,19 +15,22 @@ 754.4 754.5 void show_registers(struct cpu_user_regs *regs) 754.6 { 754.7 - printk("CPU: %d\nEIP: %04x:[<%016lx>]", 754.8 + printk("CPU: %d\nRIP: %04x:[<%016lx>]", 754.9 smp_processor_id(), 0xffff & regs->cs, regs->rip); 754.10 if ( !GUEST_MODE(regs) ) 754.11 print_symbol(" %s", regs->rip); 754.12 - printk("\nEFLAGS: %016lx\n", regs->eflags); 754.13 - printk("rax: %016lx rbx: %016lx rcx: %016lx rdx: %016lx\n", 754.14 - regs->rax, regs->rbx, regs->rcx, regs->rdx); 754.15 - printk("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n", 754.16 - regs->rsi, regs->rdi, regs->rbp, regs->rsp); 754.17 - printk("r8: %016lx r9: %016lx r10: %016lx r11: %016lx\n", 754.18 - regs->r8, regs->r9, regs->r10, regs->r11); 754.19 - printk("r12: %016lx r13: %016lx r14: %016lx r15: %016lx\n", 754.20 - regs->r12, regs->r13, regs->r14, regs->r15); 754.21 + printk("\nRFLAGS: %016lx\n", regs->eflags); 754.22 + printk("rax: %016lx rbx: %016lx rcx: %016lx\n", 754.23 + regs->rax, regs->rbx, regs->rcx); 754.24 + printk("rdx: %016lx rsi: %016lx rdi: %016lx\n", 754.25 + regs->rdx, regs->rsi, regs->rdi); 754.26 + printk("rbp: %016lx rsp: %016lx r8: %016lx\n", 754.27 + regs->rbp, regs->rsp, regs->r8); 754.28 + printk("r9: %016lx r10: %016lx r11: %016lx\n", 754.29 + regs->r9, regs->r10, regs->r11); 754.30 + printk("r12: %016lx r13: %016lx r14: %016lx\n", 754.31 + regs->r12, regs->r13, regs->r14); 754.32 + printk("r15: %016lx\n", regs->r15); 754.33 754.34 if ( GUEST_MODE(regs) ) 754.35 show_guest_stack();
755.1 --- a/xen/common/Makefile Mon Aug 29 16:05:29 2005 -0700 755.2 +++ b/xen/common/Makefile Tue Aug 30 13:36:49 2005 -0700 755.3 @@ -2,7 +2,6 @@ 755.4 include $(BASEDIR)/Rules.mk 755.5 755.6 ifeq ($(TARGET_ARCH),ia64) 755.7 -#OBJS := $(subst dom_mem_ops.o,,$(OBJS)) 755.8 OBJS := $(subst grant_table.o,,$(OBJS)) 755.9 endif 755.10
758.1 --- a/xen/common/dom_mem_ops.c Mon Aug 29 16:05:29 2005 -0700 758.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 758.3 @@ -1,186 +0,0 @@ 758.4 -/****************************************************************************** 758.5 - * dom_mem_ops.c 758.6 - * 758.7 - * Code to handle memory related requests from domains eg. balloon driver. 758.8 - * 758.9 - * Copyright (c) 2003-2004, B Dragovic & K A Fraser. 758.10 - */ 758.11 - 758.12 -#include <xen/config.h> 758.13 -#include <xen/types.h> 758.14 -#include <xen/lib.h> 758.15 -#include <xen/mm.h> 758.16 -#include <xen/perfc.h> 758.17 -#include <xen/sched.h> 758.18 -#include <xen/event.h> 758.19 -#include <xen/shadow.h> 758.20 -#include <asm/current.h> 758.21 -#include <asm/hardirq.h> 758.22 - 758.23 -/* 758.24 - * To allow safe resume of do_dom_mem_op() after preemption, we need to know 758.25 - * at what point in the page list to resume. For this purpose I steal the 758.26 - * high-order bits of the @op parameter, which are otherwise unused and zero. 758.27 - */ 758.28 -#define START_EXTENT_SHIFT 4 /* op[:4] == start_extent */ 758.29 - 758.30 -#define PREEMPT_CHECK(_op) \ 758.31 - if ( hypercall_preempt_check() ) \ 758.32 - return hypercall5_create_continuation( \ 758.33 - __HYPERVISOR_dom_mem_op, \ 758.34 - (_op) | (i << START_EXTENT_SHIFT), \ 758.35 - extent_list, nr_extents, extent_order, \ 758.36 - (d == current->domain) ? DOMID_SELF : d->domain_id); 758.37 - 758.38 -static long 758.39 -alloc_dom_mem(struct domain *d, 758.40 - unsigned long *extent_list, 758.41 - unsigned long start_extent, 758.42 - unsigned int nr_extents, 758.43 - unsigned int extent_order, 758.44 - unsigned int flags) 758.45 -{ 758.46 - struct pfn_info *page; 758.47 - unsigned long i; 758.48 - 758.49 - if ( (extent_list != NULL) && 758.50 - !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) ) 758.51 - return start_extent; 758.52 - 758.53 - if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) ) 758.54 - { 758.55 - DPRINTK("Only I/O-capable domains may allocate > order-0 memory.\n"); 758.56 - return start_extent; 758.57 - } 758.58 - 758.59 - for ( i = start_extent; i < nr_extents; i++ ) 758.60 - { 758.61 - PREEMPT_CHECK(MEMOP_increase_reservation); 758.62 - 758.63 - if ( unlikely((page = alloc_domheap_pages(d, extent_order, 758.64 - flags)) == NULL) ) 758.65 - { 758.66 - DPRINTK("Could not allocate a frame\n"); 758.67 - return i; 758.68 - } 758.69 - 758.70 - /* Inform the domain of the new page's machine address. 
*/ 758.71 - if ( (extent_list != NULL) && 758.72 - (__put_user(page_to_pfn(page), &extent_list[i]) != 0) ) 758.73 - return i; 758.74 - } 758.75 - 758.76 - return i; 758.77 -} 758.78 - 758.79 -static long 758.80 -free_dom_mem(struct domain *d, 758.81 - unsigned long *extent_list, 758.82 - unsigned long start_extent, 758.83 - unsigned int nr_extents, 758.84 - unsigned int extent_order) 758.85 -{ 758.86 - struct pfn_info *page; 758.87 - unsigned long i, j, mpfn; 758.88 - 758.89 - if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) ) 758.90 - return start_extent; 758.91 - 758.92 - for ( i = start_extent; i < nr_extents; i++ ) 758.93 - { 758.94 - PREEMPT_CHECK(MEMOP_decrease_reservation); 758.95 - 758.96 - if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) ) 758.97 - return i; 758.98 - 758.99 - for ( j = 0; j < (1 << extent_order); j++ ) 758.100 - { 758.101 - if ( unlikely((mpfn + j) >= max_page) ) 758.102 - { 758.103 - DPRINTK("Domain %u page number out of range (%lx >= %lx)\n", 758.104 - d->domain_id, mpfn + j, max_page); 758.105 - return i; 758.106 - } 758.107 - 758.108 - page = &frame_table[mpfn + j]; 758.109 - if ( unlikely(!get_page(page, d)) ) 758.110 - { 758.111 - DPRINTK("Bad page free for domain %u\n", d->domain_id); 758.112 - return i; 758.113 - } 758.114 - 758.115 - if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) ) 758.116 - put_page_and_type(page); 758.117 - 758.118 - if ( test_and_clear_bit(_PGC_allocated, &page->count_info) ) 758.119 - put_page(page); 758.120 - 758.121 - shadow_sync_and_drop_references(d, page); 758.122 - 758.123 - put_page(page); 758.124 - } 758.125 - } 758.126 - 758.127 - return i; 758.128 -} 758.129 - 758.130 -long 758.131 -do_dom_mem_op(unsigned long op, 758.132 - unsigned long *extent_list, 758.133 - unsigned int nr_extents, 758.134 - unsigned int extent_order, 758.135 - domid_t domid) 758.136 -{ 758.137 - struct domain *d; 758.138 - unsigned long rc, start_extent; 758.139 - unsigned int address_bits_order; 758.140 - 758.141 - /* Extract @start_extent from @op. */ 758.142 - start_extent = op >> START_EXTENT_SHIFT; 758.143 - op &= (1 << START_EXTENT_SHIFT) - 1; 758.144 - 758.145 - /* seperate extent_order and address_bits_order */ 758.146 - address_bits_order = (extent_order >> 8) & 0xff; 758.147 - extent_order &= 0xff; 758.148 - 758.149 - if ( unlikely(start_extent > nr_extents) ) 758.150 - return -EINVAL; 758.151 - 758.152 - if ( likely(domid == DOMID_SELF) ) 758.153 - d = current->domain; 758.154 - else if ( unlikely(!IS_PRIV(current->domain)) ) 758.155 - return -EPERM; 758.156 - else if ( unlikely((d = find_domain_by_id(domid)) == NULL) ) 758.157 - return -ESRCH; 758.158 - 758.159 - switch ( op ) 758.160 - { 758.161 - case MEMOP_increase_reservation: 758.162 - rc = alloc_dom_mem( 758.163 - d, extent_list, start_extent, nr_extents, extent_order, 758.164 - (address_bits_order <= 32) ? ALLOC_DOM_DMA : 0); 758.165 - break; 758.166 - case MEMOP_decrease_reservation: 758.167 - rc = free_dom_mem( 758.168 - d, extent_list, start_extent, nr_extents, extent_order); 758.169 - break; 758.170 - default: 758.171 - rc = -ENOSYS; 758.172 - break; 758.173 - } 758.174 - 758.175 - if ( unlikely(domid != DOMID_SELF) ) 758.176 - put_domain(d); 758.177 - 758.178 - return rc; 758.179 -} 758.180 - 758.181 -/* 758.182 - * Local variables: 758.183 - * mode: C 758.184 - * c-set-style: "BSD" 758.185 - * c-basic-offset: 4 758.186 - * tab-width: 4 758.187 - * indent-tabs-mode: nil 758.188 - * End: 758.189 - */
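The removed handler had to unpack three values that the replacement interface carries explicitly. A hedged sketch of how the old packed parameters map onto the fields of the xen_memory_reservation structure introduced later in this changeset (the variable names are those of the deleted do_dom_mem_op()):

    /* Illustrative only: the old encodings and their new homes. */
    struct xen_memory_reservation res = {
        .extent_start = extent_list,                /* was argument 2          */
        .nr_extents   = nr_extents,                 /* was argument 3          */
        .extent_order = extent_order & 0xff,        /* low byte of argument 4  */
        .address_bits = (extent_order >> 8) & 0xff, /* high byte of argument 4 */
        .domid        = domid                       /* was argument 5          */
    };
    /* start_extent (formerly op >> START_EXTENT_SHIFT) now travels in the
     * high bits of the 'cmd' argument to do_memory_op() instead.           */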
761.1 --- a/xen/common/grant_table.c Mon Aug 29 16:05:29 2005 -0700 761.2 +++ b/xen/common/grant_table.c Tue Aug 30 13:36:49 2005 -0700 761.3 @@ -1211,13 +1211,13 @@ gnttab_notify_transfer( 761.4 DPRINTK("Bad pfn (%lx)\n", pfn); 761.5 else 761.6 { 761.7 - machine_to_phys_mapping[frame] = pfn; 761.8 + set_pfn_from_mfn(frame, pfn); 761.9 761.10 if ( unlikely(shadow_mode_log_dirty(ld))) 761.11 mark_dirty(ld, frame); 761.12 761.13 if (shadow_mode_translate(ld)) 761.14 - __phys_to_machine_mapping[pfn] = frame; 761.15 + set_mfn_from_pfn(pfn, frame); 761.16 } 761.17 sha->frame = __mfn_to_gpfn(rd, frame); 761.18 sha->domid = rd->domain_id; 761.19 @@ -1268,8 +1268,7 @@ grant_table_create( 761.20 { 761.21 SHARE_PFN_WITH_DOMAIN( 761.22 virt_to_page((char *)(t->shared)+(i*PAGE_SIZE)), d); 761.23 - machine_to_phys_mapping[(virt_to_phys(t->shared) >> PAGE_SHIFT) + i] = 761.24 - INVALID_M2P_ENTRY; 761.25 + set_pfn_from_mfn((virt_to_phys(t->shared) >> PAGE_SHIFT) + i, INVALID_M2P_ENTRY); 761.26 } 761.27 761.28 /* Okay, install the structure. */
764.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 764.2 +++ b/xen/common/memory.c Tue Aug 30 13:36:49 2005 -0700 764.3 @@ -0,0 +1,205 @@ 764.4 +/****************************************************************************** 764.5 + * memory.c 764.6 + * 764.7 + * Code to handle memory-related requests. 764.8 + * 764.9 + * Copyright (c) 2003-2004, B Dragovic 764.10 + * Copyright (c) 2003-2005, K A Fraser 764.11 + */ 764.12 + 764.13 +#include <xen/config.h> 764.14 +#include <xen/types.h> 764.15 +#include <xen/lib.h> 764.16 +#include <xen/mm.h> 764.17 +#include <xen/perfc.h> 764.18 +#include <xen/sched.h> 764.19 +#include <xen/event.h> 764.20 +#include <xen/shadow.h> 764.21 +#include <asm/current.h> 764.22 +#include <asm/hardirq.h> 764.23 +#include <public/memory.h> 764.24 + 764.25 +static long 764.26 +increase_reservation( 764.27 + struct domain *d, 764.28 + unsigned long *extent_list, 764.29 + unsigned int nr_extents, 764.30 + unsigned int extent_order, 764.31 + unsigned int flags) 764.32 +{ 764.33 + struct pfn_info *page; 764.34 + unsigned long i; 764.35 + 764.36 + if ( (extent_list != NULL) 764.37 + && !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) ) 764.38 + return 0; 764.39 + 764.40 + if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) ) 764.41 + { 764.42 + DPRINTK("Only I/O-capable domains may allocate > order-0 memory.\n"); 764.43 + return 0; 764.44 + } 764.45 + 764.46 + for ( i = 0; i < nr_extents; i++ ) 764.47 + { 764.48 + if ( hypercall_preempt_check() ) 764.49 + return i; 764.50 + 764.51 + if ( unlikely((page = alloc_domheap_pages( 764.52 + d, extent_order, flags)) == NULL) ) 764.53 + { 764.54 + DPRINTK("Could not allocate a frame\n"); 764.55 + return i; 764.56 + } 764.57 + 764.58 + /* Inform the domain of the new page's machine address. 
*/ 764.59 + if ( (extent_list != NULL) 764.60 + && (__put_user(page_to_pfn(page), &extent_list[i]) != 0) ) 764.61 + return i; 764.62 + } 764.63 + 764.64 + return nr_extents; 764.65 +} 764.66 + 764.67 +static long 764.68 +decrease_reservation( 764.69 + struct domain *d, 764.70 + unsigned long *extent_list, 764.71 + unsigned int nr_extents, 764.72 + unsigned int extent_order, 764.73 + unsigned int flags) 764.74 +{ 764.75 + struct pfn_info *page; 764.76 + unsigned long i, j, mpfn; 764.77 + 764.78 + if ( !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) ) 764.79 + return 0; 764.80 + 764.81 + for ( i = 0; i < nr_extents; i++ ) 764.82 + { 764.83 + if ( hypercall_preempt_check() ) 764.84 + return i; 764.85 + 764.86 + if ( unlikely(__get_user(mpfn, &extent_list[i]) != 0) ) 764.87 + return i; 764.88 + 764.89 + for ( j = 0; j < (1 << extent_order); j++ ) 764.90 + { 764.91 + if ( unlikely((mpfn + j) >= max_page) ) 764.92 + { 764.93 + DPRINTK("Domain %u page number out of range (%lx >= %lx)\n", 764.94 + d->domain_id, mpfn + j, max_page); 764.95 + return i; 764.96 + } 764.97 + 764.98 + page = &frame_table[mpfn + j]; 764.99 + if ( unlikely(!get_page(page, d)) ) 764.100 + { 764.101 + DPRINTK("Bad page free for domain %u\n", d->domain_id); 764.102 + return i; 764.103 + } 764.104 + 764.105 + if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) ) 764.106 + put_page_and_type(page); 764.107 + 764.108 + if ( test_and_clear_bit(_PGC_allocated, &page->count_info) ) 764.109 + put_page(page); 764.110 + 764.111 + shadow_sync_and_drop_references(d, page); 764.112 + 764.113 + put_page(page); 764.114 + } 764.115 + } 764.116 + 764.117 + return nr_extents; 764.118 +} 764.119 + 764.120 +/* 764.121 + * To allow safe resume of do_memory_op() after preemption, we need to know 764.122 + * at what point in the page list to resume. For this purpose I steal the 764.123 + * high-order bits of the @cmd parameter, which are otherwise unused and zero. 764.124 + */ 764.125 +#define START_EXTENT_SHIFT 4 /* cmd[:4] == start_extent */ 764.126 + 764.127 +long do_memory_op(int cmd, void *arg) 764.128 +{ 764.129 + struct domain *d; 764.130 + int rc, start_extent, op, flags = 0; 764.131 + struct xen_memory_reservation reservation; 764.132 + 764.133 + op = cmd & ((1 << START_EXTENT_SHIFT) - 1); 764.134 + 764.135 + switch ( op ) 764.136 + { 764.137 + case XENMEM_increase_reservation: 764.138 + case XENMEM_decrease_reservation: 764.139 + if ( copy_from_user(&reservation, arg, sizeof(reservation)) ) 764.140 + return -EFAULT; 764.141 + 764.142 + start_extent = cmd >> START_EXTENT_SHIFT; 764.143 + if ( unlikely(start_extent > reservation.nr_extents) ) 764.144 + return -EINVAL; 764.145 + 764.146 + if ( reservation.extent_start != NULL ) 764.147 + reservation.extent_start += start_extent; 764.148 + reservation.nr_extents -= start_extent; 764.149 + 764.150 + if ( unlikely(reservation.address_bits != 0) 764.151 + && (reservation.address_bits > (get_order(max_page)+PAGE_SHIFT)) ) 764.152 + { 764.153 + if ( reservation.address_bits < 31 ) 764.154 + return -ENOMEM; 764.155 + flags = ALLOC_DOM_DMA; 764.156 + } 764.157 + 764.158 + if ( likely(reservation.domid == DOMID_SELF) ) 764.159 + d = current->domain; 764.160 + else if ( !IS_PRIV(current->domain) ) 764.161 + return -EPERM; 764.162 + else if ( (d = find_domain_by_id(reservation.domid)) == NULL ) 764.163 + return -ESRCH; 764.164 + 764.165 + rc = ((op == XENMEM_increase_reservation) ? 
764.166 + increase_reservation : decrease_reservation)( 764.167 + d, 764.168 + reservation.extent_start, 764.169 + reservation.nr_extents, 764.170 + reservation.extent_order, 764.171 + flags); 764.172 + 764.173 + if ( unlikely(reservation.domid != DOMID_SELF) ) 764.174 + put_domain(d); 764.175 + 764.176 + rc += start_extent; 764.177 + 764.178 + if ( (rc != reservation.nr_extents) && hypercall_preempt_check() ) 764.179 + return hypercall2_create_continuation( 764.180 + __HYPERVISOR_memory_op, 764.181 + op | (rc << START_EXTENT_SHIFT), 764.182 + arg); 764.183 + 764.184 + break; 764.185 + 764.186 + case XENMEM_maximum_ram_page: 764.187 + if ( put_user(max_page, (unsigned long *)arg) ) 764.188 + return -EFAULT; 764.189 + rc = -ENOSYS; 764.190 + break; 764.191 + 764.192 + default: 764.193 + rc = -ENOSYS; 764.194 + break; 764.195 + } 764.196 + 764.197 + return rc; 764.198 +} 764.199 + 764.200 +/* 764.201 + * Local variables: 764.202 + * mode: C 764.203 + * c-set-style: "BSD" 764.204 + * c-basic-offset: 4 764.205 + * tab-width: 4 764.206 + * indent-tabs-mode: nil 764.207 + * End: 764.208 + */
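do_memory_op() keeps the old trick of making the call restartable: the operation sits in the low START_EXTENT_SHIFT bits of 'cmd', the resume point in the remaining high bits, and a preempted call is re-issued through hypercall2_create_continuation() with the completed-extent count shifted back in. A small worked example of the encoding (the count of 100 is illustrative):

    /* A continuation of XENMEM_increase_reservation (op 0) that should
     * resume after 100 extents have already been processed:            */
    int cmd = XENMEM_increase_reservation | (100 << START_EXTENT_SHIFT);

    /* do_memory_op() recovers both halves on re-entry: */
    int op           = cmd & ((1 << START_EXTENT_SHIFT) - 1);  /* == 0   */
    int start_extent = cmd >> START_EXTENT_SHIFT;              /* == 100 */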
766.1 --- a/xen/common/page_alloc.c Mon Aug 29 16:05:29 2005 -0700 766.2 +++ b/xen/common/page_alloc.c Tue Aug 30 13:36:49 2005 -0700 766.3 @@ -216,7 +216,7 @@ unsigned long alloc_boot_pages(unsigned 766.4 #define NR_ZONES 3 766.5 766.6 766.7 -#define MAX_DMADOM_PFN 0xFFFFF 766.8 +#define MAX_DMADOM_PFN 0x7FFFF /* 31 addressable bits */ 766.9 #define pfn_dom_zone_type(_pfn) \ 766.10 (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM) 766.11
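Worked arithmetic behind the new constant, assuming 4 KiB pages (PAGE_SHIFT of 12):

    /* (MAX_DMADOM_PFN + 1) << PAGE_SHIFT
     *   = 0x80000 << 12
     *   = 0x80000000           -- 2^31 bytes, hence "31 addressable bits".
     * The old value 0xFFFFF gave 0x100000 << 12 = 2^32 bytes.              */

Every frame in MEMZONE_DMADOM therefore lies below the 2^31 boundary, matching the new do_memory_op() logic, which uses ALLOC_DOM_DMA for reservations whose address limit is tighter than the machine's width and rejects limits below 31 bits with -ENOMEM.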
783.1 --- a/xen/include/asm-ia64/mm.h Mon Aug 29 16:05:29 2005 -0700 783.2 +++ b/xen/include/asm-ia64/mm.h Tue Aug 30 13:36:49 2005 -0700 783.3 @@ -405,7 +405,7 @@ extern unsigned long *mpt_table; 783.4 /* If pmt table is provided by control pannel later, we need __get_user 783.5 * here. However if it's allocated by HV, we should access it directly 783.6 */ 783.7 -#define phys_to_machine_mapping(d, gpfn) \ 783.8 +#define get_mfn_from_pfn(d, gpfn) \ 783.9 ((d) == dom0 ? gpfn : \ 783.10 (gpfn <= d->arch.max_pfn ? (d)->arch.pmt[(gpfn)] : \ 783.11 INVALID_MFN)) 783.12 @@ -414,7 +414,7 @@ extern unsigned long *mpt_table; 783.13 machine_to_phys_mapping[(mfn)] 783.14 783.15 #define __gpfn_to_mfn(_d, gpfn) \ 783.16 - phys_to_machine_mapping((_d), (gpfn)) 783.17 + get_mfn_from_pfn((_d), (gpfn)) 783.18 783.19 #define __gpfn_invalid(_d, gpfn) \ 783.20 (__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK)
812.1 --- a/xen/include/asm-x86/mm.h Mon Aug 29 16:05:29 2005 -0700 812.2 +++ b/xen/include/asm-x86/mm.h Tue Aug 30 13:36:49 2005 -0700 812.3 @@ -255,28 +255,31 @@ int check_descriptor(struct desc_struct 812.4 * contiguous (or near contiguous) physical memory. 812.5 */ 812.6 #undef machine_to_phys_mapping 812.7 -#define machine_to_phys_mapping ((u32 *)RDWR_MPT_VIRT_START) 812.8 +#define machine_to_phys_mapping ((u32 *)RDWR_MPT_VIRT_START) 812.9 #define INVALID_M2P_ENTRY (~0U) 812.10 #define VALID_M2P(_e) (!((_e) & (1U<<31))) 812.11 #define IS_INVALID_M2P_ENTRY(_e) (!VALID_M2P(_e)) 812.12 812.13 +#define set_pfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn)) 812.14 +#define get_pfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)]) 812.15 + 812.16 /* 812.17 * The phys_to_machine_mapping is the reversed mapping of MPT for full 812.18 * virtualization. It is only used by shadow_mode_translate()==true 812.19 * guests, so we steal the address space that would have normally 812.20 * been used by the read-only MPT map. 812.21 */ 812.22 -#define __phys_to_machine_mapping ((unsigned long *)RO_MPT_VIRT_START) 812.23 -#define INVALID_MFN (~0UL) 812.24 -#define VALID_MFN(_mfn) (!((_mfn) & (1U<<31))) 812.25 +#define phys_to_machine_mapping ((unsigned long *)RO_MPT_VIRT_START) 812.26 +#define INVALID_MFN (~0UL) 812.27 +#define VALID_MFN(_mfn) (!((_mfn) & (1U<<31))) 812.28 812.29 -/* Returns the machine physical */ 812.30 -static inline unsigned long phys_to_machine_mapping(unsigned long pfn) 812.31 +#define set_mfn_from_pfn(pfn, mfn) (phys_to_machine_mapping[(pfn)] = (mfn)) 812.32 +static inline unsigned long get_mfn_from_pfn(unsigned long pfn) 812.33 { 812.34 unsigned long mfn; 812.35 l1_pgentry_t pte; 812.36 812.37 - if ( (__copy_from_user(&pte, &__phys_to_machine_mapping[pfn], 812.38 + if ( (__copy_from_user(&pte, &phys_to_machine_mapping[pfn], 812.39 sizeof(pte)) == 0) && 812.40 (l1e_get_flags(pte) & _PAGE_PRESENT) ) 812.41 mfn = l1e_get_pfn(pte); 812.42 @@ -285,7 +288,6 @@ static inline unsigned long phys_to_mach 812.43 812.44 return mfn; 812.45 } 812.46 -#define set_machinetophys(_mfn, _pfn) machine_to_phys_mapping[(_mfn)] = (_pfn) 812.47 812.48 #ifdef MEMORY_GUARD 812.49 void memguard_init(void);
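With the open-coded array accesses replaced by set_pfn_from_mfn()/get_pfn_from_mfn() and set_mfn_from_pfn()/get_mfn_from_pfn(), both directions of the translation go through one small accessor family. A minimal sketch of the pairing, mirroring how the grant-table code above uses them (the wrapper functions themselves are hypothetical):

    /* Record that machine frame 'mfn' now backs guest frame 'pfn' of a
     * translated domain: update the M2P table, then the guest P2M view. */
    static void example_assign_frame(unsigned long pfn, unsigned long mfn)
    {
        set_pfn_from_mfn(mfn, pfn);   /* machine_to_phys_mapping[mfn] = pfn */
        set_mfn_from_pfn(pfn, mfn);   /* phys_to_machine_mapping[pfn] = mfn */
    }

    /* And the corresponding teardown: */
    static void example_release_frame(unsigned long pfn, unsigned long mfn)
    {
        set_pfn_from_mfn(mfn, INVALID_M2P_ENTRY);
        set_mfn_from_pfn(pfn, INVALID_MFN);
    }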
814.1 --- a/xen/include/asm-x86/shadow.h Mon Aug 29 16:05:29 2005 -0700 814.2 +++ b/xen/include/asm-x86/shadow.h Tue Aug 30 13:36:49 2005 -0700 814.3 @@ -269,14 +269,14 @@ static inline void shadow_mode_disable(s 814.4 814.5 #define __mfn_to_gpfn(_d, mfn) \ 814.6 ( (shadow_mode_translate(_d)) \ 814.7 - ? machine_to_phys_mapping[(mfn)] \ 814.8 + ? get_pfn_from_mfn(mfn) \ 814.9 : (mfn) ) 814.10 814.11 #define __gpfn_to_mfn(_d, gpfn) \ 814.12 ({ \ 814.13 ASSERT(current->domain == (_d)); \ 814.14 (shadow_mode_translate(_d)) \ 814.15 - ? phys_to_machine_mapping(gpfn) \ 814.16 + ? get_mfn_from_pfn(gpfn) \ 814.17 : (gpfn); \ 814.18 }) 814.19 814.20 @@ -461,7 +461,7 @@ static inline int __mark_dirty(struct do 814.21 // This wants the nice compact set of PFNs from 0..domain's max, 814.22 // which __mfn_to_gpfn() only returns for translated domains. 814.23 // 814.24 - pfn = machine_to_phys_mapping[mfn]; 814.25 + pfn = get_pfn_from_mfn(mfn); 814.26 814.27 /* 814.28 * Values with the MSB set denote MFNs that aren't really part of the 814.29 @@ -562,7 +562,7 @@ update_hl2e(struct vcpu *v, unsigned lon 814.30 old_hl2e = v->arch.hl2_vtable[index]; 814.31 814.32 if ( (l2e_get_flags(gl2e) & _PAGE_PRESENT) && 814.33 - VALID_MFN(mfn = phys_to_machine_mapping(l2e_get_pfn(gl2e))) ) 814.34 + VALID_MFN(mfn = get_mfn_from_pfn(l2e_get_pfn(gl2e))) ) 814.35 new_hl2e = l1e_from_pfn(mfn, __PAGE_HYPERVISOR); 814.36 else 814.37 new_hl2e = l1e_empty();
815.1 --- a/xen/include/asm-x86/shadow_64.h Mon Aug 29 16:05:29 2005 -0700 815.2 +++ b/xen/include/asm-x86/shadow_64.h Tue Aug 30 13:36:49 2005 -0700 815.3 @@ -138,7 +138,7 @@ static inline pgentry_64_t *__entry( 815.4 return NULL; 815.5 mfn = entry_get_value(*le_e) >> PAGE_SHIFT; 815.6 if ((flag & GUEST_ENTRY) && shadow_mode_translate(d)) 815.7 - mfn = phys_to_machine_mapping(mfn); 815.8 + mfn = get_mfn_from_pfn(mfn); 815.9 le_p = (pgentry_64_t *)phys_to_virt(mfn << PAGE_SHIFT); 815.10 index = table_offset_64(va, (level + i - 1)); 815.11 le_e = &le_p[index]; 815.12 @@ -257,7 +257,7 @@ static inline void * __guest_set_l1e( 815.13 if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT))) 815.14 return NULL; 815.15 815.16 - l1mfn = phys_to_machine_mapping( 815.17 + l1mfn = get_mfn_from_pfn( 815.18 l2e_get_pfn(gl2e)); 815.19 815.20 l1va = (l1_pgentry_32_t *) 815.21 @@ -299,7 +299,7 @@ static inline void * __guest_get_l1e( 815.22 return NULL; 815.23 815.24 815.25 - l1mfn = phys_to_machine_mapping( 815.26 + l1mfn = get_mfn_from_pfn( 815.27 l2e_get_pfn(gl2e)); 815.28 l1va = (l1_pgentry_32_t *) phys_to_virt( 815.29 l1mfn << L1_PAGETABLE_SHIFT);
821.1 --- a/xen/include/asm-x86/vmx_platform.h Mon Aug 29 16:05:29 2005 -0700 821.2 +++ b/xen/include/asm-x86/vmx_platform.h Tue Aug 30 13:36:49 2005 -0700 821.3 @@ -91,6 +91,6 @@ extern int vmx_setup_platform(struct vcp 821.4 extern void vmx_io_assist(struct vcpu *v); 821.5 821.6 // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame. 821.7 -#define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT))) 821.8 +#define mmio_space(gpa) (!VALID_MFN(get_mfn_from_pfn((gpa) >> PAGE_SHIFT))) 821.9 821.10 #endif
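The rewritten mmio_space() test now reads: a guest physical frame with no valid machine frame behind it is treated as memory-mapped I/O. A hedged sketch of the intended dispatch (the emulation hook named here is hypothetical):

    /* Hypothetical decode of a guest physical address 'gpa': */
    if ( mmio_space(gpa) )          /* !VALID_MFN(get_mfn_from_pfn(gpa >> PAGE_SHIFT)) */
        handle_mmio(gpa);           /* hypothetical emulation hook */
    else
        mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT);   /* ordinary RAM */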
835.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 835.2 +++ b/xen/include/public/memory.h Tue Aug 30 13:36:49 2005 -0700 835.3 @@ -0,0 +1,50 @@ 835.4 +/****************************************************************************** 835.5 + * memory.h 835.6 + * 835.7 + * Memory reservation and information. 835.8 + * 835.9 + * Copyright (c) 2005, Keir Fraser <keir@xensource.com> 835.10 + */ 835.11 + 835.12 +#ifndef __XEN_PUBLIC_MEMORY_H__ 835.13 +#define __XEN_PUBLIC_MEMORY_H__ 835.14 + 835.15 +/* arg == addr of struct xen_memory_reservation. */ 835.16 +#define XENMEM_increase_reservation 0 835.17 + 835.18 +/* arg == addr of struct xen_memory_reservation. */ 835.19 +#define XENMEM_decrease_reservation 1 835.20 + 835.21 +/* arg == addr of unsigned long. */ 835.22 +#define XENMEM_maximum_ram_page 2 835.23 + 835.24 +typedef struct xen_memory_reservation { 835.25 + 835.26 + /* 835.27 + * MFN bases of extents to free (XENMEM_decrease_reservation). 835.28 + * MFN bases of extents that were allocated (XENMEM_increase_reservation). 835.29 + */ 835.30 + unsigned long *extent_start; 835.31 + 835.32 + /* Number of extents, and size/alignment of each (2^extent_order pages). */ 835.33 + unsigned long nr_extents; 835.34 + unsigned int extent_order; 835.35 + 835.36 + /* 835.37 + * XENMEM_increase_reservation: maximum # bits addressable by the user 835.38 + * of the allocated region (e.g., I/O devices often have a 32-bit 835.39 + * limitation even in 64-bit systems). If zero then the user has no 835.40 + * addressing restriction. 835.41 + * XENMEM_decrease_reservation: unused. 835.42 + */ 835.43 + unsigned int address_bits; 835.44 + 835.45 + /* 835.46 + * Domain whose reservation is being changed. 835.47 + * Unprivileged domains can specify only DOMID_SELF. 835.48 + */ 835.49 + domid_t domid; 835.50 + 835.51 +} xen_memory_reservation_t; 835.52 + 835.53 +#endif /* __XEN_PUBLIC_MEMORY_H__ */
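A short sizing note on the new structure: each extent is 2^extent_order contiguous frames and extent_start holds one base MFN per extent, so a reservation spans nr_extents << extent_order frames in total. A hedged helper making that explicit (the function is illustrative; a PAGE_SHIFT of 12 is assumed):

    /* Total bytes described by a reservation, e.g. nr_extents = 16,
     * extent_order = 0  ->  16 frames  ->  64 KiB with 4 KiB pages.   */
    static unsigned long reservation_bytes(const xen_memory_reservation_t *r)
    {
        return r->nr_extents << (r->extent_order + 12 /* PAGE_SHIFT */);
    }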
838.1 --- a/xen/include/public/xen.h Mon Aug 29 16:05:29 2005 -0700 838.2 +++ b/xen/include/public/xen.h Tue Aug 30 13:36:49 2005 -0700 838.3 @@ -42,7 +42,7 @@ 838.4 #define __HYPERVISOR_set_debugreg 8 838.5 #define __HYPERVISOR_get_debugreg 9 838.6 #define __HYPERVISOR_update_descriptor 10 838.7 -#define __HYPERVISOR_dom_mem_op 12 838.8 +#define __HYPERVISOR_memory_op 12 838.9 #define __HYPERVISOR_multicall 13 838.10 #define __HYPERVISOR_update_va_mapping 14 838.11 #define __HYPERVISOR_set_timer_op 15 838.12 @@ -225,12 +225,6 @@ struct mmuext_op { 838.13 #define CONSOLEIO_read 1 838.14 838.15 /* 838.16 - * Commands to HYPERVISOR_dom_mem_op(). 838.17 - */ 838.18 -#define MEMOP_increase_reservation 0 838.19 -#define MEMOP_decrease_reservation 1 838.20 - 838.21 -/* 838.22 * Commands to HYPERVISOR_vm_assist(). 838.23 */ 838.24 #define VMASST_CMD_enable 0 838.25 @@ -438,19 +432,21 @@ typedef struct shared_info { 838.26 #define MAX_GUEST_CMDLINE 1024 838.27 typedef struct start_info { 838.28 /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */ 838.29 - unsigned long nr_pages; /* Total pages allocated to this domain. */ 838.30 - unsigned long shared_info;/* MACHINE address of shared info struct. */ 838.31 - u32 flags; /* SIF_xxx flags. */ 838.32 + unsigned long nr_pages; /* Total pages allocated to this domain. */ 838.33 + unsigned long shared_info; /* MACHINE address of shared info struct. */ 838.34 + u32 flags; /* SIF_xxx flags. */ 838.35 u16 domain_controller_evtchn; 838.36 /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */ 838.37 - unsigned long pt_base; /* VIRTUAL address of page directory. */ 838.38 - unsigned long nr_pt_frames;/* Number of bootstrap p.t. frames. */ 838.39 - unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ 838.40 - unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ 838.41 - unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ 838.42 + unsigned long pt_base; /* VIRTUAL address of page directory. */ 838.43 + unsigned long nr_pt_frames; /* Number of bootstrap p.t. frames. */ 838.44 + unsigned long mfn_list; /* VIRTUAL address of page-frame list. */ 838.45 + unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */ 838.46 + unsigned long mod_len; /* Size (bytes) of pre-loaded module. */ 838.47 s8 cmd_line[MAX_GUEST_CMDLINE]; 838.48 - unsigned long store_mfn; /* MACHINE page number of shared page. */ 838.49 - u16 store_evtchn; /* Event channel for store communication. */ 838.50 + unsigned long store_mfn; /* MACHINE page number of shared page. */ 838.51 + u16 store_evtchn; /* Event channel for store communication. */ 838.52 + unsigned long console_mfn; /* MACHINE address of console page. */ 838.53 + u16 console_evtchn; /* Event channel for console messages. */ 838.54 } start_info_t; 838.55 838.56 /* These flags are passed in the 'flags' field of start_info_t. */
844.1 --- a/xen/include/xen/perfc.h Mon Aug 29 16:05:29 2005 -0700 844.2 +++ b/xen/include/xen/perfc.h Tue Aug 30 13:36:49 2005 -0700 844.3 @@ -4,6 +4,7 @@ 844.4 844.5 #ifdef PERF_COUNTERS 844.6 844.7 +#include <xen/lib.h> 844.8 #include <asm/atomic.h> 844.9 844.10 /* 844.11 @@ -87,7 +88,7 @@ extern struct perfcounter perfcounters; 844.12 * Histogram: special treatment for 0 and 1 count. After that equally spaced 844.13 * with last bucket taking the rest. 844.14 */ 844.15 -#ifdef PERFC_ARRAYS 844.16 +#ifdef PERF_ARRAYS 844.17 #define perfc_incr_histo(_x,_v,_n) \ 844.18 do { \ 844.19 if ( (_v) == 0 ) \