xen-vtx-unstable
changeset 6684:e3fd0fa58364
Rename get_order() to get_order_from_bytes() and add
new function get_order_from_pages(). Fix
HYPERVISOR_memory_op(), properly this time.
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kaf24@firebug.cl.cam.ac.uk |
---|---|
date | Thu Sep 08 17:25:52 2005 +0000 (2005-09-08) |
parents | c2705e74efba |
children | 5321e0858b0d |
files | xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/mm.c xen/common/memory.c xen/common/trace.c xen/common/xmalloc.c xen/drivers/char/console.c xen/drivers/char/serial.c xen/include/asm-x86/page.h |
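The rename makes the units explicit at every call site: get_order_from_bytes() takes a size in bytes, while the new get_order_from_pages() takes a count of pages, so the two can no longer be silently confused — exactly the confusion behind the HYPERVISOR_memory_op() bug fixed in the per-file diffs below. A minimal userspace sketch of the two helpers, mirroring the implementations added to xen/include/asm-x86/page.h and assuming x86's PAGE_SHIFT of 12:

```c
#include <stdio.h>

#define PAGE_SHIFT 12

/* Order for a size given in bytes (rounds up to a power of two of pages). */
static int get_order_from_bytes(unsigned long size)
{
    int order;
    size = (size - 1) >> PAGE_SHIFT;
    for ( order = 0; size; order++ )
        size >>= 1;
    return order;
}

/* Order for a size given as a count of pages. */
static int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;
    return order;
}

int main(void)
{
    /* 2MB expressed in bytes and in 4K pages gives the same order... */
    printf("%d\n", get_order_from_bytes(2UL << 20));   /* 9 */
    printf("%d\n", get_order_from_pages(512));         /* 9 */
    /* ...but passing a page count where bytes are expected (the old
     * get_order() misuse) silently under-computes the order. */
    printf("%d\n", get_order_from_bytes(512));         /* 0 */
    return 0;
}
```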
line diff
--- a/xen/arch/x86/domain.c	Thu Sep 08 15:22:01 2005 +0000
+++ b/xen/arch/x86/domain.c	Thu Sep 08 17:25:52 2005 +0000
@@ -381,11 +381,13 @@ static int vmx_final_setup_guest(
 out:
     free_vmcs(vmcs);
     if(v->arch.arch_vmx.io_bitmap_a != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_a = 0;
     }
     if(v->arch.arch_vmx.io_bitmap_b != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_b = 0;
     }
     v->arch.arch_vmx.vmcs = 0;
@@ -972,11 +974,13 @@ static void vmx_relinquish_resources(str
     BUG_ON(v->arch.arch_vmx.vmcs == NULL);
     free_vmcs(v->arch.arch_vmx.vmcs);
     if(v->arch.arch_vmx.io_bitmap_a != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_a = 0;
     }
     if(v->arch.arch_vmx.io_bitmap_b != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_b = 0;
     }
     v->arch.arch_vmx.vmcs = 0;
--- a/xen/arch/x86/domain_build.c	Thu Sep 08 15:22:01 2005 +0000
+++ b/xen/arch/x86/domain_build.c	Thu Sep 08 17:25:52 2005 +0000
@@ -75,15 +75,12 @@ static struct pfn_info *alloc_chunk(stru
     struct pfn_info *page;
     unsigned int order;
     /*
-     * Allocate up to 2MB at a time:
-     *  1. This prevents overflow of get_order() when allocating more than
-     *     4GB to domain 0 on a PAE machine.
-     *  2. It prevents allocating very large chunks from DMA pools before
-     *     the >4GB pool is fully depleted.
+     * Allocate up to 2MB at a time: It prevents allocating very large chunks
+     * from DMA pools before the >4GB pool is fully depleted.
      */
     if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
         max_pages = 2UL << (20 - PAGE_SHIFT);
-    order = get_order(max_pages << PAGE_SHIFT);
+    order = get_order_from_pages(max_pages);
     if ( (max_pages & (max_pages-1)) != 0 )
         order--;
     while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
@@ -252,7 +249,7 @@ int construct_dom0(struct domain *d,
 #endif
     }
 
-    order = get_order(v_end - dsi.v_start);
+    order = get_order_from_bytes(v_end - dsi.v_start);
     if ( (1UL << order) > nr_pages )
         panic("Domain 0 allocation is too small for kernel image.\n");
 
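Reason 1 of the old comment is dropped because it no longer applies: get_order(max_pages << PAGE_SHIFT) wraps a 32-bit unsigned long once domain 0 is given 4GB or more on a PAE machine, whereas get_order_from_pages(max_pages) never performs the shift. A self-contained illustration of the wrap, modelling the 32-bit unsigned long of x86-32 with uint32_t:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t max_pages = 1u << 20;    /* 4GB worth of 4K pages */
    uint32_t bytes = max_pages << 12; /* 1 << 32 wraps to 0 on 32-bit */
    printf("%u\n", bytes);            /* prints 0 */
    return 0;
}
```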
--- a/xen/arch/x86/vmx_vmcs.c	Thu Sep 08 15:22:01 2005 +0000
+++ b/xen/arch/x86/vmx_vmcs.c	Thu Sep 08 17:25:52 2005 +0000
@@ -44,7 +44,7 @@ struct vmcs_struct *alloc_vmcs(void)
 
     rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
     vmcs_size = vmx_msr_high & 0x1fff;
-    vmcs = alloc_xenheap_pages(get_order(vmcs_size));
+    vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size));
     memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
 
     vmcs->vmcs_revision_id = vmx_msr_low;
@@ -55,7 +55,7 @@ void free_vmcs(struct vmcs_struct *vmcs)
 {
     int order;
 
-    order = get_order(vmcs_size);
+    order = get_order_from_bytes(vmcs_size);
     free_xenheap_pages(vmcs, order);
 }
 
@@ -76,8 +76,8 @@ static inline int construct_vmcs_control
     error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
 
     /* need to use 0x1000 instead of PAGE_SIZE */
-    io_bitmap_a = (void*) alloc_xenheap_pages(get_order(0x1000));
-    io_bitmap_b = (void*) alloc_xenheap_pages(get_order(0x1000));
+    io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
+    io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
     memset(io_bitmap_a, 0xff, 0x1000);
     /* don't bother debug port access */
     clear_bit(PC_DEBUG_PORT, io_bitmap_a);
--- a/xen/arch/x86/x86_32/mm.c	Thu Sep 08 15:22:01 2005 +0000
+++ b/xen/arch/x86/x86_32/mm.c	Thu Sep 08 17:25:52 2005 +0000
@@ -118,7 +118,8 @@ void __init paging_init(void)
     }
 
     /* Set up mapping cache for domain pages. */
-    mapcache_order = get_order(MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
+    mapcache_order = get_order_from_bytes(
+        MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
     mapcache = alloc_xenheap_pages(mapcache_order);
     memset(mapcache, 0, PAGE_SIZE << mapcache_order);
     for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
--- a/xen/common/memory.c	Thu Sep 08 15:22:01 2005 +0000
+++ b/xen/common/memory.c	Thu Sep 08 17:25:52 2005 +0000
@@ -154,7 +154,8 @@ long do_memory_op(int cmd, void *arg)
     reservation.nr_extents -= start_extent;
 
     if ( (reservation.address_bits != 0) &&
-         (reservation.address_bits < (get_order(max_page) + PAGE_SHIFT)) )
+         (reservation.address_bits <
+          (get_order_from_pages(max_page) + PAGE_SHIFT)) )
     {
         if ( reservation.address_bits < 31 )
             return -ENOMEM;
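This is the "properly this time" part of the commit message: max_page is a page count, but the old code fed it to the byte-based get_order(). Worked example with 4KB pages (PAGE_SHIFT = 12) on a 4GB machine, where max_page is 2^20: get_order(max_page) treats the page count as bytes and yields 8, so the check compared address_bits against 8 + 12 = 20 bits; get_order_from_pages(max_page) yields 20, restoring the intended 20 + 12 = 32-bit threshold.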
--- a/xen/common/trace.c	Thu Sep 08 15:22:01 2005 +0000
+++ b/xen/common/trace.c	Thu Sep 08 17:25:52 2005 +0000
@@ -66,7 +66,7 @@ void init_trace_bufs(void)
     }
 
     nr_pages = num_online_cpus() * opt_tbuf_size;
-    order    = get_order(nr_pages * PAGE_SIZE);
+    order    = get_order_from_pages(nr_pages);
 
     if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
     {
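The trace-buffer sizing is the same units fix in miniature: nr_pages is already a page count, so the old code multiplied up to bytes only for get_order() to divide straight back down, and on a 32-bit build the nr_pages * PAGE_SIZE multiply could in principle wrap for very large opt_tbuf_size values. get_order_from_pages(nr_pages) states the intent directly.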
--- a/xen/common/xmalloc.c	Thu Sep 08 15:22:01 2005 +0000
+++ b/xen/common/xmalloc.c	Thu Sep 08 17:25:52 2005 +0000
@@ -86,7 +86,7 @@ static void *xmalloc_new_page(size_t siz
 static void *xmalloc_whole_pages(size_t size)
 {
     struct xmalloc_hdr *hdr;
-    unsigned int pageorder = get_order(size);
+    unsigned int pageorder = get_order_from_bytes(size);
 
     hdr = alloc_xenheap_pages(pageorder);
     if ( hdr == NULL )
@@ -159,7 +159,7 @@ void xfree(const void *p)
     /* Big allocs free directly. */
     if ( hdr->size >= PAGE_SIZE )
     {
-        free_xenheap_pages(hdr, get_order(hdr->size));
+        free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
         return;
     }
 
--- a/xen/drivers/char/console.c	Thu Sep 08 15:22:01 2005 +0000
+++ b/xen/drivers/char/console.c	Thu Sep 08 17:25:52 2005 +0000
@@ -627,7 +627,7 @@ static int __init debugtrace_init(void)
     if ( bytes == 0 )
         return 0;
 
-    order = get_order(bytes);
+    order = get_order_from_bytes(bytes);
     debugtrace_buf = alloc_xenheap_pages(order);
     ASSERT(debugtrace_buf != NULL);
 
--- a/xen/drivers/char/serial.c	Thu Sep 08 15:22:01 2005 +0000
+++ b/xen/drivers/char/serial.c	Thu Sep 08 17:25:52 2005 +0000
@@ -366,8 +366,9 @@ void serial_register_uart(int idx, struc
 void serial_async_transmit(struct serial_port *port)
 {
     BUG_ON(!port->driver->tx_empty);
-    if ( !port->txbuf )
-        port->txbuf = alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
+    if ( port->txbuf == NULL )
+        port->txbuf = alloc_xenheap_pages(
+            get_order_from_bytes(SERIAL_TXBUFSZ));
 }
 
 /*
--- a/xen/include/asm-x86/page.h	Thu Sep 08 15:22:01 2005 +0000
+++ b/xen/include/asm-x86/page.h	Thu Sep 08 17:25:52 2005 +0000
@@ -280,7 +280,7 @@ extern void paging_init(void);
 
 #ifndef __ASSEMBLY__
 
-static __inline__ int get_order(unsigned long size)
+static inline int get_order_from_bytes(physaddr_t size)
 {
     int order;
     size = (size-1) >> PAGE_SHIFT;
@@ -289,6 +289,15 @@ static __inline__ int get_order(unsigned
     return order;
 }
 
+static inline int get_order_from_pages(unsigned long nr_pages)
+{
+    int order;
+    nr_pages--;
+    for ( order = 0; nr_pages; order++ )
+        nr_pages >>= 1;
+    return order;
+}
+
 /* Allocator functions for Xen pagetables. */
 struct pfn_info *alloc_xen_pagetable(void);
 void free_xen_pagetable(struct pfn_info *pg);
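Both helpers round a request up to the next power of two: get_order_from_bytes(0x1000) is 0 (a single page) and get_order_from_bytes(0x1001) is 1, while get_order_from_pages(1) is 0 and get_order_from_pages(3) is 2. Neither is meaningful for an argument of 0, since the initial subtraction wraps around; presumably this is why callers such as debugtrace_init() above bail out early when bytes == 0. The switch from unsigned long to physaddr_t in the byte-based variant also lets it take sizes beyond 4GB on PAE, where physical addresses outgrow the 32-bit unsigned long.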