xen-vtx-unstable
changeset 6689:3bde4219c681
manual merge
| author | iap10@freefall.cl.cam.ac.uk |
|---|---|
| date | Thu Sep 08 17:40:37 2005 +0000 (2005-09-08) |
| parents | 5db85ba1c4e0 5321e0858b0d |
| children | aa0990ef260f |
| files | tools/libxc/xc_domain.c tools/libxc/xc_linux_build.c tools/libxc/xc_linux_restore.c tools/libxc/xc_private.c tools/libxc/xenctrl.h tools/python/xen/lowlevel/xc/xc.c tools/python/xen/xend/image.py xen/arch/x86/domain.c xen/arch/x86/domain_build.c xen/arch/x86/vmx_vmcs.c xen/arch/x86/x86_32/mm.c xen/common/grant_table.c xen/common/trace.c xen/common/xmalloc.c xen/drivers/char/console.c xen/drivers/char/serial.c xen/include/asm-x86/page.h |
line diff
```diff
--- a/xen/arch/x86/domain.c     Thu Sep 08 17:36:23 2005 +0000
+++ b/xen/arch/x86/domain.c     Thu Sep 08 17:40:37 2005 +0000
@@ -381,11 +381,13 @@ static int vmx_final_setup_guest(
 out:
     free_vmcs(vmcs);
     if(v->arch.arch_vmx.io_bitmap_a != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_a = 0;
     }
     if(v->arch.arch_vmx.io_bitmap_b != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_b = 0;
     }
     v->arch.arch_vmx.vmcs = 0;
@@ -972,11 +974,13 @@ static void vmx_relinquish_resources(str
     BUG_ON(v->arch.arch_vmx.vmcs == NULL);
     free_vmcs(v->arch.arch_vmx.vmcs);
     if(v->arch.arch_vmx.io_bitmap_a != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_a = 0;
     }
     if(v->arch.arch_vmx.io_bitmap_b != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_b = 0;
     }
     v->arch.arch_vmx.vmcs = 0;
```
```diff
--- a/xen/arch/x86/domain_build.c       Thu Sep 08 17:36:23 2005 +0000
+++ b/xen/arch/x86/domain_build.c       Thu Sep 08 17:40:37 2005 +0000
@@ -75,15 +75,12 @@ static struct pfn_info *alloc_chunk(stru
     struct pfn_info *page;
     unsigned int order;
     /*
-     * Allocate up to 2MB at a time:
-     *  1. This prevents overflow of get_order() when allocating more than
-     *     4GB to domain 0 on a PAE machine.
-     *  2. It prevents allocating very large chunks from DMA pools before
-     *     the >4GB pool is fully depleted.
+     * Allocate up to 2MB at a time: It prevents allocating very large chunks
+     * from DMA pools before the >4GB pool is fully depleted.
      */
     if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
         max_pages = 2UL << (20 - PAGE_SHIFT);
-    order = get_order(max_pages << PAGE_SHIFT);
+    order = get_order_from_pages(max_pages);
     if ( (max_pages & (max_pages-1)) != 0 )
         order--;
     while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
@@ -252,7 +249,7 @@ int construct_dom0(struct domain *d,
 #endif
     }
 
-    order = get_order(v_end - dsi.v_start);
+    order = get_order_from_bytes(v_end - dsi.v_start);
     if ( (1UL << order) > nr_pages )
         panic("Domain 0 allocation is too small for kernel image.\n");
```
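The deleted comment's first rationale is now moot: get_order_from_pages() takes a page count directly, so the old `max_pages << PAGE_SHIFT` byte conversion can no longer wrap on a PAE machine with more than 4GB given to domain 0. The `order--` line is worth a worked example: the helper rounds *up* to the next power of two, and the decrement rounds back *down* whenever max_pages is not itself a power of two, so the chunk never exceeds the request. A standalone sketch (the helper body is an assumption modelled on the fragment visible in the asm-x86/page.h hunk at the end of this diff, not copied from the changeset):

```c
/* Sketch only: mirrors Xen's get_order_from_pages() by assumption. */
#include <stdio.h>

static unsigned int get_order_from_pages(unsigned long nr_pages)
{
    unsigned int order = 0;
    nr_pages--;                 /* round up to the covering power of two */
    while ( nr_pages != 0 )
    {
        nr_pages >>= 1;
        order++;
    }
    return order;
}

int main(void)
{
    unsigned long max_pages = 300;   /* example: not a power of two */
    unsigned int order = get_order_from_pages(max_pages); /* 9 => 512 pages */

    if ( (max_pages & (max_pages - 1)) != 0 )
        order--;                     /* 8 => 256 pages, which fits in 300 */

    printf("chunk order %u = %lu pages (limit %lu)\n",
           order, 1UL << order, max_pages);
    return 0;
}
```

For max_pages = 300 this prints order 8, a 256-page chunk, rather than the 512-page chunk the rounded-up order 9 would have requested.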
```diff
--- a/xen/arch/x86/vmx_vmcs.c   Thu Sep 08 17:36:23 2005 +0000
+++ b/xen/arch/x86/vmx_vmcs.c   Thu Sep 08 17:40:37 2005 +0000
@@ -44,7 +44,7 @@ struct vmcs_struct *alloc_vmcs(void)
 
     rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
     vmcs_size = vmx_msr_high & 0x1fff;
-    vmcs = alloc_xenheap_pages(get_order(vmcs_size));
+    vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size));
     memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
 
     vmcs->vmcs_revision_id = vmx_msr_low;
@@ -55,7 +55,7 @@ void free_vmcs(struct vmcs_struct *vmcs)
 {
     int order;
 
-    order = get_order(vmcs_size);
+    order = get_order_from_bytes(vmcs_size);
     free_xenheap_pages(vmcs, order);
 }
 
@@ -76,8 +76,8 @@ static inline int construct_vmcs_control
     error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
 
     /* need to use 0x1000 instead of PAGE_SIZE */
-    io_bitmap_a = (void*) alloc_xenheap_pages(get_order(0x1000));
-    io_bitmap_b = (void*) alloc_xenheap_pages(get_order(0x1000));
+    io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
+    io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
     memset(io_bitmap_a, 0xff, 0x1000);
     /* don't bother debug port access */
     clear_bit(PC_DEBUG_PORT, io_bitmap_a);
```
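For context on the alloc_vmcs() hunk: per the Intel SDM, bits 44:32 of the IA32_VMX_BASIC MSR report the VMCS region size in bytes, which is why the code masks the high dword of the rdmsr() result with 0x1fff (13 bits) and uses the low dword as the revision identifier. A minimal decoding sketch with a hypothetical MSR value, not read from hardware:

```c
/* Sketch: decode IA32_VMX_BASIC the way alloc_vmcs() does.
 * bits 31:0  -> VMCS revision identifier
 * bits 44:32 -> VMCS region size in bytes */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t vmx_basic    = 0x0000100000000001ULL;       /* hypothetical */
    uint32_t vmx_msr_low  = (uint32_t)vmx_basic;         /* rdmsr low dword */
    uint32_t vmx_msr_high = (uint32_t)(vmx_basic >> 32); /* rdmsr high dword */

    uint32_t vmcs_size = vmx_msr_high & 0x1fff;  /* here: 4096 bytes */
    uint32_t revision  = vmx_msr_low;            /* here: 1 */

    printf("VMCS revision %#x, region size %u bytes\n", revision, vmcs_size);
    return 0;
}
```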
```diff
--- a/xen/arch/x86/x86_32/mm.c  Thu Sep 08 17:36:23 2005 +0000
+++ b/xen/arch/x86/x86_32/mm.c  Thu Sep 08 17:40:37 2005 +0000
@@ -118,7 +118,8 @@ void __init paging_init(void)
     }
 
     /* Set up mapping cache for domain pages. */
-    mapcache_order = get_order(MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
+    mapcache_order = get_order_from_bytes(
+        MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
     mapcache = alloc_xenheap_pages(mapcache_order);
     memset(mapcache, 0, PAGE_SIZE << mapcache_order);
     for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
```
```diff
--- a/xen/common/grant_table.c  Thu Sep 08 17:36:23 2005 +0000
+++ b/xen/common/grant_table.c  Thu Sep 08 17:40:37 2005 +0000
@@ -399,7 +399,7 @@ static int
 {
     int                   i;
     grant_mapping_t      *new_mt;
-    grant_table_t        *lgt      = ld->grant_table;
+    grant_table_t        *lgt = ld->grant_table;
 
     if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
     {
@@ -437,9 +437,8 @@ static int
                 ref, dom, dev_hst_ro_flags);
 #endif
 
-    if ( 0 <= ( rc = __gnttab_activate_grant_ref( ld, led, rd, ref,
-                                                  dev_hst_ro_flags,
-                                                  addr, &frame)))
+    if ( (rc = __gnttab_activate_grant_ref(ld, led, rd, ref, dev_hst_ro_flags,
+                                           addr, &frame)) >= 0 )
     {
         /*
          * Only make the maptrack live _after_ writing the pte, in case we
@@ -807,7 +806,8 @@ gnttab_donate(gnttab_donate_t *uop, unsi
     int i;
     int result = GNTST_okay;
 
-    for (i = 0; i < count; i++) {
+    for ( i = 0; i < count; i++ )
+    {
         gnttab_donate_t *gop = &uop[i];
 #if GRANT_DEBUG
         printk("gnttab_donate: i=%d mfn=%lx domid=%d gref=%08x\n",
@@ -881,30 +881,6 @@ gnttab_donate(gnttab_donate_t *uop, unsi
          * headroom. Also, a domain mustn't have PGC_allocated
          * pages when it is dying.
          */
-#ifdef GRANT_DEBUG
-        if (unlikely(e->tot_pages >= e->max_pages)) {
-            printk("gnttab_dontate: no headroom tot_pages=%d max_pages=%d\n",
-                   e->tot_pages, e->max_pages);
-            spin_unlock(&e->page_alloc_lock);
-            put_domain(e);
-            gop->status = result = GNTST_general_error;
-            break;
-        }
-        if (unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags))) {
-            printk("gnttab_donate: target domain is dying\n");
-            spin_unlock(&e->page_alloc_lock);
-            put_domain(e);
-            gop->status = result = GNTST_general_error;
-            break;
-        }
-        if (unlikely(!gnttab_prepare_for_transfer(e, d, gop->handle))) {
-            printk("gnttab_donate: gnttab_prepare_for_transfer fails.\n");
-            spin_unlock(&e->page_alloc_lock);
-            put_domain(e);
-            gop->status = result = GNTST_general_error;
-            break;
-        }
-#else
         ASSERT(e->tot_pages <= e->max_pages);
         if (unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags)) ||
             unlikely(e->tot_pages == e->max_pages) ||
@@ -914,11 +890,10 @@ gnttab_donate(gnttab_donate_t *uop, unsi
                    e->tot_pages, e->max_pages, gop->handle, e->d_flags);
             spin_unlock(&e->page_alloc_lock);
             put_domain(e);
-            /* XXX SMH: better error return here would be useful */
             gop->status = result = GNTST_general_error;
             break;
         }
-#endif
+
         /* Okay, add the page to 'e'. */
         if (unlikely(e->tot_pages++ == 0)) {
             get_knownalive_domain(e);
@@ -957,38 +932,38 @@ do_grant_table_op(
 
     rc = -EFAULT;
     switch ( cmd )
-        {
-        case GNTTABOP_map_grant_ref:
-            if ( unlikely(!array_access_ok(
-                uop, count, sizeof(gnttab_map_grant_ref_t))) )
-                goto out;
-            rc = gnttab_map_grant_ref((gnttab_map_grant_ref_t *)uop, count);
-            break;
-        case GNTTABOP_unmap_grant_ref:
-            if ( unlikely(!array_access_ok(
-                uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
-                goto out;
-            rc = gnttab_unmap_grant_ref((gnttab_unmap_grant_ref_t *)uop,
-                                        count);
-            break;
-        case GNTTABOP_setup_table:
-            rc = gnttab_setup_table((gnttab_setup_table_t *)uop, count);
-            break;
+    {
+    case GNTTABOP_map_grant_ref:
+        if ( unlikely(!array_access_ok(
+            uop, count, sizeof(gnttab_map_grant_ref_t))) )
+            goto out;
+        rc = gnttab_map_grant_ref((gnttab_map_grant_ref_t *)uop, count);
+        break;
+    case GNTTABOP_unmap_grant_ref:
+        if ( unlikely(!array_access_ok(
+            uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
+            goto out;
+        rc = gnttab_unmap_grant_ref(
+            (gnttab_unmap_grant_ref_t *)uop, count);
+        break;
+    case GNTTABOP_setup_table:
+        rc = gnttab_setup_table((gnttab_setup_table_t *)uop, count);
+        break;
 #if GRANT_DEBUG
-        case GNTTABOP_dump_table:
-            rc = gnttab_dump_table((gnttab_dump_table_t *)uop);
-            break;
+    case GNTTABOP_dump_table:
+        rc = gnttab_dump_table((gnttab_dump_table_t *)uop);
+        break;
 #endif
-        case GNTTABOP_donate:
-            if (unlikely(!array_access_ok(uop, count,
-                                          sizeof(gnttab_donate_t))))
-                goto out;
-            rc = gnttab_donate(uop, count);
-            break;
-        default:
-            rc = -ENOSYS;
-            break;
-        }
+    case GNTTABOP_donate:
+        if (unlikely(!array_access_ok(
+            uop, count, sizeof(gnttab_donate_t))))
+            goto out;
+        rc = gnttab_donate(uop, count);
+        break;
+    default:
+        rc = -ENOSYS;
+        break;
+    }
 
 out:
     UNLOCK_BIGLOCK(d);
@@ -1021,17 +996,17 @@ gnttab_check_unmap(
     lgt = ld->grant_table;
 
 #if GRANT_DEBUG_VERBOSE
-    if ( ld->domain_id != 0 ) {
-        DPRINTK("Foreign unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
-                rd->domain_id, ld->domain_id, frame, readonly);
-    }
+    if ( ld->domain_id != 0 )
+        DPRINTK("Foreign unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
+                rd->domain_id, ld->domain_id, frame, readonly);
 #endif
 
     /* Fast exit if we're not mapping anything using grant tables */
     if ( lgt->map_count == 0 )
         return 0;
 
-    if ( get_domain(rd) == 0 ) {
+    if ( get_domain(rd) == 0 )
+    {
         DPRINTK("gnttab_check_unmap: couldn't get_domain rd(%d)\n",
                 rd->domain_id);
         return 0;
@@ -1268,8 +1243,11 @@ grant_table_create(
     for ( i = 0; i < NR_GRANT_FRAMES; i++ )
     {
         SHARE_PFN_WITH_DOMAIN(
-            virt_to_page((char *)(t->shared)+(i*PAGE_SIZE)), d);
-        set_pfn_from_mfn((virt_to_phys(t->shared) >> PAGE_SHIFT) + i, INVALID_M2P_ENTRY);
+            virt_to_page((char *)t->shared + (i * PAGE_SIZE)),
+            d);
+        set_pfn_from_mfn(
+            (virt_to_phys(t->shared) >> PAGE_SHIFT) + i,
+            INVALID_M2P_ENTRY);
     }
 
     /* Okay, install the structure. */
@@ -1306,57 +1284,53 @@ gnttab_release_dev_mappings(grant_table_
     {
         map = &gt->maptrack[handle];
 
-        if ( map->ref_and_flags & GNTMAP_device_map )
-        {
-            dom = map->domid;
-            ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
+        if ( !(map->ref_and_flags & GNTMAP_device_map) )
+            continue;
 
-            DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
-                    handle, ref,
-                    map->ref_and_flags & MAPTRACK_GNTMAP_MASK, dom);
+        dom = map->domid;
+        ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
+
+        DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
+                handle, ref, map->ref_and_flags & MAPTRACK_GNTMAP_MASK, dom);
 
-            if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
-                 unlikely(ld == rd) )
+        if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
+             unlikely(ld == rd) )
+        {
+            if ( rd != NULL )
+                put_domain(rd);
+            printk(KERN_WARNING "Grant release: No dom%d\n", dom);
+            continue;
+        }
+
+        act = &rd->grant_table->active[ref];
+        sha = &rd->grant_table->shared[ref];
+
+        spin_lock(&rd->grant_table->lock);
+
+        if ( act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask) )
+        {
+            frame = act->frame;
+
+            if ( ( (act->pin & GNTPIN_hstw_mask) == 0 ) &&
+                 ( (act->pin & GNTPIN_devw_mask) > 0 ) )
             {
-                if ( rd != NULL )
-                    put_domain(rd);
-
-                printk(KERN_WARNING "Grant release: No dom%d\n", dom);
-                continue;
+                clear_bit(_GTF_writing, &sha->flags);
+                put_page_type(&frame_table[frame]);
             }
 
-            act = &rd->grant_table->active[ref];
-            sha = &rd->grant_table->shared[ref];
-
-            spin_lock(&rd->grant_table->lock);
-
-            if ( act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask) )
+            map->ref_and_flags &= ~GNTMAP_device_map;
+            act->pin &= ~(GNTPIN_devw_mask | GNTPIN_devr_mask);
+            if ( act->pin == 0 )
            {
-                frame = act->frame;
-
-                if ( ( (act->pin & GNTPIN_hstw_mask) == 0 ) &&
-                     ( (act->pin & GNTPIN_devw_mask) > 0 ) )
-                {
-                    clear_bit(_GTF_writing, &sha->flags);
-                    put_page_type(&frame_table[frame]);
-                }
+                clear_bit(_GTF_reading, &sha->flags);
+                map->ref_and_flags = 0;
+                put_page(&frame_table[frame]);
+            }
+        }
 
-                act->pin &= ~(GNTPIN_devw_mask | GNTPIN_devr_mask);
+        spin_unlock(&rd->grant_table->lock);
 
-                if ( act->pin == 0 )
-                {
-                    clear_bit(_GTF_reading, &sha->flags);
-                    map->ref_and_flags = 0;
-                    put_page(&frame_table[frame]);
-                }
-                else
-                    map->ref_and_flags &= ~GNTMAP_device_map;
-            }
-
-            spin_unlock(&rd->grant_table->lock);
-
-            put_domain(rd);
-        }
+        put_domain(rd);
     }
 }
```
```diff
--- a/xen/common/trace.c        Thu Sep 08 17:36:23 2005 +0000
+++ b/xen/common/trace.c        Thu Sep 08 17:40:37 2005 +0000
@@ -66,7 +66,7 @@ void init_trace_bufs(void)
     }
 
     nr_pages = num_online_cpus() * opt_tbuf_size;
-    order    = get_order(nr_pages * PAGE_SIZE);
+    order    = get_order_from_pages(nr_pages);
 
     if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
     {
```
```diff
--- a/xen/common/xmalloc.c      Thu Sep 08 17:36:23 2005 +0000
+++ b/xen/common/xmalloc.c      Thu Sep 08 17:40:37 2005 +0000
@@ -86,7 +86,7 @@ static void *xmalloc_new_page(size_t siz
 static void *xmalloc_whole_pages(size_t size)
 {
     struct xmalloc_hdr *hdr;
-    unsigned int pageorder = get_order(size);
+    unsigned int pageorder = get_order_from_bytes(size);
 
     hdr = alloc_xenheap_pages(pageorder);
     if ( hdr == NULL )
@@ -159,7 +159,7 @@ void xfree(const void *p)
     /* Big allocs free directly. */
     if ( hdr->size >= PAGE_SIZE )
     {
-        free_xenheap_pages(hdr, get_order(hdr->size));
+        free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
         return;
     }
```
```diff
--- a/xen/drivers/char/console.c        Thu Sep 08 17:36:23 2005 +0000
+++ b/xen/drivers/char/console.c        Thu Sep 08 17:40:37 2005 +0000
@@ -627,7 +627,7 @@ static int __init debugtrace_init(void)
     if ( bytes == 0 )
         return 0;
 
-    order = get_order(bytes);
+    order = get_order_from_bytes(bytes);
     debugtrace_buf = alloc_xenheap_pages(order);
     ASSERT(debugtrace_buf != NULL);
```
```diff
--- a/xen/drivers/char/serial.c Thu Sep 08 17:36:23 2005 +0000
+++ b/xen/drivers/char/serial.c Thu Sep 08 17:40:37 2005 +0000
@@ -366,8 +366,9 @@ void serial_register_uart(int idx, struc
 void serial_async_transmit(struct serial_port *port)
 {
     BUG_ON(!port->driver->tx_empty);
-    if ( !port->txbuf )
-        port->txbuf = alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
+    if ( port->txbuf == NULL )
+        port->txbuf = alloc_xenheap_pages(
+            get_order_from_bytes(SERIAL_TXBUFSZ));
 }
 
 /*
```
```diff
--- a/xen/include/asm-x86/page.h        Thu Sep 08 17:36:23 2005 +0000
+++ b/xen/include/asm-x86/page.h        Thu Sep 08 17:40:37 2005 +0000
@@ -298,8 +298,6 @@ static inline int get_order_from_pages(u
     return order;
 }
 
-#define get_order(s) get_order_from_bytes(s)
-
 /* Allocator functions for Xen pagetables. */
 struct pfn_info *alloc_xen_pagetable(void);
 void free_xen_pagetable(struct pfn_info *pg);
```
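That last hunk is the point of the whole changeset: with the `get_order(s)` alias gone, every caller must state whether it is converting a byte count or a page count into an allocation order, which is exactly what the edits above do file by file. Only the tail of get_order_from_pages() is visible in the hunk; for orientation, here is a sketch of how the two helpers plausibly read in asm-x86/page.h (bodies assumed, not shown in this diff):

```c
/* Sketch, bodies assumed: both helpers round the request up to the
 * smallest order (power-of-two number of pages) that covers it. */
#ifndef PAGE_SHIFT
#define PAGE_SHIFT 12               /* 4 KiB pages on x86 */
#endif

static inline int get_order_from_bytes(unsigned long size)
{
    int order;
    size = (size - 1) >> PAGE_SHIFT;    /* bytes -> highest page index */
    for ( order = 0; size; order++ )
        size >>= 1;
    return order;
}

static inline int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;
    return order;
}
```

The split also closes an overflow hazard: a caller like trace.c used to compute `get_order(nr_pages * PAGE_SIZE)`, and that multiplication could wrap on a 32-bit build before the order was taken; passing the page count straight to get_order_from_pages() avoids the intermediate byte value entirely.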