debuggers.hg
changeset 9817:827c65c06a66
[IA64] Add memory operations for xen/ia64
This patch removes an ugly hack around memory operations and makes the
increase/decrease_reservation ops available for xen/ia64.
As a result:
  * We now conform to the common sequence in which all domain pages are
    allocated together by increase_reservation before the image builder
    runs (see the sketch after this message)
  * The physmap table is now built at the point of arch_set_info_guest
  * DOM0_GETMEMLIST now only queries pages, without allocating them
After this patch, several long-standing hacks caused by the mismatched
memory interface are cleaned up. This is also a base step toward
introducing the balloon feature on xen/ia64.
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
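For context, a minimal sketch of the build sequence this changeset moves xen/ia64 onto: reserve every domain page first, then run the image builder against the already-populated domain. This is not code from the changeset; the name and signature of `xc_domain_memory_increase_reservation` are assumptions modeled on the libxc of this era, and `load_guest_image` is a hypothetical stand-in for the image builder.

```c
/*
 * Sketch of the new build ordering, NOT actual xend/libxc code.
 * xc_domain_memory_increase_reservation's signature is an assumption;
 * load_guest_image is a hypothetical stand-in for the image builder.
 */
#include <stdint.h>

/* assumed libxc entry point (not defined in this changeset) */
extern int xc_domain_memory_increase_reservation(
    int xc_handle, uint32_t domid, unsigned long nr_extents,
    unsigned int extent_order, unsigned int address_bits,
    unsigned long *extent_start);

/* hypothetical stand-in for the firmware/kernel image builder */
extern int load_guest_image(int xc_handle, uint32_t domid);

int build_domain(int xc_handle, uint32_t domid, unsigned long nr_pages)
{
    /* 1. Allocate all domain pages up front via increase_reservation. */
    if (xc_domain_memory_increase_reservation(xc_handle, domid,
                                              nr_pages, 0, 0, NULL) != 0)
        return -1;

    /* 2. Only then run the image builder, which copies firmware and
     *    kernel into the pages that already back the domain. */
    return load_guest_image(xc_handle, domid);
}
```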
| author | awilliam@xenbuild.aw |
|---|---|
| date | Tue Apr 04 09:43:41 2006 -0600 (2006-04-04) |
| parents | 0a6f5527ca4b |
| children | 55e8f512fed5 |
| files | tools/ioemu/vl.c tools/libxc/xc_ia64_stubs.c xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/xen/dom0_ops.c xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/hypercall.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/vmx.h |
line diff
```diff
--- a/tools/ioemu/vl.c	Tue Apr 04 09:39:45 2006 -0600
+++ b/tools/ioemu/vl.c	Tue Apr 04 09:43:41 2006 -0600
@@ -3226,7 +3226,8 @@ int main(int argc, char **argv)
     }
 
     if ( xc_ia64_get_pfn_list(xc_handle, domid,
-                page_array, IO_PAGE_START >> PAGE_SHIFT, 1) != 1 )
+                page_array,
+                ram_pages + (GFW_SIZE >> PAGE_SHIFT), 1) != 1 )
     {
         fprintf(logfile, "xc_ia64_get_pfn_list returned error %d\n", errno);
         exit(-1);
```
```diff
--- a/tools/libxc/xc_ia64_stubs.c	Tue Apr 04 09:39:45 2006 -0600
+++ b/tools/libxc/xc_ia64_stubs.c	Tue Apr 04 09:43:41 2006 -0600
@@ -101,7 +101,7 @@ int xc_ia64_copy_to_domain_pages(int xc_
         goto error_out;
     }
     if ( xc_ia64_get_pfn_list(xc_handle, domid, page_array,
-                dst_pfn>>PAGE_SHIFT, nr_pages) != nr_pages ){
+                dst_pfn, nr_pages) != nr_pages ){
         PERROR("Could not get the page frame list");
         goto error_out;
     }
@@ -121,10 +121,17 @@ error_out:
 
 
 #define HOB_SIGNATURE 0x3436474953424f48 // "HOBSIG64"
-#define GFW_HOB_START ((4UL<<30)-(14UL<<20)) //4G -14M
-#define GFW_HOB_SIZE  (1UL<<20)              //1M
-#define MEM_G  (1UL << 30)
-#define MEM_M  (1UL << 20)
+#define GFW_HOB_START ((4UL<<30)-(14UL<<20)) //4G -14M
+#define GFW_HOB_SIZE  (1UL<<20)              //1M
+#define RAW_GFW_START_NR(s) ((s) >> PAGE_SHIFT)
+#define RAW_GFW_HOB_START_NR(s) \
+    (RAW_GFW_START_NR(s) + ((GFW_HOB_START - GFW_START) >> PAGE_SHIFT))
+#define RAW_GFW_IMAGE_START_NR(s,i) \
+    (RAW_GFW_START_NR(s) + (((GFW_SIZE - (i))) >> PAGE_SHIFT))
+#define RAW_IO_PAGE_START_NR(s) \
+    (RAW_GFW_START_NR(s) + (GFW_SIZE >> PAGE_SHIFT))
+#define RAW_STORE_PAGE_START_NR(s) \
+    (RAW_IO_PAGE_START_NR(s) + (IO_PAGE_SIZE >> PAGE_SHFIT))
 
 typedef struct {
     unsigned long signature;
@@ -179,7 +186,8 @@ static int add_pal_hob(void* hob_buf);
 static int add_mem_hob(void* hob_buf, unsigned long dom_mem_size);
 static int build_hob (void* hob_buf, unsigned long hob_buf_size,
         unsigned long dom_mem_size);
-static int load_hob(int xc_handle,uint32_t dom, void *hob_buf);
+static int load_hob(int xc_handle,uint32_t dom, void *hob_buf,
+        unsigned long dom_mem_size);
 
 int xc_ia64_build_hob(int xc_handle, uint32_t dom, unsigned long memsize){
 
@@ -191,13 +199,13 @@ int xc_ia64_build_hob(int xc_handle, uin
         return -1;
     }
 
-    if ( build_hob( hob_buf, GFW_HOB_SIZE, memsize<<20) < 0){
+    if ( build_hob( hob_buf, GFW_HOB_SIZE, memsize) < 0){
         free (hob_buf);
         PERROR("Could not build hob");
         return -1;
     }
 
-    if ( load_hob( xc_handle, dom, hob_buf) <0){
+    if ( load_hob( xc_handle, dom, hob_buf, memsize) < 0){
         free (hob_buf);
         PERROR("Could not load hob");
         return -1;
@@ -317,7 +325,8 @@ err_out:
 }
 
 static int
-load_hob(int xc_handle, uint32_t dom, void *hob_buf)
+load_hob(int xc_handle, uint32_t dom, void *hob_buf,
+        unsigned long dom_mem_size)
 {
     // hob_buf should be page aligned
     int hob_size;
@@ -336,7 +345,7 @@ load_hob(int xc_handle, uint32_t dom, vo
     nr_pages = (hob_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 
     return xc_ia64_copy_to_domain_pages(xc_handle, dom,
-            hob_buf, GFW_HOB_START, nr_pages );
+            hob_buf, RAW_GFW_HOB_START_NR(dom_mem_size), nr_pages );
 }
 
 #define MIN(x, y) ((x) < (y)) ? (x) : (y)
@@ -576,13 +585,8 @@ static int setup_guest( int xc_handle,
     unsigned long page_array[2];
     shared_iopage_t *sp;
     int i;
+    unsigned long dom_memsize = (memsize << 20);
 
-    // FIXME: initialize pfn list for a temp hack
-    if (xc_ia64_get_pfn_list(xc_handle, dom, NULL, -1, -1) == -1) {
-        PERROR("Could not allocate continuous memory");
-        goto error_out;
-    }
-
    if ((image_size > 12 * MEM_M) || (image_size & (PAGE_SIZE - 1))) {
        PERROR("Guest firmware size is incorrect [%ld]?", image_size);
        return -1;
@@ -590,19 +594,21 @@ static int setup_guest( int xc_handle,
 
    /* Load guest firmware */
    if( xc_ia64_copy_to_domain_pages( xc_handle, dom,
-           image, 4*MEM_G-image_size, image_size>>PAGE_SHIFT)) {
+           image, RAW_GFW_IMAGE_START_NR(dom_memsize, image_size),
+           image_size>>PAGE_SHIFT)) {
        PERROR("Could not load guest firmware into domain");
        goto error_out;
    }
 
    /* Hand-off state passed to guest firmware */
-   if (xc_ia64_build_hob(xc_handle, dom, memsize) < 0){
+   if (xc_ia64_build_hob(xc_handle, dom, dom_memsize) < 0){
        PERROR("Could not build hob\n");
        goto error_out;
    }
 
    /* Retrieve special pages like io, xenstore, etc. */
-   if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array, IO_PAGE_START>>PAGE_SHIFT, 2) != 2 )
+   if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array,
+           RAW_IO_PAGE_START_NR(dom_memsize), 2) != 2 )
    {
        PERROR("Could not get the page frame list");
        goto error_out;
```
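The `RAW_*_NR` macros translate a domain memory size into an index into the domain's raw page list, where guest RAM comes first, then the firmware region, then the special pages. A standalone illustration follows; the concrete values for `PAGE_SHIFT` (14, i.e. 16KB pages), `GFW_START` (4G - 16M), `GFW_SIZE` (16M) and `IO_PAGE_SIZE` (one page) are assumptions about the ia64 headers, not taken from this diff. Note the sketch spells `PAGE_SHIFT` correctly, whereas the diff's `RAW_STORE_PAGE_START_NR` carries a `PAGE_SHFIT` typo.

```c
/* Standalone demo of the RAW_*_NR index arithmetic.  All constant
 * values below are ASSUMED ia64 values, not part of this changeset. */
#include <stdio.h>

#define PAGE_SHIFT      14UL                      /* assumed: 16KB pages */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define MEM_G           (1UL << 30)
#define MEM_M           (1UL << 20)
#define GFW_START       (4 * MEM_G - 16 * MEM_M)  /* assumed */
#define GFW_SIZE        (16 * MEM_M)              /* assumed */
#define GFW_HOB_START   (4 * MEM_G - 14 * MEM_M)  /* as in the diff: 4G-14M */
#define IO_PAGE_SIZE    PAGE_SIZE                 /* assumed: one page */

#define RAW_GFW_START_NR(s)       ((s) >> PAGE_SHIFT)
#define RAW_GFW_HOB_START_NR(s)   \
    (RAW_GFW_START_NR(s) + ((GFW_HOB_START - GFW_START) >> PAGE_SHIFT))
#define RAW_IO_PAGE_START_NR(s)   \
    (RAW_GFW_START_NR(s) + (GFW_SIZE >> PAGE_SHIFT))
#define RAW_STORE_PAGE_START_NR(s) \
    (RAW_IO_PAGE_START_NR(s) + (IO_PAGE_SIZE >> PAGE_SHIFT))

int main(void)
{
    unsigned long dom_mem_size = 256 * MEM_M;  /* hypothetical 256MB domain */

    /* RAM occupies raw pages [0, dom_mem_size >> PAGE_SHIFT) ... */
    printf("firmware starts at raw page  %lu\n", RAW_GFW_START_NR(dom_mem_size));
    printf("HOB starts at raw page       %lu\n", RAW_GFW_HOB_START_NR(dom_mem_size));
    printf("I/O page at raw page         %lu\n", RAW_IO_PAGE_START_NR(dom_mem_size));
    printf("xenstore page at raw page    %lu\n", RAW_STORE_PAGE_START_NR(dom_mem_size));
    return 0;
}
```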
```diff
--- a/xen/arch/ia64/vmx/vmx_init.c	Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_init.c	Tue Apr 04 09:43:41 2006 -0600
@@ -327,13 +327,15 @@ io_range_t io_ranges[] = {
 #define VMX_SYS_PAGES	(2 + (GFW_SIZE >> PAGE_SHIFT))
 #define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
 
-int vmx_alloc_contig_pages(struct domain *d)
+int vmx_build_physmap_table(struct domain *d)
 {
-	unsigned long i, j, start,tmp, end, pgnr, conf_nr;
-	struct page_info *page;
+	unsigned long i, j, start, tmp, end, mfn;
 	struct vcpu *v = d->vcpu[0];
+	struct list_head *list_ent = d->page_list.next;
 
+	ASSERT(!d->arch.physmap_built);
 	ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
+	ASSERT(d->max_pages == d->tot_pages);
 
 	/* Mark I/O ranges */
 	for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
@@ -343,103 +345,54 @@ int vmx_alloc_contig_pages(struct domain
 		assign_domain_page(d, j, io_ranges[i].type);
 	}
 
-	conf_nr = VMX_CONFIG_PAGES(d);
-	if((conf_nr<<PAGE_SHIFT)<(1UL<<(_PAGE_SIZE_64M+1)))
-		panic("vti domain needs 128M memory at least\n");
-/*
-	order = get_order_from_pages(conf_nr);
-	if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
-		printk("Could not allocate order=%d pages for vmx contig alloc\n",
-			order);
-		return -1;
+	/* Map normal memory below 3G */
+	end = VMX_CONFIG_PAGES(d) << PAGE_SHIFT;
+	tmp = end < MMIO_START ? end : MMIO_START;
+	for (i = 0; (i < tmp) && (list_ent != &d->page_list); i += PAGE_SIZE) {
+		mfn = page_to_mfn(list_entry(
+			list_ent, struct page_info, list));
+		assign_domain_page(d, i, mfn << PAGE_SHIFT);
+		list_ent = mfn_to_page(mfn)->list.next;
 	}
-*/
-
-/* reserve contiguous 64M for linux kernel */
+	ASSERT(list_ent != &d->page_list);
 
-	if (unlikely((page = alloc_domheap_pages(d,(KERNEL_TR_PAGE_SHIFT-PAGE_SHIFT), 0)) == NULL)) {
-		printk("No enough memory for vti domain!!!\n");
-		return -1;
-	}
-	pgnr = page_to_mfn(page);
-	for (i=(1UL<<KERNEL_TR_PAGE_SHIFT);i<(1UL<<(KERNEL_TR_PAGE_SHIFT+1));i+=PAGE_SIZE,pgnr++){
-		assign_domain_page(d, i, pgnr << PAGE_SHIFT);
-	}
-
-	for (i = 0; i < (1UL<<KERNEL_TR_PAGE_SHIFT) ; i += PAGE_SIZE){
-		if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-			printk("No enough memory for vti domain!!!\n");
-			return -1;
-		}
-		pgnr = page_to_mfn(page);
-		assign_domain_page(d, i, pgnr << PAGE_SHIFT);
-	}
-
-	/* Map normal memory below 3G */
-	end = conf_nr << PAGE_SHIFT;
-	tmp = end < MMIO_START ? end : MMIO_START;
-	for (i = (1UL<<(KERNEL_TR_PAGE_SHIFT+1)); i < tmp; i += PAGE_SIZE){
-		if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-			printk("No enough memory for vti domain!!!\n");
-			return -1;
-		}
-		pgnr = page_to_mfn(page);
-		assign_domain_page(d, i, pgnr << PAGE_SHIFT);
-	}
 	/* Map normal memory beyond 4G */
 	if (unlikely(end > MMIO_START)) {
 		start = 4 * MEM_G;
 		end = start + (end - 3 * MEM_G);
-		for (i = start; i < end; i += PAGE_SIZE){
-			if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-				printk("No enough memory for vti domain!!!\n");
-				return -1;
-			}
-			pgnr = page_to_mfn(page);
-			assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+		for (i = start; (i < end) &&
+		     (list_ent != &d->page_list); i += PAGE_SIZE) {
+			mfn = page_to_mfn(list_entry(
+				list_ent, struct page_info, list));
+			assign_domain_page(d, i, mfn << PAGE_SHIFT);
+			list_ent = mfn_to_page(mfn)->list.next;
+		}
+		ASSERT(list_ent != &d->page_list);
 	}
+
+	/* Map guest firmware */
+	for (i = GFW_START; (i < GFW_START + GFW_SIZE) &&
+	     (list_ent != &d->page_list); i += PAGE_SIZE) {
+		mfn = page_to_mfn(list_entry(
+			list_ent, struct page_info, list));
+		assign_domain_page(d, i, mfn << PAGE_SHIFT);
+		list_ent = mfn_to_page(mfn)->list.next;
 	}
+	ASSERT(list_ent != &d->page_list);
+
+	/* Map for shared I/O page and xenstore */
+	mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
+	assign_domain_page(d, IO_PAGE_START, mfn << PAGE_SHIFT);
+	list_ent = mfn_to_page(mfn)->list.next;
+	ASSERT(list_ent != &d->page_list);
+
+	mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
+	assign_domain_page(d, STORE_PAGE_START, mfn << PAGE_SHIFT);
+	list_ent = mfn_to_page(mfn)->list.next;
+	ASSERT(list_ent == &d->page_list);
 
 	d->arch.max_pfn = end >> PAGE_SHIFT;
-/*
-	order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT);
-	if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
-		printk("Could not allocate order=%d pages for vmx contig alloc\n",
-			order);
-		return -1;
-	}
-*/
-	/* Map guest firmware */
-	for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++){
-		if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-			printk("No enough memory for vti domain!!!\n");
-			return -1;
-		}
-		pgnr = page_to_mfn(page);
-		assign_domain_page(d, i, pgnr << PAGE_SHIFT);
-	}
-
-/*
-	if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
-		printk("Could not allocate order=1 pages for vmx contig alloc\n");
-		return -1;
	}
-*/
-	/* Map for shared I/O page and xenstore */
-	if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-		printk("No enough memory for vti domain!!!\n");
-		return -1;
-	}
-	pgnr = page_to_mfn(page);
-	assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
-
-	if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-		printk("No enough memory for vti domain!!!\n");
-		return -1;
-	}
-	pgnr = page_to_mfn(page);
-	assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
-
+	d->arch.physmap_built = 1;
 	set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
 	return 0;
 }
@@ -447,6 +400,10 @@ int vmx_alloc_contig_pages(struct domain
 void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c)
 {
 	ASSERT(d != dom0); /* only for non-privileged vti domain */
+
+	if (!d->arch.physmap_built)
+		vmx_build_physmap_table(d);
+
 	d->arch.vmx_platform.shared_page_va =
 		(unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
 	/* TEMP */
```
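The rewritten `vmx_build_physmap_table()` no longer allocates anything: it walks the pages already placed on `d->page_list` by `increase_reservation` and assigns them to guest physical addresses in list order. A self-contained sketch of that intrusive-list walk follows; the struct definitions are simplified stand-ins, not the hypervisor's types.

```c
/* Self-contained demo of the list walk used by the patched code.
 * struct page_info and list_head here are simplified stand-ins. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* container_of-style accessor, as used by list_entry() in the patch */
#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct page_info {              /* simplified stand-in */
    unsigned long mfn;
    struct list_head list;
};

/* Assign guest pfn -> mfn pairs in list order, like the patched loops. */
static void walk_page_list(struct list_head *page_list)
{
    struct list_head *list_ent = page_list->next;
    unsigned long gpfn = 0;

    while (list_ent != page_list) {
        struct page_info *pg = list_entry(list_ent, struct page_info, list);
        printf("guest pfn %lu -> mfn %lu\n", gpfn, pg->mfn);
        gpfn++;
        list_ent = pg->list.next;
    }
}

int main(void)
{
    struct list_head head = { &head, &head };
    struct page_info pages[3] = {
        { 100, { NULL, NULL } }, { 42, { NULL, NULL } }, { 7, { NULL, NULL } },
    };
    struct list_head *prev = &head;

    /* build head -> pages[0] -> pages[1] -> pages[2] -> head */
    for (int i = 0; i < 3; i++) {
        prev->next = &pages[i].list;
        pages[i].list.prev = prev;
        prev = &pages[i].list;
    }
    prev->next = &head;
    head.prev = prev;

    walk_page_list(&head);
    return 0;
}
```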
```diff
--- a/xen/arch/ia64/xen/dom0_ops.c	Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/arch/ia64/xen/dom0_ops.c	Tue Apr 04 09:43:41 2006 -0600
@@ -157,40 +157,45 @@ long arch_do_dom0_op(dom0_op_t *op, GUES
      */
     case DOM0_GETMEMLIST:
     {
-        unsigned long i;
+        unsigned long i = 0;
         struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
         unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
         unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
         unsigned long mfn;
+        struct list_head *list_ent;
 
         ret = -EINVAL;
         if ( d != NULL )
         {
             ret = 0;
 
-            /* A temp trick here. When max_pfns == -1, we assume
-             * the request is for machine contiguous pages, so request
-             * all pages at first query
-             */
-            if ( (op->u.getmemlist.max_pfns == -1UL) &&
-                 !test_bit(ARCH_VMX_CONTIG_MEM,
-                           &d->vcpu[0]->arch.arch_vmx.flags) ) {
-                ret = (long) vmx_alloc_contig_pages(d);
-                put_domain(d);
-                return ret ? (-ENOMEM) : 0;
+            list_ent = d->page_list.next;
+            while ( (i != start_page) && (list_ent != &d->page_list)) {
+                mfn = page_to_mfn(list_entry(
+                    list_ent, struct page_info, list));
+                i++;
+                list_ent = mfn_to_page(mfn)->list.next;
             }
 
-            for ( i = start_page; i < (start_page + nr_pages); i++ )
+            if (i == start_page)
             {
-                mfn = gmfn_to_mfn_foreign(d, i);
+                while((i < (start_page + nr_pages)) &&
+                      (list_ent != &d->page_list))
+                {
+                    mfn = page_to_mfn(list_entry(
+                        list_ent, struct page_info, list));
 
-                if ( copy_to_guest_offset(op->u.getmemlist.buffer,
+                    if ( copy_to_guest_offset(op->u.getmemlist.buffer,
                                           i - start_page, &mfn, 1) )
-                {
-                    ret = -EFAULT;
-                    break;
+                    {
+                        ret = -EFAULT;
+                        break;
+                    }
+                    i++;
+                    list_ent = mfn_to_page(mfn)->list.next;
                 }
-            }
+            } else
+                ret = -ENOMEM;
 
             op->u.getmemlist.num_pfns = i - start_page;
             copy_to_guest(u_dom0_op, op, 1);
```
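With this change `DOM0_GETMEMLIST` becomes a pure query, and `max_pfns` is overloaded as two 32-bit fields: the starting index into the domain's page list in the high half, and the number of frames to fetch in the low half, exactly as the hypervisor side above unpacks it. A small caller-side sketch of the packing (the field name `max_pfns` is from the diff; the wrapper is hypothetical):

```c
/* Demo of the DOM0_GETMEMLIST max_pfns packing used by this patch. */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* hypothetical helper: pack start index and count into one 64-bit field */
static uint64_t pack_getmemlist(uint32_t start_page, uint32_t nr_pages)
{
    return ((uint64_t)start_page << 32) | nr_pages;
}

int main(void)
{
    uint64_t max_pfns = pack_getmemlist(512, 1024);

    /* what arch_do_dom0_op() recovers on the hypervisor side */
    uint64_t start_page = max_pfns >> 32;
    uint64_t nr_pages   = max_pfns & 0xffffffff;

    assert(start_page == 512 && nr_pages == 1024);
    printf("start_page=%llu nr_pages=%llu\n",
           (unsigned long long)start_page, (unsigned long long)nr_pages);
    return 0;
}
```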
```diff
--- a/xen/arch/ia64/xen/domain.c	Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c	Tue Apr 04 09:43:41 2006 -0600
@@ -76,6 +76,7 @@ extern void sync_split_caches(void);
 extern void serial_input_init(void);
 
 static void init_switch_stack(struct vcpu *v);
+void build_physmap_table(struct domain *d);
 
 /* this belongs in include/asm, but there doesn't seem to be a suitable place */
 void arch_domain_destroy(struct domain *d)
@@ -272,6 +273,7 @@ int arch_domain_create(struct domain *d)
 	memset(d->arch.mm, 0, sizeof(*d->arch.mm));
 	INIT_LIST_HEAD(&d->arch.mm->pt_list);
 
+	d->arch.physmap_built = 0;
 	if ((d->arch.mm->pgd = pgd_alloc(d->arch.mm)) == NULL)
 		goto fail_nomem;
 
@@ -317,7 +319,8 @@ int arch_set_info_guest(struct vcpu *v,
 		vmx_setup_platform(d, c);
 
 		vmx_final_setup_guest(v);
-	}
+	} else if (!d->arch.physmap_built)
+		build_physmap_table(d);
 
 	*regs = c->regs;
 	if (v == d->vcpu[0]) {
@@ -583,44 +586,24 @@ void assign_domain_page(struct domain *d
 		*(mpt_table + (physaddr>>PAGE_SHIFT))=(mpaddr>>PAGE_SHIFT);
 	}
 }
-#if 0
-/* map a physical address with specified I/O flag */
-void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
+
+void build_physmap_table(struct domain *d)
 {
-	struct mm_struct *mm = d->arch.mm;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t io_pte;
-
-	if (!mm->pgd) {
-		printk("assign_domain_page: domain pgd must exist!\n");
-		return;
-	}
-	ASSERT(flags & GPFN_IO_MASK);
+	struct list_head *list_ent = d->page_list.next;
+	unsigned long mfn, i = 0;
 
-	pgd = pgd_offset(mm,mpaddr);
-	if (pgd_none(*pgd))
-		pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
-
-	pud = pud_offset(pgd, mpaddr);
-	if (pud_none(*pud))
-		pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+	ASSERT(!d->arch.physmap_built);
+	while(list_ent != &d->page_list) {
+	    mfn = page_to_mfn(list_entry(
+		list_ent, struct page_info, list));
+	    assign_domain_page(d, i << PAGE_SHIFT, mfn << PAGE_SHIFT);
 
-	pmd = pmd_offset(pud, mpaddr);
-	if (pmd_none(*pmd))
-		pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
-//		pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
+	    i++;
+	    list_ent = mfn_to_page(mfn)->list.next;
+	}
+	d->arch.physmap_built = 1;
+}
 
-	pte = pte_offset_map(pmd, mpaddr);
-	if (pte_none(*pte)) {
-		pte_val(io_pte) = flags;
-		set_pte(pte, io_pte);
-	}
-	else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
-}
-#endif
 void mpafoo(unsigned long mpaddr)
 {
 	extern unsigned long privop_trace;
@@ -650,7 +633,6 @@ unsigned long lookup_domain_mpa(struct d
 			return *(unsigned long *)pte;
 	}
 #endif
-tryagain:
 	if (pgd_present(*pgd)) {
 		pud = pud_offset(pgd,mpaddr);
 		if (pud_present(*pud)) {
@@ -665,12 +647,12 @@ tryagain:
 			}
 		}
 	}
-	/* if lookup fails and mpaddr is "legal", "create" the page */
 	if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
-		if (assign_new_domain_page(d,mpaddr)) goto tryagain;
-	}
-	printk("lookup_domain_mpa: bad mpa 0x%lx (> 0x%lx)\n",
-		mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
+		printk("lookup_domain_mpa: non-allocated mpa 0x%lx (< 0x%lx)\n",
+			mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
+	} else
+		printk("lookup_domain_mpa: bad mpa 0x%lx (> 0x%lx)\n",
+			mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
 	mpafoo(mpaddr);
 	return 0;
 }
```
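Because all pages are now populated up front, `lookup_domain_mpa()` drops its allocate-on-miss `tryagain` path: a failed lookup is purely diagnostic, distinguishing an address inside the domain's range that was never populated from one beyond `max_pages`. A self-contained sketch of that two-way diagnostic; the types are simplified and `PAGE_SHIFT` 14 is an assumed ia64 value, as before.

```c
/* Demo of the new lookup-failure diagnostics (simplified types). */
#include <stdio.h>

#define PAGE_SHIFT 14UL            /* assumed ia64 value */

struct domain { unsigned long max_pages; };

static void report_lookup_failure(struct domain *d, unsigned long mpaddr)
{
    if ((mpaddr >> PAGE_SHIFT) < d->max_pages)
        /* inside the domain's range, but never populated */
        printf("lookup_domain_mpa: non-allocated mpa 0x%lx (< 0x%lx)\n",
               mpaddr, d->max_pages << PAGE_SHIFT);
    else
        /* beyond the domain's maximum: a genuinely bad address */
        printf("lookup_domain_mpa: bad mpa 0x%lx (> 0x%lx)\n",
               mpaddr, d->max_pages << PAGE_SHIFT);
}

int main(void)
{
    struct domain d = { .max_pages = 16384 };  /* hypothetical 256MB domain */

    report_lookup_failure(&d, 1UL << 20);      /* in range, not allocated */
    report_lookup_failure(&d, 1UL << 40);      /* out of range */
    return 0;
}
```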
```diff
--- a/xen/arch/ia64/xen/hypercall.c	Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/arch/ia64/xen/hypercall.c	Tue Apr 04 09:43:41 2006 -0600
@@ -76,25 +76,8 @@ xen_hypercall (struct pt_regs *regs)
 		break;
 
 	    case __HYPERVISOR_memory_op:
-		/* we don't handle reservations; just return success */
-		{
-		    struct xen_memory_reservation reservation;
-		    void *arg = (void *) regs->r15;
-
-		    switch(regs->r14) {
-		    case XENMEM_increase_reservation:
-		    case XENMEM_decrease_reservation:
-			if (copy_from_user(&reservation, arg,
-				sizeof(reservation)))
-			    regs->r8 = -EFAULT;
-			else
-			    regs->r8 = reservation.nr_extents;
-			break;
-		    default:
-			regs->r8 = do_memory_op((int) regs->r14, guest_handle_from_ptr(regs->r15, void));
-			break;
-		    }
-		}
+		regs->r8 = do_memory_op(regs->r14,
+			guest_handle_from_ptr(regs->r15, void));
 		break;
 
 	    case __HYPERVISOR_event_channel_op:
@@ -102,19 +85,24 @@ xen_hypercall (struct pt_regs *regs)
 		break;
 
 	    case __HYPERVISOR_grant_table_op:
-		regs->r8 = do_grant_table_op((unsigned int) regs->r14, guest_handle_from_ptr(regs->r15, void), (unsigned int) regs->r16);
+		regs->r8 = do_grant_table_op((unsigned int) regs->r14,
+			guest_handle_from_ptr(regs->r15, void),
+			(unsigned int) regs->r16);
 		break;
 
 	    case __HYPERVISOR_console_io:
-		regs->r8 = do_console_io((int) regs->r14, (int) regs->r15, guest_handle_from_ptr(regs->r16, char));
+		regs->r8 = do_console_io((int) regs->r14, (int) regs->r15,
+			guest_handle_from_ptr(regs->r16, char));
 		break;
 
 	    case __HYPERVISOR_xen_version:
-		regs->r8 = do_xen_version((int) regs->r14, guest_handle_from_ptr(regs->r15, void));
+		regs->r8 = do_xen_version((int) regs->r14,
+			guest_handle_from_ptr(regs->r15, void));
 		break;
 
 	    case __HYPERVISOR_multicall:
-		regs->r8 = do_multicall(guest_handle_from_ptr(regs->r14, multicall_entry_t), (unsigned int) regs->r15);
+		regs->r8 = do_multicall(guest_handle_from_ptr(regs->r14,
+			multicall_entry_t), (unsigned int) regs->r15);
 		break;
 
 	    default:
```
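The `memory_op` case now forwards straight to the common `do_memory_op()` instead of faking success for increase/decrease_reservation, which is what makes the reservation ops real on ia64. A self-contained sketch of that register-ABI dispatch pattern; `struct pt_regs` is a simplified stand-in, the hypercall and sub-op numbers are assumptions, and `do_memory_op` here is a stub, not Xen's.

```c
/* Sketch of register-based hypercall forwarding (simplified stand-ins). */
#include <errno.h>
#include <stdio.h>

struct pt_regs { unsigned long r8, r14, r15, r16; };  /* simplified */

#define __HYPERVISOR_memory_op 12   /* assumed hypercall number */

/* stub for the common handler the patch now calls directly */
static long do_memory_op(unsigned long cmd, void *arg)
{
    printf("do_memory_op(cmd=%lu, arg=%p)\n", cmd, arg);
    return 0;
}

/* arguments travel in r14/r15; the result returns in r8 */
static void xen_hypercall(unsigned long nr, struct pt_regs *regs)
{
    switch (nr) {
    case __HYPERVISOR_memory_op:
        /* forward sub-op and guest pointer unmodified; the common
         * code now really performs increase/decrease_reservation */
        regs->r8 = do_memory_op(regs->r14, (void *)regs->r15);
        break;
    default:
        regs->r8 = (unsigned long)-ENOSYS;
        break;
    }
}

int main(void)
{
    /* r14 = 0: XENMEM_increase_reservation (assumed sub-op number) */
    struct pt_regs regs = { 0, 0, 0, 0 };

    xen_hypercall(__HYPERVISOR_memory_op, &regs);
    return (int)regs.r8;
}
```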
```diff
--- a/xen/include/asm-ia64/domain.h	Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/include/asm-ia64/domain.h	Tue Apr 04 09:43:41 2006 -0600
@@ -27,6 +27,7 @@ struct arch_domain {
     int rid_bits;		/* number of virtual rid bits (default: 18) */
     int breakimm;
 
+    int physmap_built;		/* Whether is physmap built or not */
     int imp_va_msb;
     /* System pages out of guest memory, like for xenstore/console */
     unsigned long sys_pgnr;
```
```diff
--- a/xen/include/asm-ia64/vmx.h	Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/include/asm-ia64/vmx.h	Tue Apr 04 09:43:41 2006 -0600
@@ -40,7 +40,7 @@ extern int ia64_hypercall (struct pt_reg
 extern void vmx_save_state(struct vcpu *v);
 extern void vmx_load_state(struct vcpu *v);
 extern void show_registers(struct pt_regs *regs);
-extern int vmx_alloc_contig_pages(struct domain *d);
+extern int vmx_build_physmap_table(struct domain *d);
 extern unsigned long __gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
 extern void sync_split_caches(void);
 extern void vmx_virq_line_assist(struct vcpu *v);
```