debuggers.hg
changeset 9850:96bc87dd7ca9
[IA64] get rid of sync_split_cache
Get rid of sync_split_cache.
Use flush_icache_range and ia64_fc instead.
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author | awilliam@xenbuild.aw |
---|---|
date | Fri Apr 14 14:20:04 2006 -0600 (2006-04-14) |
parents | 4e8a64d8bd0e |
children | b5c2dba60b69 |
files | xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/privop.c xen/arch/ia64/xen/xenmisc.c |
line diff
1.1 --- a/xen/arch/ia64/xen/domain.c Fri Apr 14 14:13:13 2006 -0600 1.2 +++ b/xen/arch/ia64/xen/domain.c Fri Apr 14 14:20:04 2006 -0600 1.3 @@ -339,8 +339,9 @@ int arch_set_info_guest(struct vcpu *v, 1.4 d->arch.cmdline = c->cmdline; 1.5 d->shared_info->arch = c->shared; 1.6 1.7 - /* FIXME: it is required here ? */ 1.8 - sync_split_caches(); 1.9 + /* Cache synchronization seems to be done by the linux kernel 1.10 + during mmap/unmap operation. However be conservative. */ 1.11 + domain_cache_flush (d, 1); 1.12 } 1.13 new_thread(v, regs->cr_iip, 0, 0); 1.14 1.15 @@ -784,50 +785,68 @@ static void loaddomainelfimage(struct do 1.16 1.17 copy_memory(&ehdr, (void *) image_start, sizeof(Elf_Ehdr)); 1.18 for ( h = 0; h < ehdr.e_phnum; h++ ) { 1.19 - copy_memory(&phdr,elfbase + ehdr.e_phoff + (h*ehdr.e_phentsize), 1.20 - sizeof(Elf_Phdr)); 1.21 - //if ( !is_loadable_phdr(phdr) ) 1.22 - if ((phdr.p_type != PT_LOAD)) { 1.23 - continue; 1.24 - } 1.25 - filesz = phdr.p_filesz; memsz = phdr.p_memsz; 1.26 - elfaddr = (unsigned long) elfbase + phdr.p_offset; 1.27 - dom_mpaddr = phdr.p_paddr; 1.28 + copy_memory(&phdr, 1.29 + elfbase + ehdr.e_phoff + (h*ehdr.e_phentsize), 1.30 + sizeof(Elf_Phdr)); 1.31 + if ((phdr.p_type != PT_LOAD)) 1.32 + continue; 1.33 + 1.34 + filesz = phdr.p_filesz; 1.35 + memsz = phdr.p_memsz; 1.36 + elfaddr = (unsigned long) elfbase + phdr.p_offset; 1.37 + dom_mpaddr = phdr.p_paddr; 1.38 + 1.39 //printf("p_offset: %x, size=%x\n",elfaddr,filesz); 1.40 #ifdef CONFIG_DOMAIN0_CONTIGUOUS 1.41 - if (d == dom0) { 1.42 - if (dom_mpaddr+memsz>dom0_size || dom_mpaddr+filesz>dom0_size) { 1.43 - printf("Domain0 doesn't fit in allocated space!\n"); 1.44 - while(1); 1.45 + if (d == dom0) { 1.46 + if (dom_mpaddr+memsz>dom0_size) 1.47 + panic("Dom0 doesn't fit in memory space!\n"); 1.48 + dom_imva = __va_ul(dom_mpaddr + dom0_start); 1.49 + copy_memory((void *)dom_imva, (void *)elfaddr, filesz); 1.50 + if (memsz > filesz) 1.51 + memset((void *)dom_imva+filesz, 0, 1.52 + 
memsz-filesz); 1.53 +//FIXME: This test for code seems to find a lot more than objdump -x does 1.54 + if (phdr.p_flags & PF_X) { 1.55 + privify_memory(dom_imva,filesz); 1.56 + flush_icache_range (dom_imva, dom_imva+filesz); 1.57 + } 1.58 } 1.59 - dom_imva = (unsigned long) __va(dom_mpaddr + dom0_start); 1.60 - copy_memory((void *) dom_imva, (void *) elfaddr, filesz); 1.61 - if (memsz > filesz) memset((void *) dom_imva+filesz, 0, memsz-filesz); 1.62 -//FIXME: This test for code seems to find a lot more than objdump -x does 1.63 - if (phdr.p_flags & PF_X) privify_memory(dom_imva,filesz); 1.64 - } 1.65 - else 1.66 + else 1.67 #endif 1.68 - while (memsz > 0) { 1.69 - p = assign_new_domain_page(d,dom_mpaddr); 1.70 - if (unlikely(!p)) BUG(); 1.71 - dom_imva = (unsigned long) __va(page_to_maddr(p)); 1.72 - if (filesz > 0) { 1.73 - if (filesz >= PAGE_SIZE) 1.74 - copy_memory((void *) dom_imva, (void *) elfaddr, PAGE_SIZE); 1.75 - else { // copy partial page, zero the rest of page 1.76 - copy_memory((void *) dom_imva, (void *) elfaddr, filesz); 1.77 - memset((void *) dom_imva+filesz, 0, PAGE_SIZE-filesz); 1.78 + while (memsz > 0) { 1.79 + p = assign_new_domain_page(d,dom_mpaddr); 1.80 + BUG_ON (unlikely(p == NULL)); 1.81 + dom_imva = __va_ul(page_to_maddr(p)); 1.82 + if (filesz > 0) { 1.83 + if (filesz >= PAGE_SIZE) 1.84 + copy_memory((void *) dom_imva, 1.85 + (void *) elfaddr, 1.86 + PAGE_SIZE); 1.87 + else { 1.88 + // copy partial page 1.89 + copy_memory((void *) dom_imva, 1.90 + (void *) elfaddr, filesz); 1.91 + // zero the rest of page 1.92 + memset((void *) dom_imva+filesz, 0, 1.93 + PAGE_SIZE-filesz); 1.94 + } 1.95 +//FIXME: This test for code seems to find a lot more than objdump -x does 1.96 + if (phdr.p_flags & PF_X) { 1.97 + privify_memory(dom_imva,PAGE_SIZE); 1.98 + flush_icache_range(dom_imva, 1.99 + dom_imva+PAGE_SIZE); 1.100 + } 1.101 } 1.102 -//FIXME: This test for code seems to find a lot more than objdump -x does 1.103 - if (phdr.p_flags & PF_X) 1.104 - 
privify_memory(dom_imva,PAGE_SIZE); 1.105 + else if (memsz > 0) { 1.106 + /* always zero out entire page */ 1.107 + memset((void *) dom_imva, 0, PAGE_SIZE); 1.108 + } 1.109 + memsz -= PAGE_SIZE; 1.110 + filesz -= PAGE_SIZE; 1.111 + elfaddr += PAGE_SIZE; 1.112 + dom_mpaddr += PAGE_SIZE; 1.113 } 1.114 - else if (memsz > 0) // always zero out entire page 1.115 - memset((void *) dom_imva, 0, PAGE_SIZE); 1.116 - memsz -= PAGE_SIZE; filesz -= PAGE_SIZE; 1.117 - elfaddr += PAGE_SIZE; dom_mpaddr += PAGE_SIZE; 1.118 - } 1.119 } 1.120 } 1.121 1.122 @@ -1086,7 +1105,6 @@ int construct_dom0(struct domain *d, 1.123 1.124 new_thread(v, pkern_entry, 0, 0); 1.125 physdev_init_dom0(d); 1.126 - sync_split_caches(); 1.127 1.128 // FIXME: Hack for keyboard input 1.129 //serial_input_init();
2.1 --- a/xen/arch/ia64/xen/privop.c Fri Apr 14 14:13:13 2006 -0600 2.2 +++ b/xen/arch/ia64/xen/privop.c Fri Apr 14 14:20:04 2006 -0600 2.3 @@ -60,7 +60,9 @@ void build_hypercall_bundle(UINT64 *imva 2.4 bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst; 2.5 bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18; 2.6 2.7 - *imva++ = bundle.i64[0]; *imva = bundle.i64[1]; 2.8 + imva[0] = bundle.i64[0]; imva[1] = bundle.i64[1]; 2.9 + ia64_fc (imva); 2.10 + ia64_fc (imva + 1); 2.11 } 2.12 2.13 void build_pal_hypercall_bundles(UINT64 *imva, UINT64 brkimm, UINT64 hypnum) 2.14 @@ -83,6 +85,8 @@ void build_pal_hypercall_bundles(UINT64 2.15 bundle.slot0 = slot_a5.inst; 2.16 imva[0] = bundle.i64[0]; 2.17 imva[1] = bundle.i64[1]; 2.18 + ia64_fc (imva); 2.19 + ia64_fc (imva + 1); 2.20 2.21 /* Copy the second bundle and patch the hypercall vector. */ 2.22 bundle.i64[0] = pal_call_stub[2]; 2.23 @@ -93,6 +97,8 @@ void build_pal_hypercall_bundles(UINT64 2.24 bundle.slot0 = slot_m37.inst; 2.25 imva[2] = bundle.i64[0]; 2.26 imva[3] = bundle.i64[1]; 2.27 + ia64_fc (imva + 2); 2.28 + ia64_fc (imva + 3); 2.29 } 2.30 2.31
3.1 --- a/xen/arch/ia64/xen/xenmisc.c Fri Apr 14 14:13:13 2006 -0600 3.2 +++ b/xen/arch/ia64/xen/xenmisc.c Fri Apr 14 14:20:04 2006 -0600 3.3 @@ -363,26 +363,6 @@ void panic_domain(struct pt_regs *regs, 3.4 domain_crash_synchronous (); 3.5 } 3.6 3.7 -/* FIXME: for the forseeable future, all cpu's that enable VTi have split 3.8 - * caches and all cpu's that have split caches enable VTi. This may 3.9 - * eventually be untrue though. */ 3.10 -#define cpu_has_split_cache vmx_enabled 3.11 -extern unsigned int vmx_enabled; 3.12 - 3.13 -void sync_split_caches(void) 3.14 -{ 3.15 - unsigned long ret, progress = 0; 3.16 - 3.17 - if (cpu_has_split_cache) { 3.18 - /* Sync d/i cache conservatively */ 3.19 - ret = ia64_pal_cache_flush(4, 0, &progress, NULL); 3.20 - if ((ret!=PAL_STATUS_SUCCESS)&& (ret!=PAL_STATUS_UNIMPLEMENTED)) 3.21 - printk("PAL CACHE FLUSH failed\n"); 3.22 - else printk("Sync i/d cache for guest SUCC\n"); 3.23 - } 3.24 - else printk("sync_split_caches ignored for CPU with no split cache\n"); 3.25 -} 3.26 - 3.27 /////////////////////////////// 3.28 // from arch/x86/mm.c 3.29 ///////////////////////////////