debuggers.hg
changeset 4607:5fd95a1db011
bitkeeper revision 1.1323 (426441f4N_rRW9JqV6OGmWTaHrLiCQ)
- Update FreeBSD sparse tree to build against latest -unstable
- add conditional support for writable pagetables (don't use;
  currently locks up xen)
- block driver not updated to use grant tables yet; DOM0 will have to
  be re-compiled with grant table substrate disabled
Signed-off-by: Kip Macy <kmacy@fsmware.com>
Signed-off-by: ian@xensource.com
author    iap10@freefall.cl.cam.ac.uk
date      Mon Apr 18 23:25:40 2005 +0000 (2005-04-18)
parents   756991cc6c70
children  4d0b44989cce 76a627cb3006
files     freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c
          freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c
          freebsd-5.3-xen-sparse/i386-xen/i386-xen/xen_machdep.c
          freebsd-5.3-xen-sparse/i386-xen/include/hypervisor.h
          freebsd-5.3-xen-sparse/i386-xen/include/pmap.h
          freebsd-5.3-xen-sparse/i386-xen/include/xenfunc.h
          freebsd-5.3-xen-sparse/i386-xen/include/xenpmap.h
          freebsd-5.3-xen-sparse/i386-xen/xen/netfront/xn_netfront.c
line diff
--- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c	Mon Apr 18 23:20:07 2005 +0000
+++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/machdep.c	Mon Apr 18 23:25:40 2005 +0000
@@ -224,7 +224,6 @@ cpu_startup(void *dummy)
 	/*
 	 * Good {morning,afternoon,evening,night}.
 	 */
-	/* XXX need to write clock driver */
 	startrtclock();
 
 	printcpuinfo();
@@ -1375,6 +1374,7 @@ extern unsigned long cpu0prvpage;
 extern unsigned long *SMPpt;
 pteinfo_t *pteinfo_list;
 unsigned long *xen_machine_phys = ((unsigned long *)VADDR(1008, 0));
+pt_entry_t *KPTphysv;
 int preemptable;
 int gdt_set;
 
@@ -1386,6 +1386,10 @@ void
 initvalues(start_info_t *startinfo)
 {
 	int i;
+#ifdef WRITABLE_PAGETABLES
+	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
+#endif
+
 	xen_start_info = startinfo;
 	xen_phys_machine = (unsigned long *)startinfo->mfn_list;
 	unsigned long tmpindex = ((__pa(xen_start_info->pt_base) >> PAGE_SHIFT) + xen_start_info->nr_pt_frames) + 3 /* number of pages allocated after the pts + 1*/;
@@ -1393,6 +1397,7 @@ initvalues(start_info_t *startinfo)
 	/* pre-zero unused mapped pages */
 	bzero((char *)(KERNBASE + (tmpindex << PAGE_SHIFT)), (1024 - tmpindex)*PAGE_SIZE);
 	IdlePTD = (pd_entry_t *)xpmap_ptom(__pa(startinfo->pt_base));
+	KPTphysv = (pt_entry_t *)(startinfo->pt_base + PAGE_SIZE);
 	XENPRINTF("IdlePTD %p\n", IdlePTD);
 	XENPRINTF("nr_pages: %ld shared_info: 0x%lx flags: 0x%lx pt_base: 0x%lx "
 		  "mod_start: 0x%lx mod_len: 0x%lx\n",
@@ -1401,9 +1406,9 @@ initvalues(start_info_t *startinfo)
 		  xen_start_info->mod_start, xen_start_info->mod_len);
 
 	/* setup self-referential mapping first so vtomach will work */
-	xpq_queue_pt_update(IdlePTD + PTDPTDI , (unsigned long)IdlePTD |
+	xen_queue_pt_update(IdlePTD + PTDPTDI , (unsigned long)IdlePTD |
 			    PG_V | PG_A);
-	mcl_flush_queue();
+	xen_flush_queue();
 	/* Map proc0's UPAGES */
 	proc0uarea = (struct user *)(KERNBASE + (tmpindex << PAGE_SHIFT));
 	tmpindex += UAREA_PAGES;
@@ -1431,10 +1436,10 @@ initvalues(start_info_t *startinfo)
 	SMPpt[0] = vtomach(cpu0prvpage) | PG_RW | PG_M | PG_V | PG_A;
 
 	/* map SMP page table RO */
-	PT_SET_MA(SMPpt, vtomach(SMPpt) & ~PG_RW, TRUE);
+	PT_SET_MA(SMPpt, vtomach(SMPpt) & ~PG_RW);
 
 	/* put the page table into the pde */
-	xpq_queue_pt_update(IdlePTD + MPPTDI, xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
+	xen_queue_pt_update(IdlePTD + MPPTDI, xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
 
 	tmpindex++;
 #endif
@@ -1448,20 +1453,16 @@ initvalues(start_info_t *startinfo)
 #endif
 	/* unmap remaining pages from initial 4MB chunk */
 	for (i = tmpindex; i%1024 != 0; i++)
-		PT_CLEAR(KERNBASE + (i << PAGE_SHIFT), TRUE);
+		PT_CLEAR_VA(KPTphysv + i, TRUE);
 
 	/* allocate remainder of NKPT pages */
 	for (i = 0; i < NKPT-1; i++, tmpindex++)
-		xpq_queue_pt_update(IdlePTD + KPTDI + i + 1, xpmap_ptom((tmpindex << PAGE_SHIFT))| PG_M | PG_RW | PG_V | PG_A);
-	tmpindex += NKPT-1;
-
-
-
+		PT_SET_VA(((unsigned long *)startinfo->pt_base) + KPTDI + i + 1, (tmpindex << PAGE_SHIFT)| PG_M | PG_RW | PG_V | PG_A, TRUE);
 	tmpindex += NKPT-1;
 	PT_UPDATES_FLUSH();
 
 	HYPERVISOR_shared_info = (shared_info_t *)(KERNBASE + (tmpindex << PAGE_SHIFT));
-	PT_SET_MA(HYPERVISOR_shared_info, xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M, TRUE);
+	PT_SET_MA(HYPERVISOR_shared_info, xen_start_info->shared_info | PG_A | PG_V | PG_RW | PG_M);
 	tmpindex++;
 
 	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list = (unsigned long)xen_phys_machine;
@@ -1568,7 +1569,7 @@ init386(void)
 	for (x = 0; x < NGDT; x++)
 		ssdtosd(&gdt_segs[x], &gdt[x].sd);
 
-	PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~PG_RW, TRUE);
+	PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~PG_RW);
 	gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
 	if (HYPERVISOR_set_gdt(&gdtmachpfn, LAST_RESERVED_GDT_ENTRY + 1)) {
 		XENPRINTF("set_gdt failed\n");
@@ -1617,7 +1618,7 @@ init386(void)
 	default_proc_ldt.ldt_len = 6;
 	_default_ldt = (int)&default_proc_ldt;
 	PCPU_SET(currentldt, _default_ldt)
-	PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW, TRUE);
+	PT_SET_MA(ldt, *vtopte((unsigned long)ldt) & ~PG_RW);
 	xen_set_ldt((unsigned long) ldt, (sizeof ldt_segs / sizeof ldt_segs[0]));
 
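The WRITABLE_PAGETABLES block added to initvalues() above is the "conditional support for writable pagetables" from the description: under this assist Xen write-protects page-table pages and trap-and-validates direct writes to them, so leaf PTE updates no longer need an explicit mmu_update hypercall. A minimal sketch of the boot-time toggle, assuming the standard Xen 2.x vm_assist interface (the wrapper function itself is hypothetical):

```c
/*
 * Hypothetical wrapper around the vm_assist call made in initvalues().
 * HYPERVISOR_vm_assist and the VMASST_* constants come from the Xen
 * public headers; everything else here is illustrative.
 */
#ifdef WRITABLE_PAGETABLES
static void
enable_writable_pagetables(void)
{
	/* Ask Xen to trap-and-validate direct writes to page tables. */
	if (HYPERVISOR_vm_assist(VMASST_CMD_enable,
	    VMASST_TYPE_writable_pagetables) != 0)
		panic("vm_assist(writable_pagetables) failed");
}
#endif
```

Per the description this path is not usable yet (it currently locks up xen), which is why it stays behind the ifdef.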
--- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c	Mon Apr 18 23:20:07 2005 +0000
+++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/pmap.c	Mon Apr 18 23:25:40 2005 +0000
@@ -793,7 +793,7 @@ pmap_pte(pmap_t pmap, vm_offset_t va)
 		newpf = PT_GET(pde) & PG_FRAME;
 		tmppf = PT_GET(PMAP2) & PG_FRAME;
 		if (tmppf != newpf) {
-			PT_SET_VA(PMAP2, newpf | PG_V | PG_A, FALSE);
+			PD_SET_VA(PMAP2, newpf | PG_V | PG_A, FALSE);
 			pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
 		}
 		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
@@ -852,7 +852,7 @@ pmap_pte_quick(pmap_t pmap, vm_offset_t
 		newpf = PT_GET(pde) & PG_FRAME;
 		tmppf = PT_GET(PMAP1) & PG_FRAME;
 		if (tmppf != newpf) {
-			PT_SET_VA(PMAP1, newpf | PG_V | PG_A, TRUE);
+			PD_SET_VA(PMAP1, newpf | PG_V | PG_A, TRUE);
 #ifdef SMP
 			PMAP1cpu = PCPU_GET(cpuid);
 #endif
@@ -955,7 +955,10 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 PMAP_INLINE void
 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 {
-	PT_SET(va, pa | PG_RW | PG_V | pgeflag, TRUE);
+	pt_entry_t *pte;
+
+	pte = vtopte(va);
+	pte_store(pte, pa | PG_RW | PG_V | pgeflag);
 }
 
 /*
@@ -965,7 +968,10 @@ pmap_kenter(vm_offset_t va, vm_paddr_t p
 PMAP_INLINE void
 pmap_kremove(vm_offset_t va)
 {
-	PT_CLEAR(va, TRUE);
+	pt_entry_t *pte;
+
+	pte = vtopte(va);
+	pte_clear(pte);
 }
 
 /*
@@ -984,12 +990,10 @@ vm_offset_t
 pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
 {
 	vm_offset_t va, sva;
-	pt_entry_t *pte;
 
 	va = sva = *virt;
 	while (start < end) {
-		pte = vtopte(va);
-		PT_SET_VA(pte, start | PG_RW | PG_V | pgeflag, FALSE);
+		pmap_kenter(va, start);
 		va += PAGE_SIZE;
 		start += PAGE_SIZE;
 	}
@@ -1016,8 +1020,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *
 
 	va = sva;
 	while (count-- > 0) {
-		PT_SET(va, VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag,
-		    FALSE);
+		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
 		va += PAGE_SIZE;
 		m++;
 	}
@@ -1037,7 +1040,7 @@ pmap_qremove(vm_offset_t sva, int count)
 
 	va = sva;
 	while (count-- > 0) {
-		PT_CLEAR(va, FALSE);
+		pmap_kremove(va);
 		va += PAGE_SIZE;
 	}
 	/* invalidate will flush the update queue */
@@ -1070,8 +1073,8 @@ static int
 	/*
 	 * unmap the page table page
 	 */
-	xpq_queue_unpin_table(pmap->pm_pdir[m->pindex]);
-	PT_CLEAR_VA(&pmap->pm_pdir[m->pindex], TRUE);
+	xen_pt_unpin(pmap->pm_pdir[m->pindex]);
+	PD_CLEAR_VA(&pmap->pm_pdir[m->pindex], TRUE);
 	--pmap->pm_stats.resident_count;
 
 	/*
@@ -1188,16 +1191,14 @@ pmap_pinit(struct pmap *pmap)
 	/* install self-referential address mapping entry(s) */
 	for (i = 0; i < NPGPTD; i++) {
 		ma = xpmap_ptom(VM_PAGE_TO_PHYS(ptdpg[i]));
-		pmap->pm_pdir[PTDPTDI + i] = ma | PG_V | PG_A;
+		pmap->pm_pdir[PTDPTDI + i] = ma | PG_V | PG_A | PG_M;
 #ifdef PAE
 		pmap->pm_pdpt[i] = ma | PG_V;
 #endif
-#ifndef PAE
-		PT_SET_MA(pmap->pm_pdir, ma | PG_V | PG_A, TRUE);
-#else
-		panic("FIX ME!");
-#endif
-		xpq_queue_pin_table(ma, XPQ_PIN_L2_TABLE);
+		/* re-map page directory read-only */
+		PT_SET_MA(pmap->pm_pdir, ma | PG_V | PG_A);
+		xen_pgd_pin(ma);
+
 	}
 
 	pmap->pm_active = 0;
@@ -1249,8 +1250,8 @@ static vm_page_t
 	pmap->pm_stats.resident_count++;
 
 	ptepa = VM_PAGE_TO_PHYS(m);
-	xpq_queue_pin_table(xpmap_ptom(ptepa), XPQ_PIN_L1_TABLE);
-	PT_SET_VA(&pmap->pm_pdir[ptepindex],
+	xen_pt_pin(xpmap_ptom(ptepa));
+	PD_SET_VA(&pmap->pm_pdir[ptepindex],
 	    (pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M), TRUE);
 
 	return m;
@@ -1425,12 +1426,12 @@ pmap_release(pmap_t pmap)
 		ptdpg[i] = PHYS_TO_VM_PAGE(PT_GET(&pmap->pm_pdir[PTDPTDI + i]));
 
 	for (i = 0; i < nkpt + NPGPTD; i++)
-		PT_CLEAR_VA(&pmap->pm_pdir[PTDPTDI + i], FALSE);
+		PD_CLEAR_VA(&pmap->pm_pdir[PTDPTDI + i], FALSE);
 
 	bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
 	    sizeof(*pmap->pm_pdir));
 #ifdef SMP
-	PT_CLEAR_VA(&pmap->pm_pdir[MPPTDI], FALSE);
+	PD_CLEAR_VA(&pmap->pm_pdir[MPPTDI], FALSE);
 #endif
 	PT_UPDATES_FLUSH();
 	pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
@@ -1440,8 +1441,7 @@ pmap_release(pmap_t pmap)
 		m = ptdpg[i];
 
 		ma = xpmap_ptom(VM_PAGE_TO_PHYS(m));
-		xpq_queue_unpin_table(ma);
-		pmap_zero_page(m);
+		xen_pgd_unpin(ma);
 #ifdef PAE
 		KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
 		    ("pmap_release: got wrong ptd page"));
@@ -1516,12 +1516,12 @@ pmap_growkernel(vm_offset_t addr)
 		pmap_zero_page(nkpg);
 		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
 		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
-		PT_SET_VA(&pdir_pde(PTD, kernel_vm_end), newpdir, TRUE);
+		PD_SET_VA(&pdir_pde(PTD, kernel_vm_end), newpdir, TRUE);
 
 		mtx_lock_spin(&allpmaps_lock);
 		LIST_FOREACH(pmap, &allpmaps, pm_list) {
 			pde = pmap_pde(pmap, kernel_vm_end);
-			PT_SET_VA(pde, newpdir, FALSE);
+			PD_SET_VA(pde, newpdir, FALSE);
 		}
 		PT_UPDATES_FLUSH();
 		mtx_unlock_spin(&allpmaps_lock);
@@ -1738,7 +1738,7 @@ pmap_remove(pmap_t pmap, vm_offset_t sva
 		 * Check for large page.
 		 */
 		if ((ptpaddr & PG_PS) != 0) {
-			PT_CLEAR_VA(pmap->pm_pdir[pdirindex], TRUE);
+			PD_CLEAR_VA(pmap->pm_pdir[pdirindex], TRUE);
 			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 			anyvalid = 1;
 			continue;
@@ -2222,9 +2222,9 @@ retry:
 	 * Now validate mapping with RO protection
 	 */
 	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
-		PT_SET(va, pa | PG_V | PG_U, TRUE);
+		pte_store(pte, pa | PG_V | PG_U);
 	else
-		PT_SET(va, pa | PG_V | PG_U | PG_MANAGED, TRUE);
+		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 out:
 	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
@@ -2312,7 +2312,7 @@ retry:
 	pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
 	npdes = size >> PDRSHIFT;
 	for(i = 0; i < npdes; i++) {
-		PT_SET_VA(&pmap->pm_pdir[ptepindex],
+		PD_SET_VA(&pmap->pm_pdir[ptepindex],
 		    ptepa | PG_U | PG_RW | PG_V | PG_PS, FALSE);
 		ptepa += NBPDR;
 		ptepindex += 1;
@@ -2330,7 +2330,7 @@ pmap_map_readonly(pmap_t pmap, vm_offset
 	for (i = 0; i < npages; i++) {
 		pt_entry_t *pte;
 		pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE));
-		PT_SET_MA(va + i*PAGE_SIZE, *pte & ~(PG_RW|PG_M), FALSE);
+		pte_store(pte, xpmap_mtop(*pte & ~(PG_RW|PG_M)));
 		PMAP_MARK_PRIV(xpmap_mtop(*pte));
 		pmap_pte_release(pte);
 	}
@@ -2345,7 +2345,7 @@ pmap_map_readwrite(pmap_t pmap, vm_offse
 		pt_entry_t *pte;
 		pte = pmap_pte(pmap, (vm_offset_t)(va + i*PAGE_SIZE));
 		PMAP_MARK_UNPRIV(xpmap_mtop(*pte));
-		PT_SET_MA(va + i*PAGE_SIZE, *pte | (PG_RW|PG_M), FALSE);
+		pte_store(pte, xpmap_mtop(*pte) | (PG_RW|PG_M));
 		pmap_pte_release(pte);
 	}
 	PT_UPDATES_FLUSH();
@@ -3010,7 +3010,7 @@ pmap_mapdev(pa, size)
 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
 	for (tmpva = va; size > 0; ) {
-		PT_SET(tmpva, pa | PG_RW | PG_V | pgeflag, FALSE);
+		pmap_kenter(tmpva, pa);
 		size -= PAGE_SIZE;
 		tmpva += PAGE_SIZE;
 		pa += PAGE_SIZE;
@@ -3032,7 +3032,7 @@ pmap_unmapdev(va, size)
 	offset = va & PAGE_MASK;
 	size = roundup(offset + size, PAGE_SIZE);
 	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
-		PT_CLEAR(tmpva, FALSE);
+		pmap_kremove(tmpva);
 	pmap_invalidate_range(kernel_pmap, va, tmpva);
 	kmem_free(kernel_map, base, size);
 }
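With this change pmap_kenter() and pmap_kremove() become thin wrappers over the pte_store()/pte_clear() macros, and the higher-level loops (pmap_map(), pmap_qenter(), pmap_qremove(), pmap_mapdev(), pmap_unmapdev()) all funnel through them. A sketch of the resulting kernel-mapping round trip; the function and its va/pa inputs are hypothetical:

```c
/*
 * Sketch: map one physical page into the kernel map and tear it down
 * again using the rewritten helpers. The comments show what each call
 * expands to after this changeset.
 */
static void
kernel_mapping_example(vm_offset_t va, vm_paddr_t pa)
{
	pmap_kenter(va, pa);	/* pte_store(vtopte(va), pa | PG_RW | PG_V | pgeflag) */
	/* ... use the mapping ... */
	pmap_kremove(va);	/* pte_clear(vtopte(va)) */
	PT_UPDATES_FLUSH();	/* push any queued page-table updates to Xen */
}
```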
--- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/xen_machdep.c	Mon Apr 18 23:20:07 2005 +0000
+++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/xen_machdep.c	Mon Apr 18 23:25:40 2005 +0000
@@ -1,8 +1,7 @@
-/* $NetBSD:$ */
-
 /*
  *
  * Copyright (c) 2004 Christian Limpach.
+ * Copyright (c) 2004,2005 Kip Macy
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -381,39 +380,31 @@ printk(const char *fmt, ...)
 	(void)HYPERVISOR_console_write(buf, ret);
 }
 
-#define XPQUEUE_SIZE 128
+#define PANIC_IF(exp) if (unlikely(exp)) {printk("%s failed\n",#exp); panic("%s: %s:%d", #exp, __FILE__, __LINE__);}
 
-#define MCLQUEUE_SIZE 32
+
+#define XPQUEUE_SIZE 128
 #ifdef SMP
 /* per-cpu queues and indices */
-static multicall_entry_t mcl_queue[MAX_VIRT_CPUS][MCLQUEUE_SIZE];
 static mmu_update_t xpq_queue[MAX_VIRT_CPUS][XPQUEUE_SIZE];
-static int mcl_idx[MAX_VIRT_CPUS];
 static int xpq_idx[MAX_VIRT_CPUS];
 
-#define MCL_QUEUE mcl_queue[vcpu]
 #define XPQ_QUEUE xpq_queue[vcpu]
-#define MCL_IDX mcl_idx[vcpu]
 #define XPQ_IDX xpq_idx[vcpu]
 #define SET_VCPU() int vcpu = smp_processor_id()
 #else
-static multicall_entry_t mcl_queue[MCLQUEUE_SIZE];
 static mmu_update_t xpq_queue[XPQUEUE_SIZE];
-static int mcl_idx = 0;
 static int xpq_idx = 0;
 
-#define MCL_QUEUE mcl_queue
 #define XPQ_QUEUE xpq_queue
-#define MCL_IDX mcl_idx
 #define XPQ_IDX xpq_idx
 #define SET_VCPU()
 #endif
 #define XPQ_IDX_INC atomic_add_int(&XPQ_IDX, 1);
-#define MCL_IDX_INC atomic_add_int(&MCL_IDX, 1);
 
 
 static __inline void
-_xpq_flush_queue(void)
+_xen_flush_queue(void)
 {
 	SET_VCPU();
 	int _xpq_idx = XPQ_IDX;
@@ -423,7 +414,7 @@ static __inline void
 	XPQ_IDX = 0;
 	/* Make sure index is cleared first to avoid double updates. */
 	error = HYPERVISOR_mmu_update((mmu_update_t *)&XPQ_QUEUE,
-				      _xpq_idx, NULL);
+				      _xpq_idx, NULL, DOMID_SELF);
 
 	if (__predict_false(error < 0)) {
 		for (i = 0; i < _xpq_idx; i++)
@@ -432,81 +423,44 @@ static __inline void
 	}
 
 }
-static void
-xpq_flush_queue(void)
+
+void
+xen_flush_queue(void)
 {
 	SET_VCPU();
-
-	if (XPQ_IDX != 0) _xpq_flush_queue();
+	if (XPQ_IDX != 0) _xen_flush_queue();
 }
 
 static __inline void
-_mcl_flush_queue(void)
-{
-	SET_VCPU();
-	int _mcl_idx = MCL_IDX;
-
-	MCL_IDX = 0;
-	(void)HYPERVISOR_multicall(&MCL_QUEUE, _mcl_idx);
-}
-
-void
-mcl_flush_queue(void)
-{
-
-	if (__predict_true(mcl_idx != 0)) _mcl_flush_queue();
-	/* XXX: until we can remove the pervasive
-	 * __HYPERVISOR_update_va_mapping calls, we have 2 queues. In order
-	 * to ensure that they never get out of sync, only 1 flush interface
-	 * is provided.
-	 */
-	xpq_flush_queue();
-}
-
-
-static __inline void
-xpq_increment_idx(void)
+xen_increment_idx(void)
 {
 	SET_VCPU();
 
 	XPQ_IDX++;
 	if (__predict_false(XPQ_IDX == XPQUEUE_SIZE))
-		xpq_flush_queue();
-}
-
-static __inline void
-mcl_increment_idx(void)
-{
-	SET_VCPU();
-	MCL_IDX++;
-
-	if (__predict_false(MCL_IDX == MCLQUEUE_SIZE))
-		mcl_flush_queue();
+		xen_flush_queue();
 }
 
 void
-xpq_queue_invlpg(vm_offset_t va)
+xen_invlpg(vm_offset_t va)
 {
-	SET_VCPU();
-
-	XPQ_QUEUE[XPQ_IDX].ptr = (va & ~PAGE_MASK) | MMU_EXTENDED_COMMAND;
-	XPQ_QUEUE[XPQ_IDX].val = MMUEXT_INVLPG;
-	xpq_increment_idx();
+	struct mmuext_op op;
+	op.cmd = MMUEXT_INVLPG_LOCAL;
+	op.linear_addr = va & ~PAGE_MASK;
+	xen_flush_queue();
+	PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void
 load_cr3(uint32_t val)
 {
-	xpq_queue_pt_switch(val);
-	xpq_flush_queue();
+	struct mmuext_op op;
+	op.cmd = MMUEXT_NEW_BASEPTR;
+	op.mfn = xpmap_ptom(val) >> PAGE_SHIFT;
+	xen_flush_queue();
+	PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void
-xen_set_ldt(vm_offset_t base, uint32_t entries)
-{
-	xpq_queue_set_ldt(base, entries);
-	_xpq_flush_queue();
-}
 
 void
 xen_machphys_update(unsigned long mfn, unsigned long pfn)
@@ -515,97 +469,77 @@ xen_machphys_update(unsigned long mfn, u
 
 	XPQ_QUEUE[XPQ_IDX].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
 	XPQ_QUEUE[XPQ_IDX].val = pfn;
-	xpq_increment_idx();
-	_xpq_flush_queue();
+	xen_increment_idx();
+	_xen_flush_queue();
 }
 
 void
-xpq_queue_pt_update(pt_entry_t *ptr, pt_entry_t val)
+xen_queue_pt_update(pt_entry_t *ptr, pt_entry_t val)
 {
 	SET_VCPU();
 
 	XPQ_QUEUE[XPQ_IDX].ptr = (memory_t)ptr;
 	XPQ_QUEUE[XPQ_IDX].val = (memory_t)val;
-	xpq_increment_idx();
+	xen_increment_idx();
+}
+
+void
+xen_pgd_pin(unsigned long ma)
+{
+	struct mmuext_op op;
+	op.cmd = MMUEXT_PIN_L2_TABLE;
+	op.mfn = ma >> PAGE_SHIFT;
+	xen_flush_queue();
+	PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+void
+xen_pgd_unpin(unsigned long ma)
+{
+	struct mmuext_op op;
+	op.cmd = MMUEXT_UNPIN_TABLE;
+	op.mfn = ma >> PAGE_SHIFT;
+	xen_flush_queue();
+	PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void
-mcl_queue_pt_update(vm_offset_t va, vm_paddr_t ma)
+xen_pt_pin(unsigned long ma)
 {
-#if 0
-	printf("setting va %x to ma %x\n", va, ma);
-#endif
-	SET_VCPU();
-
-	MCL_QUEUE[MCL_IDX].op = __HYPERVISOR_update_va_mapping;
-	MCL_QUEUE[MCL_IDX].args[0] = (unsigned long)va;
-	MCL_QUEUE[MCL_IDX].args[1] = (unsigned long)ma;
-	MCL_QUEUE[MCL_IDX].args[2] = UVMF_INVLPG|UVMF_LOCAL;
-	mcl_increment_idx();
-}
-
-
-
-void
-xpq_queue_pt_switch(uint32_t val)
-{
-	vm_paddr_t ma = xpmap_ptom(val) & PG_FRAME;
-	SET_VCPU();
-
-	XPQ_QUEUE[XPQ_IDX].ptr = ma | MMU_EXTENDED_COMMAND;
-	XPQ_QUEUE[XPQ_IDX].val = MMUEXT_NEW_BASEPTR;
-	xpq_increment_idx();
+	struct mmuext_op op;
+	op.cmd = MMUEXT_PIN_L1_TABLE;
+	op.mfn = ma >> PAGE_SHIFT;
+	xen_flush_queue();
+	PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-
-void
-xpq_queue_pin_table(uint32_t pa, int type)
+void
+xen_pt_unpin(unsigned long ma)
 {
-	SET_VCPU();
-
-
-	XPQ_QUEUE[XPQ_IDX].ptr = pa | MMU_EXTENDED_COMMAND;
-	switch (type) {
-	case XPQ_PIN_L1_TABLE:
-		XPQ_QUEUE[XPQ_IDX].val = MMUEXT_PIN_L1_TABLE;
-		break;
-	case XPQ_PIN_L2_TABLE:
-		XPQ_QUEUE[XPQ_IDX].val = MMUEXT_PIN_L2_TABLE;
-		break;
-	}
-	xpq_increment_idx();
+	struct mmuext_op op;
+	op.cmd = MMUEXT_UNPIN_TABLE;
+	op.mfn = ma >> PAGE_SHIFT;
+	xen_flush_queue();
+	PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void
-xpq_queue_unpin_table(uint32_t pa)
+void
+xen_set_ldt(unsigned long ptr, unsigned long len)
 {
-	SET_VCPU();
-
-	XPQ_QUEUE[XPQ_IDX].ptr = pa | MMU_EXTENDED_COMMAND;
-	XPQ_QUEUE[XPQ_IDX].val = MMUEXT_UNPIN_TABLE;
-	xpq_increment_idx();
+	struct mmuext_op op;
+	op.cmd = MMUEXT_SET_LDT;
+	op.linear_addr = ptr;
+	op.nr_ents = len;
+	xen_flush_queue();
+	PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
-void
-xpq_queue_set_ldt(vm_offset_t va, uint32_t entries)
+void xen_tlb_flush(void)
 {
-	SET_VCPU();
-
-	KASSERT(va == (va & PG_FRAME), ("ldt not page aligned"));
-	XPQ_QUEUE[XPQ_IDX].ptr = MMU_EXTENDED_COMMAND | va;
-	XPQ_QUEUE[XPQ_IDX].val = MMUEXT_SET_LDT |
-		(entries << MMUEXT_CMD_SHIFT);
-	xpq_increment_idx();
-}
-
-void
-xpq_queue_tlb_flush()
-{
-	SET_VCPU();
-
-	XPQ_QUEUE[XPQ_IDX].ptr = MMU_EXTENDED_COMMAND;
-	XPQ_QUEUE[XPQ_IDX].val = MMUEXT_TLB_FLUSH;
-	xpq_increment_idx();
+	struct mmuext_op op;
+	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
+	xen_flush_queue();
+	PANIC_IF(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
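The multicall-based mcl_* queue is gone: extended MMU operations (pin/unpin, TLB flush, invlpg, base-pointer switch, LDT load) are now issued synchronously through the new HYPERVISOR_mmuext_op hypercall, always after draining the mmu_update queue so the hypervisor validates a consistent view of the page tables. A caller-side sketch of the pin/unpin pairing; the function and its arguments are hypothetical, mirroring pmap_pinit()/pmap_release() in the pmap.c diff above:

```c
/*
 * Sketch: pinning a freshly built page directory and unpinning it
 * before the page is freed. 'pdir_page' is a hypothetical vm_page_t
 * holding the page-directory contents.
 */
static void
pgd_pin_example(pmap_t pmap, vm_page_t pdir_page)
{
	unsigned long ma = xpmap_ptom(VM_PAGE_TO_PHYS(pdir_page));

	/* Xen requires page tables it validates to be mapped read-only. */
	PT_SET_MA(pmap->pm_pdir, ma | PG_V | PG_A);
	xen_pgd_pin(ma);	/* MMUEXT_PIN_L2_TABLE; flushes the queue first */

	/* ... pmap is in use ... */

	xen_pgd_unpin(ma);	/* MMUEXT_UNPIN_TABLE before freeing the page */
}
```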
--- a/freebsd-5.3-xen-sparse/i386-xen/include/hypervisor.h	Mon Apr 18 23:20:07 2005 +0000
+++ b/freebsd-5.3-xen-sparse/i386-xen/include/hypervisor.h	Mon Apr 18 23:25:40 2005 +0000
@@ -49,19 +49,40 @@ static inline int HYPERVISOR_set_trap_ta
     return ret;
 }
 
-static inline int HYPERVISOR_mmu_update(mmu_update_t *req,
-                                        int count,
-                                        int *success_count)
+static inline int
+HYPERVISOR_mmu_update(mmu_update_t *req, int count,
+                      int *success_count, domid_t domid)
 {
     int ret;
+    unsigned long ign1, ign2, ign3, ign4;
     __asm__ __volatile__ (
         TRAP_INSTR
-        : "=a" (ret) : "0" (__HYPERVISOR_mmu_update),
-        "b" (req), "c" (count), "d" (success_count) : "memory" );
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+        : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
+          "3" (success_count), "4" (domid)
+        : "memory" );
 
     return ret;
 }
 
+static inline int
+HYPERVISOR_mmuext_op(
+    struct mmuext_op *op, int count, int *success_count, domid_t domid)
+{
+    int ret;
+    unsigned long ign1, ign2, ign3, ign4;
+    __asm__ __volatile__ (
+        TRAP_INSTR
+        : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+        : "0" (__HYPERVISOR_mmuext_op), "1" (op), "2" (count),
+          "3" (success_count), "4" (domid)
+        : "memory" );
+
+    return ret;
+}
+
+
+
 static inline int HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
 {
     int ret;
@@ -262,13 +283,13 @@ static inline int HYPERVISOR_multicall(v
 }
 
 static inline int HYPERVISOR_update_va_mapping(
-    unsigned long page_nr, pte_t new_val, unsigned long flags)
+    unsigned long page_nr, unsigned long new_val, unsigned long flags)
 {
     int ret;
     __asm__ __volatile__ (
         TRAP_INSTR
         : "=a" (ret) : "0" (__HYPERVISOR_update_va_mapping),
-        "b" (page_nr), "c" ((new_val).pte_low), "d" (flags):
+        "b" (page_nr), "c" (new_val), "d" (flags):
         "memory" );
     /* XXX */
 #if 0
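Both rewritten wrappers pass a fifth argument (the target domain) in %esi, and the dummy ign1..ign4 outputs tell GCC that the hypervisor clobbers the argument registers. A sketch of a call site for the widened HYPERVISOR_mmu_update(); the helper function and the two-entry batch are illustrative, while mmu_update_t, vtomach(), memory_t and DOMID_SELF are the tree's/Xen's own names:

```c
/*
 * Sketch: batching two PTE writes into one mmu_update hypercall.
 * The function itself is hypothetical.
 */
static int
mmu_update_pair_example(pt_entry_t *ptep0, pt_entry_t val0,
    pt_entry_t *ptep1, pt_entry_t val1)
{
	mmu_update_t u[2];

	u[0].ptr = (memory_t)vtomach(ptep0);	/* machine address of the PTE */
	u[0].val = val0;
	u[1].ptr = (memory_t)vtomach(ptep1);
	u[1].val = val1;
	/* NULL success count; DOMID_SELF targets our own page tables */
	return (HYPERVISOR_mmu_update(u, 2, NULL, DOMID_SELF));
}
```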
--- a/freebsd-5.3-xen-sparse/i386-xen/include/pmap.h	Mon Apr 18 23:20:07 2005 +0000
+++ b/freebsd-5.3-xen-sparse/i386-xen/include/pmap.h	Mon Apr 18 23:25:40 2005 +0000
@@ -253,8 +253,8 @@ pte_load_store(pt_entry_t *ptep, pt_entr
 	return (r);
 }
 
-#define pte_store(ptep, pte) PT_SET_VA_MA(ptep, pte, TRUE);
-#define pte_clear(pte) PT_CLEAR_VA(pte, TRUE);
+#define pte_store(ptep, pte) PT_SET_VA(ptep, pte, TRUE)
+#define pte_clear(pte) PT_CLEAR_VA(pte, TRUE)
 
 
 #endif /* _KERNEL */
--- a/freebsd-5.3-xen-sparse/i386-xen/include/xenfunc.h	Mon Apr 18 23:20:07 2005 +0000
+++ b/freebsd-5.3-xen-sparse/i386-xen/include/xenfunc.h	Mon Apr 18 23:25:40 2005 +0000
@@ -1,8 +1,7 @@
-/* $NetBSD:$ */
-
 /*
  *
  * Copyright (c) 2004 Christian Limpach.
+ * Copyright (c) 2004,2005 Kip Macy
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -59,7 +58,6 @@ extern pteinfo_t *pteinfo_list;
 char *xen_setbootenv(char *cmd_line);
 int xen_boothowto(char *envp);
 void load_cr3(uint32_t val);
-void xen_set_ldt(vm_offset_t, uint32_t);
 void xen_machphys_update(unsigned long, unsigned long);
 void xen_update_descriptor(union descriptor *, union descriptor *);
 void lldt(u_short sel);
@@ -71,14 +69,14 @@ void lldt(u_short sel);
 static __inline void
 invlpg(u_int addr)
 {
-	xpq_queue_invlpg(addr);
+	xen_invlpg(addr);
 }
 
 static __inline void
 invltlb(void)
 {
-	xpq_queue_tlb_flush();
-	mcl_flush_queue();
+	xen_tlb_flush();
+
 }
 
--- a/freebsd-5.3-xen-sparse/i386-xen/include/xenpmap.h	Mon Apr 18 23:20:07 2005 +0000
+++ b/freebsd-5.3-xen-sparse/i386-xen/include/xenpmap.h	Mon Apr 18 23:25:40 2005 +0000
@@ -1,8 +1,7 @@
-/* $NetBSD:$ */
-
 /*
  *
  * Copyright (c) 2004 Christian Limpach.
+ * Copyright (c) 2004,2005 Kip Macy
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -35,17 +34,16 @@
 #ifndef _XEN_XENPMAP_H_
 #define _XEN_XENPMAP_H_
 #include <machine/xenvar.h>
-void xpq_physbcopy(const unsigned long *, unsigned long, size_t);
-void xpq_queue_invlpg(vm_offset_t);
-void xpq_queue_pt_update(pt_entry_t *, pt_entry_t);
-void xpq_queue_pt_switch(uint32_t);
-void xpq_queue_set_ldt(vm_offset_t, uint32_t);
-void xpq_queue_tlb_flush(void);
-void xpq_queue_pin_table(uint32_t, int);
-void xpq_queue_unpin_table(uint32_t);
-void xpq_record(unsigned long, unsigned long);
-void mcl_queue_pt_update(vm_offset_t, vm_offset_t);
-void mcl_flush_queue(void);
+void xen_invlpg(vm_offset_t);
+void xen_queue_pt_update(pt_entry_t *, pt_entry_t);
+void xen_pt_switch(uint32_t);
+void xen_set_ldt(unsigned long, unsigned long);
+void xen_tlb_flush(void);
+void xen_pgd_pin(unsigned long);
+void xen_pgd_unpin(unsigned long);
+void xen_pt_pin(unsigned long);
+void xen_pt_unpin(unsigned long);
+void xen_flush_queue(void);
 void pmap_ref(pt_entry_t *pte, unsigned long ma);
 
 
@@ -62,57 +60,76 @@ void pmap_ref(pt_entry_t *pte, unsigned
 #endif
 
 #define ALWAYS_SYNC 0
-
 #define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */
 
-#define XPQ_PIN_L1_TABLE 1
-#define XPQ_PIN_L2_TABLE 2
-
 #define PT_GET(_ptp) \
 	(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : *(_ptp))
+
+#ifdef WRITABLE_PAGETABLES
 #define PT_SET_VA(_ptp,_npte,sync) do { \
 	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
-	xpq_queue_pt_update((pt_entry_t *)vtomach((_ptp)), \
-	    xpmap_ptom((_npte))); \
-	if (sync || ALWAYS_SYNC) \
-		mcl_flush_queue(); \
+	*(_ptp) = xpmap_ptom((_npte)); \
 } while (/*CONSTCOND*/0)
 #define PT_SET_VA_MA(_ptp,_npte,sync) do { \
 	PMAP_REF((_ptp), (_npte)); \
-	xpq_queue_pt_update((pt_entry_t *)vtomach((_ptp)), (_npte)); \
-	if (sync || ALWAYS_SYNC) \
-		mcl_flush_queue(); \
+	*(_ptp) = (_npte); \
 } while (/*CONSTCOND*/0)
 #define PT_CLEAR_VA(_ptp, sync) do { \
 	PMAP_REF((pt_entry_t *)(_ptp), 0); \
-	xpq_queue_pt_update((pt_entry_t *)vtomach(_ptp), 0); \
-	if (sync || ALWAYS_SYNC) \
-		mcl_flush_queue(); \
+	*(_ptp) = 0; \
 } while (/*CONSTCOND*/0)
-#define PT_CLEAR(_ptp, sync) do { \
-	PMAP_REF((pt_entry_t *)(vtopte(_ptp)), 0); \
-	mcl_queue_pt_update((unsigned long)_ptp, 0); \
-	if (sync || ALWAYS_SYNC) \
-		mcl_flush_queue(); \
+
+#define PD_SET_VA(_ptp,_npte,sync) do { \
+	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
+	xen_queue_pt_update((pt_entry_t *)vtomach((_ptp)), \
+	    xpmap_ptom((_npte))); \
+	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
 } while (/*CONSTCOND*/0)
-#define PT_SET_MA(_va,_ma,sync) do { \
-	PMAP_REF(vtopte((unsigned long)_va), (_ma)); \
-	mcl_queue_pt_update((vm_offset_t )(_va), (_ma)); \
-	if (sync || ALWAYS_SYNC) \
-		mcl_flush_queue(); \
+#define PD_SET_VA_MA(_ptp,_npte,sync) do { \
+	PMAP_REF((_ptp), (_npte)); \
+	xen_queue_pt_update((pt_entry_t *)vtomach((_ptp)), (_npte)); \
+	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
 } while (/*CONSTCOND*/0)
-#define PT_SET(_va,_pa,sync) do { \
-	PMAP_REF((pt_entry_t *)(vtopte(_va)), xpmap_ptom(_pa)); \
-	mcl_queue_pt_update((vm_offset_t)(_va), \
-	    xpmap_ptom((_pa))); \
-	if (sync || ALWAYS_SYNC) \
-		mcl_flush_queue(); \
+#define PD_CLEAR_VA(_ptp, sync) do { \
+	PMAP_REF((pt_entry_t *)(_ptp), 0); \
+	xen_queue_pt_update((pt_entry_t *)vtomach(_ptp), 0); \
+	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
 } while (/*CONSTCOND*/0)
 
 
+#else /* !WRITABLE_PAGETABLES */
+
+#define PT_SET_VA(_ptp,_npte,sync) do { \
+	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
+	xen_queue_pt_update((pt_entry_t *)vtomach(_ptp), \
+	    xpmap_ptom(_npte)); \
+	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
+} while (/*CONSTCOND*/0)
+#define PT_SET_VA_MA(_ptp,_npte,sync) do { \
+	PMAP_REF((_ptp), (_npte)); \
+	xen_queue_pt_update((pt_entry_t *)vtomach(_ptp), _npte); \
+	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
+} while (/*CONSTCOND*/0)
+#define PT_CLEAR_VA(_ptp, sync) do { \
+	PMAP_REF((pt_entry_t *)(_ptp), 0); \
+	xen_queue_pt_update((pt_entry_t *)vtomach(_ptp), 0); \
+	if (sync || ALWAYS_SYNC) \
+		xen_flush_queue(); \
+} while (/*CONSTCOND*/0)
+
+#define PD_SET_VA PT_SET_VA
+#define PD_SET_VA_MA PT_SET_VA_MA
+#define PD_CLEAR_VA PT_CLEAR_VA
+
+#endif
+
+#define PT_SET_MA(_va, _ma) \
+	HYPERVISOR_update_va_mapping(((unsigned long)_va), \
+				     ((unsigned long)_ma), \
+				     UVMF_INVLPG| UVMF_LOCAL)\
 
 #define PT_UPDATES_FLUSH() do { \
-	mcl_flush_queue(); \
+	xen_flush_queue(); \
 } while (/*CONSTCOND*/0)
 
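The header now splits page-table (PT_*) from page-directory (PD_*) operations. With WRITABLE_PAGETABLES defined, PT_SET_VA/PT_CLEAR_VA write the PTE slot directly and rely on Xen's trap-and-validate assist, while PD_* entries must still go through the explicit xen_queue_pt_update() queue; without it, PD_* simply aliases PT_*. A usage sketch with hypothetical pointers and flag values:

```c
/*
 * Sketch: how callers are expected to use the split macros under
 * WRITABLE_PAGETABLES. 'pte', 'pde', 'pa' and 'ptpa' are hypothetical.
 */
static void
pt_pd_split_example(pt_entry_t *pte, pd_entry_t *pde,
    vm_paddr_t pa, vm_paddr_t ptpa)
{
	/* Leaf PTE: plain store; Xen traps the write and validates it. */
	PT_SET_VA(pte, pa | PG_RW | PG_V, TRUE);

	/* PDE: always queued as an mmu_update; TRUE flushes immediately. */
	PD_SET_VA(pde, ptpa | PG_RW | PG_V | PG_A | PG_M, TRUE);
}
```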
--- a/freebsd-5.3-xen-sparse/i386-xen/xen/netfront/xn_netfront.c	Mon Apr 18 23:20:07 2005 +0000
+++ b/freebsd-5.3-xen-sparse/i386-xen/xen/netfront/xn_netfront.c	Mon Apr 18 23:25:40 2005 +0000
@@ -444,7 +444,7 @@ xn_alloc_rx_buffers(struct xn_softc *sc)
 
 	/* Give away a batch of pages. */
 	xn_rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
-	xn_rx_mcl[i].args[0] = (unsigned long) MEMOP_decrease_reservation;
+	xn_rx_mcl[i].args[0] = MEMOP_decrease_reservation;
 	xn_rx_mcl[i].args[1] = (unsigned long)xn_rx_pfns;
 	xn_rx_mcl[i].args[2] = (unsigned long)i;
 	xn_rx_mcl[i].args[3] = 0;
@@ -454,7 +454,7 @@ xn_alloc_rx_buffers(struct xn_softc *sc)
 	(void)HYPERVISOR_multicall(xn_rx_mcl, i+1);
 
 	/* Check return status of HYPERVISOR_dom_mem_op(). */
-	if ( xn_rx_mcl[i].args[5] != i )
+	if (unlikely(xn_rx_mcl[i].args[5] != i))
 		panic("Unable to reduce memory reservation\n");
 
 	/* Above is a suitable barrier to ensure backend will see requests. */
@@ -544,6 +544,7 @@ xn_rxeof(struct xn_softc *sc)
 		mcl->args[0] = (unsigned long)xn_rx_mmu;
 		mcl->args[1] = mmu - xn_rx_mmu;
 		mcl->args[2] = 0;
+		mcl->args[3] = DOMID_SELF;
 		mcl++;
 		(void)HYPERVISOR_multicall(xn_rx_mcl, mcl - xn_rx_mcl);
 	}
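The added mcl->args[3] = DOMID_SELF mirrors the widened four-argument HYPERVISOR_mmu_update(): when an update batch is issued through a multicall, the target domain must now be passed explicitly as the fourth hypercall argument. A sketch of how such a multicall entry is filled in; the helper function is hypothetical and the argument layout follows the diff above:

```c
/*
 * Sketch: encoding one mmu_update batch as a multicall entry, as
 * xn_rxeof() does after this change. 'updates' and 'count' stand in
 * for the driver's xn_rx_mmu array and its fill level.
 */
static void
queue_mmu_update_mcl(multicall_entry_t *mcl,
    mmu_update_t *updates, unsigned long count)
{
	mcl->op = __HYPERVISOR_mmu_update;
	mcl->args[0] = (unsigned long)updates;	/* request array */
	mcl->args[1] = count;			/* number of updates */
	mcl->args[2] = 0;			/* no success count */
	mcl->args[3] = DOMID_SELF;		/* new in this changeset */
}
```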