xen-vtx-unstable
changeset 5576:48dd03e4b388
bitkeeper revision 1.1751 (42bbe480z9Fp_L5Tc500W8c8CL3g9A)
Rationalise x86 CRn guest state into a ctrlreg array in the per-vcpu
context structure. Most noticeably, this means the pt_base field has
gone away -- replaced by ctrlreg[3] (CR3). VCPUF_guest_stts is also
gone -- it was never arch-independent anyway.

Signed-off-by: Keir Fraser <keir@xensource.com>
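For code that fills in a vcpu_guest_context_t by hand -- the libxc domain
builders and the SMP bring-up paths in the sparse trees touched below -- the
conversion is mechanical: the old pt_base, cr0 and cr4 fields become slots 3,
0 and 4 of a single ctrlreg[8] array (the CR number doubles as the array
index), and guest CR0.TS now lives in ctrlreg[0] rather than in the removed
VCPUF_guest_stts flag. A minimal sketch of the new layout follows, assuming
the post-change public header; the helper name and its argument are invented
for illustration and are not part of this changeset:

    #include <string.h>
    #include <xen/xen.h>   /* public interface header providing
                              vcpu_guest_context_t (install path may vary) */

    /*
     * Illustrative helper only -- not part of this changeset.  Seeds the
     * control-register state of a context structure.  'pt_maddr' stands for
     * the machine address of the top-level page table, i.e. the value that
     * callers previously stored in ctxt->pt_base.
     */
    static void seed_ctrlregs(vcpu_guest_context_t *ctxt,
                              unsigned long pt_maddr)
    {
        memset(ctxt->ctrlreg, 0, sizeof(ctxt->ctrlreg));
        ctxt->ctrlreg[3] = pt_maddr;   /* was: ctxt->pt_base */
        /* ctxt->ctrlreg[0] and ctxt->ctrlreg[4] take over from the old
         * cr0/cr4 fields; setting CR0.TS in ctrlreg[0] replaces the
         * removed VCPUF_guest_stts vcpu flag. */
    }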
author    kaf24@firebug.cl.cam.ac.uk
date      Fri Jun 24 10:46:24 2005 +0000 (2005-06-24)
parents   200f5dfe9647
children  0b5f09002630
files     .rootkeys
          freebsd-5.3-xen-sparse/i386-xen/i386-xen/mp_machdep.c
          linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c
          tools/debugger/libxendebug/xendebug.c
          tools/libxc/Makefile
          tools/libxc/xc_linux_build.c
          tools/libxc/xc_linux_restore.c
          tools/libxc/xc_linux_save.c
          tools/libxc/xc_plan9_build.c
          tools/libxc/xc_ptrace.c
          tools/libxc/xc_ptrace_core.c
          tools/libxc/xc_vmx_build.c
          xen/arch/x86/dom0_ops.c
          xen/arch/x86/domain.c
          xen/arch/x86/i387.c
          xen/arch/x86/traps.c
          xen/arch/x86/vmx_vmcs.c
          xen/include/asm-x86/domain.h
          xen/include/public/arch-x86_32.h
          xen/include/public/arch-x86_64.h
          xen/include/public/dom0_ops.h
          xen/include/xen/sched.h

--- a/.rootkeys Fri Jun 24 09:39:40 2005 +0000
+++ b/.rootkeys Fri Jun 24 10:46:24 2005 +0000
@@ -749,7 +749,6 @@ 42a40bc3vE3p9fPSJZQZK0MdQF9B8g tools/lib
 42a40bc4diWfFsPGf0RW7qXMufU4YQ tools/libxc/xc_load_elf.c
 3fbba6db7WnnJr0KFrIFrqNlSKvFYg tools/libxc/xc_misc.c
 4051bce6CHAsYh8P5t2OHDtRWOP9og tools/libxc/xc_physdev.c
-41cc934aO1m6NxEh_8eDr9bJIMoLFA tools/libxc/xc_plan9_build.c
 3fbba6dctWRWlFJkYb6hdix2X4WMuw tools/libxc/xc_private.c
 3fbba6dcbVrG2hPzEzwdeV_UC8kydQ tools/libxc/xc_private.h
 42337174PxyzzPk62raDiYCIsfStDg tools/libxc/xc_ptrace.c

--- a/freebsd-5.3-xen-sparse/i386-xen/i386-xen/mp_machdep.c Fri Jun 24 09:39:40 2005 +0000
+++ b/freebsd-5.3-xen-sparse/i386-xen/i386-xen/mp_machdep.c Fri Jun 24 10:46:24 2005 +0000
@@ -974,7 +974,7 @@ start_ap(int apic_id)
     ctxt.failsafe_callback_cs = __KERNEL_CS;
     ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
 
-    ctxt.pt_base = (vm_paddr_t)IdlePTD;
+    ctxt.ctrlreg[3] = (vm_paddr_t)IdlePTD;
 
     boot_error = HYPERVISOR_boot_vcpu(bootAP, &ctxt);
 

--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c Fri Jun 24 09:39:40 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c Fri Jun 24 10:46:24 2005 +0000
@@ -908,7 +908,7 @@ static int __init do_boot_cpu(int apicid
     ctxt.failsafe_callback_cs = __KERNEL_CS;
     ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
 
-    ctxt.pt_base = (unsigned long)virt_to_machine(swapper_pg_dir);
+    ctxt.ctrlreg[3] = (unsigned long)virt_to_machine(swapper_pg_dir);
 
     boot_error = HYPERVISOR_boot_vcpu(cpu, &ctxt);
 

--- a/tools/debugger/libxendebug/xendebug.c Fri Jun 24 09:39:40 2005 +0000
+++ b/tools/debugger/libxendebug/xendebug.c Fri Jun 24 10:46:24 2005 +0000
@@ -342,9 +342,9 @@ xendebug_memory_page (domain_context_p c
         }
     }
 
-    if ( vcpu_ctxt->pt_base != ctxt->cr3_phys[vcpu])
+    if ( vcpu_ctxt->ctrlreg[3] != ctxt->cr3_phys[vcpu])
     {
-        ctxt->cr3_phys[vcpu] = vcpu_ctxt->pt_base;
+        ctxt->cr3_phys[vcpu] = vcpu_ctxt->ctrlreg[3];
         if ( ctxt->cr3_virt[vcpu] )
             munmap(ctxt->cr3_virt[vcpu], PAGE_SIZE);
         ctxt->cr3_virt[vcpu] = xc_map_foreign_range(xc_handle, ctxt->domid,
@@ -383,7 +383,7 @@ xendebug_memory_page (domain_context_p c
     if ( ctxt->page_virt[vcpu] == NULL )
     {
         printf("cr3 %lx pde %lx page %lx pti %lx\n",
-               vcpu_ctxt->pt_base, pde, page, vtopti(va));
+               vcpu_ctxt->ctrlreg[3], pde, page, vtopti(va));
         ctxt->page_phys[vcpu] = 0;
         return 0;
     }

--- a/tools/libxc/Makefile Fri Jun 24 09:39:40 2005 +0000
+++ b/tools/libxc/Makefile Fri Jun 24 10:46:24 2005 +0000
@@ -22,7 +22,6 @@ SRCS += xc_gnttab.c
 SRCS += xc_load_bin.c
 SRCS += xc_load_elf.c
 SRCS += xc_linux_build.c
-SRCS += xc_plan9_build.c
 SRCS += xc_linux_restore.c
 SRCS += xc_linux_save.c
 SRCS += xc_misc.c

--- a/tools/libxc/xc_linux_build.c Fri Jun 24 09:39:40 2005 +0000
+++ b/tools/libxc/xc_linux_build.c Fri Jun 24 10:46:24 2005 +0000
@@ -227,7 +227,7 @@ static int setup_guest(int xc_handle,
     /* First allocate page for page dir. */
     ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
     l2tab = page_array[ppt_alloc++] << PAGE_SHIFT;
-    ctxt->pt_base = l2tab;
+    ctxt->ctrlreg[3] = l2tab;
 
     /* Initialise the page tables. */
     if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
@@ -282,7 +282,7 @@ static int setup_guest(int xc_handle,
     /* First allocate page for page dir. */
     ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
     l4tab = page_array[ppt_alloc++] << PAGE_SHIFT;
-    ctxt->pt_base = l4tab;
+    ctxt->ctrlreg[3] = l4tab;
 
     /* Intiliaize page table */
     if ( (vl4tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
@@ -502,7 +502,7 @@ int xc_linux_build(int xc_handle,
     }
 
     if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
-         (ctxt->pt_base != 0) )
+         (ctxt->ctrlreg[3] != 0) )
     {
         ERROR("Domain is already constructed");
         goto error_out;

--- a/tools/libxc/xc_linux_restore.c Fri Jun 24 09:39:40 2005 +0000
+++ b/tools/libxc/xc_linux_restore.c Fri Jun 24 10:46:24 2005 +0000
@@ -489,7 +489,7 @@ int xc_linux_restore(int xc_handle, int
     }
 
     /* Uncanonicalise the page table base pointer. */
-    pfn = ctxt.pt_base >> PAGE_SHIFT;
+    pfn = ctxt.ctrlreg[3] >> PAGE_SHIFT;
     if ( (pfn >= nr_pfns) || ((pfn_type[pfn]&LTABTYPE_MASK) != L2TAB) )
     {
         printf("PT base is bad. pfn=%lu nr=%lu type=%08lx %08lx\n",
@@ -497,7 +497,7 @@ int xc_linux_restore(int xc_handle, int
         ERR("PT base is bad.");
         goto out;
     }
-    ctxt.pt_base = pfn_to_mfn_table[pfn] << PAGE_SHIFT;
+    ctxt.ctrlreg[3] = pfn_to_mfn_table[pfn] << PAGE_SHIFT;
 
     /* clear any pending events and the selector */
     memset(&(shared_info->evtchn_pending[0]), 0,

--- a/tools/libxc/xc_linux_save.c Fri Jun 24 09:39:40 2005 +0000
+++ b/tools/libxc/xc_linux_save.c Fri Jun 24 10:46:24 2005 +0000
@@ -459,7 +459,7 @@ int xc_linux_save(int xc_handle, int io_
     shared_info_frame = info.shared_info_frame;
 
     /* A cheesy test to see whether the domain contains valid state. */
-    if ( ctxt.pt_base == 0 ){
+    if ( ctxt.ctrlreg[3] == 0 ){
         ERR("Domain is not in a valid Linux guest OS state");
         goto out;
     }
@@ -1015,11 +1015,11 @@ int xc_linux_save(int xc_handle, int io_
     }
 
     /* Canonicalise the page table base pointer. */
-    if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctxt.pt_base >> PAGE_SHIFT) ) {
+    if ( !MFN_IS_IN_PSEUDOPHYS_MAP(ctxt.ctrlreg[3] >> PAGE_SHIFT) ) {
         ERR("PT base is not in range of pseudophys map");
         goto out;
     }
-    ctxt.pt_base = live_mfn_to_pfn_table[ctxt.pt_base >> PAGE_SHIFT] <<
+    ctxt.ctrlreg[3] = live_mfn_to_pfn_table[ctxt.ctrlreg[3] >> PAGE_SHIFT] <<
         PAGE_SHIFT;
 
     if (write(io_fd, &ctxt, sizeof(ctxt)) != sizeof(ctxt) ||

9.1 --- a/tools/libxc/xc_plan9_build.c Fri Jun 24 09:39:40 2005 +0000 9.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 9.3 @@ -1,694 +0,0 @@ 9.4 -/****************************************************************************** 9.5 - * xc_plan9_build.c 9.6 - * derived from xc_linux_build.c 9.7 - */ 9.8 - 9.9 -#include "xc_private.h" 9.10 - 9.11 -#include <zlib.h> 9.12 - 9.13 -#define DEBUG 1 9.14 -#ifdef DEBUG 9.15 -#define DPRINTF(x) printf x; fflush(stdout); 9.16 -#else 9.17 -#define DPRINTF(x) 9.18 -#endif 9.19 - 9.20 -#include "plan9a.out.h" 9.21 - 9.22 -/* really TOS which means stack starts at 0x2000, and uses page 1*/ 9.23 -#define STACKPAGE 2 9.24 -struct Exec header, origheader; 9.25 - 9.26 -typedef struct page { 9.27 - char data[PAGE_SIZE]; 9.28 -} PAGE; 9.29 - 9.30 - 9.31 -int 9.32 -memcpy_toguest(int xc_handle, u32 dom, void *v, int size, 9.33 - unsigned long *page_array, unsigned int to_page) 9.34 -{ 9.35 - int ret; 9.36 - unsigned char *cp = v; 9.37 - unsigned int whichpage; 9.38 - unsigned char *vaddr; 9.39 - 9.40 -// DPRINTF(("memcpy_to_guest: to_page 0x%x, count %d\n", to_page, size)); 9.41 - for (ret = 0, whichpage = to_page; size > 0; 9.42 - whichpage++, size -= PAGE_SIZE, cp += PAGE_SIZE) { 9.43 - 9.44 - // DPRINTF (("map_pfn_writeable(%p, 0x%lx)\n", pm_handle, 9.45 -// page_array[whichpage])); 9.46 - vaddr = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, 9.47 - PROT_READ | PROT_WRITE, 9.48 - page_array[whichpage]); 9.49 - // DPRINTF (("vaddr is %p\n", vaddr)); 9.50 - if (vaddr == NULL) { 9.51 - ret = -1; 9.52 - ERROR("Couldn't map guest memory"); 9.53 - goto out; 9.54 - } 9.55 - // DPRINTF (("copy %p to %p, count 0x%x\n", cp, vaddr, 4096)); 9.56 - memcpy(vaddr, cp, 4096); 9.57 - munmap(vaddr, PAGE_SIZE); 9.58 - // DPRINTF (("Did %ud'th pages\n", whichpage)); 9.59 - } 9.60 - out: 9.61 - return ret; 9.62 -} 9.63 - 9.64 -int 9.65 -blah(char *b) 9.66 -{ 9.67 - fprintf(stderr, "Error in xc_plan9_build!\n"); 9.68 - perror(b); 9.69 - return errno; 9.70 -} 9.71 - 9.72 -/* swap bytes. 
For plan 9 headers */ 9.73 -void 9.74 -swabby(unsigned long *s, char *name) 9.75 -{ 9.76 - unsigned long it; 9.77 - it = ((*s & 0xff000000) >> 24) | ((*s & 0xff0000) >> 8) | 9.78 - ((*s & 0xff00) << 8) | ((*s & 0xff) << 24); 9.79 - DPRINTF(("Item %s is 0x%lx\n", name, it)); 9.80 - *s = it; 9.81 -} 9.82 - 9.83 -void 9.84 -plan9header(Exec * header) 9.85 -{ 9.86 - /* header is big-endian */ 9.87 - swabby((unsigned long *)&header->magic, "magic"); 9.88 - swabby((unsigned long *)&header->text, "text"); 9.89 - swabby((unsigned long *)&header->data, "data"); 9.90 - swabby((unsigned long *)&header->bss, "bss"); 9.91 - swabby((unsigned long *)&header->syms, "syms"); 9.92 - swabby((unsigned long *)&header->entry, "entry"); 9.93 - swabby((unsigned long *)&header->spsz, "spsz"); 9.94 - swabby((unsigned long *)&header->pcsz, "pcsz"); 9.95 - 9.96 -} 9.97 - 9.98 -static int 9.99 - loadp9image(gzFile kernel_gfd, int xc_handle, u32 dom, 9.100 - unsigned long *page_array, 9.101 - unsigned long tot_pages, unsigned long *virt_load_addr, 9.102 - unsigned long *ksize, unsigned long *symtab_addr, 9.103 - unsigned long *symtab_len, 9.104 - unsigned long *first_data_page, unsigned long *pdb_page, 9.105 - const char *cmdline); 9.106 - 9.107 -#define P9ROUND (P9SIZE / 8) 9.108 - 9.109 -#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED) 9.110 -#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER) 9.111 - 9.112 -static int 9.113 -setup_guest(int xc_handle, 9.114 - u32 dom, 9.115 - gzFile kernel_gfd, 9.116 - unsigned long tot_pages, 9.117 - unsigned long *virt_startinfo_addr, 9.118 - unsigned long *virt_load_addr, 9.119 - vcpu_guest_context_t * ctxt, 9.120 - const char *cmdline, 9.121 - unsigned long shared_info_frame, 9.122 - unsigned int control_evtchn, 9.123 - int flags) 9.124 -{ 9.125 - l1_pgentry_t *vl1e = NULL; 9.126 - l2_pgentry_t *vl2tab = NULL, *vl2e = NULL; 9.127 - unsigned long *cpage_array = NULL; 9.128 - unsigned long *pte_array = NULL; 9.129 - unsigned long l2tab; 9.130 - unsigned long l1tab; 9.131 - unsigned long count; 9.132 - unsigned long symtab_addr = 0, symtab_len = 0; 9.133 - start_info_t *start_info; 9.134 - shared_info_t *shared_info; 9.135 - unsigned long ksize; 9.136 - mmu_t *mmu = NULL; 9.137 - int i; 9.138 - unsigned long first_page_after_kernel = 0, 9.139 - first_data_page = 0, 9.140 - page_array_page; 9.141 - unsigned long cpu0pdb, cpu0pte, cpu0ptelast; 9.142 - unsigned long /*last_pfn, */ tot_pte_pages; 9.143 - 9.144 - DPRINTF(("tot pages is %ld\n", tot_pages)); 9.145 - if ((cpage_array = malloc(tot_pages * sizeof (unsigned long))) == NULL) { 9.146 - PERROR("Could not allocate cpage array"); 9.147 - goto error_out; 9.148 - } 9.149 - 9.150 - if (xc_get_pfn_list(xc_handle, dom, cpage_array, tot_pages) != tot_pages) { 9.151 - PERROR("Could not get the page frame list"); 9.152 - goto error_out; 9.153 - } 9.154 - 9.155 - for (i = 0; i < 64; i++) 9.156 - DPRINTF(("First %d page is 0x%lx\n", i, cpage_array[i])); 9.157 - 9.158 - tot_pte_pages = tot_pages >> 10; 9.159 - DPRINTF(("Page range is 0 to 0x%lx, which requires 0x%lx pte pages\n", 9.160 - tot_pte_pages, tot_pte_pages)); 9.161 - 9.162 - if (loadp9image(kernel_gfd, xc_handle, dom, cpage_array, tot_pages, 9.163 - virt_load_addr, &ksize, &symtab_addr, &symtab_len, 9.164 - &first_data_page, &first_page_after_kernel, cmdline)) 9.165 - goto error_out; 9.166 - DPRINTF(("First data page is 0x%lx\n", first_data_page)); 9.167 - DPRINTF(("First page after kernel is 0x%lx\n", 9.168 - first_page_after_kernel)); 9.169 - 
9.170 - /* 9.171 - NEED TO INCREMENT first page after kernel by: 9.172 - + 1 (pdb) 9.173 - + tot_pte_pages (pte) 9.174 - + tot_pte_pages (page_array) 9.175 - */ 9.176 - /* SO, have to copy the first kernel pages pfns right into the 9.177 - * page_array, then do identity maps for the rest. 9.178 - */ 9.179 - DPRINTF(("mapped kernel pages\n")); 9.180 - 9.181 - /* now loop over all ptes and store into the page_array, so as 9.182 - * to get the identity map. 9.183 - */ 9.184 - if ((pte_array = 9.185 - malloc(tot_pte_pages * 1024 * sizeof (unsigned long))) == NULL) { 9.186 - PERROR("Could not allocate pte array"); 9.187 - goto error_out; 9.188 - } 9.189 - 9.190 - /* plan 9 on startup expects a "l2" (xen parlance) at 0x2000, 9.191 - * this "l2" should have one PTE pointer for a va of 0x80000000. 9.192 - * and an l1 (PTEs to you) at 0x3000. (physical). 9.193 - * the PTEs should map the first 4M of memory. 9.194 - */ 9.195 - /* get a physical address for the L2. This means take the PFN and 9.196 - * shift left. 9.197 - */ 9.198 - /* this terminology is plan 9 terminology. 9.199 - * pdb is essentially the Xen L2. 'Page Directory Block'? 9.200 - * I need to ask JMK. 9.201 - * cpupte is the pte array. 9.202 - * Plan 9 counts on these being set up for cpu0. 9.203 - * SO: cpu0pdb (Xen L2) 9.204 - * and cpupte (Xen L1) 9.205 - */ 9.206 - /* cpu0pdb is right after kernel */ 9.207 - cpu0pdb = first_page_after_kernel; 9.208 - /* cpu0pte comes right after cpu0pdb */ 9.209 - cpu0pte = cpu0pdb + 1; 9.210 - /* number of the past cpu0pte page */ 9.211 - cpu0ptelast = cpu0pte + tot_pte_pages - 1; 9.212 - /* first page of the page array (mfn) */ 9.213 - page_array_page = cpu0ptelast + 1; 9.214 - 9.215 - DPRINTF(("cpu0pdb 0x%lx, cpu0pte 0x%lx cpu0ptelast 0x%lx\n", cpu0pdb, 9.216 - cpu0pte, cpu0ptelast)); 9.217 - l2tab = cpage_array[cpu0pdb] << PAGE_SHIFT; 9.218 - DPRINTF(("l2tab 0x%lx\n", l2tab)); 9.219 - ctxt->pt_base = l2tab; 9.220 - 9.221 - /* get a physical address for the L1. This means take the PFN and 9.222 - * shift left. 9.223 - */ 9.224 - l1tab = cpage_array[cpu0pte] << PAGE_SHIFT; 9.225 - DPRINTF(("l1tab 0x%lx\n", l1tab)); 9.226 - if ((mmu = init_mmu_updates(xc_handle, dom)) == NULL) 9.227 - goto error_out; 9.228 - DPRINTF(("now map in l2tab\n")); 9.229 - 9.230 - /* Initialise the page tables. */ 9.231 - /* mmap in the l2tab */ 9.232 - if ((vl2tab = xc_map_foreign_range(xc_handle, dom, 9.233 - PAGE_SIZE, PROT_READ | PROT_WRITE, 9.234 - l2tab >> PAGE_SHIFT)) == NULL) 9.235 - goto error_out; 9.236 - DPRINTF(("vl2tab 0x%p\n", vl2tab)); 9.237 - /* now we have the cpu0pdb for the kernel, starting at 0x2000, 9.238 - * so we can plug in the physical pointer to the 0x3000 pte 9.239 - */ 9.240 - /* zero it */ 9.241 - memset(vl2tab, 0, PAGE_SIZE); 9.242 - /* get a pointer in the l2tab for the virt_load_addr */ 9.243 - DPRINTF(("&vl2tab[l2_table_offset(*virt_load_addr)] is 0x%p[0x%lx]\n", 9.244 - &vl2tab[l2_table_offset(*virt_load_addr)], 9.245 - l2_table_offset(*virt_load_addr))); 9.246 - 9.247 - vl2e = &vl2tab[l2_table_offset(*virt_load_addr)]; 9.248 - 9.249 - /* OK, for all the available PTE, set the PTE pointer up */ 9.250 - DPRINTF(("For i = %ld to %ld ...\n", cpu0pte, cpu0ptelast)); 9.251 - for (i = cpu0pte; i <= cpu0ptelast; i++) { 9.252 - DPRINTF(("Index %d Set %p to 0x%lx\n", i, vl2e, 9.253 - (cpage_array[i] << PAGE_SHIFT) | L2_PROT)); 9.254 - *vl2e++ = (cpage_array[i] << PAGE_SHIFT) | L2_PROT; 9.255 - } 9.256 - 9.257 - /* unmap it ... 
*/ 9.258 - munmap(vl2tab, PAGE_SIZE); 9.259 - 9.260 - /* for the pages from virt_load_pointer to the end of this 9.261 - * set of PTEs, map in the PFN for that VA 9.262 - */ 9.263 - for (vl1e = (l1_pgentry_t *) pte_array, count = 0; 9.264 - count < tot_pte_pages * 1024; count++, vl1e++) { 9.265 - 9.266 - *vl1e = cpage_array[count]; 9.267 - if (!cpage_array[count]) 9.268 - continue; 9.269 - /* set in the PFN for this entry */ 9.270 - *vl1e = (cpage_array[count] << PAGE_SHIFT) | L1_PROT; 9.271 -/* 9.272 - DPRINTF (("vl1e # %d 0x%lx gets 0x%lx\n", 9.273 - count, vl1e, *vl1e)); 9.274 -*/ 9.275 - if ((count >= cpu0pdb) && (count <= cpu0ptelast)) { 9.276 - //DPRINTF((" Fix up page %d as it is in pte ville: ", count)); 9.277 - *vl1e &= ~_PAGE_RW; 9.278 - DPRINTF(("0x%lx\n", *vl1e)); 9.279 - } 9.280 - if ((count >= (0x100000 >> 12)) 9.281 - && (count < (first_data_page >> 12))) { 9.282 - //DPRINTF((" Fix up page %d as it is in text ", count)); 9.283 - *vl1e &= ~_PAGE_RW; 9.284 - //DPRINTF (("0x%lx\n", *vl1e)); 9.285 - } 9.286 - } 9.287 - /* special thing. Pre-map the shared info page */ 9.288 - vl1e = &pte_array[2]; 9.289 - *vl1e = (shared_info_frame << PAGE_SHIFT) | L1_PROT; 9.290 - DPRINTF(("v1l1 %p, has value 0x%lx\n", vl1e, *(unsigned long *) vl1e)); 9.291 - /* another special thing. VA 80005000 has to point to 80006000 */ 9.292 - /* this is a Plan 9 thing -- the 'mach' pointer */ 9.293 - /* 80005000 is the mach pointer per-cpu, and the actual 9.294 - * mach pointers are 80006000, 80007000 etc. 9.295 - */ 9.296 - vl1e = &pte_array[5]; 9.297 - *vl1e = (cpage_array[6] << PAGE_SHIFT) | L1_PROT; 9.298 - 9.299 - /* OK, it's all set up, copy it in */ 9.300 - memcpy_toguest(xc_handle, dom, pte_array, 9.301 - (tot_pte_pages * 1024 * sizeof (unsigned long) /**/), 9.302 - cpage_array, cpu0pte); 9.303 - 9.304 - /* We really need to have the vl1tab unmapped or the add_mmu_update 9.305 - * below will fail bigtime. 9.306 - */ 9.307 - /* Xen guys: remember my errors on domain exit? Something I'm doing 9.308 - * wrong in here? We never did find out ... 9.309 - */ 9.310 - /* get rid of the entries we can not use ... 
*/ 9.311 - memcpy_toguest(xc_handle, dom, cpage_array, 9.312 - (tot_pte_pages * 1024 * sizeof (unsigned long) /**/), 9.313 - cpage_array, page_array_page); 9.314 - /* last chance to dump all of memory */ 9.315 - // dumpit(xc_handle, dom, 0 /*0x100000>>12*/, tot_pages, cpage_array) ; 9.316 - /* 9.317 - * Pin down l2tab addr as page dir page - causes hypervisor to provide 9.318 - * correct protection for the page 9.319 - */ 9.320 - if (pin_table(xc_handle, MMUEXT_PIN_L2_TABLE, l2tab>>PAGE_SHIFT, dom)) 9.321 - goto error_out; 9.322 - 9.323 - for (count = 0; count < tot_pages; count++) { 9.324 -/* 9.325 - DPRINTF (("add_mmu_update(0x%x, 0x%x, 0x%x, %d)\n", xc_handle, mmu, 9.326 - (cpage_array[count] 9.327 - << PAGE_SHIFT) | 9.328 - MMU_MACHPHYS_UPDATE, 9.329 - count)); 9.330 -*/ 9.331 - if (add_mmu_update(xc_handle, mmu, 9.332 - (cpage_array[count] << PAGE_SHIFT) | 9.333 - MMU_MACHPHYS_UPDATE, count)) 9.334 - goto error_out; 9.335 - //DPRINTF(("Do the next one\n")); 9.336 - } 9.337 -/* 9.338 - */ 9.339 - 9.340 - //dumpit(pm_handle, 3, 4, page_array); 9.341 - /* put the virt_startinfo_addr at KZERO */ 9.342 - /* just hard-code for now */ 9.343 - *virt_startinfo_addr = 0x80000000; 9.344 - 9.345 - DPRINTF(("virt_startinfo_addr = 0x%lx\n", *virt_startinfo_addr)); 9.346 - start_info = xc_map_foreign_range(xc_handle, dom, 9.347 - PAGE_SIZE, PROT_READ | PROT_WRITE, 9.348 - cpage_array[0]); 9.349 - DPRINTF(("startinfo = 0x%p\n", start_info)); 9.350 - DPRINTF(("shared_info_frame is %lx\n", shared_info_frame)); 9.351 - memset(start_info, 0, sizeof (*start_info)); 9.352 - start_info->pt_base = 0x80000000 | cpu0pdb << PAGE_SHIFT; 9.353 - start_info->mfn_list = 0x80000000 | (page_array_page) << PAGE_SHIFT; 9.354 - DPRINTF(("mfn_list 0x%lx\n", start_info->mfn_list)); 9.355 - start_info->mod_start = 0; 9.356 - start_info->mod_len = 0; 9.357 - start_info->nr_pages = tot_pte_pages * 1024; 9.358 - start_info->nr_pt_frames = tot_pte_pages + 1; 9.359 - start_info->shared_info = shared_info_frame; 9.360 - start_info->flags = 0; 9.361 - DPRINTF((" control event channel is %d\n", control_evtchn)); 9.362 - start_info->domain_controller_evtchn = control_evtchn; 9.363 - strncpy((char *)start_info->cmd_line, cmdline, MAX_GUEST_CMDLINE); 9.364 - start_info->cmd_line[MAX_GUEST_CMDLINE - 1] = '\0'; 9.365 - munmap(start_info, PAGE_SIZE); 9.366 - 9.367 - DPRINTF(("done setting up start_info\n")); 9.368 - DPRINTF(("shared_info_frame = 0x%lx\n", shared_info_frame)); 9.369 - /* shared_info page starts its life empty. */ 9.370 - 9.371 - shared_info = xc_map_foreign_range(xc_handle, dom, 9.372 - PAGE_SIZE, PROT_READ | PROT_WRITE, 9.373 - shared_info_frame); 9.374 - memset(shared_info, 0, PAGE_SIZE); 9.375 - /* Mask all upcalls... */ 9.376 - DPRINTF(("mask all upcalls\n")); 9.377 - for (i = 0; i < MAX_VIRT_CPUS; i++) 9.378 - shared_info->vcpu_data[i].evtchn_upcall_mask = 1; 9.379 - munmap(shared_info, PAGE_SIZE); 9.380 - 9.381 - /* Send the page update requests down to the hypervisor. 
*/ 9.382 - DPRINTF(("send page update reqs down.\n")); 9.383 - if (finish_mmu_updates(xc_handle, mmu)) 9.384 - goto error_out; 9.385 - 9.386 - //DPRINTF (("call dumpit.\n")); 9.387 - //dumpit(pm_handle, 0x100000>>12, tot_pages, page_array) ; 9.388 - //dumpit (pm_handle, 2, 0x100, page_array); 9.389 - free(mmu); 9.390 - 9.391 - /* we don't bother freeing anything at this point -- 9.392 - * we're exiting and it is pointless 9.393 - */ 9.394 - return 0; 9.395 - 9.396 - error_out: 9.397 - /* oh well we still free some things -- I oughtta nuke this */ 9.398 - if (mmu != NULL) 9.399 - free(mmu); 9.400 - ; 9.401 - return -1; 9.402 -} 9.403 - 9.404 -int 9.405 -xc_plan9_build(int xc_handle, 9.406 - u32 domid, 9.407 - const char *image_name, 9.408 - const char *cmdline, 9.409 - unsigned int control_evtchn, unsigned long flags) 9.410 -{ 9.411 - dom0_op_t launch_op, op; 9.412 - unsigned long load_addr = 0; 9.413 - long tot_pages; 9.414 - int kernel_fd = -1; 9.415 - gzFile kernel_gfd = NULL; 9.416 - int rc, i; 9.417 - vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt; 9.418 - unsigned long virt_startinfo_addr; 9.419 - 9.420 - if ((tot_pages = xc_get_tot_pages(xc_handle, domid)) < 0) { 9.421 - PERROR("Could not find total pages for domain"); 9.422 - return 1; 9.423 - } 9.424 - DPRINTF(("xc_get_tot_pages returns %ld pages\n", tot_pages)); 9.425 - 9.426 - kernel_fd = open(image_name, O_RDONLY); 9.427 - if (kernel_fd < 0) { 9.428 - PERROR("Could not open kernel image"); 9.429 - return 1; 9.430 - } 9.431 - 9.432 - if ((kernel_gfd = gzdopen(kernel_fd, "rb")) == NULL) { 9.433 - PERROR("Could not allocate decompression state for state file"); 9.434 - close(kernel_fd); 9.435 - return 1; 9.436 - } 9.437 - 9.438 - DPRINTF(("xc_get_tot_pages returns %ld pages\n", tot_pages)); 9.439 - if (mlock(&st_ctxt, sizeof (st_ctxt))) { 9.440 - PERROR("xc_plan9_build: ctxt mlock failed"); 9.441 - return 1; 9.442 - } 9.443 - 9.444 - op.cmd = DOM0_GETDOMAININFO; 9.445 - op.u.getdomaininfo.domain = (domid_t) domid; 9.446 - if ((do_dom0_op(xc_handle, &op) < 0) || 9.447 - ((u32) op.u.getdomaininfo.domain != domid)) { 9.448 - PERROR("Could not get info on domain"); 9.449 - goto error_out; 9.450 - } 9.451 - DPRINTF(("xc_get_tot_pages returns %ld pages\n", tot_pages)); 9.452 - 9.453 - if ( xc_domain_get_vcpu_context(xc_handle, domid, 0, ctxt) ) 9.454 - { 9.455 - PERROR("Could not get vcpu context"); 9.456 - goto error_out; 9.457 - } 9.458 - 9.459 - if (!(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) 9.460 - || (ctxt->pt_base != 0)) { 9.461 - ERROR("Domain is already constructed"); 9.462 - goto error_out; 9.463 - } 9.464 - 9.465 - DPRINTF(("xc_get_tot_pages returns %ld pages\n", tot_pages)); 9.466 - if (setup_guest(xc_handle, domid, kernel_gfd, tot_pages, 9.467 - &virt_startinfo_addr, 9.468 - &load_addr, &st_ctxt, cmdline, 9.469 - op.u.getdomaininfo.shared_info_frame, 9.470 - control_evtchn, flags) < 0) { 9.471 - ERROR("Error constructing guest OS"); 9.472 - goto error_out; 9.473 - } 9.474 - 9.475 - /* leave the leak in here for now 9.476 - if ( kernel_fd >= 0 ) 9.477 - close(kernel_fd); 9.478 - if( kernel_gfd ) 9.479 - gzclose(kernel_gfd); 9.480 - */ 9.481 - ctxt->flags = 0; 9.482 - 9.483 - /* 9.484 - * Initial register values: 9.485 - * DS,ES,FS,GS = FLAT_KERNEL_DS 9.486 - * CS:EIP = FLAT_KERNEL_CS:start_pc 9.487 - * SS:ESP = FLAT_KERNEL_DS:start_stack 9.488 - * ESI = start_info 9.489 - * [EAX,EBX,ECX,EDX,EDI,EBP are zero] 9.490 - * EFLAGS = IF | 2 (bit 1 is reserved and should always be 1) 9.491 - */ 9.492 - ctxt->user_regs.ds = 
FLAT_KERNEL_DS; 9.493 - ctxt->user_regs.es = FLAT_KERNEL_DS; 9.494 - ctxt->user_regs.fs = FLAT_KERNEL_DS; 9.495 - ctxt->user_regs.gs = FLAT_KERNEL_DS; 9.496 - ctxt->user_regs.ss = FLAT_KERNEL_DS; 9.497 - ctxt->user_regs.cs = FLAT_KERNEL_CS; 9.498 - ctxt->user_regs.eip = load_addr; 9.499 - ctxt->user_regs.eip = 0x80100020; 9.500 - /* put stack at top of second page */ 9.501 - ctxt->user_regs.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT); 9.502 - 9.503 - /* why is this set? */ 9.504 - ctxt->user_regs.esi = ctxt->user_regs.esp; 9.505 - ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */ 9.506 - 9.507 - /* FPU is set up to default initial state. */ 9.508 - memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); 9.509 - 9.510 - /* Virtual IDT is empty at start-of-day. */ 9.511 - for (i = 0; i < 256; i++) { 9.512 - ctxt->trap_ctxt[i].vector = i; 9.513 - ctxt->trap_ctxt[i].cs = FLAT_KERNEL_CS; 9.514 - } 9.515 - 9.516 - /* No LDT. */ 9.517 - ctxt->ldt_ents = 0; 9.518 - 9.519 - /* Use the default Xen-provided GDT. */ 9.520 - ctxt->gdt_ents = 0; 9.521 - 9.522 - /* Ring 1 stack is the initial stack. */ 9.523 - /* put stack at top of second page */ 9.524 - ctxt->kernel_ss = FLAT_KERNEL_DS; 9.525 - ctxt->kernel_sp = ctxt->user_regs.esp; 9.526 - 9.527 - /* No debugging. */ 9.528 - memset(ctxt->debugreg, 0, sizeof (ctxt->debugreg)); 9.529 - 9.530 - /* No callback handlers. */ 9.531 -#if defined(__i386__) 9.532 - ctxt->event_callback_cs = FLAT_KERNEL_CS; 9.533 - ctxt->event_callback_eip = 0; 9.534 - ctxt->failsafe_callback_cs = FLAT_KERNEL_CS; 9.535 - ctxt->failsafe_callback_eip = 0; 9.536 -#elif defined(__x86_64__) 9.537 - ctxt->event_callback_eip = 0; 9.538 - ctxt->failsafe_callback_eip = 0; 9.539 - ctxt->syscall_callback_eip = 0; 9.540 -#endif 9.541 - 9.542 - memset(&launch_op, 0, sizeof (launch_op)); 9.543 - 9.544 - launch_op.u.setdomaininfo.domain = (domid_t) domid; 9.545 - launch_op.u.setdomaininfo.vcpu = 0; 9.546 - // launch_op.u.setdomaininfo.num_vifs = 1; 9.547 - launch_op.u.setdomaininfo.ctxt = ctxt; 9.548 - launch_op.cmd = DOM0_SETDOMAININFO; 9.549 - rc = do_dom0_op(xc_handle, &launch_op); 9.550 - 9.551 - fprintf(stderr, "RC is %d\n", rc); 9.552 - return rc; 9.553 - 9.554 - error_out: 9.555 - if (kernel_fd >= 0) 9.556 - close(kernel_fd); 9.557 - if (kernel_gfd) 9.558 - gzclose(kernel_gfd); 9.559 - 9.560 - return -1; 9.561 -} 9.562 - 9.563 -/* 9.564 - * Plan 9 memory layout (initial) 9.565 - * ---------------- 9.566 - * | info from xen| @0 9.567 - * ---------------|<--- boot args (start at 0x1200 + 64) 9.568 - * | stack | 9.569 - * ----------------<--- page 2 9.570 - * | empty | 9.571 - * ---------------<---- page 5 MACHADDR (always points to machp[cpuno] 9.572 - * | aliased | 9.573 - * ---------------<----- page 6 CPU0MACH 9.574 - * | CPU0MACH | 9.575 - * ---------------- 9.576 - * | empty | 9.577 - * ---------------- *virt_load_addr = ehdr.e_entry (0x80100000) 9.578 - * | kernel | 9.579 - * | | 9.580 - * ---------------- <----- page aligned boundary. 
9.581 - * | data | 9.582 - * | | 9.583 - * ---------------- 9.584 - * | bss | 9.585 - * ----------------<--- end of kernel (page aligned) 9.586 - * | PMD cpu0pdb | 9.587 - * ----------------<--- page +1 9.588 - * | PTE cpu0pte | 9.589 - * ----------------<--- page (tot_pte_pages)/1024 9.590 - * | page_array | 9.591 - * ---------------- <--- page (tot_pte_pages)/1024 9.592 - * | empty to TOM | 9.593 - * ---------------- 9.594 - */ 9.595 - 9.596 -static int 9.597 -loadp9image(gzFile kernel_gfd, int xc_handle, u32 dom, 9.598 - unsigned long *page_array, 9.599 - unsigned long tot_pages, unsigned long *virt_load_addr, 9.600 - unsigned long *ksize, unsigned long *symtab_addr, 9.601 - unsigned long *symtab_len, 9.602 - unsigned long *first_data_page, unsigned long *pdb_page, 9.603 - const char *cmdline) 9.604 -{ 9.605 - unsigned long datapage; 9.606 - Exec ehdr; 9.607 - 9.608 - char *p; 9.609 - unsigned long maxva; 9.610 - int curpos, ret; 9.611 - PAGE *image = 0; 9.612 - unsigned long image_tot_pages = 0; 9.613 - unsigned long textround; 9.614 - static PAGE args; 9.615 - 9.616 - ret = -1; 9.617 - 9.618 - p = NULL; 9.619 - maxva = 0; 9.620 - 9.621 - if (gzread(kernel_gfd, &ehdr, sizeof (Exec)) != sizeof (Exec)) { 9.622 - PERROR("Error reading kernel image P9 header."); 9.623 - goto out; 9.624 - } 9.625 - 9.626 - plan9header(&ehdr); 9.627 - curpos = sizeof (Exec); 9.628 - 9.629 - if (ehdr.magic != I_MAGIC) { 9.630 - PERROR("Image does not have an P9 header."); 9.631 - goto out; 9.632 - } 9.633 - 9.634 - textround = ((ehdr.text + 0x20 + 4095) >> 12) << 12; 9.635 - *first_data_page = 0x100000 + textround; 9.636 - DPRINTF(("ehrd.text is 0x%lx, textround is 0x%lx\n", 9.637 - ehdr.text, textround)); 9.638 - 9.639 - image_tot_pages = 9.640 - (textround + ehdr.data + ehdr.bss + PAGE_SIZE - 1) >> PAGE_SHIFT; 9.641 - DPRINTF(("tot pages is %ld\n", image_tot_pages)); 9.642 - 9.643 - *virt_load_addr = 0x80100000; 9.644 - 9.645 - if ((*virt_load_addr & (PAGE_SIZE - 1)) != 0) { 9.646 - ERROR("We can only deal with page-aligned load addresses"); 9.647 - goto out; 9.648 - } 9.649 - 9.650 - if ((*virt_load_addr + (image_tot_pages << PAGE_SHIFT)) > 9.651 - HYPERVISOR_VIRT_START) { 9.652 - ERROR("Cannot map all domain memory without hitting Xen space"); 9.653 - goto out; 9.654 - } 9.655 - 9.656 - /* just malloc an image that is image_tot_pages in size. Then read in 9.657 - * the image -- text, data, -- to page-rounded alignments. 9.658 - * then copy into xen . 
9.659 - * this gets BSS zeroed for free 9.660 - */ 9.661 - DPRINTF(("Allocate %ld bytes\n", image_tot_pages * sizeof (*image))); 9.662 - image = calloc(image_tot_pages, sizeof (*image)); 9.663 - if (!image) 9.664 - return blah("alloc data"); 9.665 - /* text starts at 0x20, after the header, just like Unix long ago */ 9.666 - if (gzread(kernel_gfd, &image[0].data[sizeof (Exec)], ehdr.text) < 9.667 - ehdr.text) 9.668 - return blah("read text"); 9.669 - DPRINTF(("READ TEXT %ld bytes\n", ehdr.text)); 9.670 - datapage = ((ehdr.text + sizeof (Exec)) / PAGE_SIZE) + 1; 9.671 - if (gzread(kernel_gfd, image[datapage].data, ehdr.data) < ehdr.data) 9.672 - return blah("read data"); 9.673 - DPRINTF(("READ DATA %ld bytes\n", ehdr.data)); 9.674 - 9.675 - /* nice contig stuff */ 9.676 - /* oops need to start at 0x100000 */ 9.677 - 9.678 - ret = memcpy_toguest(xc_handle, dom, 9.679 - image, image_tot_pages * 4096, page_array, 0x100); 9.680 - DPRINTF(("done copying kernel to guest memory\n")); 9.681 - 9.682 - /* now do the bootargs */ 9.683 - /* in plan 9, the x=y bootargs start at 0x1200 + 64 in real memory */ 9.684 - /* we'll copy to page 1, so we offset into the page struct at 9.685 - * 0x200 + 64 9.686 - */ 9.687 - memset(&args, 0, sizeof(args)); 9.688 - memcpy(&args.data[0x200 + 64], cmdline, strlen(cmdline)); 9.689 - printf("Copied :%s: to page for args\n", cmdline); 9.690 - ret = memcpy_toguest(xc_handle, dom, &args, sizeof(args), page_array,1); 9.691 - //dumpit(xc_handle, dom, 0 /*0x100000>>12*/, 4, page_array) ; 9.692 - out: 9.693 - if (image) 9.694 - free(image); 9.695 - *pdb_page = image_tot_pages + (0x100000 >> PAGE_SHIFT); 9.696 - return ret; 9.697 -}

--- a/tools/libxc/xc_ptrace.c Fri Jun 24 09:39:40 2005 +0000
+++ b/tools/libxc/xc_ptrace.c Fri Jun 24 10:46:24 2005 +0000
@@ -75,7 +75,7 @@ struct gdb_regs {
     int retval = xc_domain_get_vcpu_context(xc_handle, domid, cpu, &ctxt[cpu]); \
     if (retval) \
         goto error_out; \
-    cr3[cpu] = ctxt[cpu].pt_base; /* physical address */ \
+    cr3[cpu] = ctxt[cpu].ctrlreg[3]; /* physical address */ \
     regs_valid[cpu] = 1; \
 } \
 
@@ -136,7 +136,7 @@ static vcpu_guest_context_t ctxt[MAX_VIR
 
 static inline int paging_enabled(vcpu_guest_context_t *v)
 {
-    unsigned long cr0 = v->cr0;
+    unsigned long cr0 = v->ctrlreg[0];
 
     return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
 }

--- a/tools/libxc/xc_ptrace_core.c Fri Jun 24 09:39:40 2005 +0000
+++ b/tools/libxc/xc_ptrace_core.c Fri Jun 24 10:46:24 2005 +0000
@@ -193,7 +193,7 @@ xc_waitdomain_core(int domfd, int *statu
         return -1;
 
     for (i = 0; i < nr_vcpus; i++) {
-        cr3[i] = ctxt[i].pt_base;
+        cr3[i] = ctxt[i].ctrlreg[3];
     }
     if ((p2m_array = malloc(nr_pages * sizeof(unsigned long))) == NULL) {
         printf("Could not allocate p2m_array\n");

--- a/tools/libxc/xc_vmx_build.c Fri Jun 24 09:39:40 2005 +0000
+++ b/tools/libxc/xc_vmx_build.c Fri Jun 24 10:46:24 2005 +0000
@@ -271,7 +271,7 @@ static int setup_guest(int xc_handle,
     /* First allocate page for page dir. */
     ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
     l2tab = page_array[ppt_alloc++] << PAGE_SHIFT;
-    ctxt->pt_base = l2tab;
+    ctxt->ctrlreg[3] = l2tab;
 
     /* Initialise the page tables. */
     if ( (vl2tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
@@ -549,7 +549,7 @@ int xc_vmx_build(int xc_handle,
     }
 
     if ( !(op.u.getdomaininfo.flags & DOMFLAGS_PAUSED) ||
-         (ctxt->pt_base != 0) )
+         (ctxt->ctrlreg[3] != 0) )
     {
         ERROR("Domain is already constructed");
         goto error_out;

--- a/xen/arch/x86/dom0_ops.c Fri Jun 24 09:39:40 2005 +0000
+++ b/xen/arch/x86/dom0_ops.c Fri Jun 24 10:46:24 2005 +0000
@@ -378,12 +378,8 @@ long arch_do_dom0_op(dom0_op_t *op, dom0
 
 void arch_getdomaininfo_ctxt(
     struct vcpu *v, struct vcpu_guest_context *c)
-{
-#ifdef __i386__ /* Remove when x86_64 VMX is implemented */
-#ifdef CONFIG_VMX
+{
     extern void save_vmx_cpu_user_regs(struct cpu_user_regs *);
-#endif
-#endif
 
     memcpy(c, &v->arch.guest_context, sizeof(*c));
 
@@ -391,27 +387,22 @@ void arch_getdomaininfo_ctxt(
     BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
     c->user_regs.eflags |= v->arch.iopl << 12;
 
-#ifdef __i386__
-#ifdef CONFIG_VMX
-    if ( VMX_DOMAIN(v) ) {
+    if ( VMX_DOMAIN(v) )
+    {
         save_vmx_cpu_user_regs(&c->user_regs);
-        __vmread(CR0_READ_SHADOW, &c->cr0);
-        __vmread(CR4_READ_SHADOW, &c->cr4);
+        __vmread(CR0_READ_SHADOW, &c->ctrlreg[0]);
+        __vmread(CR4_READ_SHADOW, &c->ctrlreg[4]);
     }
-#endif
-#endif
 
     c->flags = 0;
     if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
         c->flags |= VGCF_I387_VALID;
     if ( KERNEL_MODE(v, &v->arch.guest_context.user_regs) )
         c->flags |= VGCF_IN_KERNEL;
-#ifdef CONFIG_VMX
     if (VMX_DOMAIN(v))
         c->flags |= VGCF_VMX_GUEST;
-#endif
 
-    c->pt_base = pagetable_get_paddr(v->arch.guest_table);
+    c->ctrlreg[3] = pagetable_get_paddr(v->arch.guest_table);
 
     c->vm_assist = v->domain->vm_assist;
 }

--- a/xen/arch/x86/domain.c Fri Jun 24 09:39:40 2005 +0000
+++ b/xen/arch/x86/domain.c Fri Jun 24 10:46:24 2005 +0000
@@ -8,7 +8,7 @@
  * Copyright (C) 1995 Linus Torvalds
  *
  * Pentium III FXSR, SSE support
- * Gareth Hughes <gareth@valinux.com>, May 2000
+ * Gareth Hughes <gareth@valinux.com>, May 2000
  */
 
 #include <xen/config.h>
@@ -115,7 +115,7 @@ static inline void kb_wait(void)
 void machine_restart(char * __unused)
 {
     int i;
-    
+
     if ( opt_noreboot )
     {
         printk("Reboot disabled on cmdline: require manual reset\n");
@@ -432,7 +432,7 @@ int arch_set_info_guest(
     if ( v->vcpu_id == 0 )
         d->vm_assist = c->vm_assist;
 
-    phys_basetab = c->pt_base;
+    phys_basetab = c->ctrlreg[3];
     v->arch.guest_table = mk_pagetable(phys_basetab);
 
     if ( shadow_mode_refcounts(d) )
@@ -453,24 +453,15 @@ int arch_set_info_guest(
             return rc;
     }
 
-#ifdef CONFIG_VMX
     if ( c->flags & VGCF_VMX_GUEST )
     {
-        int error;
-
-        // VMX uses the initially provided page tables as the P2M map.
-        //
-        // XXX: This creates a security issue -- Xen can't necessarily
-        //      trust the VMX domain builder. Xen should validate this
-        //      page table, and/or build the table itself, or ???
-        //
+        /* VMX uses the initially provided page tables as the P2M map. */
         if ( !pagetable_get_paddr(d->arch.phys_table) )
             d->arch.phys_table = v->arch.guest_table;
 
-        if ( (error = vmx_final_setup_guest(v, c)) )
-            return error;
+        if ( (rc = vmx_final_setup_guest(v, c)) != 0 )
+            return rc;
     }
-#endif
 
     update_pagetables(v);
 
@@ -704,7 +695,7 @@ static inline void switch_kernel_stack(s
 #endif
 
 #define loaddebug(_v,_reg) \
-    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
+    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
 
 static void __context_switch(void)
 {
@@ -982,6 +973,7 @@ static void relinquish_memory(struct dom
 void domain_relinquish_resources(struct domain *d)
 {
     struct vcpu *v;
+    unsigned long pfn;
 
     BUG_ON(!cpus_empty(d->cpumask));
 
@@ -995,22 +987,20 @@ void domain_relinquish_resources(struct
     /* Drop the in-use references to page-table bases. */
     for_each_vcpu ( d, v )
     {
-        if ( pagetable_get_paddr(v->arch.guest_table) != 0 )
+        if ( (pfn = pagetable_get_pfn(v->arch.guest_table)) != 0 )
         {
-            if ( shadow_mode_refcounts(d) )
-                put_page(&frame_table[pagetable_get_pfn(v->arch.guest_table)]);
-            else
-                put_page_and_type(&frame_table[pagetable_get_pfn(v->arch.guest_table)]);
+            if ( !shadow_mode_refcounts(d) )
+                put_page_type(pfn_to_page(pfn));
+            put_page(pfn_to_page(pfn));
 
             v->arch.guest_table = mk_pagetable(0);
         }
 
-        if ( pagetable_get_paddr(v->arch.guest_table_user) != 0 )
+        if ( (pfn = pagetable_get_pfn(v->arch.guest_table_user)) != 0 )
         {
-            if ( shadow_mode_refcounts(d) )
-                put_page(&frame_table[pagetable_get_pfn(v->arch.guest_table_user)]);
-            else
-                put_page_and_type(&frame_table[pagetable_get_pfn(v->arch.guest_table_user)]);
+            if ( !shadow_mode_refcounts(d) )
+                put_page_type(pfn_to_page(pfn));
+            put_page(pfn_to_page(pfn));
 
             v->arch.guest_table_user = mk_pagetable(0);
         }

--- a/xen/arch/x86/i387.c Fri Jun 24 09:39:40 2005 +0000
+++ b/xen/arch/x86/i387.c Fri Jun 24 10:46:24 2005 +0000
@@ -29,7 +29,7 @@ void save_init_fpu(struct vcpu *tsk)
      * This causes us to set the real flag, so we'll need
      * to temporarily clear it while saving f-p state.
      */
-    if ( test_bit(_VCPUF_guest_stts, &tsk->vcpu_flags) )
+    if ( VMX_DOMAIN(tsk) || (tsk->arch.guest_context.ctrlreg[0] & X86_CR0_TS) )
         clts();
 
     if ( cpu_has_fxsr )

--- a/xen/arch/x86/traps.c Fri Jun 24 09:39:40 2005 +0000
+++ b/xen/arch/x86/traps.c Fri Jun 24 10:46:24 2005 +0000
@@ -348,7 +348,7 @@ void propagate_page_fault(unsigned long
     if ( TI_GET_IF(ti) )
         tb->flags |= TBF_INTERRUPT;
 
-    v->arch.guest_cr2 = addr;
+    v->arch.guest_context.ctrlreg[2] = addr;
 }
 
 static int handle_perdomain_mapping_fault(
@@ -478,12 +478,12 @@ long do_fpu_taskswitch(int set)
 
     if ( set )
     {
-        set_bit(_VCPUF_guest_stts, &v->vcpu_flags);
+        v->arch.guest_context.ctrlreg[0] |= X86_CR0_TS;
         stts();
     }
     else
     {
-        clear_bit(_VCPUF_guest_stts, &v->vcpu_flags);
+        v->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS;
         if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
             clts();
     }
@@ -789,13 +789,11 @@ static int emulate_privileged_op(struct
     switch ( (opcode >> 3) & 7 )
     {
     case 0: /* Read CR0 */
-        *reg =
-            (read_cr0() & ~X86_CR0_TS) |
-            (test_bit(_VCPUF_guest_stts, &v->vcpu_flags) ? X86_CR0_TS:0);
+        *reg = v->arch.guest_context.ctrlreg[0];
         break;
 
     case 2: /* Read CR2 */
-        *reg = v->arch.guest_cr2;
+        *reg = v->arch.guest_context.ctrlreg[2];
         break;
 
     case 3: /* Read CR3 */
@@ -820,7 +818,7 @@ static int emulate_privileged_op(struct
         break;
 
     case 2: /* Write CR2 */
-        v->arch.guest_cr2 = *reg;
+        v->arch.guest_context.ctrlreg[2] = *reg;
         break;
 
     case 3: /* Write CR3 */
@@ -1033,12 +1031,13 @@ asmlinkage int math_state_restore(struct
 
     setup_fpu(current);
 
-    if ( test_and_clear_bit(_VCPUF_guest_stts, &current->vcpu_flags) )
+    if ( current->arch.guest_context.ctrlreg[0] & X86_CR0_TS )
     {
         struct trap_bounce *tb = &current->arch.trap_bounce;
         tb->flags = TBF_EXCEPTION;
         tb->cs = current->arch.guest_context.trap_ctxt[7].cs;
         tb->eip = current->arch.guest_context.trap_ctxt[7].address;
+        current->arch.guest_context.ctrlreg[0] &= ~X86_CR0_TS;
     }
 
     return EXCRET_fault_fixed;

--- a/xen/arch/x86/vmx_vmcs.c Fri Jun 24 09:39:40 2005 +0000
+++ b/xen/arch/x86/vmx_vmcs.c Fri Jun 24 10:46:24 2005 +0000
@@ -168,7 +168,6 @@ void vmx_do_launch(struct vcpu *v)
     struct cpu_user_regs *regs = guest_cpu_user_regs();
 
     vmx_stts();
-    set_bit(_VCPUF_guest_stts, &v->vcpu_flags);
 
     cpu = smp_processor_id();
 

--- a/xen/include/asm-x86/domain.h Fri Jun 24 09:39:40 2005 +0000
+++ b/xen/include/asm-x86/domain.h Fri Jun 24 10:46:24 2005 +0000
@@ -112,9 +112,6 @@ struct arch_vcpu
 
     unsigned long monitor_shadow_ref;
 
-    /* Virtual CR2 value. Can be read/written by guest. */
-    unsigned long guest_cr2;
-
     /* Current LDT details. */
     unsigned long shadow_ldt_mapcnt;
 } __cacheline_aligned;

--- a/xen/include/public/arch-x86_32.h Fri Jun 24 09:39:40 2005 +0000
+++ b/xen/include/public/arch-x86_32.h Fri Jun 24 10:46:24 2005 +0000
@@ -136,9 +136,7 @@ typedef struct vcpu_guest_context {
     unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
     unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
     unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1) */
-    unsigned long pt_base;                  /* CR3 (pagetable base) */
-    unsigned long cr0;                      /* CR0 */
-    unsigned long cr4;                      /* CR4 */
+    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers) */
     unsigned long debugreg[8];              /* DB0-DB7 (debug registers) */
     unsigned long event_callback_cs;        /* CS:EIP of event callback */
     unsigned long event_callback_eip;

--- a/xen/include/public/arch-x86_64.h Fri Jun 24 09:39:40 2005 +0000
+++ b/xen/include/public/arch-x86_64.h Fri Jun 24 10:46:24 2005 +0000
@@ -186,9 +186,7 @@ typedef struct vcpu_guest_context {
     unsigned long ldt_base, ldt_ents;       /* LDT (linear address, # ents) */
     unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
     unsigned long kernel_ss, kernel_sp;     /* Virtual TSS (only SS1/SP1) */
-    unsigned long pt_base;                  /* CR3 (pagetable base) */
-    unsigned long cr0;                      /* CR0 */
-    unsigned long cr4;                      /* CR4 */
+    unsigned long ctrlreg[8];               /* CR0-CR7 (control registers) */
     unsigned long debugreg[8];              /* DB0-DB7 (debug registers) */
     unsigned long event_callback_eip;
     unsigned long failsafe_callback_eip;

--- a/xen/include/public/dom0_ops.h Fri Jun 24 09:39:40 2005 +0000
+++ b/xen/include/public/dom0_ops.h Fri Jun 24 10:46:24 2005 +0000
@@ -19,7 +19,7 @@
  * This makes sure that old versions of dom0 tools will stop working in a
  * well-defined way (rather than crashing the machine, for instance).
  */
-#define DOM0_INTERFACE_VERSION 0xAAAA1006
+#define DOM0_INTERFACE_VERSION 0xAAAA1007
 
 /************************************************************************/
 

--- a/xen/include/xen/sched.h Fri Jun 24 09:39:40 2005 +0000
+++ b/xen/include/xen/sched.h Fri Jun 24 10:46:24 2005 +0000
@@ -327,9 +327,6 @@ extern struct domain *domain_list;
  /* Has the FPU been used since it was last saved? */
 #define _VCPUF_fpu_dirtied     1
 #define VCPUF_fpu_dirtied      (1UL<<_VCPUF_fpu_dirtied)
- /* Has the guest OS requested 'stts'? */
-#define _VCPUF_guest_stts      2
-#define VCPUF_guest_stts       (1UL<<_VCPUF_guest_stts)
  /* Domain is blocked waiting for an event. */
 #define _VCPUF_blocked         3
 #define VCPUF_blocked          (1UL<<_VCPUF_blocked)