debuggers.hg
changeset 6768:3feb7fa331ed
Re-indent vmx code.
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kaf24@firebug.cl.cam.ac.uk
date | Sun Sep 11 16:44:23 2005 +0000 (2005-09-11)
parents | 4508c22dc458
children | 2c2c0b843f05 b594bb976a74
files | xen/arch/x86/shadow.c xen/arch/x86/shadow_public.c xen/arch/x86/vmx.c xen/arch/x86/vmx_intercept.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_platform.c xen/arch/x86/vmx_vmcs.c xen/include/asm-x86/shadow_64.h xen/include/asm-x86/shadow_ops.h xen/include/asm-x86/vmx.h xen/include/asm-x86/vmx_platform.h xen/include/asm-x86/vmx_virpit.h
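
This changeset is almost entirely whitespace: the touched files move to 4-space, no-tab BSD indentation. As the shadow_public.c and vmx_intercept.c hunks below show, it also appends an Emacs file-local variables footer so editors pick up the same settings automatically:

    /*
     * Local variables:
     * mode: C
     * c-set-style: "BSD"
     * c-basic-offset: 4
     * tab-width: 4
     * indent-tabs-mode: nil
     * End:
     */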
line diff
--- a/xen/arch/x86/shadow.c	Sun Sep 11 16:36:24 2005 +0000
+++ b/xen/arch/x86/shadow.c	Sun Sep 11 16:44:23 2005 +0000
@@ -54,7 +54,7 @@ static unsigned long shadow_l4_table(
 static void shadow_map_into_current(struct vcpu *v,
     unsigned long va, unsigned int from, unsigned int to);
 static inline void validate_bl2e_change( struct domain *d,
-    guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index);
+    guest_root_pgentry_t *new_gle_p, pgentry_64_t *shadow_l3, int index);
 
 #endif
 
2.1 --- a/xen/arch/x86/shadow_public.c Sun Sep 11 16:36:24 2005 +0000 2.2 +++ b/xen/arch/x86/shadow_public.c Sun Sep 11 16:44:23 2005 +0000 2.3 @@ -54,24 +54,24 @@ int shadow_set_guest_paging_levels(struc 2.4 switch(levels) { 2.5 #if CONFIG_PAGING_LEVELS >= 4 2.6 case 4: 2.7 - if ( d->arch.ops != &MODE_F_HANDLER ) 2.8 - d->arch.ops = &MODE_F_HANDLER; 2.9 - shadow_unlock(d); 2.10 + if ( d->arch.ops != &MODE_F_HANDLER ) 2.11 + d->arch.ops = &MODE_F_HANDLER; 2.12 + shadow_unlock(d); 2.13 return 1; 2.14 #endif 2.15 case 3: 2.16 case 2: 2.17 #if CONFIG_PAGING_LEVELS == 2 2.18 - if ( d->arch.ops != &MODE_A_HANDLER ) 2.19 - d->arch.ops = &MODE_A_HANDLER; 2.20 + if ( d->arch.ops != &MODE_A_HANDLER ) 2.21 + d->arch.ops = &MODE_A_HANDLER; 2.22 #elif CONFIG_PAGING_LEVELS == 4 2.23 - if ( d->arch.ops != &MODE_D_HANDLER ) 2.24 - d->arch.ops = &MODE_D_HANDLER; 2.25 + if ( d->arch.ops != &MODE_D_HANDLER ) 2.26 + d->arch.ops = &MODE_D_HANDLER; 2.27 #endif 2.28 - shadow_unlock(d); 2.29 + shadow_unlock(d); 2.30 return 1; 2.31 - default: 2.32 - shadow_unlock(d); 2.33 + default: 2.34 + shadow_unlock(d); 2.35 return 0; 2.36 } 2.37 } 2.38 @@ -115,10 +115,10 @@ int shadow_do_update_va_mapping(unsigned 2.39 2.40 struct out_of_sync_entry * 2.41 shadow_mark_mfn_out_of_sync(struct vcpu *v, unsigned long gpfn, 2.42 - unsigned long mfn) 2.43 + unsigned long mfn) 2.44 { 2.45 - struct domain *d = v->domain; 2.46 - return d->arch.ops->mark_mfn_out_of_sync(v, gpfn, mfn); 2.47 + struct domain *d = v->domain; 2.48 + return d->arch.ops->mark_mfn_out_of_sync(v, gpfn, mfn); 2.49 } 2.50 2.51 /* 2.52 @@ -181,7 +181,7 @@ static void alloc_monitor_pagetable(stru 2.53 l4_pgentry_t *mpl4e; 2.54 struct pfn_info *mmfn_info; 2.55 struct domain *d = v->domain; 2.56 - pagetable_t phys_table; 2.57 + pagetable_t phys_table; 2.58 2.59 ASSERT(!pagetable_get_paddr(v->arch.monitor_table)); /* we should only get called once */ 2.60 2.61 @@ -192,13 +192,13 @@ static void alloc_monitor_pagetable(stru 2.62 mpl4e = (l4_pgentry_t *) map_domain_page(mmfn); 2.63 memcpy(mpl4e, &idle_pg_table[0], PAGE_SIZE); 2.64 mpl4e[l4_table_offset(PERDOMAIN_VIRT_START)] = 2.65 - l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR); 2.66 + l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR); 2.67 /* map the phys_to_machine map into the per domain Read-Only MPT space */ 2.68 phys_table = page_table_convert(d); 2.69 2.70 mpl4e[l4_table_offset(RO_MPT_VIRT_START)] = 2.71 - l4e_from_paddr(pagetable_get_paddr(phys_table), 2.72 - __PAGE_HYPERVISOR); 2.73 + l4e_from_paddr(pagetable_get_paddr(phys_table), 2.74 + __PAGE_HYPERVISOR); 2.75 v->arch.monitor_table = mk_pagetable(mmfn << PAGE_SHIFT); 2.76 v->arch.monitor_vtable = (l2_pgentry_t *) mpl4e; 2.77 } 2.78 @@ -245,7 +245,7 @@ free_shadow_tables(struct domain *d, uns 2.79 for ( i = 0; i < PAGETABLE_ENTRIES; i++ ) 2.80 if ( external || is_guest_l4_slot(i) ) 2.81 if ( entry_get_flags(ple[i]) & _PAGE_PRESENT ) 2.82 - put_shadow_ref(entry_get_pfn(ple[i])); 2.83 + put_shadow_ref(entry_get_pfn(ple[i])); 2.84 2.85 unmap_domain_page(ple); 2.86 } 2.87 @@ -306,12 +306,12 @@ static void alloc_monitor_pagetable(stru 2.88 2.89 mpl2e[l2_table_offset(PERDOMAIN_VIRT_START)] = 2.90 l2e_from_paddr(__pa(d->arch.mm_perdomain_pt), 2.91 - __PAGE_HYPERVISOR); 2.92 + __PAGE_HYPERVISOR); 2.93 2.94 // map the phys_to_machine map into the Read-Only MPT space for this domain 2.95 mpl2e[l2_table_offset(RO_MPT_VIRT_START)] = 2.96 l2e_from_paddr(pagetable_get_paddr(d->arch.phys_table), 2.97 - __PAGE_HYPERVISOR); 2.98 + 
__PAGE_HYPERVISOR); 2.99 2.100 // Don't (yet) have mappings for these... 2.101 // Don't want to accidentally see the idle_pg_table's linear mapping. 2.102 @@ -365,7 +365,7 @@ void free_monitor_pagetable(struct vcpu 2.103 v->arch.monitor_table = mk_pagetable(0); 2.104 v->arch.monitor_vtable = 0; 2.105 } 2.106 -#endif 2.107 +#endif 2.108 2.109 static void 2.110 shadow_free_snapshot(struct domain *d, struct out_of_sync_entry *entry) 2.111 @@ -850,16 +850,16 @@ void free_shadow_pages(struct domain *d) 2.112 perfc_decr(free_l1_pages); 2.113 2.114 struct pfn_info *page = list_entry(list_ent, struct pfn_info, list); 2.115 - if (d->arch.ops->guest_paging_levels == PAGING_L2) 2.116 - { 2.117 + if (d->arch.ops->guest_paging_levels == PAGING_L2) 2.118 + { 2.119 #if CONFIG_PAGING_LEVELS >=4 2.120 - free_domheap_pages(page, SL1_ORDER); 2.121 + free_domheap_pages(page, SL1_ORDER); 2.122 #else 2.123 - free_domheap_page(page); 2.124 + free_domheap_page(page); 2.125 #endif 2.126 - } 2.127 - else 2.128 - free_domheap_page(page); 2.129 + } 2.130 + else 2.131 + free_domheap_page(page); 2.132 } 2.133 2.134 shadow_audit(d, 0); 2.135 @@ -930,9 +930,9 @@ int __shadow_mode_enable(struct domain * 2.136 2.137 #if defined(CONFIG_PAGING_LEVELS) 2.138 if(!shadow_set_guest_paging_levels(d, 2.139 - CONFIG_PAGING_LEVELS)) { 2.140 - printk("Unsupported guest paging levels\n"); 2.141 - domain_crash_synchronous(); /* need to take a clean path */ 2.142 + CONFIG_PAGING_LEVELS)) { 2.143 + printk("Unsupported guest paging levels\n"); 2.144 + domain_crash_synchronous(); /* need to take a clean path */ 2.145 } 2.146 #endif 2.147 2.148 @@ -1004,7 +1004,7 @@ int __shadow_mode_enable(struct domain * 2.149 goto nomem; 2.150 2.151 memset(d->arch.shadow_ht, 0, 2.152 - shadow_ht_buckets * sizeof(struct shadow_status)); 2.153 + shadow_ht_buckets * sizeof(struct shadow_status)); 2.154 } 2.155 2.156 if ( new_modes & SHM_log_dirty ) 2.157 @@ -1013,7 +1013,7 @@ int __shadow_mode_enable(struct domain * 2.158 d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63; 2.159 d->arch.shadow_dirty_bitmap = 2.160 xmalloc_array(unsigned long, d->arch.shadow_dirty_bitmap_size / 2.161 - (8 * sizeof(unsigned long))); 2.162 + (8 * sizeof(unsigned long))); 2.163 if ( d->arch.shadow_dirty_bitmap == NULL ) 2.164 { 2.165 d->arch.shadow_dirty_bitmap_size = 0; 2.166 @@ -1039,7 +1039,7 @@ int __shadow_mode_enable(struct domain * 2.167 // external guests provide their own memory for their P2M maps. 2.168 // 2.169 ASSERT( d == page_get_owner( 2.170 - &frame_table[pagetable_get_pfn(d->arch.phys_table)]) ); 2.171 + &frame_table[pagetable_get_pfn(d->arch.phys_table)]) ); 2.172 } 2.173 } 2.174 2.175 @@ -1188,9 +1188,9 @@ static int shadow_mode_table_op( 2.176 chunk : (d->max_pages - i)) + 7) / 8; 2.177 2.178 if (copy_to_user( 2.179 - sc->dirty_bitmap + (i/(8*sizeof(unsigned long))), 2.180 - d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))), 2.181 - bytes)) 2.182 + sc->dirty_bitmap + (i/(8*sizeof(unsigned long))), 2.183 + d->arch.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))), 2.184 + bytes)) 2.185 { 2.186 // copy_to_user can fail when copying to guest app memory. 
2.187 // app should zero buffer after mallocing, and pin it 2.188 @@ -1474,8 +1474,8 @@ void shadow_l3_normal_pt_update( 2.189 2.190 spl3e = (pgentry_64_t *) map_domain_page_with_cache(sl3mfn, cache); 2.191 validate_entry_change(d, (pgentry_64_t *) &gpde, 2.192 - &spl3e[(pa & ~PAGE_MASK) / sizeof(l3_pgentry_t)], 2.193 - shadow_type_to_level(PGT_l3_shadow)); 2.194 + &spl3e[(pa & ~PAGE_MASK) / sizeof(l3_pgentry_t)], 2.195 + shadow_type_to_level(PGT_l3_shadow)); 2.196 unmap_domain_page_with_cache(spl3e, cache); 2.197 } 2.198 2.199 @@ -1502,8 +1502,8 @@ void shadow_l4_normal_pt_update( 2.200 2.201 spl4e = (pgentry_64_t *)map_domain_page_with_cache(sl4mfn, cache); 2.202 validate_entry_change(d, (pgentry_64_t *)&gpde, 2.203 - &spl4e[(pa & ~PAGE_MASK) / sizeof(l4_pgentry_t)], 2.204 - shadow_type_to_level(PGT_l4_shadow)); 2.205 + &spl4e[(pa & ~PAGE_MASK) / sizeof(l4_pgentry_t)], 2.206 + shadow_type_to_level(PGT_l4_shadow)); 2.207 unmap_domain_page_with_cache(spl4e, cache); 2.208 } 2.209 2.210 @@ -1619,7 +1619,7 @@ gpfn_to_mfn_foreign(struct domain *d, un 2.211 } 2.212 2.213 static u32 remove_all_access_in_page( 2.214 - struct domain *d, unsigned long l1mfn, unsigned long forbidden_gmfn) 2.215 + struct domain *d, unsigned long l1mfn, unsigned long forbidden_gmfn) 2.216 { 2.217 l1_pgentry_t *pl1e = map_domain_page(l1mfn); 2.218 l1_pgentry_t match; 2.219 @@ -1627,8 +1627,8 @@ static u32 remove_all_access_in_page( 2.220 int i; 2.221 u32 count = 0; 2.222 int is_l1_shadow = 2.223 - ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) == 2.224 - PGT_l1_shadow); 2.225 + ((frame_table[l1mfn].u.inuse.type_info & PGT_type_mask) == 2.226 + PGT_l1_shadow); 2.227 2.228 match = l1e_from_pfn(forbidden_gmfn, flags); 2.229 2.230 @@ -1671,19 +1671,19 @@ static u32 __shadow_remove_all_access(st 2.231 { 2.232 switch (a->gpfn_and_flags & PGT_type_mask) 2.233 { 2.234 - case PGT_l1_shadow: 2.235 - case PGT_l2_shadow: 2.236 - case PGT_l3_shadow: 2.237 - case PGT_l4_shadow: 2.238 - case PGT_hl2_shadow: 2.239 - count += remove_all_access_in_page(d, a->smfn, forbidden_gmfn); 2.240 - break; 2.241 - case PGT_snapshot: 2.242 - case PGT_writable_pred: 2.243 - // these can't hold refs to the forbidden page 2.244 - break; 2.245 - default: 2.246 - BUG(); 2.247 + case PGT_l1_shadow: 2.248 + case PGT_l2_shadow: 2.249 + case PGT_l3_shadow: 2.250 + case PGT_l4_shadow: 2.251 + case PGT_hl2_shadow: 2.252 + count += remove_all_access_in_page(d, a->smfn, forbidden_gmfn); 2.253 + break; 2.254 + case PGT_snapshot: 2.255 + case PGT_writable_pred: 2.256 + // these can't hold refs to the forbidden page 2.257 + break; 2.258 + default: 2.259 + BUG(); 2.260 } 2.261 2.262 a = a->next; 2.263 @@ -1694,29 +1694,29 @@ static u32 __shadow_remove_all_access(st 2.264 } 2.265 2.266 void shadow_drop_references( 2.267 - struct domain *d, struct pfn_info *page) 2.268 + struct domain *d, struct pfn_info *page) 2.269 { 2.270 if ( likely(!shadow_mode_refcounts(d)) || 2.271 - ((page->u.inuse.type_info & PGT_count_mask) == 0) ) 2.272 + ((page->u.inuse.type_info & PGT_count_mask) == 0) ) 2.273 return; 2.274 2.275 /* XXX This needs more thought... 
*/ 2.276 printk("%s: needing to call __shadow_remove_all_access for mfn=%lx\n", 2.277 - __func__, page_to_pfn(page)); 2.278 + __func__, page_to_pfn(page)); 2.279 printk("Before: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page), 2.280 - page->count_info, page->u.inuse.type_info); 2.281 + page->count_info, page->u.inuse.type_info); 2.282 2.283 shadow_lock(d); 2.284 __shadow_remove_all_access(d, page_to_pfn(page)); 2.285 shadow_unlock(d); 2.286 2.287 printk("After: mfn=%lx c=%08x t=%" PRtype_info "\n", page_to_pfn(page), 2.288 - page->count_info, page->u.inuse.type_info); 2.289 + page->count_info, page->u.inuse.type_info); 2.290 } 2.291 2.292 /* XXX Needs more thought. Neither pretty nor fast: a place holder. */ 2.293 void shadow_sync_and_drop_references( 2.294 - struct domain *d, struct pfn_info *page) 2.295 + struct domain *d, struct pfn_info *page) 2.296 { 2.297 if ( likely(!shadow_mode_refcounts(d)) ) 2.298 return; 2.299 @@ -1730,3 +1730,13 @@ void shadow_sync_and_drop_references( 2.300 2.301 shadow_unlock(d); 2.302 } 2.303 + 2.304 +/* 2.305 + * Local variables: 2.306 + * mode: C 2.307 + * c-set-style: "BSD" 2.308 + * c-basic-offset: 4 2.309 + * tab-width: 4 2.310 + * indent-tabs-mode: nil 2.311 + * End: 2.312 + */
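
Most of the vmx.c delta that follows applies the same mechanical re-indent to large switch statements: case labels and their bodies move onto 4-space steps. Since this rendering collapses leading whitespace, the exact before/after columns are not recoverable; as a sketch only, assuming the usual Xen placement of case labels at the switch's own depth, a case from long_mode_do_msr_read() ends up shaped like:

    switch ( regs->ecx ) {
    case MSR_EFER:
        msr_content = msr->msr_items[VMX_INDEX_MSR_EFER];
        if ( VMX_LONG_GUEST(vc) )
            msr_content |= 1 << _EFER_LMA;
        break;
    default:
        return 0;
    }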
3.1 --- a/xen/arch/x86/vmx.c Sun Sep 11 16:36:24 2005 +0000 3.2 +++ b/xen/arch/x86/vmx.c Sun Sep 11 16:44:23 2005 +0000 3.3 @@ -122,37 +122,37 @@ static inline int long_mode_do_msr_read( 3.4 struct vcpu *vc = current; 3.5 struct msr_state * msr = &vc->arch.arch_vmx.msr_content; 3.6 switch(regs->ecx){ 3.7 - case MSR_EFER: 3.8 - msr_content = msr->msr_items[VMX_INDEX_MSR_EFER]; 3.9 - VMX_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %llx\n", (unsigned long long)msr_content); 3.10 - if (test_bit(VMX_CPU_STATE_LME_ENABLED, 3.11 - &vc->arch.arch_vmx.cpu_state)) 3.12 - msr_content |= 1 << _EFER_LME; 3.13 + case MSR_EFER: 3.14 + msr_content = msr->msr_items[VMX_INDEX_MSR_EFER]; 3.15 + VMX_DBG_LOG(DBG_LEVEL_2, "EFER msr_content %llx\n", (unsigned long long)msr_content); 3.16 + if (test_bit(VMX_CPU_STATE_LME_ENABLED, 3.17 + &vc->arch.arch_vmx.cpu_state)) 3.18 + msr_content |= 1 << _EFER_LME; 3.19 3.20 - if (VMX_LONG_GUEST(vc)) 3.21 - msr_content |= 1 << _EFER_LMA; 3.22 - break; 3.23 - case MSR_FS_BASE: 3.24 - if (!(VMX_LONG_GUEST(vc))) 3.25 - /* XXX should it be GP fault */ 3.26 - domain_crash(); 3.27 - __vmread(GUEST_FS_BASE, &msr_content); 3.28 - break; 3.29 - case MSR_GS_BASE: 3.30 - if (!(VMX_LONG_GUEST(vc))) 3.31 - domain_crash(); 3.32 - __vmread(GUEST_GS_BASE, &msr_content); 3.33 - break; 3.34 - case MSR_SHADOW_GS_BASE: 3.35 - msr_content = msr->shadow_gs; 3.36 - break; 3.37 + if (VMX_LONG_GUEST(vc)) 3.38 + msr_content |= 1 << _EFER_LMA; 3.39 + break; 3.40 + case MSR_FS_BASE: 3.41 + if (!(VMX_LONG_GUEST(vc))) 3.42 + /* XXX should it be GP fault */ 3.43 + domain_crash(); 3.44 + __vmread(GUEST_FS_BASE, &msr_content); 3.45 + break; 3.46 + case MSR_GS_BASE: 3.47 + if (!(VMX_LONG_GUEST(vc))) 3.48 + domain_crash(); 3.49 + __vmread(GUEST_GS_BASE, &msr_content); 3.50 + break; 3.51 + case MSR_SHADOW_GS_BASE: 3.52 + msr_content = msr->shadow_gs; 3.53 + break; 3.54 3.55 CASE_READ_MSR(STAR); 3.56 CASE_READ_MSR(LSTAR); 3.57 CASE_READ_MSR(CSTAR); 3.58 CASE_READ_MSR(SYSCALL_MASK); 3.59 - default: 3.60 - return 0; 3.61 + default: 3.62 + return 0; 3.63 } 3.64 VMX_DBG_LOG(DBG_LEVEL_2, "mode_do_msr_read: msr_content: %lx\n", msr_content); 3.65 regs->eax = msr_content & 0xffffffff; 3.66 @@ -166,68 +166,68 @@ static inline int long_mode_do_msr_write 3.67 struct vcpu *vc = current; 3.68 struct msr_state * msr = &vc->arch.arch_vmx.msr_content; 3.69 struct msr_state * host_state = 3.70 - &percpu_msr[smp_processor_id()]; 3.71 + &percpu_msr[smp_processor_id()]; 3.72 3.73 VMX_DBG_LOG(DBG_LEVEL_1, " mode_do_msr_write msr %lx msr_content %lx\n", 3.74 regs->ecx, msr_content); 3.75 3.76 switch (regs->ecx){ 3.77 - case MSR_EFER: 3.78 - if ((msr_content & EFER_LME) ^ 3.79 - test_bit(VMX_CPU_STATE_LME_ENABLED, 3.80 - &vc->arch.arch_vmx.cpu_state)){ 3.81 - if (test_bit(VMX_CPU_STATE_PG_ENABLED, 3.82 - &vc->arch.arch_vmx.cpu_state) || 3.83 - !test_bit(VMX_CPU_STATE_PAE_ENABLED, 3.84 - &vc->arch.arch_vmx.cpu_state)){ 3.85 - vmx_inject_exception(vc, TRAP_gp_fault, 0); 3.86 - } 3.87 + case MSR_EFER: 3.88 + if ((msr_content & EFER_LME) ^ 3.89 + test_bit(VMX_CPU_STATE_LME_ENABLED, 3.90 + &vc->arch.arch_vmx.cpu_state)){ 3.91 + if (test_bit(VMX_CPU_STATE_PG_ENABLED, 3.92 + &vc->arch.arch_vmx.cpu_state) || 3.93 + !test_bit(VMX_CPU_STATE_PAE_ENABLED, 3.94 + &vc->arch.arch_vmx.cpu_state)){ 3.95 + vmx_inject_exception(vc, TRAP_gp_fault, 0); 3.96 } 3.97 - if (msr_content & EFER_LME) 3.98 - set_bit(VMX_CPU_STATE_LME_ENABLED, 3.99 - &vc->arch.arch_vmx.cpu_state); 3.100 - /* No update for LME/LMA since it have no effect */ 3.101 - 
msr->msr_items[VMX_INDEX_MSR_EFER] = 3.102 - msr_content; 3.103 - if (msr_content & ~(EFER_LME | EFER_LMA)){ 3.104 - msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content; 3.105 - if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)){ 3.106 - rdmsrl(MSR_EFER, 3.107 - host_state->msr_items[VMX_INDEX_MSR_EFER]); 3.108 - set_bit(VMX_INDEX_MSR_EFER, &host_state->flags); 3.109 - set_bit(VMX_INDEX_MSR_EFER, &msr->flags); 3.110 - wrmsrl(MSR_EFER, msr_content); 3.111 - } 3.112 + } 3.113 + if (msr_content & EFER_LME) 3.114 + set_bit(VMX_CPU_STATE_LME_ENABLED, 3.115 + &vc->arch.arch_vmx.cpu_state); 3.116 + /* No update for LME/LMA since it have no effect */ 3.117 + msr->msr_items[VMX_INDEX_MSR_EFER] = 3.118 + msr_content; 3.119 + if (msr_content & ~(EFER_LME | EFER_LMA)){ 3.120 + msr->msr_items[VMX_INDEX_MSR_EFER] = msr_content; 3.121 + if (!test_bit(VMX_INDEX_MSR_EFER, &msr->flags)){ 3.122 + rdmsrl(MSR_EFER, 3.123 + host_state->msr_items[VMX_INDEX_MSR_EFER]); 3.124 + set_bit(VMX_INDEX_MSR_EFER, &host_state->flags); 3.125 + set_bit(VMX_INDEX_MSR_EFER, &msr->flags); 3.126 + wrmsrl(MSR_EFER, msr_content); 3.127 } 3.128 - break; 3.129 + } 3.130 + break; 3.131 3.132 - case MSR_FS_BASE: 3.133 - case MSR_GS_BASE: 3.134 - if (!(VMX_LONG_GUEST(vc))) 3.135 - domain_crash(); 3.136 - if (!IS_CANO_ADDRESS(msr_content)){ 3.137 - VMX_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n"); 3.138 - vmx_inject_exception(vc, TRAP_gp_fault, 0); 3.139 - } 3.140 - if (regs->ecx == MSR_FS_BASE) 3.141 - __vmwrite(GUEST_FS_BASE, msr_content); 3.142 - else 3.143 - __vmwrite(GUEST_GS_BASE, msr_content); 3.144 - break; 3.145 + case MSR_FS_BASE: 3.146 + case MSR_GS_BASE: 3.147 + if (!(VMX_LONG_GUEST(vc))) 3.148 + domain_crash(); 3.149 + if (!IS_CANO_ADDRESS(msr_content)){ 3.150 + VMX_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n"); 3.151 + vmx_inject_exception(vc, TRAP_gp_fault, 0); 3.152 + } 3.153 + if (regs->ecx == MSR_FS_BASE) 3.154 + __vmwrite(GUEST_FS_BASE, msr_content); 3.155 + else 3.156 + __vmwrite(GUEST_GS_BASE, msr_content); 3.157 + break; 3.158 3.159 - case MSR_SHADOW_GS_BASE: 3.160 - if (!(VMX_LONG_GUEST(vc))) 3.161 - domain_crash(); 3.162 - vc->arch.arch_vmx.msr_content.shadow_gs = msr_content; 3.163 - wrmsrl(MSR_SHADOW_GS_BASE, msr_content); 3.164 - break; 3.165 + case MSR_SHADOW_GS_BASE: 3.166 + if (!(VMX_LONG_GUEST(vc))) 3.167 + domain_crash(); 3.168 + vc->arch.arch_vmx.msr_content.shadow_gs = msr_content; 3.169 + wrmsrl(MSR_SHADOW_GS_BASE, msr_content); 3.170 + break; 3.171 3.172 - CASE_WRITE_MSR(STAR); 3.173 - CASE_WRITE_MSR(LSTAR); 3.174 - CASE_WRITE_MSR(CSTAR); 3.175 - CASE_WRITE_MSR(SYSCALL_MASK); 3.176 - default: 3.177 - return 0; 3.178 + CASE_WRITE_MSR(STAR); 3.179 + CASE_WRITE_MSR(LSTAR); 3.180 + CASE_WRITE_MSR(CSTAR); 3.181 + CASE_WRITE_MSR(SYSCALL_MASK); 3.182 + default: 3.183 + return 0; 3.184 } 3.185 return 1; 3.186 } 3.187 @@ -252,8 +252,8 @@ vmx_restore_msrs(struct vcpu *d) 3.188 i = find_first_set_bit(guest_flags); 3.189 3.190 VMX_DBG_LOG(DBG_LEVEL_2, 3.191 - "restore guest's index %d msr %lx with %lx\n", 3.192 - i, (unsigned long) msr_data_index[i], (unsigned long) guest_state->msr_items[i]); 3.193 + "restore guest's index %d msr %lx with %lx\n", 3.194 + i, (unsigned long) msr_data_index[i], (unsigned long) guest_state->msr_items[i]); 3.195 set_bit(i, &host_state->flags); 3.196 wrmsrl(msr_data_index[i], guest_state->msr_items[i]); 3.197 clear_bit(i, &guest_flags); 3.198 @@ -309,8 +309,8 @@ int start_vmx(void) 3.199 3.200 if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) { 3.201 if ((eax & 
IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) { 3.202 - printk("VMX disabled by Feature Control MSR.\n"); 3.203 - return 0; 3.204 + printk("VMX disabled by Feature Control MSR.\n"); 3.205 + return 0; 3.206 } 3.207 } 3.208 else { 3.209 @@ -320,16 +320,16 @@ int start_vmx(void) 3.210 } 3.211 3.212 if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS, 3.213 - MSR_IA32_VMX_PINBASED_CTLS_MSR)) 3.214 + MSR_IA32_VMX_PINBASED_CTLS_MSR)) 3.215 return 0; 3.216 if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS, 3.217 - MSR_IA32_VMX_PROCBASED_CTLS_MSR)) 3.218 + MSR_IA32_VMX_PROCBASED_CTLS_MSR)) 3.219 return 0; 3.220 if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS, 3.221 - MSR_IA32_VMX_EXIT_CTLS_MSR)) 3.222 + MSR_IA32_VMX_EXIT_CTLS_MSR)) 3.223 return 0; 3.224 if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS, 3.225 - MSR_IA32_VMX_ENTRY_CTLS_MSR)) 3.226 + MSR_IA32_VMX_ENTRY_CTLS_MSR)) 3.227 return 0; 3.228 3.229 set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */ 3.230 @@ -385,8 +385,8 @@ static int vmx_do_page_fault(unsigned lo 3.231 { 3.232 __vmread(GUEST_RIP, &eip); 3.233 VMX_DBG_LOG(DBG_LEVEL_VMMU, 3.234 - "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx", 3.235 - va, eip, (unsigned long)regs->error_code); 3.236 + "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx", 3.237 + va, eip, (unsigned long)regs->error_code); 3.238 } 3.239 #endif 3.240 3.241 @@ -478,8 +478,8 @@ static void vmx_vmexit_do_cpuid(unsigned 3.242 regs->edx = (unsigned long) edx; 3.243 3.244 VMX_DBG_LOG(DBG_LEVEL_1, 3.245 - "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x", 3.246 - eip, input, eax, ebx, ecx, edx); 3.247 + "vmx_vmexit_do_cpuid: eip: %lx, input: %lx, out:eax=%x, ebx=%x, ecx=%x, edx=%x", 3.248 + eip, input, eax, ebx, ecx, edx); 3.249 3.250 } 3.251 3.252 @@ -607,7 +607,7 @@ static int check_for_null_selector(unsig 3.253 } 3.254 3.255 void send_pio_req(struct cpu_user_regs *regs, unsigned long port, 3.256 - unsigned long count, int size, long value, int dir, int pvalid) 3.257 + unsigned long count, int size, long value, int dir, int pvalid) 3.258 { 3.259 struct vcpu *v = current; 3.260 vcpu_iodata_t *vio; 3.261 @@ -620,8 +620,8 @@ void send_pio_req(struct cpu_user_regs * 3.262 } 3.263 3.264 if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) { 3.265 - printf("VMX I/O has not yet completed\n"); 3.266 - domain_crash_synchronous(); 3.267 + printf("VMX I/O has not yet completed\n"); 3.268 + domain_crash_synchronous(); 3.269 } 3.270 set_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags); 3.271 3.272 @@ -656,7 +656,7 @@ void send_pio_req(struct cpu_user_regs * 3.273 } 3.274 3.275 static void vmx_io_instruction(struct cpu_user_regs *regs, 3.276 - unsigned long exit_qualification, unsigned long inst_len) 3.277 + unsigned long exit_qualification, unsigned long inst_len) 3.278 { 3.279 struct mi_per_cpu_info *mpcip; 3.280 unsigned long eip, cs, eflags; 3.281 @@ -686,10 +686,10 @@ static void vmx_io_instruction(struct cp 3.282 dir = test_bit(3, &exit_qualification); /* direction */ 3.283 3.284 if (test_bit(4, &exit_qualification)) { /* string instruction */ 3.285 - unsigned long addr, count = 1; 3.286 - int sign = regs->eflags & EF_DF ? -1 : 1; 3.287 + unsigned long addr, count = 1; 3.288 + int sign = regs->eflags & EF_DF ? 
-1 : 1; 3.289 3.290 - __vmread(GUEST_LINEAR_ADDRESS, &addr); 3.291 + __vmread(GUEST_LINEAR_ADDRESS, &addr); 3.292 3.293 /* 3.294 * In protected mode, guest linear address is invalid if the 3.295 @@ -699,35 +699,35 @@ static void vmx_io_instruction(struct cp 3.296 addr = dir == IOREQ_WRITE ? regs->esi : regs->edi; 3.297 3.298 if (test_bit(5, &exit_qualification)) { /* "rep" prefix */ 3.299 - mpcip->flags |= REPZ; 3.300 - count = vm86 ? regs->ecx & 0xFFFF : regs->ecx; 3.301 - } 3.302 + mpcip->flags |= REPZ; 3.303 + count = vm86 ? regs->ecx & 0xFFFF : regs->ecx; 3.304 + } 3.305 3.306 - /* 3.307 - * Handle string pio instructions that cross pages or that 3.308 - * are unaligned. See the comments in vmx_platform.c/handle_mmio() 3.309 - */ 3.310 - if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) { 3.311 - unsigned long value = 0; 3.312 + /* 3.313 + * Handle string pio instructions that cross pages or that 3.314 + * are unaligned. See the comments in vmx_platform.c/handle_mmio() 3.315 + */ 3.316 + if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) { 3.317 + unsigned long value = 0; 3.318 3.319 - mpcip->flags |= OVERLAP; 3.320 - if (dir == IOREQ_WRITE) 3.321 - vmx_copy(&value, addr, size, VMX_COPY_IN); 3.322 - send_pio_req(regs, port, 1, size, value, dir, 0); 3.323 - } else { 3.324 - if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) { 3.325 + mpcip->flags |= OVERLAP; 3.326 + if (dir == IOREQ_WRITE) 3.327 + vmx_copy(&value, addr, size, VMX_COPY_IN); 3.328 + send_pio_req(regs, port, 1, size, value, dir, 0); 3.329 + } else { 3.330 + if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) { 3.331 if (sign > 0) 3.332 count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size; 3.333 else 3.334 count = (addr & ~PAGE_MASK) / size; 3.335 - } else 3.336 - __update_guest_eip(inst_len); 3.337 + } else 3.338 + __update_guest_eip(inst_len); 3.339 3.340 - send_pio_req(regs, port, count, size, addr, dir, 1); 3.341 - } 3.342 + send_pio_req(regs, port, count, size, addr, dir, 1); 3.343 + } 3.344 } else { 3.345 __update_guest_eip(inst_len); 3.346 - send_pio_req(regs, port, 1, size, regs->eax, dir, 0); 3.347 + send_pio_req(regs, port, 1, size, regs->eax, dir, 0); 3.348 } 3.349 } 3.350 3.351 @@ -739,30 +739,30 @@ vmx_copy(void *buf, unsigned long laddr, 3.352 int count; 3.353 3.354 while (size > 0) { 3.355 - count = PAGE_SIZE - (laddr & ~PAGE_MASK); 3.356 - if (count > size) 3.357 - count = size; 3.358 + count = PAGE_SIZE - (laddr & ~PAGE_MASK); 3.359 + if (count > size) 3.360 + count = size; 3.361 3.362 - if (vmx_paging_enabled(current)) { 3.363 - gpa = gva_to_gpa(laddr); 3.364 - mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT); 3.365 - } else 3.366 - mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT); 3.367 - if (mfn == INVALID_MFN) 3.368 - return 0; 3.369 + if (vmx_paging_enabled(current)) { 3.370 + gpa = gva_to_gpa(laddr); 3.371 + mfn = get_mfn_from_pfn(gpa >> PAGE_SHIFT); 3.372 + } else 3.373 + mfn = get_mfn_from_pfn(laddr >> PAGE_SHIFT); 3.374 + if (mfn == INVALID_MFN) 3.375 + return 0; 3.376 3.377 - addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK); 3.378 + addr = (char *)map_domain_page(mfn) + (laddr & ~PAGE_MASK); 3.379 3.380 - if (dir == VMX_COPY_IN) 3.381 - memcpy(buf, addr, count); 3.382 - else 3.383 - memcpy(addr, buf, count); 3.384 + if (dir == VMX_COPY_IN) 3.385 + memcpy(buf, addr, count); 3.386 + else 3.387 + memcpy(addr, buf, count); 3.388 3.389 - unmap_domain_page(addr); 3.390 + unmap_domain_page(addr); 3.391 3.392 - laddr += count; 3.393 - buf += count; 3.394 - size -= 
count; 3.395 + laddr += count; 3.396 + buf += count; 3.397 + size -= count; 3.398 } 3.399 3.400 return 1; 3.401 @@ -846,47 +846,47 @@ vmx_world_restore(struct vcpu *d, struct 3.402 error |= __vmwrite(CR0_READ_SHADOW, c->cr0); 3.403 3.404 if (!vmx_paging_enabled(d)) { 3.405 - VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table"); 3.406 - __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table)); 3.407 + VMX_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table"); 3.408 + __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table)); 3.409 goto skip_cr3; 3.410 } 3.411 3.412 if (c->cr3 == d->arch.arch_vmx.cpu_cr3) { 3.413 - /* 3.414 - * This is simple TLB flush, implying the guest has 3.415 - * removed some translation or changed page attributes. 3.416 - * We simply invalidate the shadow. 3.417 - */ 3.418 - mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT); 3.419 - if (mfn != pagetable_get_pfn(d->arch.guest_table)) { 3.420 - printk("Invalid CR3 value=%x", c->cr3); 3.421 - domain_crash_synchronous(); 3.422 - return 0; 3.423 - } 3.424 - shadow_sync_all(d->domain); 3.425 + /* 3.426 + * This is simple TLB flush, implying the guest has 3.427 + * removed some translation or changed page attributes. 3.428 + * We simply invalidate the shadow. 3.429 + */ 3.430 + mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT); 3.431 + if (mfn != pagetable_get_pfn(d->arch.guest_table)) { 3.432 + printk("Invalid CR3 value=%x", c->cr3); 3.433 + domain_crash_synchronous(); 3.434 + return 0; 3.435 + } 3.436 + shadow_sync_all(d->domain); 3.437 } else { 3.438 - /* 3.439 - * If different, make a shadow. Check if the PDBR is valid 3.440 - * first. 3.441 - */ 3.442 - VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3); 3.443 - if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) { 3.444 - printk("Invalid CR3 value=%x", c->cr3); 3.445 - domain_crash_synchronous(); 3.446 - return 0; 3.447 - } 3.448 - mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT); 3.449 - d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT); 3.450 - update_pagetables(d); 3.451 - /* 3.452 - * arch.shadow_table should now hold the next CR3 for shadow 3.453 - */ 3.454 - d->arch.arch_vmx.cpu_cr3 = c->cr3; 3.455 - VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3); 3.456 - __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table)); 3.457 + /* 3.458 + * If different, make a shadow. Check if the PDBR is valid 3.459 + * first. 
3.460 + */ 3.461 + VMX_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3); 3.462 + if ((c->cr3 >> PAGE_SHIFT) > d->domain->max_pages) { 3.463 + printk("Invalid CR3 value=%x", c->cr3); 3.464 + domain_crash_synchronous(); 3.465 + return 0; 3.466 + } 3.467 + mfn = get_mfn_from_pfn(c->cr3 >> PAGE_SHIFT); 3.468 + d->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT); 3.469 + update_pagetables(d); 3.470 + /* 3.471 + * arch.shadow_table should now hold the next CR3 for shadow 3.472 + */ 3.473 + d->arch.arch_vmx.cpu_cr3 = c->cr3; 3.474 + VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3); 3.475 + __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table)); 3.476 } 3.477 3.478 -skip_cr3: 3.479 + skip_cr3: 3.480 3.481 error |= __vmread(CR4_READ_SHADOW, &old_cr4); 3.482 error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK)); 3.483 @@ -952,59 +952,59 @@ vmx_assist(struct vcpu *d, int mode) 3.484 3.485 /* make sure vmxassist exists (this is not an error) */ 3.486 if (!vmx_copy(&magic, VMXASSIST_MAGIC_OFFSET, sizeof(magic), VMX_COPY_IN)) 3.487 - return 0; 3.488 + return 0; 3.489 if (magic != VMXASSIST_MAGIC) 3.490 - return 0; 3.491 + return 0; 3.492 3.493 switch (mode) { 3.494 - /* 3.495 - * Transfer control to vmxassist. 3.496 - * Store the current context in VMXASSIST_OLD_CONTEXT and load 3.497 - * the new VMXASSIST_NEW_CONTEXT context. This context was created 3.498 - * by vmxassist and will transfer control to it. 3.499 - */ 3.500 + /* 3.501 + * Transfer control to vmxassist. 3.502 + * Store the current context in VMXASSIST_OLD_CONTEXT and load 3.503 + * the new VMXASSIST_NEW_CONTEXT context. This context was created 3.504 + * by vmxassist and will transfer control to it. 3.505 + */ 3.506 case VMX_ASSIST_INVOKE: 3.507 - /* save the old context */ 3.508 - if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN)) 3.509 - goto error; 3.510 - if (cp != 0) { 3.511 - if (!vmx_world_save(d, &c)) 3.512 - goto error; 3.513 - if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_OUT)) 3.514 - goto error; 3.515 - } 3.516 + /* save the old context */ 3.517 + if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN)) 3.518 + goto error; 3.519 + if (cp != 0) { 3.520 + if (!vmx_world_save(d, &c)) 3.521 + goto error; 3.522 + if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_OUT)) 3.523 + goto error; 3.524 + } 3.525 3.526 - /* restore the new context, this should activate vmxassist */ 3.527 - if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), VMX_COPY_IN)) 3.528 - goto error; 3.529 - if (cp != 0) { 3.530 + /* restore the new context, this should activate vmxassist */ 3.531 + if (!vmx_copy(&cp, VMXASSIST_NEW_CONTEXT, sizeof(cp), VMX_COPY_IN)) 3.532 + goto error; 3.533 + if (cp != 0) { 3.534 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN)) 3.535 - goto error; 3.536 - if (!vmx_world_restore(d, &c)) 3.537 - goto error; 3.538 - return 1; 3.539 - } 3.540 - break; 3.541 + goto error; 3.542 + if (!vmx_world_restore(d, &c)) 3.543 + goto error; 3.544 + return 1; 3.545 + } 3.546 + break; 3.547 3.548 - /* 3.549 - * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE 3.550 - * above. 3.551 - */ 3.552 + /* 3.553 + * Restore the VMXASSIST_OLD_CONTEXT that was saved by VMX_ASSIST_INVOKE 3.554 + * above. 
3.555 + */ 3.556 case VMX_ASSIST_RESTORE: 3.557 - /* save the old context */ 3.558 - if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN)) 3.559 - goto error; 3.560 - if (cp != 0) { 3.561 + /* save the old context */ 3.562 + if (!vmx_copy(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp), VMX_COPY_IN)) 3.563 + goto error; 3.564 + if (cp != 0) { 3.565 if (!vmx_copy(&c, cp, sizeof(c), VMX_COPY_IN)) 3.566 - goto error; 3.567 - if (!vmx_world_restore(d, &c)) 3.568 - goto error; 3.569 - return 1; 3.570 - } 3.571 - break; 3.572 + goto error; 3.573 + if (!vmx_world_restore(d, &c)) 3.574 + goto error; 3.575 + return 1; 3.576 + } 3.577 + break; 3.578 } 3.579 3.580 -error: 3.581 + error: 3.582 printf("Failed to transfer to vmxassist\n"); 3.583 domain_crash_synchronous(); 3.584 return 0; 3.585 @@ -1031,7 +1031,7 @@ static int vmx_set_cr0(unsigned long val 3.586 * The guest CR3 must be pointing to the guest physical. 3.587 */ 3.588 if ( !VALID_MFN(mfn = get_mfn_from_pfn( 3.589 - d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) || 3.590 + d->arch.arch_vmx.cpu_cr3 >> PAGE_SHIFT)) || 3.591 !get_page(pfn_to_page(mfn), d->domain) ) 3.592 { 3.593 printk("Invalid CR3 value = %lx", d->arch.arch_vmx.cpu_cr3); 3.594 @@ -1040,18 +1040,18 @@ static int vmx_set_cr0(unsigned long val 3.595 3.596 #if defined(__x86_64__) 3.597 if (test_bit(VMX_CPU_STATE_LME_ENABLED, 3.598 - &d->arch.arch_vmx.cpu_state) && 3.599 - !test_bit(VMX_CPU_STATE_PAE_ENABLED, 3.600 - &d->arch.arch_vmx.cpu_state)){ 3.601 + &d->arch.arch_vmx.cpu_state) && 3.602 + !test_bit(VMX_CPU_STATE_PAE_ENABLED, 3.603 + &d->arch.arch_vmx.cpu_state)){ 3.604 VMX_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable\n"); 3.605 vmx_inject_exception(d, TRAP_gp_fault, 0); 3.606 } 3.607 if (test_bit(VMX_CPU_STATE_LME_ENABLED, 3.608 - &d->arch.arch_vmx.cpu_state)){ 3.609 + &d->arch.arch_vmx.cpu_state)){ 3.610 /* Here the PAE is should to be opened */ 3.611 VMX_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode\n"); 3.612 set_bit(VMX_CPU_STATE_LMA_ENABLED, 3.613 - &d->arch.arch_vmx.cpu_state); 3.614 + &d->arch.arch_vmx.cpu_state); 3.615 __vmread(VM_ENTRY_CONTROLS, &vm_entry_value); 3.616 vm_entry_value |= VM_ENTRY_CONTROLS_IA32E_MODE; 3.617 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value); 3.618 @@ -1073,17 +1073,17 @@ static int vmx_set_cr0(unsigned long val 3.619 #endif 3.620 } 3.621 3.622 - unsigned long crn; 3.623 + unsigned long crn; 3.624 /* update CR4's PAE if needed */ 3.625 __vmread(GUEST_CR4, &crn); 3.626 if ( (!(crn & X86_CR4_PAE)) && 3.627 - test_bit(VMX_CPU_STATE_PAE_ENABLED, 3.628 - &d->arch.arch_vmx.cpu_state)){ 3.629 + test_bit(VMX_CPU_STATE_PAE_ENABLED, 3.630 + &d->arch.arch_vmx.cpu_state)){ 3.631 VMX_DBG_LOG(DBG_LEVEL_1, "enable PAE on cr4\n"); 3.632 __vmwrite(GUEST_CR4, crn | X86_CR4_PAE); 3.633 } 3.634 #elif defined( __i386__) 3.635 - unsigned long old_base_mfn; 3.636 + unsigned long old_base_mfn; 3.637 old_base_mfn = pagetable_get_pfn(d->arch.guest_table); 3.638 if (old_base_mfn) 3.639 put_page(pfn_to_page(old_base_mfn)); 3.640 @@ -1095,14 +1095,14 @@ static int vmx_set_cr0(unsigned long val 3.641 update_pagetables(d); 3.642 3.643 VMX_DBG_LOG(DBG_LEVEL_VMMU, "New arch.guest_table = %lx", 3.644 - (unsigned long) (mfn << PAGE_SHIFT)); 3.645 + (unsigned long) (mfn << PAGE_SHIFT)); 3.646 3.647 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table)); 3.648 /* 3.649 * arch->shadow_table should hold the next CR3 for shadow 3.650 */ 3.651 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx", 3.652 - d->arch.arch_vmx.cpu_cr3, mfn); 3.653 + 
d->arch.arch_vmx.cpu_cr3, mfn); 3.654 } 3.655 3.656 /* 3.657 @@ -1129,29 +1129,29 @@ static int vmx_set_cr0(unsigned long val 3.658 __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value); 3.659 } 3.660 } 3.661 - __vmread(GUEST_RIP, &eip); 3.662 - VMX_DBG_LOG(DBG_LEVEL_1, 3.663 - "Disabling CR0.PE at %%eip 0x%lx\n", eip); 3.664 - if (vmx_assist(d, VMX_ASSIST_INVOKE)) { 3.665 - set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state); 3.666 - __vmread(GUEST_RIP, &eip); 3.667 - VMX_DBG_LOG(DBG_LEVEL_1, 3.668 - "Transfering control to vmxassist %%eip 0x%lx\n", eip); 3.669 - return 0; /* do not update eip! */ 3.670 - } 3.671 + __vmread(GUEST_RIP, &eip); 3.672 + VMX_DBG_LOG(DBG_LEVEL_1, 3.673 + "Disabling CR0.PE at %%eip 0x%lx\n", eip); 3.674 + if (vmx_assist(d, VMX_ASSIST_INVOKE)) { 3.675 + set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state); 3.676 + __vmread(GUEST_RIP, &eip); 3.677 + VMX_DBG_LOG(DBG_LEVEL_1, 3.678 + "Transfering control to vmxassist %%eip 0x%lx\n", eip); 3.679 + return 0; /* do not update eip! */ 3.680 + } 3.681 } else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED, 3.682 - &d->arch.arch_vmx.cpu_state)) { 3.683 - __vmread(GUEST_RIP, &eip); 3.684 - VMX_DBG_LOG(DBG_LEVEL_1, 3.685 - "Enabling CR0.PE at %%eip 0x%lx\n", eip); 3.686 - if (vmx_assist(d, VMX_ASSIST_RESTORE)) { 3.687 - clear_bit(VMX_CPU_STATE_ASSIST_ENABLED, 3.688 - &d->arch.arch_vmx.cpu_state); 3.689 - __vmread(GUEST_RIP, &eip); 3.690 - VMX_DBG_LOG(DBG_LEVEL_1, 3.691 - "Restoring to %%eip 0x%lx\n", eip); 3.692 - return 0; /* do not update eip! */ 3.693 - } 3.694 + &d->arch.arch_vmx.cpu_state)) { 3.695 + __vmread(GUEST_RIP, &eip); 3.696 + VMX_DBG_LOG(DBG_LEVEL_1, 3.697 + "Enabling CR0.PE at %%eip 0x%lx\n", eip); 3.698 + if (vmx_assist(d, VMX_ASSIST_RESTORE)) { 3.699 + clear_bit(VMX_CPU_STATE_ASSIST_ENABLED, 3.700 + &d->arch.arch_vmx.cpu_state); 3.701 + __vmread(GUEST_RIP, &eip); 3.702 + VMX_DBG_LOG(DBG_LEVEL_1, 3.703 + "Restoring to %%eip 0x%lx\n", eip); 3.704 + return 0; /* do not update eip! 
*/ 3.705 + } 3.706 } 3.707 3.708 return 1; 3.709 @@ -1198,8 +1198,8 @@ static int mov_to_cr(int gp, int cr, str 3.710 CASE_GET_REG(ESI, esi); 3.711 CASE_GET_REG(EDI, edi); 3.712 CASE_EXTEND_GET_REG 3.713 - case REG_ESP: 3.714 - __vmread(GUEST_RSP, &value); 3.715 + case REG_ESP: 3.716 + __vmread(GUEST_RSP, &value); 3.717 break; 3.718 default: 3.719 printk("invalid gp: %d\n", gp); 3.720 @@ -1212,7 +1212,7 @@ static int mov_to_cr(int gp, int cr, str 3.721 switch(cr) { 3.722 case 0: 3.723 { 3.724 - return vmx_set_cr0(value); 3.725 + return vmx_set_cr0(value); 3.726 } 3.727 case 3: 3.728 { 3.729 @@ -1262,7 +1262,7 @@ static int mov_to_cr(int gp, int cr, str 3.730 */ 3.731 d->arch.arch_vmx.cpu_cr3 = value; 3.732 VMX_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", 3.733 - value); 3.734 + value); 3.735 __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table)); 3.736 } 3.737 break; 3.738 @@ -1332,8 +1332,8 @@ static void mov_from_cr(int cr, int gp, 3.739 CASE_SET_REG(ESI, esi); 3.740 CASE_SET_REG(EDI, edi); 3.741 CASE_EXTEND_SET_REG 3.742 - case REG_ESP: 3.743 - __vmwrite(GUEST_RSP, value); 3.744 + case REG_ESP: 3.745 + __vmwrite(GUEST_RSP, value); 3.746 regs->esp = value; 3.747 break; 3.748 default: 3.749 @@ -1381,9 +1381,9 @@ static int vmx_cr_access(unsigned long e 3.750 case TYPE_LMSW: 3.751 TRACE_VMEXIT(1,TYPE_LMSW); 3.752 __vmread(CR0_READ_SHADOW, &value); 3.753 - value = (value & ~0xF) | 3.754 - (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF); 3.755 - return vmx_set_cr0(value); 3.756 + value = (value & ~0xF) | 3.757 + (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF); 3.758 + return vmx_set_cr0(value); 3.759 break; 3.760 default: 3.761 __vmx_bug(regs); 3.762 @@ -1400,20 +1400,20 @@ static inline void vmx_do_msr_read(struc 3.763 (unsigned long)regs->ecx, (unsigned long)regs->eax, 3.764 (unsigned long)regs->edx); 3.765 switch (regs->ecx) { 3.766 - case MSR_IA32_SYSENTER_CS: 3.767 - __vmread(GUEST_SYSENTER_CS, (u32 *)&msr_content); 3.768 - break; 3.769 - case MSR_IA32_SYSENTER_ESP: 3.770 - __vmread(GUEST_SYSENTER_ESP, &msr_content); 3.771 - break; 3.772 - case MSR_IA32_SYSENTER_EIP: 3.773 - __vmread(GUEST_SYSENTER_EIP, &msr_content); 3.774 - break; 3.775 - default: 3.776 - if(long_mode_do_msr_read(regs)) 3.777 - return; 3.778 - rdmsr_user(regs->ecx, regs->eax, regs->edx); 3.779 - break; 3.780 + case MSR_IA32_SYSENTER_CS: 3.781 + __vmread(GUEST_SYSENTER_CS, (u32 *)&msr_content); 3.782 + break; 3.783 + case MSR_IA32_SYSENTER_ESP: 3.784 + __vmread(GUEST_SYSENTER_ESP, &msr_content); 3.785 + break; 3.786 + case MSR_IA32_SYSENTER_EIP: 3.787 + __vmread(GUEST_SYSENTER_EIP, &msr_content); 3.788 + break; 3.789 + default: 3.790 + if(long_mode_do_msr_read(regs)) 3.791 + return; 3.792 + rdmsr_user(regs->ecx, regs->eax, regs->edx); 3.793 + break; 3.794 } 3.795 3.796 regs->eax = msr_content & 0xFFFFFFFF; 3.797 @@ -1436,18 +1436,18 @@ static inline void vmx_do_msr_write(stru 3.798 msr_content = (regs->eax & 0xFFFFFFFF) | ((u64)regs->edx << 32); 3.799 3.800 switch (regs->ecx) { 3.801 - case MSR_IA32_SYSENTER_CS: 3.802 - __vmwrite(GUEST_SYSENTER_CS, msr_content); 3.803 - break; 3.804 - case MSR_IA32_SYSENTER_ESP: 3.805 - __vmwrite(GUEST_SYSENTER_ESP, msr_content); 3.806 - break; 3.807 - case MSR_IA32_SYSENTER_EIP: 3.808 - __vmwrite(GUEST_SYSENTER_EIP, msr_content); 3.809 - break; 3.810 - default: 3.811 - long_mode_do_msr_write(regs); 3.812 - break; 3.813 + case MSR_IA32_SYSENTER_CS: 3.814 + __vmwrite(GUEST_SYSENTER_CS, msr_content); 3.815 + break; 3.816 + case MSR_IA32_SYSENTER_ESP: 3.817 + 
__vmwrite(GUEST_SYSENTER_ESP, msr_content); 3.818 + break; 3.819 + case MSR_IA32_SYSENTER_EIP: 3.820 + __vmwrite(GUEST_SYSENTER_EIP, msr_content); 3.821 + break; 3.822 + default: 3.823 + long_mode_do_msr_write(regs); 3.824 + break; 3.825 } 3.826 3.827 VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_write returns: " 3.828 @@ -1491,28 +1491,28 @@ static inline void vmx_vmexit_do_extint( 3.829 local_irq_disable(); 3.830 3.831 switch(vector) { 3.832 - case LOCAL_TIMER_VECTOR: 3.833 - smp_apic_timer_interrupt(regs); 3.834 - break; 3.835 - case EVENT_CHECK_VECTOR: 3.836 - smp_event_check_interrupt(); 3.837 - break; 3.838 - case INVALIDATE_TLB_VECTOR: 3.839 - smp_invalidate_interrupt(); 3.840 - break; 3.841 - case CALL_FUNCTION_VECTOR: 3.842 - smp_call_function_interrupt(); 3.843 - break; 3.844 - case SPURIOUS_APIC_VECTOR: 3.845 - smp_spurious_interrupt(regs); 3.846 - break; 3.847 - case ERROR_APIC_VECTOR: 3.848 - smp_error_interrupt(regs); 3.849 - break; 3.850 - default: 3.851 - regs->entry_vector = vector; 3.852 - do_IRQ(regs); 3.853 - break; 3.854 + case LOCAL_TIMER_VECTOR: 3.855 + smp_apic_timer_interrupt(regs); 3.856 + break; 3.857 + case EVENT_CHECK_VECTOR: 3.858 + smp_event_check_interrupt(); 3.859 + break; 3.860 + case INVALIDATE_TLB_VECTOR: 3.861 + smp_invalidate_interrupt(); 3.862 + break; 3.863 + case CALL_FUNCTION_VECTOR: 3.864 + smp_call_function_interrupt(); 3.865 + break; 3.866 + case SPURIOUS_APIC_VECTOR: 3.867 + smp_spurious_interrupt(regs); 3.868 + break; 3.869 + case ERROR_APIC_VECTOR: 3.870 + smp_error_interrupt(regs); 3.871 + break; 3.872 + default: 3.873 + regs->entry_vector = vector; 3.874 + do_IRQ(regs); 3.875 + break; 3.876 } 3.877 } 3.878 3.879 @@ -1604,17 +1604,17 @@ asmlinkage void vmx_vmexit_handler(struc 3.880 3.881 __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field); 3.882 if (idtv_info_field & INTR_INFO_VALID_MASK) { 3.883 - __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field); 3.884 + __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field); 3.885 3.886 - __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len); 3.887 - if (inst_len >= 1 && inst_len <= 15) 3.888 - __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len); 3.889 + __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len); 3.890 + if (inst_len >= 1 && inst_len <= 15) 3.891 + __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len); 3.892 3.893 - if (idtv_info_field & 0x800) { /* valid error code */ 3.894 - unsigned long error_code; 3.895 - __vmread(IDT_VECTORING_ERROR_CODE, &error_code); 3.896 - __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 3.897 - } 3.898 + if (idtv_info_field & 0x800) { /* valid error code */ 3.899 + unsigned long error_code; 3.900 + __vmread(IDT_VECTORING_ERROR_CODE, &error_code); 3.901 + __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); 3.902 + } 3.903 3.904 VMX_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field); 3.905 } 3.906 @@ -1652,7 +1652,7 @@ asmlinkage void vmx_vmexit_handler(struc 3.907 __vmx_bug(®s); 3.908 vector &= 0xff; 3.909 3.910 - TRACE_VMEXIT(1,vector); 3.911 + TRACE_VMEXIT(1,vector); 3.912 perfc_incra(cause_vector, vector); 3.913 3.914 TRACE_3D(TRC_VMX_VECTOR, v->domain->domain_id, eip, vector); 3.915 @@ -1698,8 +1698,8 @@ asmlinkage void vmx_vmexit_handler(struc 3.916 __vmread(EXIT_QUALIFICATION, &va); 3.917 __vmread(VM_EXIT_INTR_ERROR_CODE, ®s.error_code); 3.918 3.919 - TRACE_VMEXIT(3,regs.error_code); 3.920 - TRACE_VMEXIT(4,va); 3.921 + TRACE_VMEXIT(3,regs.error_code); 3.922 + TRACE_VMEXIT(4,va); 3.923 3.924 VMX_DBG_LOG(DBG_LEVEL_VMMU, 3.925 "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx", 
3.926 @@ -1732,7 +1732,7 @@ asmlinkage void vmx_vmexit_handler(struc 3.927 break; 3.928 case EXIT_REASON_PENDING_INTERRUPT: 3.929 __vmwrite(CPU_BASED_VM_EXEC_CONTROL, 3.930 - MONITOR_CPU_BASED_EXEC_CONTROLS); 3.931 + MONITOR_CPU_BASED_EXEC_CONTROLS); 3.932 break; 3.933 case EXIT_REASON_TASK_SWITCH: 3.934 __vmx_bug(®s); 3.935 @@ -1772,10 +1772,10 @@ asmlinkage void vmx_vmexit_handler(struc 3.936 __vmread(EXIT_QUALIFICATION, &exit_qualification); 3.937 3.938 VMX_DBG_LOG(DBG_LEVEL_1, "eip = %lx, inst_len =%lx, exit_qualification = %lx", 3.939 - eip, inst_len, exit_qualification); 3.940 + eip, inst_len, exit_qualification); 3.941 if (vmx_cr_access(exit_qualification, ®s)) 3.942 - __update_guest_eip(inst_len); 3.943 - TRACE_VMEXIT(3,regs.error_code); 3.944 + __update_guest_eip(inst_len); 3.945 + TRACE_VMEXIT(3,regs.error_code); 3.946 TRACE_VMEXIT(4,exit_qualification); 3.947 break; 3.948 } 3.949 @@ -1828,8 +1828,8 @@ asmlinkage void load_cr2(void) 3.950 asmlinkage void trace_vmentry (void) 3.951 { 3.952 TRACE_5D(TRC_VMENTRY,trace_values[current->processor][0], 3.953 - trace_values[current->processor][1],trace_values[current->processor][2], 3.954 - trace_values[current->processor][3],trace_values[current->processor][4]); 3.955 + trace_values[current->processor][1],trace_values[current->processor][2], 3.956 + trace_values[current->processor][3],trace_values[current->processor][4]); 3.957 TRACE_VMEXIT(0,9); 3.958 TRACE_VMEXIT(1,9); 3.959 TRACE_VMEXIT(2,9);
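
One hunk above is easy to lose in the noise: the loop in vmx_restore_msrs(), which writes back only the MSRs the guest actually overrides, driven by a dirty-bit mask. A sketch of its post-patch shape (the declarations and loop condition here are assumptions; the body lines appear verbatim in the hunk):

    /* Restore only the MSRs this guest actually overrides. */
    unsigned long guest_flags = guest_state->flags;
    int i;

    while ( guest_flags ) {
        i = find_first_set_bit(guest_flags);
        set_bit(i, &host_state->flags);   /* mark MSR as needing a host restore later */
        wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
        clear_bit(i, &guest_flags);
    }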
--- a/xen/arch/x86/vmx_intercept.c	Sun Sep 11 16:36:24 2005 +0000
+++ b/xen/arch/x86/vmx_intercept.c	Sun Sep 11 16:44:23 2005 +0000
@@ -45,8 +45,8 @@ int vmx_io_intercept(ioreq_t *p, int typ
 addr = handler->hdl_list[i].addr;
 offset = handler->hdl_list[i].offset;
 if (p->addr >= addr &&
-    p->addr < addr + offset)
-    return handler->hdl_list[i].action(p);
+    p->addr < addr + offset)
+    return handler->hdl_list[i].action(p);
 }
 return 0;
 }
@@ -172,22 +172,22 @@ int intercept_pit_io(ioreq_t *p)
 
 if (p->size != 1 ||
 p->pdata_valid ||
-    p->type != IOREQ_TYPE_PIO)
+    p->type != IOREQ_TYPE_PIO)
 return 0;
 
 if (p->addr == PIT_MODE &&
-    p->dir == 0 && /* write */
-    ((p->u.data >> 4) & 0x3) == 0 && /* latch command */
+    p->dir == 0 && /* write */
+    ((p->u.data >> 4) & 0x3) == 0 && /* latch command */
 ((p->u.data >> 6) & 0x3) == (vpit->channel)) {/* right channel */
 pit_latch_io(vpit);
-    return 1;
+    return 1;
 }
 
 if (p->addr == (PIT_CH0 + vpit->channel) &&
-    p->dir == 1) { /* read */
+    p->dir == 1) { /* read */
 p->u.data = pit_read_io(vpit);
 resume_pit_io(p);
-    return 1;
+    return 1;
 }
 
 return 0;
@@ -253,8 +253,8 @@ void vmx_hooks_assist(struct vcpu *d)
 vpit->channel = ((p->u.data >> 24) & 0x3);
 vpit->first_injected = 0;
 
-    vpit->count_LSB_latched = 0;
-    vpit->count_MSB_latched = 0;
+    vpit->count_LSB_latched = 0;
+    vpit->count_MSB_latched = 0;
 
 rw_mode = ((p->u.data >> 26) & 0x3);
 switch(rw_mode) {
@@ -280,9 +280,19 @@ void vmx_hooks_assist(struct vcpu *d)
 /*restore the state*/
 p->state = STATE_IORESP_READY;
 
-    /* register handler to intercept the PIT io when vm_exit */
+    /* register handler to intercept the PIT io when vm_exit */
 if (!reinit)
-    register_portio_handler(0x40, 4, intercept_pit_io);
+    register_portio_handler(0x40, 4, intercept_pit_io);
 }
 }
 #endif /* CONFIG_VMX */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
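
The intercept_pit_io() hunks just above filter guest accesses to the virtual i8254 PIT. The magic numbers decode the PIT control word written to port 0x43 (PIT_MODE): bits 7-6 select the counter and bits 5-4 the access mode, where 00 is the latch-count command. Restated as a self-contained helper (the function name is hypothetical; the bit layout is from the i8254 datasheet):

    /* i8254 control word: [7:6] counter select, [5:4] access mode
     * (00 = latch count), [3:1] operating mode, [0] BCD flag. */
    static inline int pit_is_latch_cmd(unsigned int data, unsigned int channel)
    {
        return ((data >> 4) & 0x3) == 0 &&      /* latch-count command */
               ((data >> 6) & 0x3) == channel;  /* aimed at our channel */
    }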
5.1 --- a/xen/arch/x86/vmx_io.c Sun Sep 11 16:36:24 2005 +0000 5.2 +++ b/xen/arch/x86/vmx_io.c Sun Sep 11 16:44:23 2005 +0000 5.3 @@ -16,6 +16,7 @@ 5.4 * Place - Suite 330, Boston, MA 02111-1307 USA. 5.5 * 5.6 */ 5.7 + 5.8 #include <xen/config.h> 5.9 #include <xen/init.h> 5.10 #include <xen/mm.h> 5.11 @@ -198,24 +199,24 @@ void load_cpu_user_regs(struct cpu_user_ 5.12 static inline void __set_reg_value(unsigned long *reg, int size, long value) 5.13 { 5.14 switch (size) { 5.15 - case BYTE_64: 5.16 - *reg &= ~0xFF; 5.17 - *reg |= (value & 0xFF); 5.18 - break; 5.19 - case WORD: 5.20 - *reg &= ~0xFFFF; 5.21 - *reg |= (value & 0xFFFF); 5.22 - break; 5.23 - case LONG: 5.24 - *reg &= ~0xFFFFFFFF; 5.25 - *reg |= (value & 0xFFFFFFFF); 5.26 - break; 5.27 - case QUAD: 5.28 - *reg = value; 5.29 - break; 5.30 - default: 5.31 - printk("Error: <__set_reg_value>: size:%x is invalid\n", size); 5.32 - domain_crash_synchronous(); 5.33 + case BYTE_64: 5.34 + *reg &= ~0xFF; 5.35 + *reg |= (value & 0xFF); 5.36 + break; 5.37 + case WORD: 5.38 + *reg &= ~0xFFFF; 5.39 + *reg |= (value & 0xFFFF); 5.40 + break; 5.41 + case LONG: 5.42 + *reg &= ~0xFFFFFFFF; 5.43 + *reg |= (value & 0xFFFFFFFF); 5.44 + break; 5.45 + case QUAD: 5.46 + *reg = value; 5.47 + break; 5.48 + default: 5.49 + printk("Error: <__set_reg_value>: size:%x is invalid\n", size); 5.50 + domain_crash_synchronous(); 5.51 } 5.52 } 5.53 5.54 @@ -223,98 +224,98 @@ static void set_reg_value (int size, int 5.55 { 5.56 if (size == BYTE) { 5.57 switch (index) { 5.58 - case 0: 5.59 - regs->rax &= ~0xFF; 5.60 - regs->rax |= (value & 0xFF); 5.61 - break; 5.62 - case 1: 5.63 - regs->rcx &= ~0xFF; 5.64 - regs->rcx |= (value & 0xFF); 5.65 - break; 5.66 - case 2: 5.67 - regs->rdx &= ~0xFF; 5.68 - regs->rdx |= (value & 0xFF); 5.69 - break; 5.70 - case 3: 5.71 - regs->rbx &= ~0xFF; 5.72 - regs->rbx |= (value & 0xFF); 5.73 - break; 5.74 - case 4: 5.75 - regs->rax &= 0xFFFFFFFFFFFF00FF; 5.76 - regs->rax |= ((value & 0xFF) << 8); 5.77 - break; 5.78 - case 5: 5.79 - regs->rcx &= 0xFFFFFFFFFFFF00FF; 5.80 - regs->rcx |= ((value & 0xFF) << 8); 5.81 - break; 5.82 - case 6: 5.83 - regs->rdx &= 0xFFFFFFFFFFFF00FF; 5.84 - regs->rdx |= ((value & 0xFF) << 8); 5.85 - break; 5.86 - case 7: 5.87 - regs->rbx &= 0xFFFFFFFFFFFF00FF; 5.88 - regs->rbx |= ((value & 0xFF) << 8); 5.89 - break; 5.90 - default: 5.91 - printk("Error: size:%x, index:%x are invalid!\n", size, index); 5.92 - domain_crash_synchronous(); 5.93 - break; 5.94 + case 0: 5.95 + regs->rax &= ~0xFF; 5.96 + regs->rax |= (value & 0xFF); 5.97 + break; 5.98 + case 1: 5.99 + regs->rcx &= ~0xFF; 5.100 + regs->rcx |= (value & 0xFF); 5.101 + break; 5.102 + case 2: 5.103 + regs->rdx &= ~0xFF; 5.104 + regs->rdx |= (value & 0xFF); 5.105 + break; 5.106 + case 3: 5.107 + regs->rbx &= ~0xFF; 5.108 + regs->rbx |= (value & 0xFF); 5.109 + break; 5.110 + case 4: 5.111 + regs->rax &= 0xFFFFFFFFFFFF00FF; 5.112 + regs->rax |= ((value & 0xFF) << 8); 5.113 + break; 5.114 + case 5: 5.115 + regs->rcx &= 0xFFFFFFFFFFFF00FF; 5.116 + regs->rcx |= ((value & 0xFF) << 8); 5.117 + break; 5.118 + case 6: 5.119 + regs->rdx &= 0xFFFFFFFFFFFF00FF; 5.120 + regs->rdx |= ((value & 0xFF) << 8); 5.121 + break; 5.122 + case 7: 5.123 + regs->rbx &= 0xFFFFFFFFFFFF00FF; 5.124 + regs->rbx |= ((value & 0xFF) << 8); 5.125 + break; 5.126 + default: 5.127 + printk("Error: size:%x, index:%x are invalid!\n", size, index); 5.128 + domain_crash_synchronous(); 5.129 + break; 5.130 } 5.131 return; 5.132 } 5.133 5.134 switch (index) { 5.135 - case 0: 5.136 - 
__set_reg_value(®s->rax, size, value); 5.137 - break; 5.138 - case 1: 5.139 - __set_reg_value(®s->rcx, size, value); 5.140 - break; 5.141 - case 2: 5.142 - __set_reg_value(®s->rdx, size, value); 5.143 - break; 5.144 - case 3: 5.145 - __set_reg_value(®s->rbx, size, value); 5.146 - break; 5.147 - case 4: 5.148 - __set_reg_value(®s->rsp, size, value); 5.149 - break; 5.150 - case 5: 5.151 - __set_reg_value(®s->rbp, size, value); 5.152 - break; 5.153 - case 6: 5.154 - __set_reg_value(®s->rsi, size, value); 5.155 - break; 5.156 - case 7: 5.157 - __set_reg_value(®s->rdi, size, value); 5.158 - break; 5.159 - case 8: 5.160 - __set_reg_value(®s->r8, size, value); 5.161 - break; 5.162 - case 9: 5.163 - __set_reg_value(®s->r9, size, value); 5.164 - break; 5.165 - case 10: 5.166 - __set_reg_value(®s->r10, size, value); 5.167 - break; 5.168 - case 11: 5.169 - __set_reg_value(®s->r11, size, value); 5.170 - break; 5.171 - case 12: 5.172 - __set_reg_value(®s->r12, size, value); 5.173 - break; 5.174 - case 13: 5.175 - __set_reg_value(®s->r13, size, value); 5.176 - break; 5.177 - case 14: 5.178 - __set_reg_value(®s->r14, size, value); 5.179 - break; 5.180 - case 15: 5.181 - __set_reg_value(®s->r15, size, value); 5.182 - break; 5.183 - default: 5.184 - printk("Error: <set_reg_value> Invalid index\n"); 5.185 - domain_crash_synchronous(); 5.186 + case 0: 5.187 + __set_reg_value(®s->rax, size, value); 5.188 + break; 5.189 + case 1: 5.190 + __set_reg_value(®s->rcx, size, value); 5.191 + break; 5.192 + case 2: 5.193 + __set_reg_value(®s->rdx, size, value); 5.194 + break; 5.195 + case 3: 5.196 + __set_reg_value(®s->rbx, size, value); 5.197 + break; 5.198 + case 4: 5.199 + __set_reg_value(®s->rsp, size, value); 5.200 + break; 5.201 + case 5: 5.202 + __set_reg_value(®s->rbp, size, value); 5.203 + break; 5.204 + case 6: 5.205 + __set_reg_value(®s->rsi, size, value); 5.206 + break; 5.207 + case 7: 5.208 + __set_reg_value(®s->rdi, size, value); 5.209 + break; 5.210 + case 8: 5.211 + __set_reg_value(®s->r8, size, value); 5.212 + break; 5.213 + case 9: 5.214 + __set_reg_value(®s->r9, size, value); 5.215 + break; 5.216 + case 10: 5.217 + __set_reg_value(®s->r10, size, value); 5.218 + break; 5.219 + case 11: 5.220 + __set_reg_value(®s->r11, size, value); 5.221 + break; 5.222 + case 12: 5.223 + __set_reg_value(®s->r12, size, value); 5.224 + break; 5.225 + case 13: 5.226 + __set_reg_value(®s->r13, size, value); 5.227 + break; 5.228 + case 14: 5.229 + __set_reg_value(®s->r14, size, value); 5.230 + break; 5.231 + case 15: 5.232 + __set_reg_value(®s->r15, size, value); 5.233 + break; 5.234 + default: 5.235 + printk("Error: <set_reg_value> Invalid index\n"); 5.236 + domain_crash_synchronous(); 5.237 } 5.238 return; 5.239 } 5.240 @@ -323,44 +324,44 @@ static void set_reg_value (int size, int 5.241 extern long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs); 5.242 5.243 static inline void set_eflags_CF(int size, unsigned long v1, 5.244 - unsigned long v2, struct cpu_user_regs *regs) 5.245 + unsigned long v2, struct cpu_user_regs *regs) 5.246 { 5.247 unsigned long mask = (1 << (8 * size)) - 1; 5.248 5.249 if ((v1 & mask) > (v2 & mask)) 5.250 - regs->eflags |= X86_EFLAGS_CF; 5.251 + regs->eflags |= X86_EFLAGS_CF; 5.252 else 5.253 - regs->eflags &= ~X86_EFLAGS_CF; 5.254 + regs->eflags &= ~X86_EFLAGS_CF; 5.255 } 5.256 5.257 static inline void set_eflags_OF(int size, unsigned long v1, 5.258 - unsigned long v2, unsigned long v3, struct cpu_user_regs *regs) 5.259 + unsigned long v2, unsigned long v3, struct 
cpu_user_regs *regs) 5.260 { 5.261 if ((v3 ^ v2) & (v3 ^ v1) & (1 << ((8 * size) - 1))) 5.262 - regs->eflags |= X86_EFLAGS_OF; 5.263 + regs->eflags |= X86_EFLAGS_OF; 5.264 } 5.265 5.266 static inline void set_eflags_AF(int size, unsigned long v1, 5.267 - unsigned long v2, unsigned long v3, struct cpu_user_regs *regs) 5.268 + unsigned long v2, unsigned long v3, struct cpu_user_regs *regs) 5.269 { 5.270 if ((v1 ^ v2 ^ v3) & 0x10) 5.271 - regs->eflags |= X86_EFLAGS_AF; 5.272 + regs->eflags |= X86_EFLAGS_AF; 5.273 } 5.274 5.275 static inline void set_eflags_ZF(int size, unsigned long v1, 5.276 - struct cpu_user_regs *regs) 5.277 + struct cpu_user_regs *regs) 5.278 { 5.279 unsigned long mask = (1 << (8 * size)) - 1; 5.280 5.281 if ((v1 & mask) == 0) 5.282 - regs->eflags |= X86_EFLAGS_ZF; 5.283 + regs->eflags |= X86_EFLAGS_ZF; 5.284 } 5.285 5.286 static inline void set_eflags_SF(int size, unsigned long v1, 5.287 - struct cpu_user_regs *regs) 5.288 + struct cpu_user_regs *regs) 5.289 { 5.290 if (v1 & (1 << ((8 * size) - 1))) 5.291 - regs->eflags |= X86_EFLAGS_SF; 5.292 + regs->eflags |= X86_EFLAGS_SF; 5.293 } 5.294 5.295 static char parity_table[256] = { 5.296 @@ -383,14 +384,14 @@ static char parity_table[256] = { 5.297 }; 5.298 5.299 static inline void set_eflags_PF(int size, unsigned long v1, 5.300 - struct cpu_user_regs *regs) 5.301 + struct cpu_user_regs *regs) 5.302 { 5.303 if (parity_table[v1 & 0xFF]) 5.304 - regs->eflags |= X86_EFLAGS_PF; 5.305 + regs->eflags |= X86_EFLAGS_PF; 5.306 } 5.307 5.308 static void vmx_pio_assist(struct cpu_user_regs *regs, ioreq_t *p, 5.309 - struct mi_per_cpu_info *mpcip) 5.310 + struct mi_per_cpu_info *mpcip) 5.311 { 5.312 unsigned long old_eax; 5.313 int sign = p->df ? -1 : 1; 5.314 @@ -398,28 +399,28 @@ static void vmx_pio_assist(struct cpu_us 5.315 if (p->dir == IOREQ_WRITE) { 5.316 if (p->pdata_valid) { 5.317 regs->esi += sign * p->count * p->size; 5.318 - if (mpcip->flags & REPZ) 5.319 - regs->ecx -= p->count; 5.320 + if (mpcip->flags & REPZ) 5.321 + regs->ecx -= p->count; 5.322 } 5.323 } else { 5.324 - if (mpcip->flags & OVERLAP) { 5.325 - unsigned long addr; 5.326 + if (mpcip->flags & OVERLAP) { 5.327 + unsigned long addr; 5.328 5.329 regs->edi += sign * p->count * p->size; 5.330 - if (mpcip->flags & REPZ) 5.331 - regs->ecx -= p->count; 5.332 + if (mpcip->flags & REPZ) 5.333 + regs->ecx -= p->count; 5.334 5.335 - addr = regs->edi; 5.336 - if (sign > 0) 5.337 - addr -= p->size; 5.338 - vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT); 5.339 - } else if (p->pdata_valid) { 5.340 + addr = regs->edi; 5.341 + if (sign > 0) 5.342 + addr -= p->size; 5.343 + vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT); 5.344 + } else if (p->pdata_valid) { 5.345 regs->edi += sign * p->count * p->size; 5.346 - if (mpcip->flags & REPZ) 5.347 - regs->ecx -= p->count; 5.348 + if (mpcip->flags & REPZ) 5.349 + regs->ecx -= p->count; 5.350 } else { 5.351 - old_eax = regs->eax; 5.352 - switch (p->size) { 5.353 + old_eax = regs->eax; 5.354 + switch (p->size) { 5.355 case 1: 5.356 regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff); 5.357 break; 5.358 @@ -430,15 +431,15 @@ static void vmx_pio_assist(struct cpu_us 5.359 regs->eax = (p->u.data & 0xffffffff); 5.360 break; 5.361 default: 5.362 - printk("Error: %s unknown port size\n", __FUNCTION__); 5.363 - domain_crash_synchronous(); 5.364 - } 5.365 - } 5.366 + printk("Error: %s unknown port size\n", __FUNCTION__); 5.367 + domain_crash_synchronous(); 5.368 + } 5.369 + } 5.370 } 5.371 } 5.372 5.373 static void 
vmx_mmio_assist(struct cpu_user_regs *regs, ioreq_t *p, 5.374 - struct mi_per_cpu_info *mpcip) 5.375 + struct mi_per_cpu_info *mpcip) 5.376 { 5.377 int sign = p->df ? -1 : 1; 5.378 int size = -1, index = -1; 5.379 @@ -451,178 +452,178 @@ static void vmx_mmio_assist(struct cpu_u 5.380 5.381 switch (mpcip->instr) { 5.382 case INSTR_MOV: 5.383 - if (dst & REGISTER) { 5.384 - index = operand_index(dst); 5.385 - set_reg_value(size, index, 0, regs, p->u.data); 5.386 - } 5.387 - break; 5.388 + if (dst & REGISTER) { 5.389 + index = operand_index(dst); 5.390 + set_reg_value(size, index, 0, regs, p->u.data); 5.391 + } 5.392 + break; 5.393 5.394 case INSTR_MOVZ: 5.395 - if (dst & REGISTER) { 5.396 - index = operand_index(dst); 5.397 - switch (size) { 5.398 - case BYTE: p->u.data = p->u.data & 0xFFULL; break; 5.399 - case WORD: p->u.data = p->u.data & 0xFFFFULL; break; 5.400 - case LONG: p->u.data = p->u.data & 0xFFFFFFFFULL; break; 5.401 - } 5.402 - set_reg_value(operand_size(dst), index, 0, regs, p->u.data); 5.403 - } 5.404 - break; 5.405 + if (dst & REGISTER) { 5.406 + index = operand_index(dst); 5.407 + switch (size) { 5.408 + case BYTE: p->u.data = p->u.data & 0xFFULL; break; 5.409 + case WORD: p->u.data = p->u.data & 0xFFFFULL; break; 5.410 + case LONG: p->u.data = p->u.data & 0xFFFFFFFFULL; break; 5.411 + } 5.412 + set_reg_value(operand_size(dst), index, 0, regs, p->u.data); 5.413 + } 5.414 + break; 5.415 5.416 case INSTR_MOVS: 5.417 - sign = p->df ? -1 : 1; 5.418 - regs->esi += sign * p->count * p->size; 5.419 - regs->edi += sign * p->count * p->size; 5.420 + sign = p->df ? -1 : 1; 5.421 + regs->esi += sign * p->count * p->size; 5.422 + regs->edi += sign * p->count * p->size; 5.423 5.424 - if ((mpcip->flags & OVERLAP) && p->dir == IOREQ_READ) { 5.425 - unsigned long addr = regs->edi; 5.426 + if ((mpcip->flags & OVERLAP) && p->dir == IOREQ_READ) { 5.427 + unsigned long addr = regs->edi; 5.428 5.429 - if (sign > 0) 5.430 - addr -= p->size; 5.431 - vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT); 5.432 - } 5.433 + if (sign > 0) 5.434 + addr -= p->size; 5.435 + vmx_copy(&p->u.data, addr, p->size, VMX_COPY_OUT); 5.436 + } 5.437 5.438 - if (mpcip->flags & REPZ) 5.439 - regs->ecx -= p->count; 5.440 - break; 5.441 + if (mpcip->flags & REPZ) 5.442 + regs->ecx -= p->count; 5.443 + break; 5.444 5.445 case INSTR_STOS: 5.446 - sign = p->df ? -1 : 1; 5.447 - regs->edi += sign * p->count * p->size; 5.448 - if (mpcip->flags & REPZ) 5.449 - regs->ecx -= p->count; 5.450 - break; 5.451 + sign = p->df ? 
-1 : 1; 5.452 + regs->edi += sign * p->count * p->size; 5.453 + if (mpcip->flags & REPZ) 5.454 + regs->ecx -= p->count; 5.455 + break; 5.456 5.457 case INSTR_AND: 5.458 - if (src & REGISTER) { 5.459 - index = operand_index(src); 5.460 - value = get_reg_value(size, index, 0, regs); 5.461 - diff = (unsigned long) p->u.data & value; 5.462 - } else if (src & IMMEDIATE) { 5.463 - value = mpcip->immediate; 5.464 - diff = (unsigned long) p->u.data & value; 5.465 - } else if (src & MEMORY) { 5.466 - index = operand_index(dst); 5.467 - value = get_reg_value(size, index, 0, regs); 5.468 - diff = (unsigned long) p->u.data & value; 5.469 - set_reg_value(size, index, 0, regs, diff); 5.470 - } 5.471 + if (src & REGISTER) { 5.472 + index = operand_index(src); 5.473 + value = get_reg_value(size, index, 0, regs); 5.474 + diff = (unsigned long) p->u.data & value; 5.475 + } else if (src & IMMEDIATE) { 5.476 + value = mpcip->immediate; 5.477 + diff = (unsigned long) p->u.data & value; 5.478 + } else if (src & MEMORY) { 5.479 + index = operand_index(dst); 5.480 + value = get_reg_value(size, index, 0, regs); 5.481 + diff = (unsigned long) p->u.data & value; 5.482 + set_reg_value(size, index, 0, regs, diff); 5.483 + } 5.484 5.485 - /* 5.486 - * The OF and CF flags are cleared; the SF, ZF, and PF 5.487 - * flags are set according to the result. The state of 5.488 - * the AF flag is undefined. 5.489 - */ 5.490 - regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF| 5.491 - X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF); 5.492 - set_eflags_ZF(size, diff, regs); 5.493 - set_eflags_SF(size, diff, regs); 5.494 - set_eflags_PF(size, diff, regs); 5.495 - break; 5.496 + /* 5.497 + * The OF and CF flags are cleared; the SF, ZF, and PF 5.498 + * flags are set according to the result. The state of 5.499 + * the AF flag is undefined. 5.500 + */ 5.501 + regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF| 5.502 + X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF); 5.503 + set_eflags_ZF(size, diff, regs); 5.504 + set_eflags_SF(size, diff, regs); 5.505 + set_eflags_PF(size, diff, regs); 5.506 + break; 5.507 5.508 case INSTR_OR: 5.509 - if (src & REGISTER) { 5.510 - index = operand_index(src); 5.511 - value = get_reg_value(size, index, 0, regs); 5.512 - diff = (unsigned long) p->u.data | value; 5.513 - } else if (src & IMMEDIATE) { 5.514 - value = mpcip->immediate; 5.515 - diff = (unsigned long) p->u.data | value; 5.516 - } else if (src & MEMORY) { 5.517 - index = operand_index(dst); 5.518 - value = get_reg_value(size, index, 0, regs); 5.519 - diff = (unsigned long) p->u.data | value; 5.520 - set_reg_value(size, index, 0, regs, diff); 5.521 - } 5.522 + if (src & REGISTER) { 5.523 + index = operand_index(src); 5.524 + value = get_reg_value(size, index, 0, regs); 5.525 + diff = (unsigned long) p->u.data | value; 5.526 + } else if (src & IMMEDIATE) { 5.527 + value = mpcip->immediate; 5.528 + diff = (unsigned long) p->u.data | value; 5.529 + } else if (src & MEMORY) { 5.530 + index = operand_index(dst); 5.531 + value = get_reg_value(size, index, 0, regs); 5.532 + diff = (unsigned long) p->u.data | value; 5.533 + set_reg_value(size, index, 0, regs, diff); 5.534 + } 5.535 5.536 - /* 5.537 - * The OF and CF flags are cleared; the SF, ZF, and PF 5.538 - * flags are set according to the result. The state of 5.539 - * the AF flag is undefined. 
5.540 - */ 5.541 - regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF| 5.542 - X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF); 5.543 - set_eflags_ZF(size, diff, regs); 5.544 - set_eflags_SF(size, diff, regs); 5.545 - set_eflags_PF(size, diff, regs); 5.546 - break; 5.547 + /* 5.548 + * The OF and CF flags are cleared; the SF, ZF, and PF 5.549 + * flags are set according to the result. The state of 5.550 + * the AF flag is undefined. 5.551 + */ 5.552 + regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF| 5.553 + X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF); 5.554 + set_eflags_ZF(size, diff, regs); 5.555 + set_eflags_SF(size, diff, regs); 5.556 + set_eflags_PF(size, diff, regs); 5.557 + break; 5.558 5.559 case INSTR_XOR: 5.560 - if (src & REGISTER) { 5.561 - index = operand_index(src); 5.562 - value = get_reg_value(size, index, 0, regs); 5.563 - diff = (unsigned long) p->u.data ^ value; 5.564 - } else if (src & IMMEDIATE) { 5.565 - value = mpcip->immediate; 5.566 - diff = (unsigned long) p->u.data ^ value; 5.567 - } else if (src & MEMORY) { 5.568 - index = operand_index(dst); 5.569 - value = get_reg_value(size, index, 0, regs); 5.570 - diff = (unsigned long) p->u.data ^ value; 5.571 - set_reg_value(size, index, 0, regs, diff); 5.572 - } 5.573 + if (src & REGISTER) { 5.574 + index = operand_index(src); 5.575 + value = get_reg_value(size, index, 0, regs); 5.576 + diff = (unsigned long) p->u.data ^ value; 5.577 + } else if (src & IMMEDIATE) { 5.578 + value = mpcip->immediate; 5.579 + diff = (unsigned long) p->u.data ^ value; 5.580 + } else if (src & MEMORY) { 5.581 + index = operand_index(dst); 5.582 + value = get_reg_value(size, index, 0, regs); 5.583 + diff = (unsigned long) p->u.data ^ value; 5.584 + set_reg_value(size, index, 0, regs, diff); 5.585 + } 5.586 5.587 - /* 5.588 - * The OF and CF flags are cleared; the SF, ZF, and PF 5.589 - * flags are set according to the result. The state of 5.590 - * the AF flag is undefined. 5.591 - */ 5.592 - regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF| 5.593 - X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF); 5.594 - set_eflags_ZF(size, diff, regs); 5.595 - set_eflags_SF(size, diff, regs); 5.596 - set_eflags_PF(size, diff, regs); 5.597 - break; 5.598 + /* 5.599 + * The OF and CF flags are cleared; the SF, ZF, and PF 5.600 + * flags are set according to the result. The state of 5.601 + * the AF flag is undefined. 
5.602 + */ 5.603 + regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF| 5.604 + X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF); 5.605 + set_eflags_ZF(size, diff, regs); 5.606 + set_eflags_SF(size, diff, regs); 5.607 + set_eflags_PF(size, diff, regs); 5.608 + break; 5.609 5.610 case INSTR_CMP: 5.611 - if (src & REGISTER) { 5.612 - index = operand_index(src); 5.613 - value = get_reg_value(size, index, 0, regs); 5.614 - diff = (unsigned long) p->u.data - value; 5.615 - } else if (src & IMMEDIATE) { 5.616 - value = mpcip->immediate; 5.617 - diff = (unsigned long) p->u.data - value; 5.618 - } else if (src & MEMORY) { 5.619 - index = operand_index(dst); 5.620 - value = get_reg_value(size, index, 0, regs); 5.621 - diff = value - (unsigned long) p->u.data; 5.622 - } 5.623 + if (src & REGISTER) { 5.624 + index = operand_index(src); 5.625 + value = get_reg_value(size, index, 0, regs); 5.626 + diff = (unsigned long) p->u.data - value; 5.627 + } else if (src & IMMEDIATE) { 5.628 + value = mpcip->immediate; 5.629 + diff = (unsigned long) p->u.data - value; 5.630 + } else if (src & MEMORY) { 5.631 + index = operand_index(dst); 5.632 + value = get_reg_value(size, index, 0, regs); 5.633 + diff = value - (unsigned long) p->u.data; 5.634 + } 5.635 5.636 - /* 5.637 - * The CF, OF, SF, ZF, AF, and PF flags are set according 5.638 - * to the result 5.639 - */ 5.640 - regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF| 5.641 - X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF); 5.642 - set_eflags_CF(size, value, (unsigned long) p->u.data, regs); 5.643 - set_eflags_OF(size, diff, value, (unsigned long) p->u.data, regs); 5.644 - set_eflags_AF(size, diff, value, (unsigned long) p->u.data, regs); 5.645 - set_eflags_ZF(size, diff, regs); 5.646 - set_eflags_SF(size, diff, regs); 5.647 - set_eflags_PF(size, diff, regs); 5.648 - break; 5.649 + /* 5.650 + * The CF, OF, SF, ZF, AF, and PF flags are set according 5.651 + * to the result 5.652 + */ 5.653 + regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF| 5.654 + X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF); 5.655 + set_eflags_CF(size, value, (unsigned long) p->u.data, regs); 5.656 + set_eflags_OF(size, diff, value, (unsigned long) p->u.data, regs); 5.657 + set_eflags_AF(size, diff, value, (unsigned long) p->u.data, regs); 5.658 + set_eflags_ZF(size, diff, regs); 5.659 + set_eflags_SF(size, diff, regs); 5.660 + set_eflags_PF(size, diff, regs); 5.661 + break; 5.662 5.663 case INSTR_TEST: 5.664 - if (src & REGISTER) { 5.665 - index = operand_index(src); 5.666 - value = get_reg_value(size, index, 0, regs); 5.667 - } else if (src & IMMEDIATE) { 5.668 - value = mpcip->immediate; 5.669 - } else if (src & MEMORY) { 5.670 - index = operand_index(dst); 5.671 - value = get_reg_value(size, index, 0, regs); 5.672 - } 5.673 - diff = (unsigned long) p->u.data & value; 5.674 + if (src & REGISTER) { 5.675 + index = operand_index(src); 5.676 + value = get_reg_value(size, index, 0, regs); 5.677 + } else if (src & IMMEDIATE) { 5.678 + value = mpcip->immediate; 5.679 + } else if (src & MEMORY) { 5.680 + index = operand_index(dst); 5.681 + value = get_reg_value(size, index, 0, regs); 5.682 + } 5.683 + diff = (unsigned long) p->u.data & value; 5.684 5.685 - /* 5.686 - * Sets the SF, ZF, and PF status flags. 
CF and OF are set to 0 5.687 - */ 5.688 - regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF| 5.689 - X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF); 5.690 - set_eflags_ZF(size, diff, regs); 5.691 - set_eflags_SF(size, diff, regs); 5.692 - set_eflags_PF(size, diff, regs); 5.693 - break; 5.694 + /* 5.695 + * Sets the SF, ZF, and PF status flags. CF and OF are set to 0 5.696 + */ 5.697 + regs->eflags &= ~(X86_EFLAGS_CF|X86_EFLAGS_PF| 5.698 + X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_OF); 5.699 + set_eflags_ZF(size, diff, regs); 5.700 + set_eflags_SF(size, diff, regs); 5.701 + set_eflags_PF(size, diff, regs); 5.702 + break; 5.703 } 5.704 5.705 load_cpu_user_regs(regs); 5.706 @@ -644,7 +645,7 @@ void vmx_io_assist(struct vcpu *v) 5.707 if (vio == 0) { 5.708 VMX_DBG_LOG(DBG_LEVEL_1, 5.709 "bad shared page: %lx", (unsigned long) vio); 5.710 - printf("bad shared page: %lx\n", (unsigned long) vio); 5.711 + printf("bad shared page: %lx\n", (unsigned long) vio); 5.712 domain_crash_synchronous(); 5.713 } 5.714 5.715 @@ -655,15 +656,15 @@ void vmx_io_assist(struct vcpu *v) 5.716 /* clear IO wait VMX flag */ 5.717 if (test_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags)) { 5.718 if (p->state == STATE_IORESP_READY) { 5.719 - p->state = STATE_INVALID; 5.720 + p->state = STATE_INVALID; 5.721 clear_bit(ARCH_VMX_IO_WAIT, &v->arch.arch_vmx.flags); 5.722 5.723 - if (p->type == IOREQ_TYPE_PIO) 5.724 - vmx_pio_assist(regs, p, mpci_p); 5.725 - else 5.726 - vmx_mmio_assist(regs, p, mpci_p); 5.727 - } 5.728 - /* else an interrupt send event raced us */ 5.729 + if (p->type == IOREQ_TYPE_PIO) 5.730 + vmx_pio_assist(regs, p, mpci_p); 5.731 + else 5.732 + vmx_mmio_assist(regs, p, mpci_p); 5.733 + } 5.734 + /* else an interrupt send event raced us */ 5.735 } 5.736 } 5.737 5.738 @@ -730,7 +731,7 @@ static inline int __fls(u32 word) 5.739 return word ? 
bit : -1; 5.740 } 5.741 #else 5.742 -#define __fls(x) generic_fls(x) 5.743 +#define __fls(x) generic_fls(x) 5.744 static __inline__ int generic_fls(u32 x) 5.745 { 5.746 int r = 31; 5.747 @@ -839,23 +840,23 @@ interrupt_post_injection(struct vcpu * v 5.748 struct vmx_virpit_t *vpit = &(v->domain->arch.vmx_platform.vmx_pit); 5.749 switch(type) 5.750 { 5.751 - case VLAPIC_DELIV_MODE_EXT: 5.752 - if (vpit->pending_intr_nr && vector == vpit->vector) 5.753 - vpit->pending_intr_nr--; 5.754 - else 5.755 - clear_highest_bit(v, vector); 5.756 + case VLAPIC_DELIV_MODE_EXT: 5.757 + if (vpit->pending_intr_nr && vector == vpit->vector) 5.758 + vpit->pending_intr_nr--; 5.759 + else 5.760 + clear_highest_bit(v, vector); 5.761 5.762 - if (vector == vpit->vector && !vpit->first_injected){ 5.763 - vpit->first_injected = 1; 5.764 - vpit->pending_intr_nr = 0; 5.765 - } 5.766 - if (vector == vpit->vector) 5.767 - vpit->inject_point = NOW(); 5.768 - break; 5.769 + if (vector == vpit->vector && !vpit->first_injected){ 5.770 + vpit->first_injected = 1; 5.771 + vpit->pending_intr_nr = 0; 5.772 + } 5.773 + if (vector == vpit->vector) 5.774 + vpit->inject_point = NOW(); 5.775 + break; 5.776 5.777 - default: 5.778 - printk("Not support interrupt type\n"); 5.779 - break; 5.780 + default: 5.781 + printk("Not support interrupt type\n"); 5.782 + break; 5.783 } 5.784 } 5.785 5.786 @@ -897,51 +898,51 @@ asmlinkage void vmx_intr_assist(void) 5.787 return; 5.788 } 5.789 5.790 - __vmread(VM_ENTRY_INTR_INFO_FIELD, &intr_fields); 5.791 + __vmread(VM_ENTRY_INTR_INFO_FIELD, &intr_fields); 5.792 5.793 - if (intr_fields & INTR_INFO_VALID_MASK) { 5.794 - VMX_DBG_LOG(DBG_LEVEL_1, "vmx_intr_assist: intr_fields: %lx", 5.795 - intr_fields); 5.796 - return; 5.797 - } 5.798 + if (intr_fields & INTR_INFO_VALID_MASK) { 5.799 + VMX_DBG_LOG(DBG_LEVEL_1, "vmx_intr_assist: intr_fields: %lx", 5.800 + intr_fields); 5.801 + return; 5.802 + } 5.803 5.804 - __vmread(GUEST_INTERRUPTIBILITY_INFO, &interruptibility); 5.805 + __vmread(GUEST_INTERRUPTIBILITY_INFO, &interruptibility); 5.806 5.807 - if (interruptibility) { 5.808 - enable_irq_window(cpu_exec_control); 5.809 - VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, interruptibility: %lx", 5.810 - highest_vector, interruptibility); 5.811 - return; 5.812 - } 5.813 + if (interruptibility) { 5.814 + enable_irq_window(cpu_exec_control); 5.815 + VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, interruptibility: %lx", 5.816 + highest_vector, interruptibility); 5.817 + return; 5.818 + } 5.819 5.820 - __vmread(GUEST_RFLAGS, &eflags); 5.821 + __vmread(GUEST_RFLAGS, &eflags); 5.822 5.823 - switch (intr_type) { 5.824 - case VLAPIC_DELIV_MODE_EXT: 5.825 - if (irq_masked(eflags)) { 5.826 - enable_irq_window(cpu_exec_control); 5.827 - VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, eflags: %lx", 5.828 - highest_vector, eflags); 5.829 - return; 5.830 - } 5.831 + switch (intr_type) { 5.832 + case VLAPIC_DELIV_MODE_EXT: 5.833 + if (irq_masked(eflags)) { 5.834 + enable_irq_window(cpu_exec_control); 5.835 + VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, eflags: %lx", 5.836 + highest_vector, eflags); 5.837 + return; 5.838 + } 5.839 5.840 - vmx_inject_extint(v, highest_vector, VMX_INVALID_ERROR_CODE); 5.841 - TRACE_3D(TRC_VMX_INT, v->domain->domain_id, highest_vector, 0); 5.842 - break; 5.843 - case VLAPIC_DELIV_MODE_FIXED: 5.844 - case VLAPIC_DELIV_MODE_LPRI: 5.845 - case VLAPIC_DELIV_MODE_SMI: 5.846 - case VLAPIC_DELIV_MODE_NMI: 5.847 - case VLAPIC_DELIV_MODE_INIT: 5.848 - case VLAPIC_DELIV_MODE_STARTUP: 5.849 - 
default: 5.850 - printk("Unsupported interrupt type\n"); 5.851 - BUG(); 5.852 - break; 5.853 - } 5.854 + vmx_inject_extint(v, highest_vector, VMX_INVALID_ERROR_CODE); 5.855 + TRACE_3D(TRC_VMX_INT, v->domain->domain_id, highest_vector, 0); 5.856 + break; 5.857 + case VLAPIC_DELIV_MODE_FIXED: 5.858 + case VLAPIC_DELIV_MODE_LPRI: 5.859 + case VLAPIC_DELIV_MODE_SMI: 5.860 + case VLAPIC_DELIV_MODE_NMI: 5.861 + case VLAPIC_DELIV_MODE_INIT: 5.862 + case VLAPIC_DELIV_MODE_STARTUP: 5.863 + default: 5.864 + printk("Unsupported interrupt type\n"); 5.865 + BUG(); 5.866 + break; 5.867 + } 5.868 5.869 - interrupt_post_injection(v, highest_vector, intr_type); 5.870 - return; 5.871 + interrupt_post_injection(v, highest_vector, intr_type); 5.872 + return; 5.873 } 5.874 5.875 void vmx_do_resume(struct vcpu *d)
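
The set_eflags_* helpers in the vmx_io.c hunk above reproduce the x86 arithmetic-flag semantics for the emulated CMP path: CF is the unsigned borrow and OF the signed-overflow bit test, with v1/v2/v3 bound as diff/value/data at the call sites. A minimal standalone sketch of those two tests for result = minuend - subtrahend (plain C, not Xen code; the inputs are made up):

    #include <stdio.h>

    int main(void)
    {
        int size = 1;                               /* operand width in bytes */
        unsigned long mask = (1UL << (8 * size)) - 1;
        unsigned long minuend = 0x10, subtrahend = 0x90;  /* example inputs */
        unsigned long result = (minuend - subtrahend) & mask;

        /* CF: unsigned borrow, the same comparison as set_eflags_CF() */
        int cf = (subtrahend & mask) > (minuend & mask);

        /* OF: signed overflow, the same bit test as set_eflags_OF():
         * operands differ in sign and the result's sign differs from
         * the minuend's */
        int of = ((minuend ^ subtrahend) & (minuend ^ result) &
                  (1UL << (8 * size - 1))) != 0;

        printf("0x%02lx - 0x%02lx = 0x%02lx, CF=%d OF=%d\n",
               minuend, subtrahend, result, cf, of);  /* CF=1 OF=1 */
        return 0;
    }

For 0x10 - 0x90 as an 8-bit cmp this prints CF=1 (unsigned borrow) and OF=1 (16 - (-112) = 128 does not fit in a signed byte), matching what the emulated CMP would leave in eflags.
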
6.1 --- a/xen/arch/x86/vmx_platform.c Sun Sep 11 16:36:24 2005 +0000 6.2 +++ b/xen/arch/x86/vmx_platform.c Sun Sep 11 16:44:23 2005 +0000 6.3 @@ -55,17 +55,17 @@ void store_cpu_user_regs(struct cpu_user 6.4 static inline long __get_reg_value(unsigned long reg, int size) 6.5 { 6.6 switch(size) { 6.7 - case BYTE_64: 6.8 - return (char)(reg & 0xFF); 6.9 - case WORD: 6.10 - return (short)(reg & 0xFFFF); 6.11 - case LONG: 6.12 - return (int)(reg & 0xFFFFFFFF); 6.13 - case QUAD: 6.14 - return (long)(reg); 6.15 - default: 6.16 - printf("Error: (__get_reg_value) Invalid reg size\n"); 6.17 - domain_crash_synchronous(); 6.18 + case BYTE_64: 6.19 + return (char)(reg & 0xFF); 6.20 + case WORD: 6.21 + return (short)(reg & 0xFFFF); 6.22 + case LONG: 6.23 + return (int)(reg & 0xFFFFFFFF); 6.24 + case QUAD: 6.25 + return (long)(reg); 6.26 + default: 6.27 + printf("Error: (__get_reg_value) Invalid reg size\n"); 6.28 + domain_crash_synchronous(); 6.29 } 6.30 } 6.31 6.32 @@ -73,49 +73,49 @@ long get_reg_value(int size, int index, 6.33 { 6.34 if (size == BYTE) { 6.35 switch (index) { 6.36 - case 0: /* %al */ 6.37 - return (char)(regs->rax & 0xFF); 6.38 - case 1: /* %cl */ 6.39 - return (char)(regs->rcx & 0xFF); 6.40 - case 2: /* %dl */ 6.41 - return (char)(regs->rdx & 0xFF); 6.42 - case 3: /* %bl */ 6.43 - return (char)(regs->rbx & 0xFF); 6.44 - case 4: /* %ah */ 6.45 - return (char)((regs->rax & 0xFF00) >> 8); 6.46 - case 5: /* %ch */ 6.47 - return (char)((regs->rcx & 0xFF00) >> 8); 6.48 - case 6: /* %dh */ 6.49 - return (char)((regs->rdx & 0xFF00) >> 8); 6.50 - case 7: /* %bh */ 6.51 - return (char)((regs->rbx & 0xFF00) >> 8); 6.52 - default: 6.53 - printf("Error: (get_reg_value) Invalid index value\n"); 6.54 - domain_crash_synchronous(); 6.55 + case 0: /* %al */ 6.56 + return (char)(regs->rax & 0xFF); 6.57 + case 1: /* %cl */ 6.58 + return (char)(regs->rcx & 0xFF); 6.59 + case 2: /* %dl */ 6.60 + return (char)(regs->rdx & 0xFF); 6.61 + case 3: /* %bl */ 6.62 + return (char)(regs->rbx & 0xFF); 6.63 + case 4: /* %ah */ 6.64 + return (char)((regs->rax & 0xFF00) >> 8); 6.65 + case 5: /* %ch */ 6.66 + return (char)((regs->rcx & 0xFF00) >> 8); 6.67 + case 6: /* %dh */ 6.68 + return (char)((regs->rdx & 0xFF00) >> 8); 6.69 + case 7: /* %bh */ 6.70 + return (char)((regs->rbx & 0xFF00) >> 8); 6.71 + default: 6.72 + printf("Error: (get_reg_value) Invalid index value\n"); 6.73 + domain_crash_synchronous(); 6.74 } 6.75 - /* NOTREACHED */ 6.76 + /* NOTREACHED */ 6.77 } 6.78 6.79 switch (index) { 6.80 - case 0: return __get_reg_value(regs->rax, size); 6.81 - case 1: return __get_reg_value(regs->rcx, size); 6.82 - case 2: return __get_reg_value(regs->rdx, size); 6.83 - case 3: return __get_reg_value(regs->rbx, size); 6.84 - case 4: return __get_reg_value(regs->rsp, size); 6.85 - case 5: return __get_reg_value(regs->rbp, size); 6.86 - case 6: return __get_reg_value(regs->rsi, size); 6.87 - case 7: return __get_reg_value(regs->rdi, size); 6.88 - case 8: return __get_reg_value(regs->r8, size); 6.89 - case 9: return __get_reg_value(regs->r9, size); 6.90 - case 10: return __get_reg_value(regs->r10, size); 6.91 - case 11: return __get_reg_value(regs->r11, size); 6.92 - case 12: return __get_reg_value(regs->r12, size); 6.93 - case 13: return __get_reg_value(regs->r13, size); 6.94 - case 14: return __get_reg_value(regs->r14, size); 6.95 - case 15: return __get_reg_value(regs->r15, size); 6.96 - default: 6.97 - printf("Error: (get_reg_value) Invalid index value\n"); 6.98 - domain_crash_synchronous(); 6.99 + case 0: return 
__get_reg_value(regs->rax, size); 6.100 + case 1: return __get_reg_value(regs->rcx, size); 6.101 + case 2: return __get_reg_value(regs->rdx, size); 6.102 + case 3: return __get_reg_value(regs->rbx, size); 6.103 + case 4: return __get_reg_value(regs->rsp, size); 6.104 + case 5: return __get_reg_value(regs->rbp, size); 6.105 + case 6: return __get_reg_value(regs->rsi, size); 6.106 + case 7: return __get_reg_value(regs->rdi, size); 6.107 + case 8: return __get_reg_value(regs->r8, size); 6.108 + case 9: return __get_reg_value(regs->r9, size); 6.109 + case 10: return __get_reg_value(regs->r10, size); 6.110 + case 11: return __get_reg_value(regs->r11, size); 6.111 + case 12: return __get_reg_value(regs->r12, size); 6.112 + case 13: return __get_reg_value(regs->r13, size); 6.113 + case 14: return __get_reg_value(regs->r14, size); 6.114 + case 15: return __get_reg_value(regs->r15, size); 6.115 + default: 6.116 + printf("Error: (get_reg_value) Invalid index value\n"); 6.117 + domain_crash_synchronous(); 6.118 } 6.119 } 6.120 #elif defined (__i386__) 6.121 @@ -134,12 +134,12 @@ static inline long __get_reg_value(unsig 6.122 { 6.123 switch(size) { 6.124 case WORD: 6.125 - return (short)(reg & 0xFFFF); 6.126 + return (short)(reg & 0xFFFF); 6.127 case LONG: 6.128 - return (int)(reg & 0xFFFFFFFF); 6.129 + return (int)(reg & 0xFFFFFFFF); 6.130 default: 6.131 - printf("Error: (__get_reg_value) Invalid reg size\n"); 6.132 - domain_crash_synchronous(); 6.133 + printf("Error: (__get_reg_value) Invalid reg size\n"); 6.134 + domain_crash_synchronous(); 6.135 } 6.136 } 6.137 6.138 @@ -147,29 +147,29 @@ long get_reg_value(int size, int index, 6.139 { 6.140 if (size == BYTE) { 6.141 switch (index) { 6.142 - case 0: /* %al */ 6.143 + case 0: /* %al */ 6.144 return (char)(regs->eax & 0xFF); 6.145 - case 1: /* %cl */ 6.146 + case 1: /* %cl */ 6.147 return (char)(regs->ecx & 0xFF); 6.148 - case 2: /* %dl */ 6.149 + case 2: /* %dl */ 6.150 return (char)(regs->edx & 0xFF); 6.151 - case 3: /* %bl */ 6.152 + case 3: /* %bl */ 6.153 return (char)(regs->ebx & 0xFF); 6.154 - case 4: /* %ah */ 6.155 + case 4: /* %ah */ 6.156 return (char)((regs->eax & 0xFF00) >> 8); 6.157 - case 5: /* %ch */ 6.158 + case 5: /* %ch */ 6.159 return (char)((regs->ecx & 0xFF00) >> 8); 6.160 - case 6: /* %dh */ 6.161 + case 6: /* %dh */ 6.162 return (char)((regs->edx & 0xFF00) >> 8); 6.163 - case 7: /* %bh */ 6.164 + case 7: /* %bh */ 6.165 return (char)((regs->ebx & 0xFF00) >> 8); 6.166 default: 6.167 - printf("Error: (get_reg_value) Invalid index value\n"); 6.168 + printf("Error: (get_reg_value) Invalid index value\n"); 6.169 domain_crash_synchronous(); 6.170 } 6.171 - } 6.172 + } 6.173 6.174 - switch (index) { 6.175 + switch (index) { 6.176 case 0: return __get_reg_value(regs->eax, size); 6.177 case 1: return __get_reg_value(regs->ecx, size); 6.178 case 2: return __get_reg_value(regs->edx, size); 6.179 @@ -179,46 +179,46 @@ long get_reg_value(int size, int index, 6.180 case 6: return __get_reg_value(regs->esi, size); 6.181 case 7: return __get_reg_value(regs->edi, size); 6.182 default: 6.183 - printf("Error: (get_reg_value) Invalid index value\n"); 6.184 + printf("Error: (get_reg_value) Invalid index value\n"); 6.185 domain_crash_synchronous(); 6.186 } 6.187 } 6.188 #endif 6.189 6.190 static inline unsigned char *check_prefix(unsigned char *inst, 6.191 - struct instruction *thread_inst, unsigned char *rex_p) 6.192 + struct instruction *thread_inst, unsigned char *rex_p) 6.193 { 6.194 while (1) { 6.195 switch (*inst) { 6.196 - /* rex prefix for 
em64t instructions */ 6.197 - case 0x40 ... 0x4e: 6.198 - *rex_p = *inst; 6.199 - break; 6.200 + /* rex prefix for em64t instructions */ 6.201 + case 0x40 ... 0x4e: 6.202 + *rex_p = *inst; 6.203 + break; 6.204 case 0xf3: /* REPZ */ 6.205 - thread_inst->flags = REPZ; 6.206 - break; 6.207 + thread_inst->flags = REPZ; 6.208 + break; 6.209 case 0xf2: /* REPNZ */ 6.210 - thread_inst->flags = REPNZ; 6.211 - break; 6.212 + thread_inst->flags = REPNZ; 6.213 + break; 6.214 case 0xf0: /* LOCK */ 6.215 - break; 6.216 + break; 6.217 case 0x2e: /* CS */ 6.218 case 0x36: /* SS */ 6.219 case 0x3e: /* DS */ 6.220 case 0x26: /* ES */ 6.221 case 0x64: /* FS */ 6.222 case 0x65: /* GS */ 6.223 - thread_inst->seg_sel = *inst; 6.224 - break; 6.225 + thread_inst->seg_sel = *inst; 6.226 + break; 6.227 case 0x66: /* 32bit->16bit */ 6.228 - thread_inst->op_size = WORD; 6.229 - break; 6.230 - case 0x67: 6.231 - printf("Error: Not handling 0x67 (yet)\n"); 6.232 - domain_crash_synchronous(); 6.233 - break; 6.234 - default: 6.235 - return inst; 6.236 + thread_inst->op_size = WORD; 6.237 + break; 6.238 + case 0x67: 6.239 + printf("Error: Not handling 0x67 (yet)\n"); 6.240 + domain_crash_synchronous(); 6.241 + break; 6.242 + default: 6.243 + return inst; 6.244 } 6.245 inst++; 6.246 } 6.247 @@ -240,23 +240,23 @@ static inline unsigned long get_immediat 6.248 } 6.249 6.250 switch(mod) { 6.251 - case 0: 6.252 - if (rm == 5 || rm == 4) { 6.253 - if (op16) 6.254 - inst = inst + 2; //disp16, skip 2 bytes 6.255 - else 6.256 - inst = inst + 4; //disp32, skip 4 bytes 6.257 - } 6.258 - break; 6.259 - case 1: 6.260 - inst++; //disp8, skip 1 byte 6.261 - break; 6.262 - case 2: 6.263 + case 0: 6.264 + if (rm == 5 || rm == 4) { 6.265 if (op16) 6.266 inst = inst + 2; //disp16, skip 2 bytes 6.267 else 6.268 inst = inst + 4; //disp32, skip 4 bytes 6.269 - break; 6.270 + } 6.271 + break; 6.272 + case 1: 6.273 + inst++; //disp8, skip 1 byte 6.274 + break; 6.275 + case 2: 6.276 + if (op16) 6.277 + inst = inst + 2; //disp16, skip 2 bytes 6.278 + else 6.279 + inst = inst + 4; //disp32, skip 4 bytes 6.280 + break; 6.281 } 6.282 6.283 if (op_size == QUAD) 6.284 @@ -304,19 +304,19 @@ static void init_instruction(struct inst 6.285 } 6.286 6.287 #define GET_OP_SIZE_FOR_BYTE(op_size) \ 6.288 - do { \ 6.289 - if (rex) \ 6.290 - op_size = BYTE_64; \ 6.291 - else \ 6.292 - op_size = BYTE; \ 6.293 + do { \ 6.294 + if (rex) \ 6.295 + op_size = BYTE_64; \ 6.296 + else \ 6.297 + op_size = BYTE; \ 6.298 } while(0) 6.299 6.300 #define GET_OP_SIZE_FOR_NONEBYTE(op_size) \ 6.301 - do { \ 6.302 - if (rex & 0x8) \ 6.303 - op_size = QUAD; \ 6.304 - else if (op_size != WORD) \ 6.305 - op_size = LONG; \ 6.306 + do { \ 6.307 + if (rex & 0x8) \ 6.308 + op_size = QUAD; \ 6.309 + else if (op_size != WORD) \ 6.310 + op_size = LONG; \ 6.311 } while(0) 6.312 6.313 6.314 @@ -344,7 +344,7 @@ static int acc_mem(unsigned char size, s 6.315 * Decode mem,reg operands (as in <opcode> r32/16, m32/16) 6.316 */ 6.317 static int mem_reg(unsigned char size, unsigned char *opcode, 6.318 - struct instruction *instr, unsigned char rex) 6.319 + struct instruction *instr, unsigned char rex) 6.320 { 6.321 int index = get_index(opcode + 1, rex); 6.322 6.323 @@ -357,7 +357,7 @@ static int mem_reg(unsigned char size, u 6.324 * Decode reg,mem operands (as in <opcode> m32/16, r32/16) 6.325 */ 6.326 static int reg_mem(unsigned char size, unsigned char *opcode, 6.327 - struct instruction *instr, unsigned char rex) 6.328 + struct instruction *instr, unsigned char rex) 6.329 { 6.330 int index = 
get_index(opcode + 1, rex); 6.331 6.332 @@ -382,210 +382,210 @@ static int vmx_decode(unsigned char *opc 6.333 vm86 = 1; 6.334 6.335 if (vm86) { /* meaning is reversed */ 6.336 - if (instr->op_size == WORD) 6.337 - instr->op_size = LONG; 6.338 - else if (instr->op_size == LONG) 6.339 - instr->op_size = WORD; 6.340 - else if (instr->op_size == 0) 6.341 - instr->op_size = WORD; 6.342 + if (instr->op_size == WORD) 6.343 + instr->op_size = LONG; 6.344 + else if (instr->op_size == LONG) 6.345 + instr->op_size = WORD; 6.346 + else if (instr->op_size == 0) 6.347 + instr->op_size = WORD; 6.348 } 6.349 6.350 switch (*opcode) { 6.351 case 0x0B: /* or m32/16, r32/16 */ 6.352 - instr->instr = INSTR_OR; 6.353 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.354 - return mem_reg(instr->op_size, opcode, instr, rex); 6.355 + instr->instr = INSTR_OR; 6.356 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.357 + return mem_reg(instr->op_size, opcode, instr, rex); 6.358 6.359 case 0x20: /* and r8, m8 */ 6.360 - instr->instr = INSTR_AND; 6.361 - GET_OP_SIZE_FOR_BYTE(instr->op_size); 6.362 - return reg_mem(instr->op_size, opcode, instr, rex); 6.363 + instr->instr = INSTR_AND; 6.364 + GET_OP_SIZE_FOR_BYTE(instr->op_size); 6.365 + return reg_mem(instr->op_size, opcode, instr, rex); 6.366 6.367 case 0x21: /* and r32/16, m32/16 */ 6.368 - instr->instr = INSTR_AND; 6.369 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.370 - return reg_mem(instr->op_size, opcode, instr, rex); 6.371 + instr->instr = INSTR_AND; 6.372 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.373 + return reg_mem(instr->op_size, opcode, instr, rex); 6.374 6.375 case 0x23: /* and m32/16, r32/16 */ 6.376 - instr->instr = INSTR_AND; 6.377 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.378 - return mem_reg(instr->op_size, opcode, instr, rex); 6.379 + instr->instr = INSTR_AND; 6.380 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.381 + return mem_reg(instr->op_size, opcode, instr, rex); 6.382 6.383 case 0x30: /* xor r8, m8 */ 6.384 - instr->instr = INSTR_XOR; 6.385 - GET_OP_SIZE_FOR_BYTE(instr->op_size); 6.386 - return reg_mem(instr->op_size, opcode, instr, rex); 6.387 + instr->instr = INSTR_XOR; 6.388 + GET_OP_SIZE_FOR_BYTE(instr->op_size); 6.389 + return reg_mem(instr->op_size, opcode, instr, rex); 6.390 6.391 case 0x31: /* xor r32/16, m32/16 */ 6.392 - instr->instr = INSTR_XOR; 6.393 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.394 - return reg_mem(instr->op_size, opcode, instr, rex); 6.395 + instr->instr = INSTR_XOR; 6.396 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.397 + return reg_mem(instr->op_size, opcode, instr, rex); 6.398 6.399 case 0x39: /* cmp r32/16, m32/16 */ 6.400 - instr->instr = INSTR_CMP; 6.401 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.402 - return reg_mem(instr->op_size, opcode, instr, rex); 6.403 + instr->instr = INSTR_CMP; 6.404 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.405 + return reg_mem(instr->op_size, opcode, instr, rex); 6.406 6.407 case 0x80: 6.408 case 0x81: 6.409 - if (((opcode[1] >> 3) & 7) == 7) { /* cmp $imm, m32/16 */ 6.410 - instr->instr = INSTR_CMP; 6.411 + if (((opcode[1] >> 3) & 7) == 7) { /* cmp $imm, m32/16 */ 6.412 + instr->instr = INSTR_CMP; 6.413 6.414 - if (opcode[0] == 0x80) 6.415 - GET_OP_SIZE_FOR_BYTE(instr->op_size); 6.416 + if (opcode[0] == 0x80) 6.417 + GET_OP_SIZE_FOR_BYTE(instr->op_size); 6.418 else 6.419 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.420 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.421 6.422 - instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE); 6.423 - instr->immediate = 
get_immediate(vm86, opcode+1, BYTE); 6.424 - instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY); 6.425 + instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE); 6.426 + instr->immediate = get_immediate(vm86, opcode+1, BYTE); 6.427 + instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY); 6.428 6.429 return DECODE_success; 6.430 - } else 6.431 - return DECODE_failure; 6.432 + } else 6.433 + return DECODE_failure; 6.434 6.435 case 0x84: /* test m8, r8 */ 6.436 - instr->instr = INSTR_TEST; 6.437 - instr->op_size = BYTE; 6.438 - GET_OP_SIZE_FOR_BYTE(tmp_size); 6.439 - return mem_reg(tmp_size, opcode, instr, rex); 6.440 + instr->instr = INSTR_TEST; 6.441 + instr->op_size = BYTE; 6.442 + GET_OP_SIZE_FOR_BYTE(tmp_size); 6.443 + return mem_reg(tmp_size, opcode, instr, rex); 6.444 6.445 case 0x88: /* mov r8, m8 */ 6.446 - instr->instr = INSTR_MOV; 6.447 - instr->op_size = BYTE; 6.448 + instr->instr = INSTR_MOV; 6.449 + instr->op_size = BYTE; 6.450 GET_OP_SIZE_FOR_BYTE(tmp_size); 6.451 - return reg_mem(tmp_size, opcode, instr, rex); 6.452 + return reg_mem(tmp_size, opcode, instr, rex); 6.453 6.454 case 0x89: /* mov r32/16, m32/16 */ 6.455 - instr->instr = INSTR_MOV; 6.456 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.457 - return reg_mem(instr->op_size, opcode, instr, rex); 6.458 + instr->instr = INSTR_MOV; 6.459 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.460 + return reg_mem(instr->op_size, opcode, instr, rex); 6.461 6.462 case 0x8A: /* mov m8, r8 */ 6.463 - instr->instr = INSTR_MOV; 6.464 - instr->op_size = BYTE; 6.465 + instr->instr = INSTR_MOV; 6.466 + instr->op_size = BYTE; 6.467 GET_OP_SIZE_FOR_BYTE(tmp_size); 6.468 - return mem_reg(tmp_size, opcode, instr, rex); 6.469 + return mem_reg(tmp_size, opcode, instr, rex); 6.470 6.471 case 0x8B: /* mov m32/16, r32/16 */ 6.472 - instr->instr = INSTR_MOV; 6.473 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.474 - return mem_reg(instr->op_size, opcode, instr, rex); 6.475 + instr->instr = INSTR_MOV; 6.476 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.477 + return mem_reg(instr->op_size, opcode, instr, rex); 6.478 6.479 case 0xA0: /* mov <addr>, al */ 6.480 - instr->instr = INSTR_MOV; 6.481 - instr->op_size = BYTE; 6.482 + instr->instr = INSTR_MOV; 6.483 + instr->op_size = BYTE; 6.484 GET_OP_SIZE_FOR_BYTE(tmp_size); 6.485 - return mem_acc(tmp_size, instr); 6.486 + return mem_acc(tmp_size, instr); 6.487 6.488 case 0xA1: /* mov <addr>, ax/eax */ 6.489 - instr->instr = INSTR_MOV; 6.490 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.491 - return mem_acc(instr->op_size, instr); 6.492 + instr->instr = INSTR_MOV; 6.493 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.494 + return mem_acc(instr->op_size, instr); 6.495 6.496 case 0xA2: /* mov al, <addr> */ 6.497 - instr->instr = INSTR_MOV; 6.498 - instr->op_size = BYTE; 6.499 + instr->instr = INSTR_MOV; 6.500 + instr->op_size = BYTE; 6.501 GET_OP_SIZE_FOR_BYTE(tmp_size); 6.502 - return acc_mem(tmp_size, instr); 6.503 + return acc_mem(tmp_size, instr); 6.504 6.505 case 0xA3: /* mov ax/eax, <addr> */ 6.506 - instr->instr = INSTR_MOV; 6.507 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.508 - return acc_mem(instr->op_size, instr); 6.509 + instr->instr = INSTR_MOV; 6.510 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.511 + return acc_mem(instr->op_size, instr); 6.512 6.513 case 0xA4: /* movsb */ 6.514 - instr->instr = INSTR_MOVS; 6.515 - instr->op_size = BYTE; 6.516 + instr->instr = INSTR_MOVS; 6.517 + instr->op_size = BYTE; 6.518 return DECODE_success; 6.519 6.520 case 0xA5: /* movsw/movsl */ 
6.521 - instr->instr = INSTR_MOVS; 6.522 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.523 - return DECODE_success; 6.524 + instr->instr = INSTR_MOVS; 6.525 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.526 + return DECODE_success; 6.527 6.528 case 0xAA: /* stosb */ 6.529 - instr->instr = INSTR_STOS; 6.530 - instr->op_size = BYTE; 6.531 + instr->instr = INSTR_STOS; 6.532 + instr->op_size = BYTE; 6.533 return DECODE_success; 6.534 6.535 case 0xAB: /* stosw/stosl */ 6.536 - instr->instr = INSTR_STOS; 6.537 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.538 - return DECODE_success; 6.539 + instr->instr = INSTR_STOS; 6.540 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.541 + return DECODE_success; 6.542 6.543 case 0xC6: 6.544 - if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */ 6.545 - instr->instr = INSTR_MOV; 6.546 - instr->op_size = BYTE; 6.547 + if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm8, m8 */ 6.548 + instr->instr = INSTR_MOV; 6.549 + instr->op_size = BYTE; 6.550 6.551 - instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE); 6.552 - instr->immediate = get_immediate(vm86, opcode+1, instr->op_size); 6.553 - instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY); 6.554 + instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE); 6.555 + instr->immediate = get_immediate(vm86, opcode+1, instr->op_size); 6.556 + instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY); 6.557 6.558 return DECODE_success; 6.559 - } else 6.560 - return DECODE_failure; 6.561 + } else 6.562 + return DECODE_failure; 6.563 6.564 case 0xC7: 6.565 - if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */ 6.566 - instr->instr = INSTR_MOV; 6.567 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.568 + if (((opcode[1] >> 3) & 7) == 0) { /* mov $imm16/32, m16/32 */ 6.569 + instr->instr = INSTR_MOV; 6.570 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.571 6.572 - instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE); 6.573 - instr->immediate = get_immediate(vm86, opcode+1, instr->op_size); 6.574 - instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY); 6.575 + instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE); 6.576 + instr->immediate = get_immediate(vm86, opcode+1, instr->op_size); 6.577 + instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY); 6.578 6.579 return DECODE_success; 6.580 - } else 6.581 - return DECODE_failure; 6.582 + } else 6.583 + return DECODE_failure; 6.584 6.585 case 0xF6: 6.586 - if (((opcode[1] >> 3) & 7) == 0) { /* testb $imm8, m8 */ 6.587 - instr->instr = INSTR_TEST; 6.588 - instr->op_size = BYTE; 6.589 + if (((opcode[1] >> 3) & 7) == 0) { /* testb $imm8, m8 */ 6.590 + instr->instr = INSTR_TEST; 6.591 + instr->op_size = BYTE; 6.592 6.593 - instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE); 6.594 - instr->immediate = get_immediate(vm86, opcode+1, instr->op_size); 6.595 - instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY); 6.596 + instr->operand[0] = mk_operand(instr->op_size, 0, 0, IMMEDIATE); 6.597 + instr->immediate = get_immediate(vm86, opcode+1, instr->op_size); 6.598 + instr->operand[1] = mk_operand(instr->op_size, 0, 0, MEMORY); 6.599 6.600 - return DECODE_success; 6.601 - } else 6.602 - return DECODE_failure; 6.603 + return DECODE_success; 6.604 + } else 6.605 + return DECODE_failure; 6.606 6.607 case 0x0F: 6.608 - break; 6.609 + break; 6.610 6.611 default: 6.612 - printf("%x, This opcode isn't handled yet!\n", *opcode); 6.613 + printf("%x, This opcode isn't handled yet!\n", *opcode); 6.614 return 
DECODE_failure; 6.615 } 6.616 6.617 switch (*++opcode) { 6.618 case 0xB6: /* movz m8, r16/r32 */ 6.619 - instr->instr = INSTR_MOVZ; 6.620 - GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.621 - index = get_index(opcode + 1, rex); 6.622 - instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY); 6.623 - instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER); 6.624 - return DECODE_success; 6.625 + instr->instr = INSTR_MOVZ; 6.626 + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); 6.627 + index = get_index(opcode + 1, rex); 6.628 + instr->operand[0] = mk_operand(BYTE, 0, 0, MEMORY); 6.629 + instr->operand[1] = mk_operand(instr->op_size, index, 0, REGISTER); 6.630 + return DECODE_success; 6.631 6.632 case 0xB7: /* movz m16, r32 */ 6.633 - instr->instr = INSTR_MOVZ; 6.634 - index = get_index(opcode + 1, rex); 6.635 - if (rex & 0x8) { 6.636 - instr->op_size = LONG; 6.637 - instr->operand[1] = mk_operand(QUAD, index, 0, REGISTER); 6.638 - } else { 6.639 - instr->op_size = WORD; 6.640 - instr->operand[1] = mk_operand(LONG, index, 0, REGISTER); 6.641 - } 6.642 - instr->operand[0] = mk_operand(instr->op_size, 0, 0, MEMORY); 6.643 - return DECODE_success; 6.644 + instr->instr = INSTR_MOVZ; 6.645 + index = get_index(opcode + 1, rex); 6.646 + if (rex & 0x8) { 6.647 + instr->op_size = LONG; 6.648 + instr->operand[1] = mk_operand(QUAD, index, 0, REGISTER); 6.649 + } else { 6.650 + instr->op_size = WORD; 6.651 + instr->operand[1] = mk_operand(LONG, index, 0, REGISTER); 6.652 + } 6.653 + instr->operand[0] = mk_operand(instr->op_size, 0, 0, MEMORY); 6.654 + return DECODE_success; 6.655 6.656 default: 6.657 - printf("0f %x, This opcode isn't handled yet\n", *opcode); 6.658 - return DECODE_failure; 6.659 + printf("0f %x, This opcode isn't handled yet\n", *opcode); 6.660 + return DECODE_failure; 6.661 } 6.662 } 6.663 6.664 @@ -599,7 +599,7 @@ int inst_copy_from_guest(unsigned char * 6.665 } 6.666 6.667 void send_mmio_req(unsigned char type, unsigned long gpa, 6.668 - unsigned long count, int size, long value, int dir, int pvalid) 6.669 + unsigned long count, int size, long value, int dir, int pvalid) 6.670 { 6.671 struct vcpu *d = current; 6.672 vcpu_iodata_t *vio; 6.673 @@ -636,12 +636,12 @@ void send_mmio_req(unsigned char type, u 6.674 p->df = regs->eflags & EF_DF ? 
1 : 0; 6.675 6.676 if (pvalid) { 6.677 - if (vmx_paging_enabled(current)) 6.678 - p->u.pdata = (void *) gva_to_gpa(value); 6.679 + if (vmx_paging_enabled(current)) 6.680 + p->u.pdata = (void *) gva_to_gpa(value); 6.681 else 6.682 - p->u.pdata = (void *) value; /* guest VA == guest PA */ 6.683 + p->u.pdata = (void *) value; /* guest VA == guest PA */ 6.684 } else 6.685 - p->u.data = value; 6.686 + p->u.data = value; 6.687 6.688 p->state = STATE_IOREQ_READY; 6.689 6.690 @@ -656,7 +656,7 @@ void send_mmio_req(unsigned char type, u 6.691 } 6.692 6.693 static void mmio_operands(int type, unsigned long gpa, struct instruction *inst, 6.694 - struct mi_per_cpu_info *mpcip, struct cpu_user_regs *regs) 6.695 + struct mi_per_cpu_info *mpcip, struct cpu_user_regs *regs) 6.696 { 6.697 unsigned long value = 0; 6.698 int index, size; 6.699 @@ -669,24 +669,24 @@ static void mmio_operands(int type, unsi 6.700 mpcip->operand[1] = inst->operand[1]; /* destination */ 6.701 6.702 if (inst->operand[0] & REGISTER) { /* dest is memory */ 6.703 - index = operand_index(inst->operand[0]); 6.704 - value = get_reg_value(size, index, 0, regs); 6.705 - send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0); 6.706 + index = operand_index(inst->operand[0]); 6.707 + value = get_reg_value(size, index, 0, regs); 6.708 + send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0); 6.709 } else if (inst->operand[0] & IMMEDIATE) { /* dest is memory */ 6.710 - value = inst->immediate; 6.711 - send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0); 6.712 + value = inst->immediate; 6.713 + send_mmio_req(type, gpa, 1, size, value, IOREQ_WRITE, 0); 6.714 } else if (inst->operand[0] & MEMORY) { /* dest is register */ 6.715 - /* send the request and wait for the value */ 6.716 - send_mmio_req(type, gpa, 1, size, 0, IOREQ_READ, 0); 6.717 + /* send the request and wait for the value */ 6.718 + send_mmio_req(type, gpa, 1, size, 0, IOREQ_READ, 0); 6.719 } else { 6.720 - printf("mmio_operands: invalid operand\n"); 6.721 - domain_crash_synchronous(); 6.722 + printf("mmio_operands: invalid operand\n"); 6.723 + domain_crash_synchronous(); 6.724 } 6.725 } 6.726 6.727 #define GET_REPEAT_COUNT() \ 6.728 (mmio_inst.flags & REPZ ? (vm86 ? regs->ecx & 0xFFFF : regs->ecx) : 1) 6.729 - 6.730 + 6.731 void handle_mmio(unsigned long va, unsigned long gpa) 6.732 { 6.733 unsigned long eip, eflags, cs; 6.734 @@ -721,11 +721,11 @@ void handle_mmio(unsigned long va, unsig 6.735 init_instruction(&mmio_inst); 6.736 6.737 if (vmx_decode(inst, &mmio_inst) == DECODE_failure) { 6.738 - printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %ld:", 6.739 - va, gpa, inst_len); 6.740 - for (i = 0; i < inst_len; i++) 6.741 - printf(" %02x", inst[i] & 0xFF); 6.742 - printf("\n"); 6.743 + printf("mmio opcode: va 0x%lx, gpa 0x%lx, len %ld:", 6.744 + va, gpa, inst_len); 6.745 + for (i = 0; i < inst_len; i++) 6.746 + printf(" %02x", inst[i] & 0xFF); 6.747 + printf("\n"); 6.748 domain_crash_synchronous(); 6.749 } 6.750 6.751 @@ -734,116 +734,116 @@ void handle_mmio(unsigned long va, unsig 6.752 6.753 switch (mmio_inst.instr) { 6.754 case INSTR_MOV: 6.755 - mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs); 6.756 - break; 6.757 + mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs); 6.758 + break; 6.759 6.760 case INSTR_MOVS: 6.761 { 6.762 - unsigned long count = GET_REPEAT_COUNT(); 6.763 - unsigned long size = mmio_inst.op_size; 6.764 - int sign = regs->eflags & EF_DF ? 
-1 : 1; 6.765 - unsigned long addr = 0; 6.766 - int dir; 6.767 + unsigned long count = GET_REPEAT_COUNT(); 6.768 + unsigned long size = mmio_inst.op_size; 6.769 + int sign = regs->eflags & EF_DF ? -1 : 1; 6.770 + unsigned long addr = 0; 6.771 + int dir; 6.772 6.773 - /* determine non-MMIO address */ 6.774 - if (vm86) { 6.775 - unsigned long seg; 6.776 + /* determine non-MMIO address */ 6.777 + if (vm86) { 6.778 + unsigned long seg; 6.779 6.780 - __vmread(GUEST_ES_SELECTOR, &seg); 6.781 - if (((seg << 4) + (regs->edi & 0xFFFF)) == va) { 6.782 - dir = IOREQ_WRITE; 6.783 - __vmread(GUEST_DS_SELECTOR, &seg); 6.784 - addr = (seg << 4) + (regs->esi & 0xFFFF); 6.785 - } else { 6.786 - dir = IOREQ_READ; 6.787 - addr = (seg << 4) + (regs->edi & 0xFFFF); 6.788 - } 6.789 - } else { 6.790 - if (va == regs->edi) { 6.791 - dir = IOREQ_WRITE; 6.792 - addr = regs->esi; 6.793 - } else { 6.794 - dir = IOREQ_READ; 6.795 - addr = regs->edi; 6.796 - } 6.797 - } 6.798 + __vmread(GUEST_ES_SELECTOR, &seg); 6.799 + if (((seg << 4) + (regs->edi & 0xFFFF)) == va) { 6.800 + dir = IOREQ_WRITE; 6.801 + __vmread(GUEST_DS_SELECTOR, &seg); 6.802 + addr = (seg << 4) + (regs->esi & 0xFFFF); 6.803 + } else { 6.804 + dir = IOREQ_READ; 6.805 + addr = (seg << 4) + (regs->edi & 0xFFFF); 6.806 + } 6.807 + } else { 6.808 + if (va == regs->edi) { 6.809 + dir = IOREQ_WRITE; 6.810 + addr = regs->esi; 6.811 + } else { 6.812 + dir = IOREQ_READ; 6.813 + addr = regs->edi; 6.814 + } 6.815 + } 6.816 6.817 - mpcip->flags = mmio_inst.flags; 6.818 - mpcip->instr = mmio_inst.instr; 6.819 + mpcip->flags = mmio_inst.flags; 6.820 + mpcip->instr = mmio_inst.instr; 6.821 6.822 - /* 6.823 - * In case of a movs spanning multiple pages, we break the accesses 6.824 - * up into multiple pages (the device model works with non-continguous 6.825 - * physical guest pages). To copy just one page, we adjust %ecx and 6.826 - * do not advance %eip so that the next "rep movs" copies the next page. 6.827 - * Unaligned accesses, for example movsl starting at PGSZ-2, are 6.828 - * turned into a single copy where we handle the overlapping memory 6.829 - * copy ourself. After this copy succeeds, "rep movs" is executed 6.830 - * again. 6.831 - */ 6.832 - if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) { 6.833 - unsigned long value = 0; 6.834 + /* 6.835 + * In case of a movs spanning multiple pages, we break the accesses 6.836 + * up into multiple pages (the device model works with non-continguous 6.837 + * physical guest pages). To copy just one page, we adjust %ecx and 6.838 + * do not advance %eip so that the next "rep movs" copies the next page. 6.839 + * Unaligned accesses, for example movsl starting at PGSZ-2, are 6.840 + * turned into a single copy where we handle the overlapping memory 6.841 + * copy ourself. After this copy succeeds, "rep movs" is executed 6.842 + * again. 
6.843 + */ 6.844 + if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) { 6.845 + unsigned long value = 0; 6.846 6.847 - mpcip->flags |= OVERLAP; 6.848 + mpcip->flags |= OVERLAP; 6.849 6.850 - regs->eip -= inst_len; /* do not advance %eip */ 6.851 + regs->eip -= inst_len; /* do not advance %eip */ 6.852 6.853 - if (dir == IOREQ_WRITE) 6.854 - vmx_copy(&value, addr, size, VMX_COPY_IN); 6.855 - send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, 0); 6.856 - } else { 6.857 - if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) { 6.858 - regs->eip -= inst_len; /* do not advance %eip */ 6.859 + if (dir == IOREQ_WRITE) 6.860 + vmx_copy(&value, addr, size, VMX_COPY_IN); 6.861 + send_mmio_req(IOREQ_TYPE_COPY, gpa, 1, size, value, dir, 0); 6.862 + } else { 6.863 + if ((addr & PAGE_MASK) != ((addr + count * size - 1) & PAGE_MASK)) { 6.864 + regs->eip -= inst_len; /* do not advance %eip */ 6.865 6.866 - if (sign > 0) 6.867 - count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size; 6.868 - else 6.869 - count = (addr & ~PAGE_MASK) / size; 6.870 - } 6.871 + if (sign > 0) 6.872 + count = (PAGE_SIZE - (addr & ~PAGE_MASK)) / size; 6.873 + else 6.874 + count = (addr & ~PAGE_MASK) / size; 6.875 + } 6.876 6.877 - send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, 1); 6.878 - } 6.879 + send_mmio_req(IOREQ_TYPE_COPY, gpa, count, size, addr, dir, 1); 6.880 + } 6.881 break; 6.882 } 6.883 6.884 case INSTR_MOVZ: 6.885 - mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs); 6.886 - break; 6.887 + mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs); 6.888 + break; 6.889 6.890 case INSTR_STOS: 6.891 - /* 6.892 - * Since the destination is always in (contiguous) mmio space we don't 6.893 - * need to break it up into pages. 6.894 - */ 6.895 - mpcip->flags = mmio_inst.flags; 6.896 - mpcip->instr = mmio_inst.instr; 6.897 + /* 6.898 + * Since the destination is always in (contiguous) mmio space we don't 6.899 + * need to break it up into pages. 6.900 + */ 6.901 + mpcip->flags = mmio_inst.flags; 6.902 + mpcip->instr = mmio_inst.instr; 6.903 send_mmio_req(IOREQ_TYPE_COPY, gpa, 6.904 - GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0); 6.905 - break; 6.906 + GET_REPEAT_COUNT(), mmio_inst.op_size, regs->eax, IOREQ_WRITE, 0); 6.907 + break; 6.908 6.909 case INSTR_OR: 6.910 - mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mpcip, regs); 6.911 - break; 6.912 + mmio_operands(IOREQ_TYPE_OR, gpa, &mmio_inst, mpcip, regs); 6.913 + break; 6.914 6.915 case INSTR_AND: 6.916 - mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mpcip, regs); 6.917 - break; 6.918 + mmio_operands(IOREQ_TYPE_AND, gpa, &mmio_inst, mpcip, regs); 6.919 + break; 6.920 6.921 case INSTR_XOR: 6.922 - mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs); 6.923 - break; 6.924 + mmio_operands(IOREQ_TYPE_XOR, gpa, &mmio_inst, mpcip, regs); 6.925 + break; 6.926 6.927 case INSTR_CMP: 6.928 - mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs); 6.929 - break; 6.930 + mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs); 6.931 + break; 6.932 6.933 case INSTR_TEST: 6.934 - mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs); 6.935 - break; 6.936 + mmio_operands(IOREQ_TYPE_COPY, gpa, &mmio_inst, mpcip, regs); 6.937 + break; 6.938 6.939 default: 6.940 - printf("Unhandled MMIO instruction\n"); 6.941 - domain_crash_synchronous(); 6.942 + printf("Unhandled MMIO instruction\n"); 6.943 + domain_crash_synchronous(); 6.944 } 6.945 } 6.946
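
The MOVS emulation in handle_mmio() above splits rep moves at page boundaries, since the device model only works with one (non-contiguous) guest page per request, and takes the OVERLAP slow path when a single element straddles a page. A standalone sketch of those two checks (plain C; 4KiB pages assumed, the address and count are made up):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long addr = PAGE_SIZE - 2;  /* e.g. movsl starting at PGSZ-2 */
        unsigned long size = 4;              /* element size in bytes */
        unsigned long count = 100;           /* rep count */
        int sign = 1;                        /* EFLAGS.DF clear => ascending */

        if ((addr & PAGE_MASK) != ((addr + size - 1) & PAGE_MASK)) {
            /* a single element crosses the boundary: OVERLAP slow path,
             * the hypervisor performs this one copy itself */
            printf("element straddles a page; copy it by hand\n");
        } else if ((addr & PAGE_MASK) !=
                   ((addr + count * size - 1) & PAGE_MASK)) {
            /* clamp the repeat count to what fits in the current page;
             * %eip is not advanced, so "rep movs" resumes on the next page */
            count = (sign > 0) ? (PAGE_SIZE - (addr & ~PAGE_MASK)) / size
                               : (addr & ~PAGE_MASK) / size;
            printf("clamped count to %lu elements\n", count);
        }
        return 0;
    }
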
7.1 --- a/xen/arch/x86/vmx_vmcs.c Sun Sep 11 16:36:24 2005 +0000 7.2 +++ b/xen/arch/x86/vmx_vmcs.c Sun Sep 11 16:44:23 2005 +0000 7.3 @@ -179,10 +179,10 @@ int vmx_setup_platform(struct vcpu *d, s 7.4 p = map_domain_page(mpfn); 7.5 d->domain->arch.vmx_platform.shared_page_va = (unsigned long)p; 7.6 7.7 - VMX_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d->domain)); 7.8 + VMX_DBG_LOG(DBG_LEVEL_1, "eport: %x\n", iopacket_port(d->domain)); 7.9 7.10 - clear_bit(iopacket_port(d->domain), 7.11 - &d->domain->shared_info->evtchn_mask[0]); 7.12 + clear_bit(iopacket_port(d->domain), 7.13 + &d->domain->shared_info->evtchn_mask[0]); 7.14 7.15 return 0; 7.16 } 7.17 @@ -497,7 +497,7 @@ int modify_vmcs(struct arch_vmx_struct * 7.18 __vmptrst(old_phys_ptr); 7.19 if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) { 7.20 printk("modify_vmcs: load_vmcs failed: VMCS = %lx\n", 7.21 - (unsigned long) vmcs_phys_ptr); 7.22 + (unsigned long) vmcs_phys_ptr); 7.23 return -EINVAL; 7.24 } 7.25 load_cpu_user_regs(regs);
8.1 --- a/xen/include/asm-x86/shadow_64.h Sun Sep 11 16:36:24 2005 +0000 8.2 +++ b/xen/include/asm-x86/shadow_64.h Sun Sep 11 16:44:23 2005 +0000 8.3 @@ -353,7 +353,7 @@ static inline void entry_general( 8.4 entry_remove_flags(sle, _PAGE_PSE); 8.5 8.6 if ( shadow_mode_log_dirty(d) || 8.7 - !(entry_get_flags(gle) & _PAGE_DIRTY) ) 8.8 + !(entry_get_flags(gle) & _PAGE_DIRTY) ) 8.9 { 8.10 pgentry_64_t *l1_p; 8.11 int i; 8.12 @@ -365,8 +365,9 @@ static inline void entry_general( 8.13 unmap_domain_page(l1_p); 8.14 } 8.15 } else { 8.16 - sle = entry_from_pfn(smfn, 8.17 - (entry_get_flags(gle) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL); 8.18 + sle = entry_from_pfn( 8.19 + smfn, 8.20 + (entry_get_flags(gle) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL); 8.21 entry_add_flags(gle, _PAGE_ACCESSED); 8.22 } 8.23 // XXX mafetter: Hmm...
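
The else-branch reformatted in the shadow_64.h hunk above derives a shadow entry from the guest entry: guest flags are inherited, RW and ACCESSED are forced on, and the software-available bits are stripped. A standalone sketch of that flag arithmetic using the usual x86 PTE flag values (illustrative only, not Xen's headers):

    #include <stdio.h>

    #define _PAGE_PRESENT  0x001UL
    #define _PAGE_RW       0x002UL
    #define _PAGE_ACCESSED 0x020UL
    #define _PAGE_DIRTY    0x040UL
    #define _PAGE_AVAIL    0xe00UL  /* bits 11:9, software-available */

    int main(void)
    {
        unsigned long gle_flags = _PAGE_PRESENT | _PAGE_DIRTY | _PAGE_AVAIL;
        unsigned long sle_flags =
            (gle_flags | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL;

        printf("guest flags 0x%03lx -> shadow flags 0x%03lx\n",
               gle_flags, sle_flags);  /* 0xe41 -> 0x063 */
        return 0;
    }
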
9.1 --- a/xen/include/asm-x86/shadow_ops.h Sun Sep 11 16:36:24 2005 +0000 9.2 +++ b/xen/include/asm-x86/shadow_ops.h Sun Sep 11 16:44:23 2005 +0000 9.3 @@ -127,4 +127,4 @@ 9.4 #define guest_va_to_l1mfn va_to_l1mfn 9.5 #endif 9.6 9.7 -#endif /* _XEN_SHADOW_OPS_H */ 9.8 +#endif /* _XEN_SHADOW_OPS_H */
10.1 --- a/xen/include/asm-x86/vmx.h Sun Sep 11 16:36:24 2005 +0000 10.2 +++ b/xen/include/asm-x86/vmx.h Sun Sep 11 16:44:23 2005 +0000 10.3 @@ -150,9 +150,9 @@ extern unsigned int cpu_rev; 10.4 #define TYPE_MOV_TO_CR (0 << 4) 10.5 #define TYPE_MOV_FROM_CR (1 << 4) 10.6 #define TYPE_CLTS (2 << 4) 10.7 -#define TYPE_LMSW (3 << 4) 10.8 +#define TYPE_LMSW (3 << 4) 10.9 #define CONTROL_REG_ACCESS_REG 0xf00 /* 10:8, general purpose register */ 10.10 -#define LMSW_SOURCE_DATA (0xFFFF << 16) /* 16:31 lmsw source */ 10.11 +#define LMSW_SOURCE_DATA (0xFFFF << 16) /* 16:31 lmsw source */ 10.12 #define REG_EAX (0 << 8) 10.13 #define REG_ECX (1 << 8) 10.14 #define REG_EDX (2 << 8)
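
The TYPE_* and LMSW_SOURCE_DATA macros in the vmx.h hunk above carve fields out of the VM-exit qualification word for control-register accesses: bits 5:4 carry the access type, bits 10:8 the general-purpose register, and bits 31:16 the lmsw source operand. A standalone sketch of pulling those fields out (the qualification value is hypothetical; only masks shown in this hunk are used, with UL suffixes added to keep the shifts well-defined in a standalone program):

    #include <stdio.h>

    #define TYPE_LMSW              (3 << 4)
    #define CONTROL_REG_ACCESS_REG 0xf00UL          /* 10:8, gp register */
    #define LMSW_SOURCE_DATA       (0xFFFFUL << 16) /* 31:16, lmsw source */

    int main(void)
    {
        unsigned long qualification = 0xABCD0030UL;  /* hypothetical exit data */

        if ((qualification & (3 << 4)) == TYPE_LMSW) /* access-type bits 5:4 */
            printf("lmsw source = 0x%lx\n",
                   (qualification & LMSW_SOURCE_DATA) >> 16);  /* 0xabcd */

        printf("gp register field = %lu\n",
               (qualification & CONTROL_REG_ACCESS_REG) >> 8); /* 0 */
        return 0;
    }
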
11.1 --- a/xen/include/asm-x86/vmx_platform.h Sun Sep 11 16:36:24 2005 +0000 11.2 +++ b/xen/include/asm-x86/vmx_platform.h Sun Sep 11 16:44:23 2005 +0000 11.3 @@ -16,6 +16,7 @@ 11.4 * Place - Suite 330, Boston, MA 02111-1307 USA. 11.5 * 11.6 */ 11.7 + 11.8 #ifndef __ASM_X86_VMX_PLATFORM_H__ 11.9 #define __ASM_X86_VMX_PLATFORM_H__ 11.10 11.11 @@ -52,19 +53,19 @@ 11.12 #define REPNZ 0x2 11.13 #define OVERLAP 0x4 11.14 11.15 -#define INSTR_PIO 1 11.16 -#define INSTR_OR 2 11.17 -#define INSTR_AND 3 11.18 -#define INSTR_XOR 4 11.19 -#define INSTR_CMP 5 11.20 -#define INSTR_MOV 6 11.21 -#define INSTR_MOVS 7 11.22 -#define INSTR_MOVZ 8 11.23 -#define INSTR_STOS 9 11.24 -#define INSTR_TEST 10 11.25 +#define INSTR_PIO 1 11.26 +#define INSTR_OR 2 11.27 +#define INSTR_AND 3 11.28 +#define INSTR_XOR 4 11.29 +#define INSTR_CMP 5 11.30 +#define INSTR_MOV 6 11.31 +#define INSTR_MOVS 7 11.32 +#define INSTR_MOVZ 8 11.33 +#define INSTR_STOS 9 11.34 +#define INSTR_TEST 10 11.35 11.36 struct instruction { 11.37 - __s8 instr; /* instruction type */ 11.38 + __s8 instr; /* instruction type */ 11.39 __s16 op_size; /* the operand's bit size, e.g. 16-bit or 32-bit */ 11.40 __u64 immediate; 11.41 __u16 seg_sel; /* segmentation selector */ 11.42 @@ -76,18 +77,18 @@ struct instruction { 11.43 11.44 struct mi_per_cpu_info { 11.45 int flags; 11.46 - int instr; /* instruction */ 11.47 - unsigned long operand[2]; /* operands */ 11.48 - unsigned long immediate; /* immediate portion */ 11.49 - struct cpu_user_regs *inst_decoder_regs; /* current context */ 11.50 + int instr; /* instruction */ 11.51 + unsigned long operand[2]; /* operands */ 11.52 + unsigned long immediate; /* immediate portion */ 11.53 + struct cpu_user_regs *inst_decoder_regs; /* current context */ 11.54 }; 11.55 11.56 struct virtual_platform_def { 11.57 - unsigned long *real_mode_data; /* E820, etc. */ 11.58 + unsigned long *real_mode_data; /* E820, etc. */ 11.59 unsigned long shared_page_va; 11.60 struct vmx_virpit_t vmx_pit; 11.61 struct vmx_handler_t vmx_handler; 11.62 - struct mi_per_cpu_info mpci; /* MMIO */ 11.63 + struct mi_per_cpu_info mpci; /* MMIO */ 11.64 }; 11.65 11.66 extern void handle_mmio(unsigned long, unsigned long);
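
REPNZ and OVERLAP in the vmx_platform.h hunk above are bit flags that accumulate in mi_per_cpu_info.flags: the decoder records rep prefixes there, and handle_mmio() adds OVERLAP for page-straddling accesses so vmx_mmio_assist() knows the hypervisor already did the copy. A standalone sketch of that flag flow (REPZ is assumed to be 0x1, following the pattern of the values shown; it is defined just above this hunk):

    #include <stdio.h>

    #define REPZ    0x1  /* assumed value, defined outside this hunk */
    #define REPNZ   0x2
    #define OVERLAP 0x4

    int main(void)
    {
        int flags = 0;

        flags |= REPZ;     /* decoder saw an 0xf3 prefix */
        flags |= OVERLAP;  /* access straddles a page boundary */

        if (flags & REPZ)
            printf("completion path adjusts ecx by the repeat count\n");
        if (flags & OVERLAP)
            printf("hypervisor performs the overlapping copy itself\n");
        return 0;
    }
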
12.1 --- a/xen/include/asm-x86/vmx_virpit.h Sun Sep 11 16:36:24 2005 +0000 12.2 +++ b/xen/include/asm-x86/vmx_virpit.h Sun Sep 11 16:44:23 2005 +0000 12.3 @@ -1,5 +1,6 @@ 12.4 #ifndef _VMX_VIRPIT_H 12.5 #define _VMX_VIRPIT_H 12.6 + 12.7 #include <xen/config.h> 12.8 #include <xen/init.h> 12.9 #include <xen/lib.h> 12.10 @@ -17,14 +18,14 @@ 12.11 12.12 struct vmx_virpit_t { 12.13 /* for simulation of counter 0 in mode 2*/ 12.14 - int vector; /* the pit irq vector */ 12.15 - unsigned int period; /* the frequency. e.g. 10ms*/ 12.16 + int vector; /* the pit irq vector */ 12.17 + unsigned int period; /* the frequency. e.g. 10ms*/ 12.18 s_time_t scheduled; /* scheduled timer interrupt */ 12.19 - unsigned int channel; /* the pit channel, counter 0~2 */ 12.20 + unsigned int channel; /* the pit channel, counter 0~2 */ 12.21 u64 *intr_bitmap; 12.22 - unsigned int pending_intr_nr; /* the couner for pending timer interrupts */ 12.23 - unsigned long long inject_point; /* the time inject virt intr */ 12.24 - struct ac_timer pit_timer; /* periodic timer for mode 2*/ 12.25 + unsigned int pending_intr_nr; /* the couner for pending timer interrupts */ 12.26 + unsigned long long inject_point; /* the time inject virt intr */ 12.27 + struct ac_timer pit_timer; /* periodic timer for mode 2*/ 12.28 int first_injected; /* flag to prevent shadow window */ 12.29 12.30 /* virtual PIT state for handle related I/O */ 12.31 @@ -32,8 +33,8 @@ struct vmx_virpit_t { 12.32 int count_LSB_latched; 12.33 int count_MSB_latched; 12.34 12.35 - unsigned int count; /* the 16 bit channel count */ 12.36 - unsigned int init_val; /* the init value for the counter */ 12.37 + unsigned int count; /* the 16 bit channel count */ 12.38 + unsigned int init_val; /* the init value for the counter */ 12.39 12.40 } ; 12.41
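
The pending_intr_nr and first_injected fields of vmx_virpit_t above drive the PIT catch-up logic seen in interrupt_post_injection() earlier in this changeset: the periodic timer raises one tick per period, and each injected PIT vector consumes one. A standalone sketch of that bookkeeping (plain C; the struct subset and vector number are illustrative):

    #include <stdio.h>

    struct virpit {
        int vector;                   /* the pit irq vector */
        unsigned int pending_intr_nr; /* ticks fired but not yet injected */
        int first_injected;           /* flag to prevent shadow window */
    };

    static void pit_timer_fired(struct virpit *vpit)
    {
        vpit->pending_intr_nr++;      /* periodic timer callback */
    }

    static void post_injection(struct virpit *vpit, int vector)
    {
        /* mirrors the VLAPIC_DELIV_MODE_EXT case in vmx_io.c */
        if (vpit->pending_intr_nr && vector == vpit->vector)
            vpit->pending_intr_nr--;

        if (vector == vpit->vector && !vpit->first_injected) {
            vpit->first_injected = 1;
            vpit->pending_intr_nr = 0;
        }
    }

    int main(void)
    {
        struct virpit pit = { .vector = 0x28, .pending_intr_nr = 0,
                              .first_injected = 1 };
        int i;

        for (i = 0; i < 3; i++)
            pit_timer_fired(&pit);    /* vcpu descheduled across 3 ticks */

        post_injection(&pit, 0x28);
        printf("still pending: %u\n", pit.pending_intr_nr);  /* prints 2 */
        return 0;
    }
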