debuggers.hg

changeset 20643:7f611de6b93c

hvm: Share ASID logic between VMX and SVM.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Dec 08 14:14:27 2009 +0000 (2009-12-08)
parents 2d92ad3ef517
children 1f5f1674e53f
files xen/arch/x86/hvm/asid.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/vmx/entry.S xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/asid.h xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/svm/asid.h xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/asm-x86/hvm/vmx/vmx.h
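
The new vmx_vmenter_helper() in the diff below calls hvm_asid_handle_vmenter(),
but this changeset does not include that function's body. For orientation, a
minimal sketch of the generation-based allocator now shared by VMX and SVM;
next_asid appears in the asid.c hunk, while core_asid_generation, max_asid and
disabled are assumed names for the remaining per-CPU state:

    /* Per-CPU ASID state (field names other than next_asid are assumed). */
    struct hvm_asid_data {
        u64    core_asid_generation; /* bumped by hvm_asid_flush_core() */
        u32    next_asid;            /* next free ASID; 0 reserved for Xen */
        u32    max_asid;             /* from hvm_asid_init(nasids) */
        bool_t disabled;             /* no usable ASIDs on this core */
    };
    static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data);

    /* Returns 1 iff a TLB flush is needed before entering the guest. */
    bool_t hvm_asid_handle_vmenter(void)
    {
        struct vcpu *curr = current;
        struct hvm_asid_data *data = &this_cpu(hvm_asid_data);

        if ( data->disabled )
        {
            curr->arch.hvm_vcpu.asid = 0; /* ASID 0: tagged TLB unused */
            return 0;
        }

        /* Same generation: the vCPU's current ASID is still valid. */
        if ( curr->arch.hvm_vcpu.asid_generation ==
             data->core_asid_generation )
            return 0;

        /* Out of ASIDs: start a new generation; all old ASIDs go stale. */
        if ( unlikely(data->next_asid > data->max_asid) )
        {
            hvm_asid_flush_core();
            data->next_asid = 1;
        }

        curr->arch.hvm_vcpu.asid = data->next_asid++;
        curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation;

        /* First ASID of a generation: flush to kill stale translations. */
        return (curr->arch.hvm_vcpu.asid == 1);
    }

Note how hvm_asid_flush_vcpu() (asid.c hunk below) sets asid_generation to 0;
assuming generations are counted from 1, that value can never match a live
generation, so the vCPU is forced through the allocation path at its next VM
entry.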
line diff
     1.1 --- a/xen/arch/x86/hvm/asid.c	Tue Dec 08 10:33:08 2009 +0000
     1.2 +++ b/xen/arch/x86/hvm/asid.c	Tue Dec 08 14:14:27 2009 +0000
     1.3 @@ -20,7 +20,9 @@
     1.4  #include <xen/config.h>
     1.5  #include <xen/init.h>
     1.6  #include <xen/lib.h>
     1.7 -#include <xen/perfc.h>
     1.8 +#include <xen/sched.h>
     1.9 +#include <xen/smp.h>
    1.10 +#include <xen/percpu.h>
    1.11  #include <asm/hvm/asid.h>
    1.12  
    1.13  /*
    1.14 @@ -80,7 +82,7 @@ void hvm_asid_init(int nasids)
    1.15      data->next_asid = 1;
    1.16  }
    1.17  
    1.18 -void hvm_asid_invalidate_asid(struct vcpu *v)
    1.19 +void hvm_asid_flush_vcpu(struct vcpu *v)
    1.20  {
    1.21      v->arch.hvm_vcpu.asid_generation = 0;
    1.22  }
     2.1 --- a/xen/arch/x86/hvm/hvm.c	Tue Dec 08 10:33:08 2009 +0000
     2.2 +++ b/xen/arch/x86/hvm/hvm.c	Tue Dec 08 14:14:27 2009 +0000
     2.3 @@ -756,6 +756,8 @@ int hvm_vcpu_initialise(struct vcpu *v)
     2.4  {
     2.5      int rc;
     2.6  
     2.7 +    hvm_asid_flush_vcpu(v);
     2.8 +
     2.9      if ( cpu_has_xsave )
    2.10      {
    2.11          /* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
     3.1 --- a/xen/arch/x86/hvm/svm/svm.c	Tue Dec 08 10:33:08 2009 +0000
     3.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Tue Dec 08 14:14:27 2009 +0000
     3.3 @@ -424,7 +424,7 @@ static void svm_update_guest_cr(struct v
     3.4          break;
     3.5      case 3:
     3.6          vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
     3.7 -        hvm_asid_invalidate_asid(v);
     3.8 +        hvm_asid_flush_vcpu(v);
     3.9          break;
    3.10      case 4:
    3.11          vmcb->cr4 = HVM_CR4_HOST_MASK;
    3.12 @@ -455,14 +455,6 @@ static void svm_update_guest_efer(struct
    3.13      svm_intercept_msr(v, MSR_IA32_SYSENTER_EIP, lma);
    3.14  }
    3.15  
    3.16 -static void svm_flush_guest_tlbs(void)
    3.17 -{
    3.18 -    /* Roll over the CPU's ASID generation, so it gets a clean TLB when we
    3.19 -     * next VMRUN.  (If ASIDs are disabled, the whole TLB is flushed on
    3.20 -     * VMRUN anyway). */
    3.21 -    hvm_asid_flush_core();
    3.22 -}
    3.23 -
    3.24  static void svm_sync_vmcb(struct vcpu *v)
    3.25  {
    3.26      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
    3.27 @@ -704,7 +696,7 @@ static void svm_do_resume(struct vcpu *v
    3.28          hvm_migrate_timers(v);
    3.29  
    3.30          /* Migrating to another ASID domain.  Request a new ASID. */
    3.31 -        hvm_asid_invalidate_asid(v);
    3.32 +        hvm_asid_flush_vcpu(v);
    3.33      }
    3.34  
    3.35      /* Reflect the vlapic's TPR in the hardware vtpr */
    3.36 @@ -1250,7 +1242,6 @@ static struct hvm_function_table __read_
    3.37      .update_host_cr3      = svm_update_host_cr3,
    3.38      .update_guest_cr      = svm_update_guest_cr,
    3.39      .update_guest_efer    = svm_update_guest_efer,
    3.40 -    .flush_guest_tlbs     = svm_flush_guest_tlbs,
    3.41      .set_tsc_offset       = svm_set_tsc_offset,
    3.42      .inject_exception     = svm_inject_exception,
    3.43      .init_hypercall_page  = svm_init_hypercall_page,
     4.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Tue Dec 08 10:33:08 2009 +0000
     4.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Tue Dec 08 14:14:27 2009 +0000
     4.3 @@ -114,9 +114,6 @@ static int construct_vmcb(struct vcpu *v
     4.4      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
     4.5      struct vmcb_struct *vmcb = arch_svm->vmcb;
     4.6  
     4.7 -    /* TLB control, and ASID assigment. */
     4.8 -    hvm_asid_invalidate_asid(v);
     4.9 -
    4.10      vmcb->general1_intercepts = 
    4.11          GENERAL1_INTERCEPT_INTR        | GENERAL1_INTERCEPT_NMI         |
    4.12          GENERAL1_INTERCEPT_SMI         | GENERAL1_INTERCEPT_INIT        |
     5.1 --- a/xen/arch/x86/hvm/vmx/entry.S	Tue Dec 08 10:33:08 2009 +0000
     5.2 +++ b/xen/arch/x86/hvm/vmx/entry.S	Tue Dec 08 14:14:27 2009 +0000
     5.3 @@ -142,9 +142,9 @@ vmx_asm_do_vmentry:
     5.4          call_with_regs(vmx_enter_realmode) 
     5.5  
     5.6  .Lvmx_not_realmode:
     5.7 +        call vmx_vmenter_helper
     5.8          mov  VCPU_hvm_guest_cr2(r(bx)),r(ax)
     5.9          mov  r(ax),%cr2
    5.10 -        call vmx_trace_vmentry
    5.11  
    5.12          lea  UREGS_rip(r(sp)),r(di)
    5.13          mov  $GUEST_RIP,%eax
     6.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Tue Dec 08 10:33:08 2009 +0000
     6.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Tue Dec 08 14:14:27 2009 +0000
     6.3 @@ -400,9 +400,12 @@ int vmx_cpu_up(void)
     6.4          BUG();
     6.5      }
     6.6  
     6.7 +    hvm_asid_init(cpu_has_vmx_vpid ? (1u << VMCS_VPID_WIDTH) : 0);
     6.8 +
     6.9      ept_sync_all();
    6.10  
    6.11 -    vpid_sync_all();
    6.12 +    if ( cpu_has_vmx_vpid )
    6.13 +        vpid_sync_all();
    6.14  
    6.15      return 1;
    6.16  }
    6.17 @@ -559,6 +562,9 @@ static int construct_vmcs(struct vcpu *v
    6.18  
    6.19      v->arch.hvm_vmx.secondary_exec_control = vmx_secondary_exec_control;
    6.20  
    6.21 +    /* Disable VPID for now: we decide when to enable it on VMENTER. */
    6.22 +    v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
    6.23 +
    6.24      if ( paging_mode_hap(d) )
    6.25      {
    6.26          v->arch.hvm_vmx.exec_control &= ~(CPU_BASED_INVLPG_EXITING |
    6.27 @@ -736,7 +742,7 @@ static int construct_vmcs(struct vcpu *v
    6.28      }
    6.29  
    6.30      if ( cpu_has_vmx_vpid )
    6.31 -        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vmx.vpid);
    6.32 +        __vmwrite(VIRTUAL_PROCESSOR_ID, v->arch.hvm_vcpu.asid);
    6.33  
    6.34      if ( cpu_has_vmx_pat && paging_mode_hap(d) )
    6.35      {
    6.36 @@ -946,7 +952,7 @@ void vmx_do_resume(struct vcpu *v)
    6.37          hvm_migrate_timers(v);
    6.38          hvm_migrate_pirqs(v);
    6.39          vmx_set_host_env(v);
    6.40 -        vpid_sync_vcpu_all(v);
    6.41 +        hvm_asid_flush_vcpu(v);
    6.42      }
    6.43  
    6.44      debug_state = v->domain->debugger_attached;
     7.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Dec 08 10:33:08 2009 +0000
     7.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Dec 08 14:14:27 2009 +0000
     7.3 @@ -60,8 +60,6 @@ static void vmx_ctxt_switch_to(struct vc
     7.4  
     7.5  static int  vmx_alloc_vlapic_mapping(struct domain *d);
     7.6  static void vmx_free_vlapic_mapping(struct domain *d);
     7.7 -static int  vmx_alloc_vpid(struct vcpu *v);
     7.8 -static void vmx_free_vpid(struct vcpu *v);
     7.9  static void vmx_install_vlapic_mapping(struct vcpu *v);
    7.10  static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
    7.11  static void vmx_update_guest_efer(struct vcpu *v);
    7.12 @@ -104,9 +102,6 @@ static int vmx_vcpu_initialise(struct vc
    7.13  
    7.14      spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
    7.15  
    7.16 -    if ( (rc = vmx_alloc_vpid(v)) != 0 )
    7.17 -        return rc;
    7.18 -
    7.19      v->arch.schedule_tail    = vmx_do_resume;
    7.20      v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
    7.21      v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
    7.22 @@ -116,7 +111,6 @@ static int vmx_vcpu_initialise(struct vc
    7.23          dprintk(XENLOG_WARNING,
    7.24                  "Failed to create VMCS for vcpu %d: err=%d.\n",
    7.25                  v->vcpu_id, rc);
    7.26 -        vmx_free_vpid(v);
    7.27          return rc;
    7.28      }
    7.29  
    7.30 @@ -136,7 +130,6 @@ static void vmx_vcpu_destroy(struct vcpu
    7.31      vmx_destroy_vmcs(v);
    7.32      vpmu_destroy(v);
    7.33      passive_domain_destroy(v);
    7.34 -    vmx_free_vpid(v);
    7.35  }
    7.36  
    7.37  #ifdef __x86_64__
    7.38 @@ -1168,7 +1161,7 @@ static void vmx_update_guest_cr(struct v
    7.39          }
    7.40   
    7.41          __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
    7.42 -        vpid_sync_vcpu_all(v);
    7.43 +        hvm_asid_flush_vcpu(v);
    7.44          break;
    7.45      case 4:
    7.46          v->arch.hvm_vcpu.hw_cr[4] = HVM_CR4_HOST_MASK;
    7.47 @@ -1214,19 +1207,6 @@ static void vmx_update_guest_efer(struct
    7.48                     (v->arch.hvm_vcpu.guest_efer & EFER_SCE));
    7.49  }
    7.50  
    7.51 -static void vmx_flush_guest_tlbs(void)
    7.52 -{
    7.53 -    /*
    7.54 -     * If VPID (i.e. tagged TLB support) is not enabled, the fact that
    7.55 -     * we're in Xen at all means any guest will have a clean TLB when
    7.56 -     * it's next run, because VMRESUME will flush it for us.
    7.57 -     *
    7.58 -     * If enabled, we invalidate all translations associated with all
    7.59 -     * VPID values.
    7.60 -     */
    7.61 -    vpid_sync_all();
    7.62 -}
    7.63 -
    7.64  static void __ept_sync_domain(void *info)
    7.65  {
    7.66      struct domain *d = info;
    7.67 @@ -1358,7 +1338,7 @@ static void vmx_set_uc_mode(struct vcpu 
    7.68      if ( paging_mode_hap(v->domain) )
    7.69          ept_change_entry_emt_with_range(
    7.70              v->domain, 0, v->domain->arch.p2m->max_mapped_pfn);
    7.71 -    vpid_sync_all();
    7.72 +    hvm_asid_flush_vcpu(v);
    7.73  }
    7.74  
    7.75  static void vmx_set_info_guest(struct vcpu *v)
    7.76 @@ -1405,7 +1385,6 @@ static struct hvm_function_table __read_
    7.77      .update_host_cr3      = vmx_update_host_cr3,
    7.78      .update_guest_cr      = vmx_update_guest_cr,
    7.79      .update_guest_efer    = vmx_update_guest_efer,
    7.80 -    .flush_guest_tlbs     = vmx_flush_guest_tlbs,
    7.81      .set_tsc_offset       = vmx_set_tsc_offset,
    7.82      .inject_exception     = vmx_inject_exception,
    7.83      .init_hypercall_page  = vmx_init_hypercall_page,
    7.84 @@ -1424,9 +1403,6 @@ static struct hvm_function_table __read_
    7.85      .set_rdtsc_exiting    = vmx_set_rdtsc_exiting
    7.86  };
    7.87  
    7.88 -static unsigned long *vpid_bitmap;
    7.89 -#define VPID_BITMAP_SIZE (1u << VMCS_VPID_WIDTH)
    7.90 -
    7.91  void start_vmx(void)
    7.92  {
    7.93      static bool_t bootstrapped;
    7.94 @@ -1461,17 +1437,6 @@ void start_vmx(void)
    7.95      if ( cpu_has_vmx_ept )
    7.96          vmx_function_table.hap_supported = 1;
    7.97  
    7.98 -    if ( cpu_has_vmx_vpid )
    7.99 -    {
   7.100 -        vpid_bitmap = xmalloc_array(
   7.101 -            unsigned long, BITS_TO_LONGS(VPID_BITMAP_SIZE));
   7.102 -        BUG_ON(vpid_bitmap == NULL);
   7.103 -        memset(vpid_bitmap, 0, BITS_TO_LONGS(VPID_BITMAP_SIZE) * sizeof(long));
   7.104 -
   7.105 -        /* VPID 0 is used by VMX root mode (the hypervisor). */
   7.106 -        __set_bit(0, vpid_bitmap);
   7.107 -    }
   7.108 -
   7.109      setup_vmcs_dump();
   7.110  
   7.111      hvm_enable(&vmx_function_table);
   7.112 @@ -1584,7 +1549,7 @@ static void vmx_invlpg_intercept(unsigne
   7.113  {
   7.114      struct vcpu *curr = current;
   7.115      HVMTRACE_LONG_2D(INVLPG, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
   7.116 -    if ( paging_invlpg(curr, vaddr) )
   7.117 +    if ( paging_invlpg(curr, vaddr) && cpu_has_vmx_vpid )
   7.118          vpid_sync_vcpu_gva(curr, vaddr);
   7.119  }
   7.120  
   7.121 @@ -1931,36 +1896,6 @@ static void vmx_free_vlapic_mapping(stru
   7.122          free_xenheap_page(mfn_to_virt(mfn));
   7.123  }
   7.124  
   7.125 -static int vmx_alloc_vpid(struct vcpu *v)
   7.126 -{
   7.127 -    int idx;
   7.128 -
   7.129 -    if ( !cpu_has_vmx_vpid )
   7.130 -        return 0;
   7.131 -
   7.132 -    do {
   7.133 -        idx = find_first_zero_bit(vpid_bitmap, VPID_BITMAP_SIZE);
   7.134 -        if ( idx >= VPID_BITMAP_SIZE )
   7.135 -        {
   7.136 -            dprintk(XENLOG_WARNING, "VMX VPID space exhausted.\n");
   7.137 -            return -EBUSY;
   7.138 -        }
   7.139 -    }
   7.140 -    while ( test_and_set_bit(idx, vpid_bitmap) );
   7.141 -
   7.142 -    v->arch.hvm_vmx.vpid = idx;
   7.143 -    return 0;
   7.144 -}
   7.145 -
   7.146 -static void vmx_free_vpid(struct vcpu *v)
   7.147 -{
   7.148 -    if ( !cpu_has_vmx_vpid )
   7.149 -        return;
   7.150 -
   7.151 -    if ( v->arch.hvm_vmx.vpid )
   7.152 -        clear_bit(v->arch.hvm_vmx.vpid, vpid_bitmap);
   7.153 -}
   7.154 -
   7.155  static void vmx_install_vlapic_mapping(struct vcpu *v)
   7.156  {
   7.157      paddr_t virt_page_ma, apic_page_ma;
   7.158 @@ -2675,8 +2610,44 @@ asmlinkage void vmx_vmexit_handler(struc
   7.159      }
   7.160  }
   7.161  
   7.162 -asmlinkage void vmx_trace_vmentry(void)
   7.163 +asmlinkage void vmx_vmenter_helper(void)
   7.164  {
   7.165 +    struct vcpu *curr = current;
   7.166 +    u32 new_asid, old_asid;
   7.167 +    bool_t need_flush;
   7.168 +
   7.169 +    if ( !cpu_has_vmx_vpid )
   7.170 +        goto out;
   7.171 +
   7.172 +    old_asid = curr->arch.hvm_vcpu.asid;
   7.173 +    need_flush = hvm_asid_handle_vmenter();
   7.174 +    new_asid = curr->arch.hvm_vcpu.asid;
   7.175 +
   7.176 +    if ( unlikely(new_asid != old_asid) )
   7.177 +    {
   7.178 +        __vmwrite(VIRTUAL_PROCESSOR_ID, new_asid);
   7.179 +        if ( !old_asid && new_asid )
   7.180 +        {
   7.181 +            /* VPID was disabled: now enabled. */
   7.182 +            curr->arch.hvm_vmx.secondary_exec_control |=
   7.183 +                SECONDARY_EXEC_ENABLE_VPID;
   7.184 +            __vmwrite(SECONDARY_VM_EXEC_CONTROL,
   7.185 +                      curr->arch.hvm_vmx.secondary_exec_control);
   7.186 +        }
   7.187 +        else if ( old_asid && !new_asid )
   7.188 +        {
   7.189 +            /* VPID was enabled: now disabled. */
   7.190 +            curr->arch.hvm_vmx.secondary_exec_control &=
   7.191 +                ~SECONDARY_EXEC_ENABLE_VPID;
   7.192 +            __vmwrite(SECONDARY_VM_EXEC_CONTROL,
   7.193 +                      curr->arch.hvm_vmx.secondary_exec_control);
   7.194 +        }
   7.195 +    }
   7.196 +
   7.197 +    if ( unlikely(need_flush) )
   7.198 +        vpid_sync_all();
   7.199 +
   7.200 + out:
   7.201      HVMTRACE_ND (VMENTRY, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
   7.202  }
   7.203  
     8.1 --- a/xen/include/asm-x86/hvm/asid.h	Tue Dec 08 10:33:08 2009 +0000
     8.2 +++ b/xen/include/asm-x86/hvm/asid.h	Tue Dec 08 14:14:27 2009 +0000
     8.3 @@ -21,14 +21,14 @@
     8.4  #define __ASM_X86_HVM_ASID_H__
     8.5  
     8.6  #include <xen/config.h>
     8.7 -#include <xen/sched.h>
     8.8 -#include <asm/processor.h>
     8.9 +
    8.10 +struct vcpu;
    8.11  
    8.12  /* Initialise ASID management for the current physical CPU. */
    8.13  void hvm_asid_init(int nasids);
    8.14  
    8.15  /* Invalidate a VCPU's current ASID allocation: forces re-allocation. */
    8.16 -void hvm_asid_invalidate_asid(struct vcpu *v);
    8.17 +void hvm_asid_flush_vcpu(struct vcpu *v);
    8.18  
    8.19  /* Flush all ASIDs on this processor core. */
    8.20  void hvm_asid_flush_core(void);
     9.1 --- a/xen/include/asm-x86/hvm/hvm.h	Tue Dec 08 10:33:08 2009 +0000
     9.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Tue Dec 08 14:14:27 2009 +0000
     9.3 @@ -23,6 +23,7 @@
     9.4  
     9.5  #include <asm/current.h>
     9.6  #include <asm/x86_emulate.h>
     9.7 +#include <asm/hvm/asid.h>
     9.8  #include <public/domctl.h>
     9.9  #include <public/hvm/save.h>
    9.10  
    9.11 @@ -100,13 +101,6 @@ struct hvm_function_table {
    9.12      void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
    9.13      void (*update_guest_efer)(struct vcpu *v);
    9.14  
    9.15 -    /*
    9.16 -     * Called to ensure than all guest-specific mappings in a tagged TLB
    9.17 -     * are flushed; does *not* flush Xen's TLB entries, and on
    9.18 -     * processors without a tagged TLB it will be a noop.
    9.19 -     */
    9.20 -    void (*flush_guest_tlbs)(void);
    9.21 -
    9.22      void (*set_tsc_offset)(struct vcpu *v, u64 offset);
    9.23  
    9.24      void (*inject_exception)(unsigned int trapnr, int errcode,
    9.25 @@ -201,11 +195,15 @@ static inline void hvm_update_guest_efer
    9.26      hvm_funcs.update_guest_efer(v);
    9.27  }
    9.28  
    9.29 -static inline void 
    9.30 -hvm_flush_guest_tlbs(void)
    9.31 +/*
     9.32 + * Called to ensure that all guest-specific mappings in a tagged TLB are
    9.33 + * flushed; does *not* flush Xen's TLB entries, and on processors without a 
    9.34 + * tagged TLB it will be a noop.
    9.35 + */
    9.36 +static inline void hvm_flush_guest_tlbs(void)
    9.37  {
    9.38      if ( hvm_enabled )
    9.39 -        hvm_funcs.flush_guest_tlbs();
    9.40 +        hvm_asid_flush_core();
    9.41  }
    9.42  
    9.43  void hvm_hypercall_page_initialise(struct domain *d,
    10.1 --- a/xen/include/asm-x86/hvm/svm/asid.h	Tue Dec 08 10:33:08 2009 +0000
    10.2 +++ b/xen/include/asm-x86/hvm/svm/asid.h	Tue Dec 08 14:14:27 2009 +0000
    10.3 @@ -41,7 +41,7 @@ static inline void svm_asid_g_invlpg(str
    10.4  #endif
    10.5  
    10.6      /* Safe fallback. Take a new ASID. */
    10.7 -    hvm_asid_invalidate_asid(v);
    10.8 +    hvm_asid_flush_vcpu(v);
    10.9  }
   10.10  
   10.11  #endif /* __ASM_X86_HVM_SVM_ASID_H__ */
    11.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Tue Dec 08 10:33:08 2009 +0000
    11.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Tue Dec 08 14:14:27 2009 +0000
    11.3 @@ -90,8 +90,6 @@ struct arch_vmx_struct {
    11.4      u32                  exec_control;
    11.5      u32                  secondary_exec_control;
    11.6  
    11.7 -    u16                  vpid;
    11.8 -
    11.9      /* PMU */
   11.10      struct vpmu_struct   vpmu;
   11.11  
    12.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Tue Dec 08 10:33:08 2009 +0000
    12.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Tue Dec 08 14:14:27 2009 +0000
    12.3 @@ -314,20 +314,12 @@ void ept_sync_domain(struct domain *d);
    12.4  
    12.5  static inline void vpid_sync_vcpu_gva(struct vcpu *v, unsigned long gva)
    12.6  {
    12.7 -    if ( cpu_has_vmx_vpid )
    12.8 -        __invvpid(0, v->arch.hvm_vmx.vpid, (u64)gva);
    12.9 -}
   12.10 -
   12.11 -static inline void vpid_sync_vcpu_all(struct vcpu *v)
   12.12 -{
   12.13 -    if ( cpu_has_vmx_vpid )
   12.14 -        __invvpid(1, v->arch.hvm_vmx.vpid, 0);
   12.15 +    __invvpid(0, v->arch.hvm_vcpu.asid, (u64)gva);
   12.16  }
   12.17  
   12.18  static inline void vpid_sync_all(void)
   12.19  {
   12.20 -    if ( cpu_has_vmx_vpid )
   12.21 -        __invvpid(2, 0, 0);
   12.22 +    __invvpid(2, 0, 0);
   12.23  }
   12.24  
   12.25  static inline void __vmxoff(void)
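
The SVM side's consumption of the shared interface is not part of this diff.
A hedged sketch of what the VMRUN path presumably does with it, using the
existing vmcb guest_asid/tlb_control fields (svm_asid_handle_vmrun is an
assumed name):

    /* Called from the VMRUN path, mirroring vmx_vmenter_helper() above. */
    void svm_asid_handle_vmrun(void)
    {
        struct vcpu *curr = current;
        bool_t need_flush = hvm_asid_handle_vmenter();

        /* ASID 0: ASIDs disabled, so flush the whole TLB on every VMRUN. */
        if ( curr->arch.hvm_vcpu.asid == 0 )
        {
            curr->arch.hvm_svm.vmcb->guest_asid  = 1;
            curr->arch.hvm_svm.vmcb->tlb_control = 1;
            return;
        }

        curr->arch.hvm_svm.vmcb->guest_asid  = curr->arch.hvm_vcpu.asid;
        curr->arch.hvm_svm.vmcb->tlb_control = need_flush;
    }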