
changeset 20642:2d92ad3ef517

hvm: Pull SVM ASID management into common HVM code where it can be shared.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Dec 08 10:33:08 2009 +0000 (2009-12-08)
parents 3122518646d3
children 7f611de6b93c
files xen/arch/x86/hvm/Makefile xen/arch/x86/hvm/asid.c xen/arch/x86/hvm/svm/asid.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/include/asm-x86/hvm/asid.h xen/include/asm-x86/hvm/svm/asid.h xen/include/asm-x86/hvm/svm/vmcb.h xen/include/asm-x86/hvm/vcpu.h
line diff
     1.1 --- a/xen/arch/x86/hvm/Makefile	Tue Dec 08 07:55:21 2009 +0000
     1.2 +++ b/xen/arch/x86/hvm/Makefile	Tue Dec 08 10:33:08 2009 +0000
     1.3 @@ -1,6 +1,7 @@
     1.4  subdir-y += svm
     1.5  subdir-y += vmx
     1.6  
     1.7 +obj-y += asid.o
     1.8  obj-y += emulate.o
     1.9  obj-y += hpet.o
    1.10  obj-y += hvm.o
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/xen/arch/x86/hvm/asid.c	Tue Dec 08 10:33:08 2009 +0000
     2.3 @@ -0,0 +1,150 @@
     2.4 +/*
     2.5 + * asid.c: ASID management
     2.6 + * Copyright (c) 2007, Advanced Micro Devices, Inc.
     2.7 + * Copyright (c) 2009, Citrix Systems, Inc.
     2.8 + *
     2.9 + * This program is free software; you can redistribute it and/or modify it
    2.10 + * under the terms and conditions of the GNU General Public License,
    2.11 + * version 2, as published by the Free Software Foundation.
    2.12 + *
    2.13 + * This program is distributed in the hope it will be useful, but WITHOUT
    2.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    2.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    2.16 + * more details.
    2.17 + *
    2.18 + * You should have received a copy of the GNU General Public License along with
    2.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    2.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    2.21 + */
    2.22 +
    2.23 +#include <xen/config.h>
    2.24 +#include <xen/init.h>
    2.25 +#include <xen/lib.h>
    2.26 +#include <xen/perfc.h>
    2.27 +#include <asm/hvm/asid.h>
    2.28 +
    2.29 +/*
    2.30 + * ASIDs partition the physical TLB.  In the current implementation ASIDs are
    2.31 + * introduced to reduce the number of TLB flushes.  Each time the guest's
    2.32 + * virtual address space changes (e.g. due to an INVLPG, MOV-TO-{CR3, CR4}
    2.33 + * operation), instead of flushing the TLB, a new ASID is assigned.  This
    2.34 + * reduces the number of TLB flushes to at most 1/#ASIDs.  The biggest
     2.35 + * advantage is that hot parts of the hypervisor's code and data remain in
    2.36 + * the TLB.
    2.37 + *
    2.38 + * Sketch of the Implementation:
    2.39 + *
    2.40 + * ASIDs are a CPU-local resource.  As preemption of ASIDs is not possible,
    2.41 + * ASIDs are assigned in a round-robin scheme.  To minimize the overhead of
     2.42 + * ASID invalidation, at the time of a TLB flush, ASIDs are tagged with a
     2.43 + * 64-bit generation.  Only on a generation overflow does the code need to
     2.44 + * invalidate all ASID information stored in the VCPUs that run on the
     2.45 + * specific physical processor.  This overflow appears after about 2^80
     2.46 + * host processor cycles, so we do not optimize this case, but simply disable
     2.47 + * ASID usage to retain correctness.
    2.48 + */
    2.49 +
    2.50 +/* Per-CPU ASID management. */
    2.51 +struct hvm_asid_data {
    2.52 +   u64 core_asid_generation;
    2.53 +   u32 next_asid;
    2.54 +   u32 max_asid;
    2.55 +   bool_t disabled;
    2.56 +   bool_t initialised;
    2.57 +};
    2.58 +
    2.59 +static DEFINE_PER_CPU(struct hvm_asid_data, hvm_asid_data);
    2.60 +
    2.61 +void hvm_asid_init(int nasids)
    2.62 +{
    2.63 +    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
    2.64 +
    2.65 +    /*
    2.66 +     * If already initialised, we just bump the generation to force a TLB
    2.67 +     * flush. Resetting the generation could be dangerous, if VCPUs still
    2.68 +     * exist that reference earlier generations on this CPU.
    2.69 +     */
    2.70 +    if ( test_and_set_bool(data->initialised) )
    2.71 +        return hvm_asid_flush_core();
    2.72 +
    2.73 +    data->max_asid = nasids - 1;
    2.74 +    data->disabled = (nasids <= 1);
    2.75 +
     2.76 +    printk("HVM: ASIDs %s\n",
    2.77 +           (data->disabled ? "disabled." : "enabled."));
    2.78 +
    2.79 +    /* Zero indicates 'invalid generation', so we start the count at one. */
    2.80 +    data->core_asid_generation = 1;
    2.81 +
    2.82 +    /* Zero indicates 'ASIDs disabled', so we start the count at one. */
    2.83 +    data->next_asid = 1;
    2.84 +}
    2.85 +
    2.86 +void hvm_asid_invalidate_asid(struct vcpu *v)
    2.87 +{
    2.88 +    v->arch.hvm_vcpu.asid_generation = 0;
    2.89 +}
    2.90 +
    2.91 +void hvm_asid_flush_core(void)
    2.92 +{
    2.93 +    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
    2.94 +
    2.95 +    if ( data->disabled )
    2.96 +        return;
    2.97 +
    2.98 +    if ( likely(++data->core_asid_generation != 0) )
    2.99 +    {
   2.100 +        data->next_asid = 1;
   2.101 +        return;
   2.102 +    }
   2.103 +
   2.104 +    /*
    2.105 +     * ASID generations are 64 bit, so in practice they never overflow.
    2.106 +     * For safety, should it happen, we simply disable ASIDs: correctness
    2.107 +     * is preserved, and this core merely runs a bit slower.
   2.108 +     */
   2.109 +    printk("HVM: ASID generation overrun. Disabling ASIDs.\n");
   2.110 +    data->disabled = 1;
   2.111 +}
   2.112 +
   2.113 +bool_t hvm_asid_handle_vmenter(void)
   2.114 +{
   2.115 +    struct vcpu *curr = current;
   2.116 +    struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
   2.117 +
    2.118 +    /* On erratum #170 systems we must flush the TLB.
    2.119 +     * Generation overruns are handled here, too. */
   2.120 +    if ( data->disabled )
   2.121 +    {
   2.122 +        curr->arch.hvm_vcpu.asid = 0;
   2.123 +        return 0;
   2.124 +    }
   2.125 +
   2.126 +    /* Test if VCPU has valid ASID. */
   2.127 +    if ( curr->arch.hvm_vcpu.asid_generation == data->core_asid_generation )
   2.128 +        return 0;
   2.129 +
    2.130 +    /* If there are no free ASIDs, we need to start a new generation. */
   2.131 +    if ( unlikely(data->next_asid > data->max_asid) )
   2.132 +        hvm_asid_flush_core();
   2.133 +
   2.134 +    /* Now guaranteed to be a free ASID. */
   2.135 +    curr->arch.hvm_vcpu.asid = data->next_asid++;
   2.136 +    curr->arch.hvm_vcpu.asid_generation = data->core_asid_generation;
   2.137 +
   2.138 +    /*
   2.139 +     * When we assign ASID 1, flush all TLB entries as we are starting a new
    2.140 +     * generation, and all old ASID allocations are now stale.
   2.141 +     */
   2.142 +    return (curr->arch.hvm_vcpu.asid == 1);
   2.143 +}
   2.144 +
   2.145 +/*
   2.146 + * Local variables:
   2.147 + * mode: C
   2.148 + * c-set-style: "BSD"
   2.149 + * c-basic-offset: 4
   2.150 + * tab-width: 4
   2.151 + * indent-tabs-mode: nil
   2.152 + * End:
   2.153 + */
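A note on the scheme above: the allocation logic in hvm_asid_handle_vmenter() can be exercised in isolation. The following is a minimal, illustrative user-space model of the same round-robin, generation-tagged allocator; struct asid_pool and struct vcpu_tag are hypothetical stand-ins for the per-CPU hvm_asid_data and the new struct hvm_vcpu fields, not code from this changeset.

    #include <stdint.h>
    #include <stdio.h>

    /* Models the per-CPU hvm_asid_data above. */
    struct asid_pool {
        uint64_t generation;    /* 0 means 'invalid', so counting starts at 1 */
        uint32_t next_asid;     /* handed out round-robin; 0 is reserved */
        uint32_t max_asid;
    };

    /* Models the asid/asid_generation pair added to struct hvm_vcpu. */
    struct vcpu_tag {
        uint64_t generation;
        uint32_t asid;
    };

    /* Mirrors hvm_asid_handle_vmenter(): returns 1 when the caller must
     * flush the TLB on the next guest entry. */
    static int asid_alloc(struct asid_pool *p, struct vcpu_tag *v)
    {
        if ( v->generation == p->generation )
            return 0;                 /* current allocation is still valid */

        if ( p->next_asid > p->max_asid )
        {
            p->generation++;          /* retire every outstanding ASID */
            p->next_asid = 1;
        }

        v->asid = p->next_asid++;
        v->generation = p->generation;

        /* ASID 1 is the first of a new generation: old entries are stale. */
        return v->asid == 1;
    }

    int main(void)
    {
        struct asid_pool pool = { 1, 1, 3 };
        struct vcpu_tag vcpu[5] = { { 0, 0 } };
        int i, flush;

        /* With only three usable ASIDs, the fourth allocation rolls over
         * to a new generation and requests a flush. */
        for ( i = 0; i < 5; i++ )
        {
            flush = asid_alloc(&pool, &vcpu[i]);
            printf("vcpu%d: asid=%u flush=%d\n", i, vcpu[i].asid, flush);
        }

        return 0;
    }

Run standalone, the fourth allocation wraps the three-ASID pool, bumps the generation, hands out ASID 1 again, and reports flush=1, which is exactly the condition the SVM code below turns into tlb_control.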
     3.1 --- a/xen/arch/x86/hvm/svm/asid.c	Tue Dec 08 07:55:21 2009 +0000
     3.2 +++ b/xen/arch/x86/hvm/svm/asid.c	Tue Dec 08 10:33:08 2009 +0000
     3.3 @@ -22,164 +22,16 @@
     3.4  #include <xen/perfc.h>
     3.5  #include <asm/hvm/svm/asid.h>
     3.6  
     3.7 -/*
     3.8 - * This is the interface to SVM's ASID management.  ASIDs partition the
     3.9 - * physical TLB for SVM.  In the current implementation ASIDs are introduced
    3.10 - * to reduce the number of TLB flushes.  Each time the guest's virtual
    3.11 - * address space changes (e.g. due to an INVLPG, MOV-TO-{CR3, CR4} operation),
    3.12 - * instead of flushing the TLB, a new ASID is assigned.  This reduces the
    3.13 - * number of TLB flushes to at most 1/#ASIDs (currently 1/64).  The biggest
    3.14 - * advantage is that hot parts of the hypervisor's code and data retain in
    3.15 - * the TLB.
    3.16 - *
    3.17 - * Sketch of the Implementation:
    3.18 - *
    3.19 - * ASIDs are a CPU-local resource.  As preemption of ASIDs is not possible,
    3.20 - * ASIDs are assigned in a round-robin scheme.  To minimize the overhead of
    3.21 - * ASID invalidation, at the time of a TLB flush,  ASIDs are tagged with a
    3.22 - * 64-bit generation.  Only on a generation overflow the code needs to
    3.23 - * invalidate all ASID information stored at the VCPUs with are run on the
    3.24 - * specific physical processor.  This overflow appears after about 2^80
    3.25 - * host processor cycles, so we do not optimize this case, but simply disable
    3.26 - * ASID useage to retain correctness.
    3.27 - */
    3.28 -
    3.29 -/* usable guest asids  [ 1 .. get_max_asid() ) */
    3.30 -#define SVM_ASID_FIRST_GUEST_ASID       1
    3.31 -
    3.32 -#define SVM_ASID_FIRST_GENERATION       0
    3.33 -
    3.34 -/* triggers the flush of all generations on all VCPUs */
    3.35 -#define SVM_ASID_LAST_GENERATION        (0xfffffffffffffffd)
    3.36 -
    3.37 -/* triggers assignment of new ASID to a VCPU */
    3.38 -#define SVM_ASID_INVALID_GENERATION     (SVM_ASID_LAST_GENERATION + 1)
    3.39 -
    3.40 -/* Per-CPU ASID management. */
    3.41 -struct svm_asid_data {
    3.42 -   u64 core_asid_generation;
    3.43 -   u32 next_asid;
    3.44 -   u32 max_asid;
    3.45 -   u32 erratum170:1;
    3.46 -   u32 initialised:1;
    3.47 -};
    3.48 -
    3.49 -static DEFINE_PER_CPU(struct svm_asid_data, svm_asid_data);
    3.50 -
    3.51 -/*
    3.52 - * Get handle to CPU-local ASID management data.
    3.53 - */
    3.54 -static struct svm_asid_data *svm_asid_core_data(void)
    3.55 -{
    3.56 -    return &this_cpu(svm_asid_data);
    3.57 -}
    3.58 -
    3.59 -/*
    3.60 - * Init ASID management for the current physical CPU.
    3.61 - */
    3.62  void svm_asid_init(struct cpuinfo_x86 *c)
    3.63  {
    3.64 -    int nasids;
    3.65 -    struct svm_asid_data *data = svm_asid_core_data();
    3.66 -
    3.67 -    /*
    3.68 -     * If already initialised, we just bump the generation to force a TLB
    3.69 -     * flush. Resetting the generation could be dangerous, if VCPUs still
    3.70 -     * exist that reference earlier generations on this CPU.
    3.71 -     */
    3.72 -    if ( data->initialised )
    3.73 -        return svm_asid_inc_generation();
    3.74 -    data->initialised = 1;
    3.75 -
    3.76 -    /* Find #ASID. */
    3.77 -    nasids = cpuid_ebx(0x8000000A);
    3.78 -    data->max_asid = nasids - 1;
    3.79 -
    3.80 -    /* Check if we can use ASIDs. */
    3.81 -    data->erratum170 =
    3.82 -        !((c->x86 == 0x10) ||
    3.83 -          ((c->x86 == 0xf) && (c->x86_model >= 0x68) && (c->x86_mask >= 1)));
    3.84 -
    3.85 -    printk("AMD SVM: ASIDs %s \n",
    3.86 -           (data->erratum170 ? "disabled." : "enabled."));
    3.87 -
    3.88 -    /* Initialize ASID assigment. */
    3.89 -    if ( data->erratum170 )
    3.90 -    {
    3.91 -        /* On errata #170, VCPUs and phys processors should have same
    3.92 -          generation.  We set both to invalid. */
    3.93 -        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
    3.94 -    }
    3.95 -    else
    3.96 -    {
    3.97 -        data->core_asid_generation = SVM_ASID_FIRST_GENERATION;
    3.98 -    }
    3.99 -
   3.100 -    /* ASIDs are assigned round-robin.  Start with the first. */
   3.101 -    data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
   3.102 -}
   3.103 -
   3.104 -/*
   3.105 - * Force VCPU to fetch a new ASID.
   3.106 - */
   3.107 -void svm_asid_init_vcpu(struct vcpu *v)
   3.108 -{
   3.109 -    struct svm_asid_data *data = svm_asid_core_data();
   3.110 -
   3.111 -    /* Trigger asignment of a new ASID. */
   3.112 -    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;
   3.113 +    int nasids = 0;
   3.114  
   3.115 -    /*
   3.116 -     * This erratum is bound to a physical processor.  The tlb_control
   3.117 -     * field is not changed by the processor.  We only set tlb_control
   3.118 -     * on VMCB creation and on a migration.
   3.119 -     */
   3.120 -    if ( data->erratum170 )
   3.121 -    {
   3.122 -        /* Flush TLB every VMRUN to handle Errata #170. */
   3.123 -        v->arch.hvm_svm.vmcb->tlb_control = 1;
   3.124 -        /* All guests use same ASID. */
   3.125 -        v->arch.hvm_svm.vmcb->guest_asid  = 1;
   3.126 -    }
   3.127 -    else
   3.128 -    {
   3.129 -        /* These fields are handled on VMRUN */
   3.130 -        v->arch.hvm_svm.vmcb->tlb_control = 0;
   3.131 -        v->arch.hvm_svm.vmcb->guest_asid  = 0;
   3.132 -    }
   3.133 -}
   3.134 +    /* Check for erratum #170, and leave ASIDs disabled if it's present. */
   3.135 +    if ( (c->x86 == 0x10) ||
   3.136 +         ((c->x86 == 0xf) && (c->x86_model >= 0x68) && (c->x86_mask >= 1)) )
   3.137 +        nasids = cpuid_ebx(0x8000000A);
   3.138  
   3.139 -/*
   3.140 - * Increase the Generation to make free ASIDs, and indirectly cause a 
   3.141 - * TLB flush of all ASIDs on the next vmrun.
   3.142 - */
   3.143 -void svm_asid_inc_generation(void)
   3.144 -{
   3.145 -    struct svm_asid_data *data = svm_asid_core_data();
   3.146 -
   3.147 -    if ( likely(data->core_asid_generation < SVM_ASID_LAST_GENERATION) )
   3.148 -    {
   3.149 -        /* Move to the next generation.  We can't flush the TLB now
   3.150 -         * because you need to vmrun to do that, and current might not
   3.151 -         * be a HVM vcpu, but the first HVM vcpu that runs after this 
   3.152 -         * will pick up ASID 1 and flush the TLBs. */
   3.153 -        data->core_asid_generation++;
   3.154 -        data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
   3.155 -        return;
   3.156 -    }
   3.157 -
   3.158 -    /*
   3.159 -     * ASID generations are 64 bit.  Overflow of generations never happens.
   3.160 -     * For safety, we simply disable ASIDs and switch to erratum #170 mode on
   3.161 -     * this core (flushing TLB always). So correctness is established; it
   3.162 -     * only runs a bit slower.
   3.163 -     */
   3.164 -    if ( !data->erratum170 )
   3.165 -    {
   3.166 -        printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
   3.167 -        data->erratum170 = 1;
   3.168 -        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
   3.169 -    }
   3.170 +    hvm_asid_init(nasids);
   3.171  }
   3.172  
   3.173  /*
   3.174 @@ -188,47 +40,19 @@ void svm_asid_inc_generation(void)
   3.175   */
   3.176  asmlinkage void svm_asid_handle_vmrun(void)
   3.177  {
   3.178 -    struct vcpu *v = current;
   3.179 -    struct svm_asid_data *data = svm_asid_core_data();
   3.180 +    struct vcpu *curr = current;
   3.181 +    bool_t need_flush = hvm_asid_handle_vmenter();
   3.182  
   3.183 -    /* On erratum #170 systems we must flush the TLB. 
   3.184 -     * Generation overruns are taken here, too. */
   3.185 -    if ( data->erratum170 )
   3.186 +    /* ASID 0 indicates that ASIDs are disabled. */
   3.187 +    if ( curr->arch.hvm_vcpu.asid == 0 )
   3.188      {
   3.189 -        v->arch.hvm_svm.vmcb->guest_asid  = 1;
   3.190 -        v->arch.hvm_svm.vmcb->tlb_control = 1;
   3.191 -        return;
   3.192 -    }
   3.193 -
   3.194 -    /* Test if VCPU has valid ASID. */
   3.195 -    if ( likely(v->arch.hvm_svm.asid_generation ==
   3.196 -                data->core_asid_generation) )
   3.197 -    {
   3.198 -        /* May revert previous TLB-flush command. */
   3.199 -        v->arch.hvm_svm.vmcb->tlb_control = 0;
   3.200 +        curr->arch.hvm_svm.vmcb->guest_asid  = 1;
   3.201 +        curr->arch.hvm_svm.vmcb->tlb_control = 1;
   3.202          return;
   3.203      }
   3.204  
   3.205 -    /* If there are no free ASIDs, need to go to a new generation */
   3.206 -    if ( unlikely(data->next_asid > data->max_asid) )
   3.207 -        svm_asid_inc_generation();
   3.208 -
   3.209 -    /* Now guaranteed to be a free ASID. */
   3.210 -    v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
   3.211 -    v->arch.hvm_svm.asid_generation  = data->core_asid_generation;
   3.212 -
   3.213 -    /* When we assign ASID 1, flush all TLB entries.  We need to do it 
   3.214 -     * here because svm_asid_inc_generation() can be called at any time, 
   3.215 -     * but the TLB flush can only happen on vmrun. */
   3.216 -    if ( v->arch.hvm_svm.vmcb->guest_asid == SVM_ASID_FIRST_GUEST_ASID )
   3.217 -        v->arch.hvm_svm.vmcb->tlb_control = 1;
   3.218 -    else
   3.219 -        v->arch.hvm_svm.vmcb->tlb_control = 0;
   3.220 -}
   3.221 -
   3.222 -void svm_asid_inv_asid(struct vcpu *v)
   3.223 -{
   3.224 -    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;
   3.225 +    curr->arch.hvm_svm.vmcb->guest_asid  = curr->arch.hvm_vcpu.asid;
   3.226 +    curr->arch.hvm_svm.vmcb->tlb_control = need_flush;
   3.227  }
   3.228  
   3.229  /*
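The point of hoisting this logic is that it is no longer SVM-specific. Nothing in this changeset wires up the VMX side, but a hypothetical VMX consumer could follow the same pattern as svm_asid_handle_vmrun() above to drive VPIDs. A hedged sketch, assuming Xen-internal types; vmx_set_vpid() and vmx_flush_all_vpids() are assumed helper names, not existing functions:

    /* Hypothetical sketch only: a VMX counterpart to
     * svm_asid_handle_vmrun().  vmx_set_vpid() and vmx_flush_all_vpids()
     * are assumed helpers, not existing Xen API. */
    static void vmx_vpid_handle_vmentry(void)
    {
        struct vcpu *curr = current;
        bool_t need_flush = hvm_asid_handle_vmenter();

        /* ASID 0 indicates the common allocator is disabled on this core:
         * fall back to a fixed tag plus an unconditional flush. */
        if ( curr->arch.hvm_vcpu.asid == 0 )
        {
            vmx_set_vpid(curr, 1);
            vmx_flush_all_vpids();
            return;
        }

        vmx_set_vpid(curr, curr->arch.hvm_vcpu.asid);
        if ( need_flush )
            vmx_flush_all_vpids();
    }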
     4.1 --- a/xen/arch/x86/hvm/svm/svm.c	Tue Dec 08 07:55:21 2009 +0000
     4.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Tue Dec 08 10:33:08 2009 +0000
     4.3 @@ -424,7 +424,7 @@ static void svm_update_guest_cr(struct v
     4.4          break;
     4.5      case 3:
     4.6          vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
     4.7 -        svm_asid_inv_asid(v);
     4.8 +        hvm_asid_invalidate_asid(v);
     4.9          break;
    4.10      case 4:
    4.11          vmcb->cr4 = HVM_CR4_HOST_MASK;
    4.12 @@ -460,7 +460,7 @@ static void svm_flush_guest_tlbs(void)
    4.13      /* Roll over the CPU's ASID generation, so it gets a clean TLB when we
    4.14       * next VMRUN.  (If ASIDs are disabled, the whole TLB is flushed on
    4.15       * VMRUN anyway). */
    4.16 -    svm_asid_inc_generation();
    4.17 +    hvm_asid_flush_core();
    4.18  }
    4.19  
    4.20  static void svm_sync_vmcb(struct vcpu *v)
    4.21 @@ -704,7 +704,7 @@ static void svm_do_resume(struct vcpu *v
    4.22          hvm_migrate_timers(v);
    4.23  
    4.24          /* Migrating to another ASID domain.  Request a new ASID. */
    4.25 -        svm_asid_init_vcpu(v);
    4.26 +        hvm_asid_invalidate_asid(v);
    4.27      }
    4.28  
    4.29      /* Reflect the vlapic's TPR in the hardware vtpr */
     5.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Tue Dec 08 07:55:21 2009 +0000
     5.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Tue Dec 08 10:33:08 2009 +0000
     5.3 @@ -115,7 +115,7 @@ static int construct_vmcb(struct vcpu *v
     5.4      struct vmcb_struct *vmcb = arch_svm->vmcb;
     5.5  
     5.6      /* TLB control, and ASID assigment. */
     5.7 -    svm_asid_init_vcpu(v);
     5.8 +    hvm_asid_invalidate_asid(v);
     5.9  
    5.10      vmcb->general1_intercepts = 
    5.11          GENERAL1_INTERCEPT_INTR        | GENERAL1_INTERCEPT_NMI         |
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/include/asm-x86/hvm/asid.h	Tue Dec 08 10:33:08 2009 +0000
     6.3 @@ -0,0 +1,50 @@
     6.4 +/*
     6.5 + * asid.h: ASID management
     6.6 + * Copyright (c) 2007, Advanced Micro Devices, Inc.
     6.7 + * Copyright (c) 2009, Citrix Systems, Inc.
     6.8 + *
     6.9 + * This program is free software; you can redistribute it and/or modify it
    6.10 + * under the terms and conditions of the GNU General Public License,
    6.11 + * version 2, as published by the Free Software Foundation.
    6.12 + *
    6.13 + * This program is distributed in the hope it will be useful, but WITHOUT
    6.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    6.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    6.16 + * more details.
    6.17 + *
    6.18 + * You should have received a copy of the GNU General Public License along with
    6.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    6.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    6.21 + */
    6.22 +
    6.23 +#ifndef __ASM_X86_HVM_ASID_H__
    6.24 +#define __ASM_X86_HVM_ASID_H__
    6.25 +
    6.26 +#include <xen/config.h>
    6.27 +#include <xen/sched.h>
    6.28 +#include <asm/processor.h>
    6.29 +
    6.30 +/* Initialise ASID management for the current physical CPU. */
    6.31 +void hvm_asid_init(int nasids);
    6.32 +
    6.33 +/* Invalidate a VCPU's current ASID allocation: forces re-allocation. */
    6.34 +void hvm_asid_invalidate_asid(struct vcpu *v);
    6.35 +
    6.36 +/* Flush all ASIDs on this processor core. */
    6.37 +void hvm_asid_flush_core(void);
    6.38 +
    6.39 +/* Called before entry to guest context. Checks ASID allocation, returns a
    6.40 + * boolean indicating whether all ASIDs must be flushed. */
    6.41 +bool_t hvm_asid_handle_vmenter(void);
    6.42 +
    6.43 +#endif /* __ASM_X86_HVM_ASID_H__ */
    6.44 +
    6.45 +/*
    6.46 + * Local variables:
    6.47 + * mode: C
    6.48 + * c-set-style: "BSD"
    6.49 + * c-basic-offset: 4
    6.50 + * tab-width: 4
    6.51 + * indent-tabs-mode: nil
    6.52 + * End:
    6.53 + */
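Taken together, the header defines a three-call contract. A hedged sketch of where a vendor backend is expected to call each function (the vendor_* wrapper names are illustrative; the call sites mirror the svm.c and vmcb.c hunks in this changeset):

    #include <asm/hvm/asid.h>

    /* CPU bring-up: size the per-CPU pool.  Passing nasids <= 1 leaves
     * the allocator disabled on this core. */
    static void vendor_cpu_init(int nasids)
    {
        hvm_asid_init(nasids);
    }

    /* The guest's virtual address space changed (e.g. a CR3 write), or
     * the VCPU migrated to another core: force a fresh allocation on the
     * next entry. */
    static void vendor_addr_space_changed(struct vcpu *v)
    {
        hvm_asid_invalidate_asid(v);
    }

    /* Immediately before entering the guest: validate or reallocate the
     * ASID, and learn whether this entry must also flush the TLB. */
    static void vendor_pre_entry(struct vcpu *curr)
    {
        bool_t need_flush = hvm_asid_handle_vmenter();

        /* Program curr->arch.hvm_vcpu.asid and the flush request into the
         * vendor-specific control structure here. */
        (void)need_flush;
    }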
     7.1 --- a/xen/include/asm-x86/hvm/svm/asid.h	Tue Dec 08 07:55:21 2009 +0000
     7.2 +++ b/xen/include/asm-x86/hvm/svm/asid.h	Tue Dec 08 10:33:08 2009 +0000
     7.3 @@ -22,15 +22,13 @@
     7.4  #include <xen/config.h>
     7.5  #include <asm/types.h>
     7.6  #include <asm/hvm/hvm.h>
     7.7 +#include <asm/hvm/asid.h>
     7.8  #include <asm/hvm/support.h>
     7.9  #include <asm/hvm/svm/svm.h>
    7.10  #include <asm/hvm/svm/vmcb.h>
    7.11  #include <asm/percpu.h>
    7.12  
    7.13  void svm_asid_init(struct cpuinfo_x86 *c);
    7.14 -void svm_asid_init_vcpu(struct vcpu *v);
    7.15 -void svm_asid_inv_asid(struct vcpu *v);
    7.16 -void svm_asid_inc_generation(void);
    7.17  
    7.18  static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
    7.19  {
    7.20 @@ -43,7 +41,7 @@ static inline void svm_asid_g_invlpg(str
    7.21  #endif
    7.22  
    7.23      /* Safe fallback. Take a new ASID. */
    7.24 -    svm_asid_inv_asid(v);
    7.25 +    hvm_asid_invalidate_asid(v);
    7.26  }
    7.27  
    7.28  #endif /* __ASM_X86_HVM_SVM_ASID_H__ */
     8.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Tue Dec 08 07:55:21 2009 +0000
     8.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Tue Dec 08 10:33:08 2009 +0000
     8.3 @@ -457,7 +457,6 @@ struct svm_domain {
     8.4  struct arch_svm_struct {
     8.5      struct vmcb_struct *vmcb;
     8.6      u64    vmcb_pa;
     8.7 -    u64    asid_generation; /* ASID tracking, moved here for cache locality. */
     8.8      unsigned long *msrpm;
     8.9      int    launch_core;
    8.10      bool_t vmcb_in_sync;    /* VMCB sync'ed with VMSAVE? */
     9.1 --- a/xen/include/asm-x86/hvm/vcpu.h	Tue Dec 08 07:55:21 2009 +0000
     9.2 +++ b/xen/include/asm-x86/hvm/vcpu.h	Tue Dec 08 10:33:08 2009 +0000
     9.3 @@ -70,6 +70,9 @@ struct hvm_vcpu {
     9.4      bool_t              debug_state_latch;
     9.5      bool_t              single_step;
     9.6  
     9.7 +    u64                 asid_generation;
     9.8 +    u32                 asid;
     9.9 +
    9.10      union {
    9.11          struct arch_vmx_struct vmx;
    9.12          struct arch_svm_struct svm;