xen/arch/x86/hvm/svm/asid.c @ 19822:44fe7ad6fee8

x86 svm: Clean up and fix start_svm() to avoid memory leaks and
resetting ASID generations.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 18 09:47:49 2009 +0100 (2009-06-18)

/*
 * asid.c: handling ASIDs in SVM.
 * Copyright (c) 2007, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/perfc.h>
#include <asm/hvm/svm/asid.h>

/*
 * This is the interface to SVM's ASID management. ASIDs partition the
 * physical TLB for SVM. In the current implementation ASIDs are introduced
 * to reduce the number of TLB flushes. Each time the guest's virtual
 * address space changes (e.g. due to an INVLPG, MOV-TO-{CR3, CR4} operation),
 * instead of flushing the TLB, a new ASID is assigned. This reduces the
 * number of TLB flushes to at most 1/#ASIDs (currently 1/64). The biggest
 * advantage is that hot parts of the hypervisor's code and data remain in
 * the TLB.
 *
 * Sketch of the Implementation:
 *
 * ASIDs are a CPU-local resource. As preemption of ASIDs is not possible,
 * ASIDs are assigned in a round-robin scheme. To minimize the overhead of
 * ASID invalidation, at the time of a TLB flush, ASIDs are tagged with a
 * 64-bit generation. Only on a generation overflow does the code need to
 * invalidate all ASID information stored in the VCPUs that run on the
 * specific physical processor. This overflow appears after about 2^80
 * host processor cycles, so we do not optimize this case, but simply disable
 * ASID usage to retain correctness.
 */
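
/*
 * Illustrative arithmetic for the figures above (a rough sketch, not a
 * statement from the original authors): with 64 ASIDs, ASIDs 1..63 are
 * handed out round-robin before a generation bump forces a single TLB
 * flush, giving roughly one flush per 63 address-space switches. The
 * 2^80-cycle overflow estimate follows if consuming one generation
 * (~2^6 ASID assignments, each costing at least a VMRUN) takes a
 * conservative ~2^16 cycles: 2^64 generations * 2^16 cycles ~= 2^80.
 */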

/* usable guest asids [ 1 .. get_max_asid() ) */
#define SVM_ASID_FIRST_GUEST_ASID       1

#define SVM_ASID_FIRST_GENERATION       0

/* triggers the flush of all generations on all VCPUs */
#define SVM_ASID_LAST_GENERATION        (0xfffffffffffffffd)

/* triggers assignment of new ASID to a VCPU */
#define SVM_ASID_INVALID_GENERATION     (SVM_ASID_LAST_GENERATION + 1)
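
/*
 * Note how the two sentinels relate: SVM_ASID_INVALID_GENERATION is
 * 0xfffffffffffffffe, one past SVM_ASID_LAST_GENERATION. Because
 * core_asid_generation never advances beyond SVM_ASID_LAST_GENERATION,
 * a VCPU tagged with the invalid generation can never match the core
 * generation and is guaranteed a fresh ASID on its next VMRUN.
 */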

/* Per-CPU ASID management. */
struct svm_asid_data {
    u64 core_asid_generation; /* Generation currently live on this CPU. */
    u32 next_asid;            /* Next ASID to hand out, round-robin. */
    u32 max_asid;             /* Highest usable ASID (#ASIDs - 1). */
    u32 erratum170:1;         /* ASIDs disabled: flush TLB on every VMRUN. */
    u32 initialised:1;        /* svm_asid_init() has run on this CPU. */
};

static DEFINE_PER_CPU(struct svm_asid_data, svm_asid_data);

/*
 * Get handle to CPU-local ASID management data.
 */
static struct svm_asid_data *svm_asid_core_data(void)
{
    return &this_cpu(svm_asid_data);
}

/*
 * Init ASID management for the current physical CPU.
 */
void svm_asid_init(struct cpuinfo_x86 *c)
{
    int nasids;
    struct svm_asid_data *data = svm_asid_core_data();

    /*
     * If already initialised, we just bump the generation to force a TLB
     * flush. Resetting the generation could be dangerous, if VCPUs still
     * exist that reference earlier generations on this CPU.
     */
    if ( data->initialised )
        return svm_asid_inc_generation();
    data->initialised = 1;

    /* Find the number of ASIDs. */
    nasids = cpuid_ebx(0x8000000A);
    data->max_asid = nasids - 1;

    /* Check if we can use ASIDs. */
    data->erratum170 =
        !((c->x86 == 0x10) ||
          ((c->x86 == 0xf) && (c->x86_model >= 0x68) && (c->x86_mask >= 1)));

    printk("AMD SVM: ASIDs %s\n",
           (data->erratum170 ? "disabled." : "enabled."));

    /* Initialize ASID assignment. */
    if ( data->erratum170 )
    {
        /* On erratum #170, VCPUs and physical processors should have the
           same generation. We set both to invalid. */
        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
    }
    else
    {
        data->core_asid_generation = SVM_ASID_FIRST_GENERATION;
    }

    /* ASIDs are assigned round-robin. Start with the first. */
    data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
}
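
/*
 * Usage sketch (illustrative; per the changeset comment, the real call
 * site is start_svm()): each physical CPU is expected to run this once
 * during SVM bring-up, e.g.
 *
 *     svm_asid_init(&current_cpu_data);
 *
 * Running it again later is safe: the 'initialised' check above turns a
 * repeat call into a plain generation bump rather than a reset.
 */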

/*
 * Force VCPU to fetch a new ASID.
 */
void svm_asid_init_vcpu(struct vcpu *v)
{
    struct svm_asid_data *data = svm_asid_core_data();

    /* Trigger assignment of a new ASID. */
    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;

    /*
     * This erratum is bound to a physical processor. The tlb_control
     * field is not changed by the processor. We only set tlb_control
     * on VMCB creation and on a migration.
     */
    if ( data->erratum170 )
    {
        /* Flush TLB on every VMRUN to handle erratum #170. */
        v->arch.hvm_svm.vmcb->tlb_control = 1;
        /* All guests use the same ASID. */
        v->arch.hvm_svm.vmcb->guest_asid = 1;
    }
    else
    {
        /* These fields are handled on VMRUN. */
        v->arch.hvm_svm.vmcb->tlb_control = 0;
        v->arch.hvm_svm.vmcb->guest_asid = 0;
    }
}
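
/*
 * Hypothetical sketch (not code from this file): a scheduling path that
 * notices a VCPU landed on a different physical CPU could re-run the
 * function above so tlb_control matches the new CPU's erratum #170
 * status, e.g.
 *
 *     if ( v->processor != smp_processor_id() )  // VCPU migrated?
 *         svm_asid_init_vcpu(v);
 */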

/*
 * Increment the generation to free up all ASIDs, indirectly causing a
 * TLB flush of all ASIDs on the next VMRUN.
 */
void svm_asid_inc_generation(void)
{
    struct svm_asid_data *data = svm_asid_core_data();

    if ( likely(data->core_asid_generation < SVM_ASID_LAST_GENERATION) )
    {
        /* Move to the next generation. We can't flush the TLB now
         * because that requires a VMRUN, and current might not be an
         * HVM vcpu; the first HVM vcpu that runs after this will pick
         * up ASID 1 and flush the TLBs. */
        data->core_asid_generation++;
        data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
        return;
    }

    /*
     * ASID generations are 64 bit, so generation overflow practically
     * never happens. For safety, we simply disable ASIDs and switch to
     * erratum #170 mode on this core (always flushing the TLB). So
     * correctness is preserved; this core just runs a bit slower.
     */
    if ( !data->erratum170 )
    {
        printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
        data->erratum170 = 1;
        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
    }
}
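
/*
 * Consequence of the overflow path above (illustrative): once erratum170
 * is set here, every subsequent svm_asid_handle_vmrun() on this CPU takes
 * the erratum branch, pinning guest_asid to 1 and flushing the TLB on
 * each VMRUN. Correctness is preserved at the cost of speed, and the
 * path is practically unreachable before ~2^80 cycles anyway.
 */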

/*
 * Called directly before VMRUN. Checks if the VCPU needs a new ASID,
 * assigns one if so, and issues the required TLB flushes.
 */
asmlinkage void svm_asid_handle_vmrun(void)
{
    struct vcpu *v = current;
    struct svm_asid_data *data = svm_asid_core_data();

    /* On erratum #170 systems we must flush the TLB.
     * Generation overruns are handled here, too. */
    if ( data->erratum170 )
    {
        v->arch.hvm_svm.vmcb->guest_asid = 1;
        v->arch.hvm_svm.vmcb->tlb_control = 1;
        return;
    }

    /* Test if VCPU has a valid ASID. */
    if ( likely(v->arch.hvm_svm.asid_generation ==
                data->core_asid_generation) )
    {
        /* May revert a previous TLB-flush command. */
        v->arch.hvm_svm.vmcb->tlb_control = 0;
        return;
    }

    /* If there are no free ASIDs, we need to start a new generation. */
    if ( unlikely(data->next_asid > data->max_asid) )
        svm_asid_inc_generation();

    /* Now there is guaranteed to be a free ASID. */
    v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
    v->arch.hvm_svm.asid_generation = data->core_asid_generation;

    /* When we assign ASID 1, flush all TLB entries. We need to do it
     * here because svm_asid_inc_generation() can be called at any time,
     * but the TLB flush can only happen on VMRUN. */
    if ( v->arch.hvm_svm.vmcb->guest_asid == SVM_ASID_FIRST_GUEST_ASID )
        v->arch.hvm_svm.vmcb->tlb_control = 1;
    else
        v->arch.hvm_svm.vmcb->tlb_control = 0;
}
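
/*
 * Worked trace of the paths above (assuming max_asid == 63):
 *   VMRUN 1: generation mismatch (fresh VCPU) -> gets ASID 1,
 *            tlb_control = 1 (the deferred flush happens here).
 *   VMRUN 2: same VCPU, generation still current -> fast path,
 *            tlb_control = 0.
 *   ...      ASIDs 2..63 go to other mismatching VCPUs, with no flushes.
 *   VMRUN n: next_asid (64) > max_asid (63) -> svm_asid_inc_generation(),
 *            the VCPU gets ASID 1 of the new generation, tlb_control = 1.
 */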

void svm_asid_inv_asid(struct vcpu *v)
{
    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;
}
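
/*
 * Per the interface comment at the top of this file, callers are expected
 * to invoke svm_asid_inv_asid() whenever the guest's virtual address space
 * changes (e.g. on an INVLPG or MOV-TO-{CR3, CR4} intercept), so the next
 * VMRUN assigns a fresh ASID instead of flushing the whole TLB.
 */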

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */