debuggers.hg: xen/arch/x86/hvm/svm/asid.c @ 0:7d21f7218375
Changeset description: "Exact replica of unstable on 051908 + README-this"
Author: Mukesh Rathor
Date:   Mon May 19 15:34:57 2008 -0700

/*
 * asid.c: handling ASIDs in SVM.
 * Copyright (c) 2007, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/perfc.h>
#include <asm/hvm/svm/asid.h>

/*
 * This is the interface to SVM's ASID management. ASIDs partition the
 * physical TLB for SVM. In the current implementation ASIDs are introduced
 * to reduce the number of TLB flushes. Each time the guest's virtual
 * address space changes (e.g. due to an INVLPG or MOV-TO-{CR3, CR4}
 * operation), a new ASID is assigned instead of flushing the TLB. This
 * reduces the number of TLB flushes to at most 1/#ASIDs (currently 1/64).
 * The biggest advantage is that hot parts of the hypervisor's code and data
 * remain in the TLB.
 *
 * Sketch of the Implementation:
 *
 * ASIDs are a CPU-local resource. As preemption of ASIDs is not possible,
 * ASIDs are assigned in a round-robin scheme. To minimize the overhead of
 * ASID invalidation, at the time of a TLB flush, ASIDs are tagged with a
 * 64-bit generation. Only on a generation overflow does the code need to
 * invalidate all ASID information stored in the VCPUs that run on the
 * specific physical processor. This overflow appears after about 2^80
 * host processor cycles, so we do not optimize this case, but simply disable
 * ASID usage to retain correctness.
 */
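
/*
 * Illustrative example (editorial note; the concrete numbers are assumed,
 * not taken from this file): with 64 hardware ASIDs, guest ASIDs 1..63 are
 * usable. Within one generation the VCPUs on a core are handed ASIDs
 * 1, 2, ..., 63 round-robin. The next request finds next_asid > max_asid,
 * so svm_asid_inc_generation() bumps the core generation and restarts at
 * ASID 1; the VCPU that is assigned ASID 1 gets tlb_control set, so stale
 * translations of the previous generation are flushed on that VMRUN. Any
 * VCPU whose stored generation no longer matches the core's simply fetches
 * a fresh ASID on its next VMRUN. As a rough estimate, exhausting the
 * 64-bit generation counter takes about 2^64 generations * 2^6 ASIDs *
 * ~2^10 cycles per VMRUN, which is where the ~2^80 cycles above come from.
 */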

/* Usable guest ASIDs are [ 1 .. get_max_asid() ). */
#define SVM_ASID_FIRST_GUEST_ASID    1

#define SVM_ASID_FIRST_GENERATION    0

/* Triggers the flush of all generations on all VCPUs. */
#define SVM_ASID_LAST_GENERATION     (0xfffffffffffffffd)

/* Triggers assignment of a new ASID to a VCPU. */
#define SVM_ASID_INVALID_GENERATION  (SVM_ASID_LAST_GENERATION + 1)

/* Per-CPU ASID management. */
struct svm_asid_data {
    u64 core_asid_generation;   /* Generation currently active on this core. */
    u32 next_asid;              /* Next ASID to hand out, round-robin. */
    u32 max_asid;               /* Highest usable guest ASID (from CPUID). */
    u32 erratum170:1;           /* ASIDs disabled; flush TLB on every VMRUN. */
};

static DEFINE_PER_CPU(struct svm_asid_data, svm_asid_data);

/*
 * Get handle to CPU-local ASID management data.
 */
static struct svm_asid_data *svm_asid_core_data(void)
{
    return &get_cpu_var(svm_asid_data);
}

/*
 * Init ASID management for the current physical CPU.
 */
void svm_asid_init(struct cpuinfo_x86 *c)
{
    int nasids;
    struct svm_asid_data *data = svm_asid_core_data();

    /* Find the number of ASIDs (CPUID Fn8000_000A, EBX). */
    nasids = cpuid_ebx(0x8000000A);
    data->max_asid = nasids - 1;

    /* Check if we can use ASIDs: family 0x10, or family 0xf with
       model >= 0x68 and stepping >= 1, is treated as not affected by
       erratum #170. */
    data->erratum170 =
        !((c->x86 == 0x10) ||
          ((c->x86 == 0xf) && (c->x86_model >= 0x68) && (c->x86_mask >= 1)));

    printk("AMD SVM: ASIDs %s\n",
           (data->erratum170 ? "disabled." : "enabled."));

    /* Initialize ASID assignment. */
    if ( data->erratum170 )
    {
        /* On erratum #170, VCPUs and physical processors should have the
           same generation. We set both to invalid. */
        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
    }
    else
    {
        data->core_asid_generation = SVM_ASID_FIRST_GENERATION;
    }

    /* ASIDs are assigned round-robin. Start with the first. */
    data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
}

/*
 * Force VCPU to fetch a new ASID.
 */
void svm_asid_init_vcpu(struct vcpu *v)
{
    struct svm_asid_data *data = svm_asid_core_data();

    /* Trigger assignment of a new ASID. */
    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;

    /*
     * This erratum is bound to a physical processor. The tlb_control
     * field is not changed by the processor. We only set tlb_control
     * on VMCB creation and on a migration.
     */
    if ( data->erratum170 )
    {
        /* Flush TLB on every VMRUN to handle erratum #170. */
        v->arch.hvm_svm.vmcb->tlb_control = 1;
        /* All guests use the same ASID. */
        v->arch.hvm_svm.vmcb->guest_asid = 1;
    }
    else
    {
        /* These fields are handled on VMRUN. */
        v->arch.hvm_svm.vmcb->tlb_control = 0;
        v->arch.hvm_svm.vmcb->guest_asid = 0;
    }
}
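
/*
 * Editorial note, inferred from the code above: on cores without the
 * erratum, guest_asid is left at 0 here, and ASID 0 is reserved for the
 * host. Since asid_generation is set to SVM_ASID_INVALID_GENERATION,
 * svm_asid_handle_vmrun() below will always detect a generation mismatch
 * and assign a proper guest ASID before the first VMRUN of this VCPU.
 */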

/*
 * Increase the generation to make free ASIDs available, and indirectly
 * cause a TLB flush of all ASIDs on the next VMRUN.
 */
void svm_asid_inc_generation(void)
{
    struct svm_asid_data *data = svm_asid_core_data();

    if ( likely(data->core_asid_generation < SVM_ASID_LAST_GENERATION) )
    {
        /* Move to the next generation. We can't flush the TLB now
         * because a VMRUN is needed to do that, and current might not
         * be an HVM vcpu, but the first HVM vcpu that runs after this
         * will pick up ASID 1 and flush the TLBs. */
        data->core_asid_generation++;
        data->next_asid = SVM_ASID_FIRST_GUEST_ASID;
        return;
    }

    /*
     * ASID generations are 64 bit, so generation overflow should never
     * happen in practice. For safety, we simply disable ASIDs and switch
     * to erratum #170 mode on this core (always flushing the TLB).
     * Correctness is preserved; the core just runs a bit slower.
     */
    if ( !data->erratum170 )
    {
        printk("AMD SVM: ASID generation overrun. Disabling ASIDs.\n");
        data->erratum170 = 1;
        data->core_asid_generation = SVM_ASID_INVALID_GENERATION;
    }
}

/*
 * Called directly before VMRUN. Checks if the VCPU needs a new ASID,
 * assigns it, and, if required, issues the necessary TLB flushes.
 */
asmlinkage void svm_asid_handle_vmrun(void)
{
    struct vcpu *v = current;
    struct svm_asid_data *data = svm_asid_core_data();

    /* On erratum #170 systems we must flush the TLB.
     * Generation overruns are handled here, too. */
    if ( data->erratum170 )
    {
        v->arch.hvm_svm.vmcb->guest_asid = 1;
        v->arch.hvm_svm.vmcb->tlb_control = 1;
        return;
    }

    /* Test if the VCPU has a valid ASID. */
    if ( likely(v->arch.hvm_svm.asid_generation ==
                data->core_asid_generation) )
    {
        /* May revert a previous TLB-flush command. */
        v->arch.hvm_svm.vmcb->tlb_control = 0;
        return;
    }

    /* If there are no free ASIDs, we need to move to a new generation. */
    if ( unlikely(data->next_asid > data->max_asid) )
        svm_asid_inc_generation();

    /* A free ASID is now guaranteed to be available. */
    v->arch.hvm_svm.vmcb->guest_asid = data->next_asid++;
    v->arch.hvm_svm.asid_generation = data->core_asid_generation;

    /* When we assign ASID 1, flush all TLB entries. We need to do it
     * here because svm_asid_inc_generation() can be called at any time,
     * but the TLB flush can only happen on VMRUN. */
    if ( v->arch.hvm_svm.vmcb->guest_asid == SVM_ASID_FIRST_GUEST_ASID )
        v->arch.hvm_svm.vmcb->tlb_control = 1;
    else
        v->arch.hvm_svm.vmcb->tlb_control = 0;
}

/*
 * Invalidate the VCPU's current ASID so that a new one is fetched on the
 * next VMRUN.
 */
void svm_asid_inv_asid(struct vcpu *v)
{
    v->arch.hvm_svm.asid_generation = SVM_ASID_INVALID_GENERATION;
}
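
/*
 * Editorial sketch of the intended use of this interface, based on the
 * comments in this file (the actual call sites live elsewhere in the SVM
 * code and are not shown here):
 *
 *   svm_asid_init(c)         - once per physical CPU when SVM is brought up;
 *   svm_asid_init_vcpu(v)    - on VMCB creation and when a VCPU migrates to
 *                              another physical CPU;
 *   svm_asid_inv_asid(v)     - whenever the guest's virtual address space
 *                              changes (e.g. INVLPG, MOV to CR3/CR4);
 *   svm_asid_handle_vmrun()  - directly before every VMRUN.
 */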

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */