
view xen/arch/ia64/vmx/vmx_vcpu.c @ 16670:2900e4dacaa7

[IA64] xenoprof: don't modify mPSR.pp. VTi case

In the VTi domain case, xenoprof manages mPSR.pp itself, so mPSR.pp
must not be modified elsewhere.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Mon Dec 17 09:56:12 2007 -0700 (2007-12-17)
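
The fix relies on a simple mask-and-merge pattern: whenever DCR or the machine PSR is rewritten, the pp bit is carried over from the current register value instead of being taken from the caller's value, so the setting made by xenoprof survives the update. Below is a minimal standalone sketch of that pattern in C; the bit position and helper name are illustrative only and are not part of this changeset.

    #include <stdio.h>

    /* Illustrative pp bit position; the real IA64_DCR_PP / IA64_PSR_PP masks differ. */
    #define PP_BIT (1UL << 13)

    /* Merge a requested register value with the current one while keeping pp
     * untouched, the same pattern vmx_ia64_set_dcr() and vmx_vcpu_set_psr() use. */
    static unsigned long merge_preserving_pp(unsigned long cur, unsigned long req)
    {
        return (req & ~PP_BIT) | (cur & PP_BIT);
    }

    int main(void)
    {
        unsigned long cur = PP_BIT | 0x3UL;  /* pp currently enabled by xenoprof */
        unsigned long req = 0xf0UL;          /* requested value would clear pp   */
        printf("merged = %#lx\n", merge_preserving_pp(cur, req));  /* pp stays set */
        return 0;
    }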
/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Fred Yang (fred.yang@intel.com)
 *  Arun Sharma (arun.sharma@intel.com)
 *  Shaofan Li (Susue Li) <susie.li@intel.com>
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <xen/sched.h>
#include <public/xen.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/vmx_phy_mode.h>
#include <asm/debugger.h>

/**************************************************************************
 VCPU general register access routines
**************************************************************************/
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <linux/efi.h>

//unsigned long last_guest_rsm = 0x0;

#ifdef VTI_DEBUG
struct guest_psr_bundle{
    unsigned long ip;
    unsigned long psr;
};

struct guest_psr_bundle guest_psr_buf[100];
unsigned long guest_psr_index = 0;
#endif

void
vmx_ia64_set_dcr(VCPU *v)
{
    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    unsigned long dcr_bits = (IA64_DEFAULT_DCR_BITS & ~IA64_DCR_PP) |
        (ia64_getreg(_IA64_REG_CR_DCR) & IA64_DCR_PP);

    // if the guest is running on cpl > 0, set dcr.dm=1
    // if the guest is running on cpl = 0, set dcr.dm=0
    // because the guest OS may ld.s on tr mapped pages.
    if (!(VCPU(v, vpsr) & IA64_PSR_CPL))
        dcr_bits &= ~IA64_DCR_DM;

    ia64_set_dcr(dcr_bits);
}

void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{
    u64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;
    old_psr.val=VCPU(vcpu, vpsr);

    regs=vcpu_regs(vcpu);
    /* We only support a guest with:
     *  vpsr.pk = 0
     *  vpsr.is = 0
     * Otherwise panic.
     */
    if ( value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM )) {
        panic_domain (regs,"Setting unsupported guest psr!");
    }

    /*
     * For the IA64_PSR bits id/da/dd/ss/ed/ia:
     * since these bits become 0 after successful execution of each
     * instruction, we clear them in vPSR here and set them in mIA64_PSR
     * instead.
     */
    VCPU(vcpu,vpsr) = value &
            (~ (IA64_PSR_ID |IA64_PSR_DA | IA64_PSR_DD |
                IA64_PSR_ED | IA64_PSR_IA));

    if ( !old_psr.i && (value & IA64_PSR_I) ) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val=VCPU(vcpu, vpsr);
#ifdef VTI_DEBUG
    {
        struct pt_regs *regs = vcpu_regs(vcpu);
        guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
        guest_psr_buf[guest_psr_index].psr = new_psr.val;
        if (++guest_psr_index >= 100)
            guest_psr_index = 0;
    }
#endif
#if 0
    if (old_psr.i != new_psr.i) {
        if (old_psr.i)
            last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
        else
            last_guest_rsm = 0;
    }
#endif

    /*
     * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
     * except for the following bits:
     *  ic/i/dt/si/rt/mc/it/bn/vm
     */
    mask = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
        IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
        IA64_PSR_VM;

    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    mask |= IA64_PSR_PP;

    regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );

    if (FP_PSR(vcpu) & IA64_PSR_DFH)
        regs->cr_ipsr |= IA64_PSR_DFH;

    if (unlikely(vcpu->domain->debugger_attached)) {
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_SS)
            regs->cr_ipsr |= IA64_PSR_SS;
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_DB)
            regs->cr_ipsr |= IA64_PSR_DB;
    }

    check_mm_mode_switch(vcpu, old_psr, new_psr);
    return ;
}

IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    vpsr.val = VCPU(vcpu, vpsr);

    if(!vpsr.ic)
        VCPU(vcpu,ifs) = regs->cr_ifs;
    regs->cr_ifs = IA64_IFS_V;
    return (IA64_NO_FAULT);
}

IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
{
    u64 rrval;

    if (unlikely(is_reserved_rr_rid(vcpu, val))) {
        gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
        return IA64_RSVDREG_FAULT;
    }

    VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
    switch((u64)(reg>>VRN_SHIFT)) {
    case VRN7:
        if (likely(vcpu == current))
            vmx_switch_rr7(vrrtomrr(vcpu,val),
                           (void *)vcpu->arch.vhpt.hash, pal_vaddr );
        break;
    case VRN4:
        rrval = vrrtomrr(vcpu,val);
        vcpu->arch.metaphysical_saved_rr4 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg,rrval);
        break;
    case VRN0:
        rrval = vrrtomrr(vcpu,val);
        vcpu->arch.metaphysical_saved_rr0 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg,rrval);
        break;
    default:
        if (likely(vcpu == current))
            ia64_set_rr(reg,vrrtomrr(vcpu,val));
        break;
    }

    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/

u64 vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg)
{
    return ((u64)ia64_get_pkr(reg));
}

IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, u64 reg, u64 val)
{
    ia64_set_pkr(reg,val);
    return (IA64_NO_FAULT);
}

#if 0
int tlb_debug=0;
check_entry(u64 va, u64 ps, char *str)
{
    va &= ~ (PSIZE(ps)-1);
    if ( va == 0x2000000002908000UL ||
         va == 0x600000000000C000UL ) {
        stop();
    }
    if (tlb_debug) printk("%s at %lx %lx\n", str, va, 1UL<<ps);
}
#endif

u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
{
    ia64_rr rr,rr1;
    vcpu_get_rr(vcpu,ifa,&rr.rrval);
    rr1.rrval=0;
    rr1.ps=rr.ps;
    rr1.rid=rr.rid;
    return (rr1.rrval);
}

IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    u64 ifs, psr;
    REGS *regs = vcpu_regs(vcpu);
    psr = VCPU(vcpu,ipsr);
    if (psr & IA64_PSR_BN)
        vcpu_bsw1(vcpu);
    else
        vcpu_bsw0(vcpu);
    vmx_vcpu_set_psr(vcpu,psr);
    vmx_ia64_set_dcr(vcpu);
    ifs=VCPU(vcpu,ifs);
    if(ifs>>63)
        regs->cr_ifs = ifs;
    regs->cr_iip = VCPU(vcpu,iip);
    return (IA64_NO_FAULT);
}

#if 0
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, u64 *val)
{
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if ( vpsr.bn ) {
        *val=VCPU(vcpu,vgr[reg-16]);
        // Check NAT bit
        if ( VCPU(vcpu,vnat) & (1UL<<(reg-16)) ) {
            // TODO
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    else {
        *val=VCPU(vcpu,vbgr[reg-16]);
        if ( VCPU(vcpu,vbnat) & (1UL<<reg) ) {
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    return IA64_NO_FAULT;
}

IA64FAULT
vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val,int nat)
{
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if ( vpsr.bn ) {
        VCPU(vcpu,vgr[reg-16]) = val;
        if(nat){
            VCPU(vcpu,vnat) |= ( 1UL<<(reg-16) );
        }else{
            VCPU(vcpu,vbnat) &= ~( 1UL<<(reg-16) );
        }
    }
    else {
        VCPU(vcpu,vbgr[reg-16]) = val;
        if(nat){
            VCPU(vcpu,vnat) |= ( 1UL<<(reg) );
        }else{
            VCPU(vcpu,vbnat) &= ~( 1UL<<(reg) );
        }
    }
    return IA64_NO_FAULT;
}

#endif
#if 0
IA64FAULT
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, u64 * val)
{
    REGS *regs=vcpu_regs(vcpu);
    int nat;
    //TODO, Eddie
    if (!regs) return 0;
#if 0
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_get_bgr(vcpu,reg,val);
    }
#endif
    getreg(reg,val,&nat,regs);    // FIXME: handle NATs later
    if(nat){
        return IA64_FAULT;
    }
    return IA64_NO_FAULT;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise

IA64FAULT
vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
{
    REGS *regs = vcpu_regs(vcpu);
    long sof = (regs->cr_ifs) & 0x7f;
    //TODO Eddie

    if (!regs) return IA64_ILLOP_FAULT;
    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
#if 0
    if ( reg >= 16 && reg < 32 ) {
        return vmx_vcpu_set_bgr(vcpu,reg, value, nat);
    }
#endif
    setreg(reg,value,nat,regs);
    return IA64_NO_FAULT;
}

#endif

/*
 * VPSR can't keep track of the guest PSR bits listed below.
 * This function reconstructs the full guest PSR.
 */
u64 vmx_vcpu_get_psr(VCPU *vcpu)
{
    u64 mask;
    REGS *regs = vcpu_regs(vcpu);
    mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
           IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
    return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}

IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, u64 imm24)
{
    u64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr &= (~imm24);
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, u64 imm24)
{
    u64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr |= imm24;
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}

IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, u64 val)
{
    val = (val & MASK(0, 32)) | (vmx_vcpu_get_psr(vcpu) & MASK(32, 32));
    vmx_vcpu_set_psr(vcpu, val);
    return IA64_NO_FAULT;
}

IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
    VCPU(vcpu,tpr)=val;
    vcpu->arch.irq_new_condition = 1;
    return IA64_NO_FAULT;
}