debuggers.hg

view xen/arch/ia64/vmx/vmx_vcpu.c @ 0:7d21f7218375

Exact replica of unstable on 051908 + README-this
author Mukesh Rathor
date Mon May 19 15:34:57 2008 -0700 (2008-05-19)
parents
children 5c0bf00e371d

/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Fred Yang (fred.yang@intel.com)
 *  Arun Sharma (arun.sharma@intel.com)
 *  Shaofan Li (Susue Li) <susie.li@intel.com>
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <xen/sched.h>
#include <public/xen.h>
#include <asm/ia64_int.h>
#include <asm/vmx_vcpu.h>
#include <asm/regionreg.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/regs.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
#include <asm/vmx_phy_mode.h>
#include <asm/debugger.h>

/**************************************************************************
 VCPU general register access routines
**************************************************************************/
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
#include <linux/efi.h>
//unsigned long last_guest_rsm = 0x0;

#ifdef VTI_DEBUG
struct guest_psr_bundle{
    unsigned long ip;
    unsigned long psr;
};

struct guest_psr_bundle guest_psr_buf[100];
unsigned long guest_psr_index = 0;
#endif
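
/* Load the machine DCR (default control register) on behalf of vcpu v. */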
void
vmx_ia64_set_dcr(VCPU *v)
{
    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    unsigned long dcr_bits = (IA64_DEFAULT_DCR_BITS & ~IA64_DCR_PP) |
        (ia64_getreg(_IA64_REG_CR_DCR) & IA64_DCR_PP);

    // if guest is running on cpl > 0, set dcr.dm=1
    // if guest is running on cpl = 0, set dcr.dm=0
    // because the guest OS may ld.s on tr mapped pages.
    if (!(VCPU(v, vpsr) & IA64_PSR_CPL))
        dcr_bits &= ~IA64_DCR_DM;

    ia64_set_dcr(dcr_bits);
}
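
/*
 * Load a new value into the guest PSR: update vpsr (minus the bits the
 * hardware clears after every instruction), propagate the remaining bits
 * to the machine cr.ipsr, and note when interrupt delivery becomes
 * possible again (vpsr.i 0->1).
 */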
void
vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value)
{
    u64 mask;
    REGS *regs;
    IA64_PSR old_psr, new_psr;
    old_psr.val = VCPU(vcpu, vpsr);

    regs = vcpu_regs(vcpu);
    /* We only support a guest with:
     *  vpsr.pk = 0
     *  vpsr.is = 0
     * Otherwise panic.
     */
    if (value & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) {
        panic_domain(regs, "Setting unsupported guest psr!");
    }

    /*
     * For the IA64_PSR bits id/da/dd/ss/ed/ia:
     * since these bits become 0 after each successful instruction
     * execution, we set them in mIA64_PSR rather than keeping them in vpsr.
     */
    VCPU(vcpu, vpsr) = value &
            (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
               IA64_PSR_ED | IA64_PSR_IA));

    if (!old_psr.i && (value & IA64_PSR_I)) {
        // vpsr.i 0->1
        vcpu->arch.irq_new_condition = 1;
    }
    new_psr.val = VCPU(vcpu, vpsr);
#ifdef VTI_DEBUG
    guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
    guest_psr_buf[guest_psr_index].psr = new_psr.val;
    if (++guest_psr_index >= 100)
        guest_psr_index = 0;
#endif
#if 0
    if (old_psr.i != new_psr.i) {
        if (old_psr.i)
            last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
        else
            last_guest_rsm = 0;
    }
#endif

    /*
     * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
     * except for the following bits:
     *  ic/i/dt/si/rt/mc/it/bn/vm
     */
    mask = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
           IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
           IA64_PSR_VM;

    /* xenoprof:
     * don't change psr.pp.
     * It is manipulated by xenoprof.
     */
    mask |= IA64_PSR_PP;

    regs->cr_ipsr = (regs->cr_ipsr & mask) | (value & (~mask));

    if (FP_PSR(vcpu) & IA64_PSR_DFH)
        regs->cr_ipsr |= IA64_PSR_DFH;

    if (unlikely(vcpu->domain->debugger_attached)) {
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_SS)
            regs->cr_ipsr |= IA64_PSR_SS;
        if (vcpu->domain->arch.debug_flags & XEN_IA64_DEBUG_FORCE_DB)
            regs->cr_ipsr |= IA64_PSR_DB;
    }

    check_mm_mode_switch(vcpu, old_psr, new_psr);
    return;
}
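
/*
 * Emulate the cover instruction: if interruption collection is off, save
 * the current cr.ifs into the virtual IFS, then mark cr.ifs as a valid,
 * zero-sized current frame.
 */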
IA64FAULT vmx_vcpu_cover(VCPU *vcpu)
{
    REGS *regs = vcpu_regs(vcpu);
    IA64_PSR vpsr;
    vpsr.val = VCPU(vcpu, vpsr);

    if (!vpsr.ic)
        VCPU(vcpu, ifs) = regs->cr_ifs;
    regs->cr_ifs = IA64_IFS_V;
    return (IA64_NO_FAULT);
}
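
/*
 * Emulate a write to a region register: reject reserved RID values, record
 * the virtual RR, and install the corresponding machine RR (special-casing
 * rr7 and the metaphysical regions rr0/rr4).
 */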
IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, u64 reg, u64 val)
{
    u64 rrval;

    if (unlikely(is_reserved_rr_rid(vcpu, val))) {
        gdprintk(XENLOG_DEBUG, "use of invalid rrval %lx\n", val);
        return IA64_RSVDREG_FAULT;
    }

    VMX(vcpu, vrr[reg >> VRN_SHIFT]) = val;
    switch ((u64)(reg >> VRN_SHIFT)) {
    case VRN7:
        if (likely(vcpu == current))
            vmx_switch_rr7(vrrtomrr(vcpu, val), (void *)vcpu->arch.vhpt.hash,
                           pal_vaddr, vcpu->arch.privregs);
        break;
    case VRN4:
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr4 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    case VRN0:
        rrval = vrrtomrr(vcpu, val);
        vcpu->arch.metaphysical_saved_rr0 = rrval;
        if (is_virtual_mode(vcpu) && likely(vcpu == current))
            ia64_set_rr(reg, rrval);
        break;
    default:
        if (likely(vcpu == current))
            ia64_set_rr(reg, vrrtomrr(vcpu, val));
        break;
    }

    return (IA64_NO_FAULT);
}

/**************************************************************************
 VCPU protection key register access routines
**************************************************************************/
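
/* Guest protection keys are passed straight through to the machine PKRs. */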
u64 vmx_vcpu_get_pkr(VCPU *vcpu, u64 reg)
{
    return ((u64)ia64_get_pkr(reg));
}

IA64FAULT vmx_vcpu_set_pkr(VCPU *vcpu, u64 reg, u64 val)
{
    ia64_set_pkr(reg, val);
    return (IA64_NO_FAULT);
}

#if 0
int tlb_debug = 0;
check_entry(u64 va, u64 ps, char *str)
{
    va &= ~(PSIZE(ps) - 1);
    if (va == 0x2000000002908000UL ||
        va == 0x600000000000C000UL) {
        stop();
    }
    if (tlb_debug) printk("%s at %lx %lx\n", str, va, 1UL << ps);
}
#endif
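
/*
 * Build the ITIR value reported on a fault at address ifa: only the page
 * size and RID of the covering region register are exposed.
 */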
u64 vmx_vcpu_get_itir_on_fault(VCPU *vcpu, u64 ifa)
{
    ia64_rr rr, rr1;
    vcpu_get_rr(vcpu, ifa, &rr.rrval);
    rr1.rrval = 0;
    rr1.ps = rr.ps;
    rr1.rid = rr.rid;
    return (rr1.rrval);
}
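
/*
 * Emulate rfi: switch to the register bank selected by IPSR.bn, load the
 * guest PSR from IPSR, reload the DCR, and restore IFS (if valid) and IIP.
 */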
IA64FAULT vmx_vcpu_rfi(VCPU *vcpu)
{
    // TODO: Only allowed for current vcpu
    u64 ifs, psr;
    REGS *regs = vcpu_regs(vcpu);
    psr = VCPU(vcpu, ipsr);
    if (psr & IA64_PSR_BN)
        vcpu_bsw1(vcpu);
    else
        vcpu_bsw0(vcpu);
    vmx_vcpu_set_psr(vcpu, psr);
    vmx_ia64_set_dcr(vcpu);
    ifs = VCPU(vcpu, ifs);
    if (ifs >> 63)
        regs->cr_ifs = ifs;
    regs->cr_iip = VCPU(vcpu, iip);
    return (IA64_NO_FAULT);
}

#if 0
IA64FAULT
vmx_vcpu_get_bgr(VCPU *vcpu, unsigned int reg, u64 *val)
{
    IA64_PSR vpsr;

    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.bn) {
        *val = VCPU(vcpu, vgr[reg - 16]);
        // Check NAT bit
        if (VCPU(vcpu, vnat) & (1UL << (reg - 16))) {
            // TODO
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    else {
        *val = VCPU(vcpu, vbgr[reg - 16]);
        if (VCPU(vcpu, vbnat) & (1UL << reg)) {
            //panic ("NAT consumption fault\n");
            return IA64_FAULT;
        }
    }
    return IA64_NO_FAULT;
}

IA64FAULT
vmx_vcpu_set_bgr(VCPU *vcpu, unsigned int reg, u64 val, int nat)
{
    IA64_PSR vpsr;
    vpsr.val = vmx_vcpu_get_psr(vcpu);
    if (vpsr.bn) {
        VCPU(vcpu, vgr[reg - 16]) = val;
        if (nat) {
            VCPU(vcpu, vnat) |= (1UL << (reg - 16));
        } else {
            VCPU(vcpu, vbnat) &= ~(1UL << (reg - 16));
        }
    }
    else {
        VCPU(vcpu, vbgr[reg - 16]) = val;
        if (nat) {
            VCPU(vcpu, vnat) |= (1UL << (reg));
        } else {
            VCPU(vcpu, vbnat) &= ~(1UL << (reg));
        }
    }
    return IA64_NO_FAULT;
}

#endif
#if 0
IA64FAULT
vmx_vcpu_get_gr(VCPU *vcpu, unsigned reg, u64 *val)
{
    REGS *regs = vcpu_regs(vcpu);
    int nat;
    //TODO, Eddie
    if (!regs) return 0;
#if 0
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_get_bgr(vcpu, reg, val);
    }
#endif
    getreg(reg, val, &nat, regs);    // FIXME: handle NATs later
    if (nat) {
        return IA64_FAULT;
    }
    return IA64_NO_FAULT;
}

// returns:
//   IA64_ILLOP_FAULT if the register would cause an Illegal Operation fault
//   IA64_NO_FAULT otherwise

IA64FAULT
vmx_vcpu_set_gr(VCPU *vcpu, unsigned reg, u64 value, int nat)
{
    REGS *regs = vcpu_regs(vcpu);
    long sof = (regs->cr_ifs) & 0x7f;
    //TODO Eddie

    if (!regs) return IA64_ILLOP_FAULT;
    if (reg >= sof + 32) return IA64_ILLOP_FAULT;
#if 0
    if (reg >= 16 && reg < 32) {
        return vmx_vcpu_set_bgr(vcpu, reg, value, nat);
    }
#endif
    setreg(reg, value, nat, regs);
    return IA64_NO_FAULT;
}

#endif

/*
 * VPSR cannot track the guest PSR bits listed in 'mask' below; they live in
 * the machine cr.ipsr.  This function reassembles the complete guest PSR.
 */
u64 vmx_vcpu_get_psr(VCPU *vcpu)
{
    u64 mask;
    REGS *regs = vcpu_regs(vcpu);
    mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
           IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
    return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
}
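
/* Emulate rsm: clear the guest PSR bits selected by imm24. */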
IA64FAULT vmx_vcpu_reset_psr_sm(VCPU *vcpu, u64 imm24)
{
    u64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr &= (~imm24);
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}
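
/* Emulate ssm: set the guest PSR bits selected by imm24. */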
IA64FAULT vmx_vcpu_set_psr_sm(VCPU *vcpu, u64 imm24)
{
    u64 vpsr;
    vpsr = vmx_vcpu_get_psr(vcpu);
    vpsr |= imm24;
    vmx_vcpu_set_psr(vcpu, vpsr);
    return IA64_NO_FAULT;
}
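
/* Emulate a move to psr.l: only the low 32 bits of the guest PSR are replaced. */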
IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, u64 val)
{
    val = (val & MASK(0, 32)) | (vmx_vcpu_get_psr(vcpu) & MASK(32, 32));
    vmx_vcpu_set_psr(vcpu, val);
    return IA64_NO_FAULT;
}
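
/* Write the virtual TPR and request re-evaluation of pending interrupts. */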
IA64FAULT
vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
{
    VCPU(vcpu, tpr) = val;
    vcpu->arch.irq_new_condition = 1;
    return IA64_NO_FAULT;
}