debuggers.hg

view xen/arch/ia64/privop.c @ 4619:5b9e241131fb

bitkeeper revision 1.1329 (42661815u5WPq8d5f4_axi2xWheybA)

Merge firebug.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
into firebug.cl.cam.ac.uk:/local/scratch/kaf24/xeno-unstable-ia64.bk
author kaf24@firebug.cl.cam.ac.uk
date Wed Apr 20 08:51:33 2005 +0000 (2005-04-20)
parents a01199a95070 58efb3448933
children 674bf85fff9a f71cef640151 ddd290cc8f0d
line source
1 /*
2 * Privileged operation "API" handling functions.
3 *
4 * Copyright (C) 2004 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <asm/privop.h>
10 #include <asm/vcpu.h>
11 #include <asm/processor.h>
12 #include <asm/delay.h> // Debug only
13 //#include <debug.h>
15 long priv_verbose=0;
17 /**************************************************************************
18 Hypercall bundle creation
19 **************************************************************************/
22 void build_hypercall_bundle(UINT64 *imva, UINT64 brkimm, UINT64 hypnum, UINT64 ret)
23 {
24 INST64_A5 slot0;
25 INST64_I19 slot1;
26 INST64_B4 slot2;
27 IA64_BUNDLE bundle;
29 // slot0: mov r2 = hypnum (low 21 bits)
30 slot0.inst = 0;
31 slot0.qp = 0; slot0.r1 = 2; slot0.r3 = 0; slot0.major = 0x9;
32 slot0.imm7b = hypnum; slot0.imm9d = hypnum >> 7;
33 slot0.imm5c = hypnum >> 16; slot0.s = 0;
34 // slot1: break brkimm
35 slot1.inst = 0;
36 slot1.qp = 0; slot1.x6 = 0; slot1.x3 = 0; slot1.major = 0x0;
37 slot1.imm20 = brkimm; slot1.i = brkimm >> 20;
38 // if ret slot2: br.ret.sptk.many rp
39 // else slot2: br.cond.sptk.many rp
40 slot2.inst = 0; slot2.qp = 0; slot2.p = 1; slot2.b2 = 0;
41 slot2.wh = 0; slot2.d = 0; slot2.major = 0x0;
42 if (ret) {
43 slot2.btype = 4; slot2.x6 = 0x21;
44 }
45 else {
46 slot2.btype = 0; slot2.x6 = 0x20;
47 }
49 bundle.i64[0] = 0; bundle.i64[1] = 0;
50 bundle.template = 0x11;
51 bundle.slot0 = slot0.inst; bundle.slot2 = slot2.inst;
52 bundle.slot1a = slot1.inst; bundle.slot1b = slot1.inst >> 18;
54 *imva++ = bundle.i64[0]; *imva = bundle.i64[1];
55 }
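/*
 * Example (added for illustration, not part of the original file): a sketch
 * of how a caller might use build_hypercall_bundle() to emit one hypercall
 * stub.  The stub address, break immediate and hypercall number below are
 * made-up values, and the cache-flush intrinsics are assumed to be the
 * usual ones from the ia64 headers.  The function emits a single 16-byte
 * MIB bundle -- "mov r2 = hypnum; break brkimm; br.ret" (or br.cond when
 * ret is 0) -- so the destination must be 16-byte aligned and made visible
 * to the instruction stream before the guest runs it.
 */
#if 0	/* illustration only */
static void example_emit_hypercall_stub(UINT64 *stub_va)
{
	build_hypercall_bundle(stub_va, 0x1000 /* brkimm */, 2 /* hypnum */, 1 /* ret */);
	ia64_fc(stub_va);	/* flush the line holding the new bundle */
	ia64_sync_i();
	ia64_srlz_i();
}
#endif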
57 /**************************************************************************
58 Privileged operation emulation routines
59 **************************************************************************/
61 IA64FAULT priv_rfi(VCPU *vcpu, INST64 inst)
62 {
63 return vcpu_rfi(vcpu);
64 }
66 IA64FAULT priv_bsw0(VCPU *vcpu, INST64 inst)
67 {
68 return vcpu_bsw0(vcpu);
69 }
71 IA64FAULT priv_bsw1(VCPU *vcpu, INST64 inst)
72 {
73 return vcpu_bsw1(vcpu);
74 }
76 IA64FAULT priv_cover(VCPU *vcpu, INST64 inst)
77 {
78 return vcpu_cover(vcpu);
79 }
81 IA64FAULT priv_ptc_l(VCPU *vcpu, INST64 inst)
82 {
83 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
84 UINT64 addr_range;
86 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
87 return vcpu_ptc_l(vcpu,vadr,addr_range);
88 }
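/*
 * Worked example (added for clarity, not in the original file): the ptc/ptr
 * handlers above and below all decode the purge size from bits 7:2 of the
 * r2 operand, as the architected instructions do, and turn it into a byte
 * count.  A guest purging a 16KB translation passes ps = 14 there:
 *
 *	r2 = 14 << 2;				// 0x38
 *	addr_range = 1 << ((r2 & 0xfc) >> 2);	// 1 << 14 = 16KB
 *
 * The values are illustrative; the handlers pass through whatever size the
 * guest supplies.
 */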
90 IA64FAULT priv_ptc_e(VCPU *vcpu, INST64 inst)
91 {
92 UINT src = inst.M28.r3;
94 // NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
95 if (src > 63) return(vcpu_fc(vcpu,vcpu_get_gr(vcpu,src - 64)));
96 return vcpu_ptc_e(vcpu,vcpu_get_gr(vcpu,src));
97 }
99 IA64FAULT priv_ptc_g(VCPU *vcpu, INST64 inst)
100 {
101 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
102 UINT64 addr_range;
104 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
105 return vcpu_ptc_g(vcpu,vadr,addr_range);
106 }
108 IA64FAULT priv_ptc_ga(VCPU *vcpu, INST64 inst)
109 {
110 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
111 UINT64 addr_range;
113 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
114 return vcpu_ptc_ga(vcpu,vadr,addr_range);
115 }
117 IA64FAULT priv_ptr_d(VCPU *vcpu, INST64 inst)
118 {
119 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
120 UINT64 addr_range;
122 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
123 return vcpu_ptr_d(vcpu,vadr,addr_range);
124 }
126 IA64FAULT priv_ptr_i(VCPU *vcpu, INST64 inst)
127 {
128 UINT64 vadr = vcpu_get_gr(vcpu,inst.M45.r3);
129 UINT64 addr_range;
131 addr_range = 1 << ((vcpu_get_gr(vcpu,inst.M45.r2) & 0xfc) >> 2);
132 return vcpu_ptr_i(vcpu,vadr,addr_range);
133 }
135 IA64FAULT priv_tpa(VCPU *vcpu, INST64 inst)
136 {
137 UINT64 padr;
138 UINT fault;
139 UINT src = inst.M46.r3;
141 // NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
142 if (src > 63)
143 fault = vcpu_ttag(vcpu,vcpu_get_gr(vcpu,src-64),&padr);
144 else fault = vcpu_tpa(vcpu,vcpu_get_gr(vcpu,src),&padr);
145 if (fault == IA64_NO_FAULT)
146 return vcpu_set_gr(vcpu, inst.M46.r1, padr);
147 else return fault;
148 }
150 IA64FAULT priv_tak(VCPU *vcpu, INST64 inst)
151 {
152 UINT64 key;
153 UINT fault;
154 UINT src = inst.M46.r3;
156 // NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
157 if (src > 63)
158 fault = vcpu_thash(vcpu,vcpu_get_gr(vcpu,src-64),&key);
159 else fault = vcpu_tak(vcpu,vcpu_get_gr(vcpu,src),&key);
160 if (fault == IA64_NO_FAULT)
161 return vcpu_set_gr(vcpu, inst.M46.r1, key);
162 else return fault;
163 }
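/*
 * Note (added for clarity, not in the original file) on the "gr > 63"
 * checks above: the privification pass that prepares a paravirtualized
 * guest rewrites selected unprivileged instructions (fc, ttag, thash, and
 * the mov-from-ar.kr/cpuid forms handled later) into privileged encodings
 * whose register field is offset by 64, so that they fault into this file.
 * The emulator recognizes the out-of-range register number and undoes the
 * offset, e.g. for the privified ttag handled by priv_tpa():
 *
 *	if (src > 63)	// register field 64..127: really "ttag rx = r(src-64)"
 *		fault = vcpu_ttag(vcpu, vcpu_get_gr(vcpu, src - 64), &padr);
 *
 * so one fault path serves both the real privileged instruction and its
 * privified, normally unprivileged counterpart.
 */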
165 /************************************
166 * Insert translation register/cache
167 ************************************/
169 IA64FAULT priv_itr_d(VCPU *vcpu, INST64 inst)
170 {
171 UINT64 fault, itir, ifa, pte, slot;
173 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
174 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
175 return(IA64_ILLOP_FAULT);
176 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
177 return(IA64_ILLOP_FAULT);
178 pte = vcpu_get_gr(vcpu,inst.M42.r2);
179 slot = vcpu_get_gr(vcpu,inst.M42.r3);
181 return (vcpu_itr_d(vcpu,slot,pte,itir,ifa));
182 }
184 IA64FAULT priv_itr_i(VCPU *vcpu, INST64 inst)
185 {
186 UINT64 fault, itir, ifa, pte, slot;
188 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
189 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
190 return(IA64_ILLOP_FAULT);
191 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
192 return(IA64_ILLOP_FAULT);
193 pte = vcpu_get_gr(vcpu,inst.M42.r2);
194 slot = vcpu_get_gr(vcpu,inst.M42.r3);
196 return (vcpu_itr_i(vcpu,slot,pte,itir,ifa));
197 }
199 IA64FAULT priv_itc_d(VCPU *vcpu, INST64 inst)
200 {
201 UINT64 fault, itir, ifa, pte;
203 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
204 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
205 return(IA64_ILLOP_FAULT);
206 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
207 return(IA64_ILLOP_FAULT);
208 pte = vcpu_get_gr(vcpu,inst.M41.r2);
210 return (vcpu_itc_d(vcpu,pte,itir,ifa));
211 }
213 IA64FAULT priv_itc_i(VCPU *vcpu, INST64 inst)
214 {
215 UINT64 fault, itir, ifa, pte;
217 //if (!vcpu_get_psr_ic(vcpu)) return(IA64_ILLOP_FAULT);
218 if ((fault = vcpu_get_itir(vcpu,&itir)) != IA64_NO_FAULT)
219 return(IA64_ILLOP_FAULT);
220 if ((fault = vcpu_get_ifa(vcpu,&ifa)) != IA64_NO_FAULT)
221 return(IA64_ILLOP_FAULT);
222 pte = vcpu_get_gr(vcpu,inst.M41.r2);
224 return (vcpu_itc_i(vcpu,pte,itir,ifa));
225 }
227 /*************************************
228 * Moves to semi-privileged registers
229 *************************************/
231 IA64FAULT priv_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
232 {
233 // I27 and M30 are identical for these fields
234 UINT64 ar3 = inst.M30.ar3;
235 UINT64 imm = vcpu_get_gr(vcpu,inst.M30.imm);
236 return (vcpu_set_ar(vcpu,ar3,imm));
237 }
239 IA64FAULT priv_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
240 {
241 // I26 and M29 are identical for these fields
242 UINT64 ar3 = inst.M29.ar3;
244 if (inst.M29.r2 > 63 && inst.M29.ar3 < 8) { // privified mov from kr
245 UINT64 val;
246 if (vcpu_get_ar(vcpu,ar3,&val) != IA64_ILLOP_FAULT)
247 return vcpu_set_gr(vcpu, inst.M29.r2-64, val);
248 else return IA64_ILLOP_FAULT;
249 }
250 else {
251 UINT64 r2 = vcpu_get_gr(vcpu,inst.M29.r2);
252 return (vcpu_set_ar(vcpu,ar3,r2));
253 }
254 }
256 /********************************
257 * Moves to privileged registers
258 ********************************/
260 IA64FAULT priv_mov_to_pkr(VCPU *vcpu, INST64 inst)
261 {
262 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
263 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
264 return (vcpu_set_pkr(vcpu,r3,r2));
265 }
267 IA64FAULT priv_mov_to_rr(VCPU *vcpu, INST64 inst)
268 {
269 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
270 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
271 return (vcpu_set_rr(vcpu,r3,r2));
272 }
274 IA64FAULT priv_mov_to_dbr(VCPU *vcpu, INST64 inst)
275 {
276 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
277 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
278 return (vcpu_set_dbr(vcpu,r3,r2));
279 }
281 IA64FAULT priv_mov_to_ibr(VCPU *vcpu, INST64 inst)
282 {
283 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
284 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
285 return (vcpu_set_ibr(vcpu,r3,r2));
286 }
288 IA64FAULT priv_mov_to_pmc(VCPU *vcpu, INST64 inst)
289 {
290 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
291 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
292 return (vcpu_set_pmc(vcpu,r3,r2));
293 }
295 IA64FAULT priv_mov_to_pmd(VCPU *vcpu, INST64 inst)
296 {
297 UINT64 r3 = vcpu_get_gr(vcpu,inst.M42.r3);
298 UINT64 r2 = vcpu_get_gr(vcpu,inst.M42.r2);
299 return (vcpu_set_pmd(vcpu,r3,r2));
300 }
302 unsigned long to_cr_cnt[128] = { 0 };
304 IA64FAULT priv_mov_to_cr(VCPU *vcpu, INST64 inst)
305 {
306 UINT64 val = vcpu_get_gr(vcpu, inst.M32.r2);
307 to_cr_cnt[inst.M32.cr3]++;
308 switch (inst.M32.cr3) {
309 case 0: return vcpu_set_dcr(vcpu,val);
310 case 1: return vcpu_set_itm(vcpu,val);
311 case 2: return vcpu_set_iva(vcpu,val);
312 case 8: return vcpu_set_pta(vcpu,val);
313 case 16:return vcpu_set_ipsr(vcpu,val);
314 case 17:return vcpu_set_isr(vcpu,val);
315 case 19:return vcpu_set_iip(vcpu,val);
316 case 20:return vcpu_set_ifa(vcpu,val);
317 case 21:return vcpu_set_itir(vcpu,val);
318 case 22:return vcpu_set_iipa(vcpu,val);
319 case 23:return vcpu_set_ifs(vcpu,val);
320 case 24:return vcpu_set_iim(vcpu,val);
321 case 25:return vcpu_set_iha(vcpu,val);
322 case 64:return vcpu_set_lid(vcpu,val);
323 case 65:return IA64_ILLOP_FAULT;
324 case 66:return vcpu_set_tpr(vcpu,val);
325 case 67:return vcpu_set_eoi(vcpu,val);
326 case 68:return IA64_ILLOP_FAULT;
327 case 69:return IA64_ILLOP_FAULT;
328 case 70:return IA64_ILLOP_FAULT;
329 case 71:return IA64_ILLOP_FAULT;
330 case 72:return vcpu_set_itv(vcpu,val);
331 case 73:return vcpu_set_pmv(vcpu,val);
332 case 74:return vcpu_set_cmcv(vcpu,val);
333 case 80:return vcpu_set_lrr0(vcpu,val);
334 case 81:return vcpu_set_lrr1(vcpu,val);
335 default: return IA64_ILLOP_FAULT;
336 }
337 }
339 IA64FAULT priv_rsm(VCPU *vcpu, INST64 inst)
340 {
341 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
342 return vcpu_reset_psr_sm(vcpu,imm24);
343 }
345 IA64FAULT priv_ssm(VCPU *vcpu, INST64 inst)
346 {
347 UINT64 imm24 = (inst.M44.i<<23)|(inst.M44.i2<<21)|inst.M44.imm;
348 return vcpu_set_psr_sm(vcpu,imm24);
349 }
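/*
 * Worked example (added for clarity, not in the original file): ssm and rsm
 * carry a 24-bit PSR mask split across three M44 fields, reassembled above
 * as i:i2:imm21.  A guest executing "ssm psr.ic|psr.i" supplies mask 0x6000
 * (bits 13 and 14), which fits entirely in the low 21-bit field:
 *
 *	imm24 = (0 << 23) | (0 << 21) | 0x6000;		// == 0x6000
 *
 * vcpu_set_psr_sm()/vcpu_reset_psr_sm() then apply the mask to the vcpu's
 * virtualized PSR.
 */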
351 /**
352 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
353 */
354 IA64FAULT priv_mov_to_psr(VCPU *vcpu, INST64 inst)
355 {
356 UINT64 val = vcpu_get_gr(vcpu, inst.M35.r2);
357 return vcpu_set_psr_l(vcpu,val);
358 }
360 /**********************************
361 * Moves from privileged registers
362 **********************************/
364 IA64FAULT priv_mov_from_rr(VCPU *vcpu, INST64 inst)
365 {
366 UINT64 val;
367 IA64FAULT fault;
369 if (inst.M43.r1 > 63) { // privified mov from cpuid
370 fault = vcpu_get_cpuid(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
371 if (fault == IA64_NO_FAULT)
372 return vcpu_set_gr(vcpu, inst.M43.r1-64, val);
373 }
374 else {
375 fault = vcpu_get_rr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
376 if (fault == IA64_NO_FAULT)
377 return vcpu_set_gr(vcpu, inst.M43.r1, val);
378 }
379 return fault;
380 }
382 IA64FAULT priv_mov_from_pkr(VCPU *vcpu, INST64 inst)
383 {
384 UINT64 val;
385 IA64FAULT fault;
387 fault = vcpu_get_pkr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
388 if (fault == IA64_NO_FAULT)
389 return vcpu_set_gr(vcpu, inst.M43.r1, val);
390 else return fault;
391 }
393 IA64FAULT priv_mov_from_dbr(VCPU *vcpu, INST64 inst)
394 {
395 UINT64 val;
396 IA64FAULT fault;
398 fault = vcpu_get_dbr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
399 if (fault == IA64_NO_FAULT)
400 return vcpu_set_gr(vcpu, inst.M43.r1, val);
401 else return fault;
402 }
404 IA64FAULT priv_mov_from_ibr(VCPU *vcpu, INST64 inst)
405 {
406 UINT64 val;
407 IA64FAULT fault;
409 fault = vcpu_get_ibr(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
410 if (fault == IA64_NO_FAULT)
411 return vcpu_set_gr(vcpu, inst.M43.r1, val);
412 else return fault;
413 }
415 IA64FAULT priv_mov_from_pmc(VCPU *vcpu, INST64 inst)
416 {
417 UINT64 val;
418 IA64FAULT fault;
420 fault = vcpu_get_pmc(vcpu,vcpu_get_gr(vcpu,inst.M43.r3),&val);
421 if (fault == IA64_NO_FAULT)
422 return vcpu_set_gr(vcpu, inst.M43.r1, val);
423 else return fault;
424 }
426 unsigned long from_cr_cnt[128] = { 0 };
428 #define cr_get(cr) \
429 (((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
430 vcpu_set_gr(vcpu, tgt, val) : fault)
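/*
 * Example (added for clarity, not in the original file): cr_get() factors
 * out the "read the CR, then copy it into the target GR" pattern used by
 * every case in priv_mov_from_cr() below.  "case 0: return cr_get(dcr);"
 * expands to roughly:
 *
 *	return (((fault = vcpu_get_dcr(vcpu, &val)) == IA64_NO_FAULT)
 *			? vcpu_set_gr(vcpu, tgt, val) : fault);
 *
 * i.e. the GR write happens only when the CR read succeeds; otherwise the
 * fault code is propagated unchanged.
 */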
432 IA64FAULT priv_mov_from_cr(VCPU *vcpu, INST64 inst)
433 {
434 UINT64 tgt = inst.M33.r1;
435 UINT64 val;
436 IA64FAULT fault;
438 from_cr_cnt[inst.M33.cr3]++;
439 switch (inst.M33.cr3) {
440 case 0: return cr_get(dcr);
441 case 1: return cr_get(itm);
442 case 2: return cr_get(iva);
443 case 8: return cr_get(pta);
444 case 16:return cr_get(ipsr);
445 case 17:return cr_get(isr);
446 case 19:return cr_get(iip);
447 case 20:return cr_get(ifa);
448 case 21:return cr_get(itir);
449 case 22:return cr_get(iipa);
450 case 23:return cr_get(ifs);
451 case 24:return cr_get(iim);
452 case 25:return cr_get(iha);
453 case 64:return cr_get(lid);
454 case 65:return cr_get(ivr);
455 case 66:return cr_get(tpr);
456 case 67:return vcpu_set_gr(vcpu,tgt,0L);
457 case 68:return cr_get(irr0);
458 case 69:return cr_get(irr1);
459 case 70:return cr_get(irr2);
460 case 71:return cr_get(irr3);
461 case 72:return cr_get(itv);
462 case 73:return cr_get(pmv);
463 case 74:return cr_get(cmcv);
464 case 80:return cr_get(lrr0);
465 case 81:return cr_get(lrr1);
466 default: return IA64_ILLOP_FAULT;
467 }
468 return IA64_ILLOP_FAULT;
469 }
471 IA64FAULT priv_mov_from_psr(VCPU *vcpu, INST64 inst)
472 {
473 UINT64 tgt = inst.M33.r1;
474 UINT64 val;
475 IA64FAULT fault;
477 if ((fault = vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
478 return vcpu_set_gr(vcpu, tgt, val);
479 else return fault;
480 }
482 /**************************************************************************
483 Privileged operation decode and dispatch routines
484 **************************************************************************/
486 IA64_SLOT_TYPE slot_types[0x20][3] = {
487 {M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
488 {M, I, ILLEGAL}, {M, I, ILLEGAL},
489 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
490 {M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
491 {M, F, I}, {M, F, I},
492 {M, M, F}, {M, M, F},
493 {M, I, B}, {M, I, B},
494 {M, B, B}, {M, B, B},
495 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
496 {B, B, B}, {B, B, B},
497 {M, M, B}, {M, M, B},
498 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
499 {M, F, B}, {M, F, B},
500 {ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
501 };
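/*
 * Note (added for clarity, not in the original file): slot_types[] is
 * indexed by the 5-bit bundle template and gives the execution-unit type of
 * each of the three instruction slots, mirroring the architected template
 * encodings (MII, MMI, MIB, MMB, BBB, ...).  For instance, template 0x11 --
 * the MIB template used by build_hypercall_bundle() above -- yields:
 *
 *	slot_types[0x11][0] == M	// mov r2 = hypnum
 *	slot_types[0x11][1] == I	// break brkimm
 *	slot_types[0x11][2] == B	// br.ret / br.cond
 *
 * priv_handle_op() below combines this with cr.ipsr.ri, the number of the
 * faulting slot, to choose the right decode path.
 */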
503 // pointer to privileged emulation function
504 typedef IA64FAULT (*PPEFCN)(VCPU *vcpu, INST64 inst);
506 PPEFCN Mpriv_funcs[64] = {
507 priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
508 priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
509 0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
510 priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
511 priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr, priv_mov_from_pkr,
512 priv_mov_from_pmc, 0, 0, 0,
513 0, 0, 0, 0,
514 0, 0, priv_tpa, priv_tak,
515 0, 0, 0, 0,
516 priv_mov_from_cr, priv_mov_from_psr, 0, 0,
517 0, 0, 0, 0,
518 priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
519 0, 0, 0, 0,
520 priv_ptc_e, 0, 0, 0,
521 0, 0, 0, 0, 0, 0, 0, 0
522 };
524 struct {
525 unsigned long mov_to_ar_imm;
526 unsigned long mov_to_ar_reg;
527 unsigned long mov_from_ar;
528 unsigned long ssm;
529 unsigned long rsm;
530 unsigned long rfi;
531 unsigned long bsw0;
532 unsigned long bsw1;
533 unsigned long cover;
534 unsigned long Mpriv_cnt[64];
535 } privcnt = { 0 };
537 unsigned long privop_trace = 0;
539 IA64FAULT
540 priv_handle_op(VCPU *vcpu, REGS *regs, int privlvl)
541 {
542 IA64_BUNDLE bundle;
543 IA64_BUNDLE __get_domain_bundle(UINT64);
544 int slot;
545 IA64_SLOT_TYPE slot_type;
546 INST64 inst;
547 PPEFCN pfunc;
548 unsigned long ipsr = regs->cr_ipsr;
549 UINT64 iip = regs->cr_iip;
550 int x6;
552 // make a local copy of the bundle containing the privop
553 #if 1
554 bundle = __get_domain_bundle(iip);
555 if (!bundle.i64[0] && !bundle.i64[1])
556 #else
557 if (__copy_from_user(&bundle,iip,sizeof(bundle)))
558 #endif
559 {
560 //printf("*** priv_handle_op: privop bundle @%p not mapped, retrying\n",iip);
561 return IA64_RETRY;
562 }
563 #if 0
564 if (iip==0xa000000100001820) {
565 static int firstpagefault = 1;
566 if (firstpagefault) {
567 printf("*** First time to domain page fault!\n"); firstpagefault=0;
568 }
569 }
570 #endif
571 if (privop_trace) {
572 static long i = 400;
573 //if (i > 0) printf("privop @%p\n",iip);
574 if (i > 0) printf("priv_handle_op: @%p, itc=%lx, itm=%lx\n",
575 iip,ia64_get_itc(),ia64_get_itm());
576 i--;
577 }
578 slot = ((struct ia64_psr *)&ipsr)->ri;
579 if (!slot) inst.inst = (bundle.i64[0]>>5) & MASK_41;
580 else if (slot == 1)
581 inst.inst = ((bundle.i64[0]>>46) | bundle.i64[1]<<18) & MASK_41;
582 else if (slot == 2) inst.inst = (bundle.i64[1]>>23) & MASK_41;
583 else printf("priv_handle_op: illegal slot: %d\n", slot);
585 slot_type = slot_types[bundle.template][slot];
586 if (priv_verbose) {
587 printf("priv_handle_op: checking bundle at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
588 iip, (UINT64)inst.inst, slot, slot_type);
589 }
590 if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
591 // break instr for privified cover
592 }
593 else if (privlvl != 2) return (IA64_ILLOP_FAULT);
594 switch (slot_type) {
595 case M:
596 if (inst.generic.major == 0) {
597 #if 0
598 if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
599 privcnt.cover++;
600 return priv_cover(vcpu,inst);
601 }
602 #endif
603 if (inst.M29.x3 != 0) break;
604 if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
605 privcnt.mov_to_ar_imm++;
606 return priv_mov_to_ar_imm(vcpu,inst);
607 }
608 if (inst.M44.x4 == 6) {
609 privcnt.ssm++;
610 return priv_ssm(vcpu,inst);
611 }
612 if (inst.M44.x4 == 7) {
613 privcnt.rsm++;
614 return priv_rsm(vcpu,inst);
615 }
616 break;
617 }
618 else if (inst.generic.major != 1) break;
619 x6 = inst.M29.x6;
620 if (x6 == 0x2a) {
621 if (inst.M29.r2 > 63 && inst.M29.ar3 < 8)
622 privcnt.mov_from_ar++; // privified mov from kr
623 else privcnt.mov_to_ar_reg++;
624 return priv_mov_to_ar_reg(vcpu,inst);
625 }
626 if (inst.M29.x3 != 0) break;
627 if (!(pfunc = Mpriv_funcs[x6])) break;
628 if (x6 == 0x1e || x6 == 0x1f) { // tpa or tak are "special"
629 if (inst.M46.r3 > 63) {
630 if (x6 == 0x1e) x6 = 0x1b;
631 else x6 = 0x1a;
632 }
633 }
634 privcnt.Mpriv_cnt[x6]++;
635 return (*pfunc)(vcpu,inst);
636 break;
637 case B:
638 if (inst.generic.major != 0) break;
639 if (inst.B8.x6 == 0x08) {
640 IA64FAULT fault;
641 privcnt.rfi++;
642 fault = priv_rfi(vcpu,inst);
643 if (fault == IA64_NO_FAULT) fault = IA64_RFI_IN_PROGRESS;
644 return fault;
645 }
646 if (inst.B8.x6 == 0x0c) {
647 privcnt.bsw0++;
648 return priv_bsw0(vcpu,inst);
649 }
650 if (inst.B8.x6 == 0x0d) {
651 privcnt.bsw1++;
652 return priv_bsw1(vcpu,inst);
653 }
654 if (inst.B8.x6 == 0x0) { // break instr for privified cover
655 privcnt.cover++;
656 return priv_cover(vcpu,inst);
657 }
658 break;
659 case I:
660 if (inst.generic.major != 0) break;
661 #if 0
662 if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
663 privcnt.cover++;
664 return priv_cover(vcpu,inst);
665 }
666 #endif
667 if (inst.I26.x3 != 0) break; // I26.x3 == I27.x3
668 if (inst.I26.x6 == 0x2a) {
669 if (inst.I26.r2 > 63 && inst.I26.ar3 < 8)
670 privcnt.mov_from_ar++; // privified mov from kr
671 else privcnt.mov_to_ar_reg++;
672 return priv_mov_to_ar_reg(vcpu,inst);
673 }
674 if (inst.I27.x6 == 0x0a) {
675 privcnt.mov_to_ar_imm++;
676 return priv_mov_to_ar_imm(vcpu,inst);
677 }
678 break;
679 default:
680 break;
681 }
682 //printf("We who are about do die salute you\n");
683 printf("handle_op: can't handle privop at 0x%lx (op=0x%016lx) slot %d (type=%d)\n",
684 iip, (UINT64)inst.inst, slot, slot_type);
685 //printf("vtop(0x%lx)==0x%lx\r\n", iip, tr_vtop(iip));
686 //thread_mozambique("privop fault\n");
687 return (IA64_ILLOP_FAULT);
688 }
690 /** Emulate a privileged operation.
691 *
692 * This should probably return 0 on success and the "trap number"
693 * (e.g. illegal operation for bad register, priv op for an
694 * instruction that isn't allowed, etc.) on "failure"
695 *
696 * @param vcpu virtual cpu
697 * @param isr contents of cr.isr (interruption status register) at the fault
698 * @return fault
699 */
700 IA64FAULT
701 priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
702 {
703 IA64FAULT fault;
704 UINT64 ipsr = regs->cr_ipsr;
705 UINT64 isrcode = (isr >> 4) & 0xf;
706 int privlvl;
708 // handle privops masked as illops? and breaks (6)
709 if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
710 printf("priv_emulate: isrcode != 0 or 1 or 2\n");
711 printf("priv_emulate: returning ILLOP, not implemented!\n");
712 while (1);
713 return IA64_ILLOP_FAULT;
714 }
715 //if (isrcode != 1 && isrcode != 2) return 0;
716 vcpu_set_regs(vcpu,regs);
717 privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT;
718 // it's OK for a privified cover to be executed in user-land
719 fault = priv_handle_op(vcpu,regs,privlvl);
720 if (fault == IA64_NO_FAULT) { // success!!
721 // update iip/ipsr to point to the next instruction
722 (void)vcpu_increment_iip(vcpu);
723 }
724 else if (fault == IA64_EXTINT_VECTOR) {
725 // update iip/ipsr before delivering interrupt
726 (void)vcpu_increment_iip(vcpu);
727 }
728 else if (fault == IA64_RFI_IN_PROGRESS) return fault;
729 // success but don't update to next instruction
730 else if (fault == IA64_RETRY) {
731 //printf("Priv emulate gets IA64_RETRY\n");
732 //printf("priv_emulate: returning RETRY, not implemented!\n");
733 //while (1);
734 // don't update iip/ipsr, deliver
736 vcpu_force_data_miss(vcpu,regs->cr_iip);
737 return IA64_RETRY;
738 }
739 else if (priv_verbose) printf("unhandled operation from handle_op\n");
740 // if (fault == IA64_ILLOP_FAULT) {
741 // printf("priv_emulate: returning ILLOP, not implemented!\n");
742 // while (1);
743 // }
744 return fault;
745 }
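/*
 * Sketch (added for illustration, not part of the original file) of how a
 * fault handler might drive priv_emulate().  The handler and helper names
 * here are hypothetical, not taken from this file.  On IA64_NO_FAULT the
 * iip has already been advanced past the emulated instruction, so the
 * handler can simply resume the guest; other fault codes would be handled
 * or reflected back to the guest by the caller.
 */
#if 0	/* illustration only */
void example_privop_fault_handler(VCPU *vcpu, REGS *regs, UINT64 isr)
{
	IA64FAULT fault = priv_emulate(vcpu, regs, isr);

	if (fault == IA64_NO_FAULT || fault == IA64_RFI_IN_PROGRESS)
		return;					/* resume the guest */
	example_reflect_fault(vcpu, regs, fault);	/* hypothetical helper */
}
#endif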
748 /**************************************************************************
749 Privileged operation instrumentation routines
750 **************************************************************************/
752 char *Mpriv_str[64] = {
753 "mov_to_rr", "mov_to_dbr", "mov_to_ibr", "mov_to_pkr",
754 "mov_to_pmc", "mov_to_pmd", "<0x06>", "<0x07>",
755 "<0x08>", "ptc_l", "ptc_g", "ptc_ga",
756 "ptr_d", "ptr_i", "itr_d", "itr_i",
757 "mov_from_rr", "mov_from_dbr", "mov_from_ibr", "mov_from_pkr",
758 "mov_from_pmc", "<0x15>", "<0x16>", "<0x17>",
759 "<0x18>", "<0x19>", "privified-thash", "privified-ttag",
760 "<0x1c>", "<0x1d>", "tpa", "tak",
761 "<0x20>", "<0x21>", "<0x22>", "<0x23>",
762 "mov_from_cr", "mov_from_psr", "<0x26>", "<0x27>",
763 "<0x28>", "<0x29>", "<0x2a>", "<0x2b>",
764 "mov_to_cr", "mov_to_psr", "itc_d", "itc_i",
765 "<0x30>", "<0x31>", "<0x32>", "<0x33>",
766 "ptc_e", "<0x35>", "<0x36>", "<0x37>",
767 "<0x38>", "<0x39>", "<0x3a>", "<0x3b>",
768 "<0x3c>", "<0x3d>", "<0x3e>", "<0x3f>"
769 };
771 #define RS "Rsvd"
772 char *cr_str[128] = {
773 "dcr","itm","iva",RS,RS,RS,RS,RS,
774 "pta",RS,RS,RS,RS,RS,RS,RS,
775 "ipsr","isr",RS,"iip","ifa","itir","iipa","ifs",
776 "iim","iha",RS,RS,RS,RS,RS,RS,
777 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
778 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
779 "lid","ivr","tpr","eoi","irr0","irr1","irr2","irr3",
780 "itv","pmv","cmcv",RS,RS,RS,RS,RS,
781 "lrr0","lrr1",RS,RS,RS,RS,RS,RS,
782 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
783 RS,RS,RS,RS,RS,RS,RS,RS, RS,RS,RS,RS,RS,RS,RS,RS,
784 RS,RS,RS,RS,RS,RS,RS,RS
785 };
787 // FIXME: should use snprintf to ensure no buffer overflow
788 int dump_privop_counts(char *buf)
789 {
790 int i, j;
791 UINT64 sum = 0;
792 char *s = buf;
794 // this is ugly and should probably produce sorted output
795 // but it will have to do for now
796 sum += privcnt.mov_to_ar_imm; sum += privcnt.mov_to_ar_reg;
797 sum += privcnt.ssm; sum += privcnt.rsm;
798 sum += privcnt.rfi; sum += privcnt.bsw0;
799 sum += privcnt.bsw1; sum += privcnt.cover;
800 for (i=0; i < 64; i++) sum += privcnt.Mpriv_cnt[i];
801 s += sprintf(s,"Privop statistics: (Total privops: %ld)\r\n",sum);
802 if (privcnt.mov_to_ar_imm)
803 s += sprintf(s,"%10d %s [%d%%]\r\n", privcnt.mov_to_ar_imm,
804 "mov_to_ar_imm", (privcnt.mov_to_ar_imm*100L)/sum);
805 if (privcnt.mov_to_ar_reg)
806 s += sprintf(s,"%10d %s [%d%%]\r\n", privcnt.mov_to_ar_reg,
807 "mov_to_ar_reg", (privcnt.mov_to_ar_reg*100L)/sum);
808 if (privcnt.mov_from_ar)
809 s += sprintf(s,"%10d %s [%d%%]\r\n", privcnt.mov_from_ar,
810 "privified-mov_from_ar", (privcnt.mov_from_ar*100L)/sum);
811 if (privcnt.ssm)
812 s += sprintf(s,"%10d %s [%d%%]\r\n", privcnt.ssm,
813 "ssm", (privcnt.ssm*100L)/sum);
814 if (privcnt.rsm)
815 s += sprintf(s,"%10d %s [%d%%]\r\n", privcnt.rsm,
816 "rsm", (privcnt.rsm*100L)/sum);
817 if (privcnt.rfi)
818 s += sprintf(s,"%10d %s [%d%%]\r\n", privcnt.rfi,
819 "rfi", (privcnt.rfi*100L)/sum);
820 if (privcnt.bsw0)
821 s += sprintf(s,"%10d %s [%d%%]\r\n", privcnt.bsw0,
822 "bsw0", (privcnt.bsw0*100L)/sum);
823 if (privcnt.bsw1)
824 s += sprintf(s,"%10d %s [%d%%]\r\n", privcnt.bsw1,
825 "bsw1", (privcnt.bsw1*100L)/sum);
826 if (privcnt.cover)
827 s += sprintf(s,"%10d %s [%d%%]\r\n", privcnt.cover,
828 "cover", (privcnt.cover*100L)/sum);
829 for (i=0; i < 64; i++) if (privcnt.Mpriv_cnt[i]) {
830 if (!Mpriv_str[i]) s += sprintf(s,"PRIVSTRING NULL!!\r\n");
831 else s += sprintf(s,"%10d %s [%d%%]\r\n", privcnt.Mpriv_cnt[i],
832 Mpriv_str[i], (privcnt.Mpriv_cnt[i]*100L)/sum);
833 if (i == 0x24) { // mov from CR
834 s += sprintf(s," [");
835 for (j=0; j < 128; j++) if (from_cr_cnt[j]) {
836 if (!cr_str[j])
837 s += sprintf(s,"PRIVSTRING NULL!!\r\n");
838 else s += sprintf(s,"%s(%d),",cr_str[j],from_cr_cnt[j]);
839 }
840 s += sprintf(s,"]\r\n");
841 }
842 else if (i == 0x2c) { // mov to CR
843 s += sprintf(s," [");
844 for (j=0; j < 128; j++) if (to_cr_cnt[j]) {
845 if (!cr_str[j])
846 s += sprintf(s,"PRIVSTRING NULL!!\r\n");
847 else s += sprintf(s,"%s(%d),",cr_str[j],to_cr_cnt[j]);
848 }
849 s += sprintf(s,"]\r\n");
850 }
851 }
852 return s - buf;
853 }
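/*
 * Sketch (added for illustration, not part of the original file) of the
 * bounded formatting the FIXME above asks for: track the space left and use
 * snprintf so a long counter dump cannot overrun the caller's buffer.  Only
 * the summary line and one counter are shown; the remaining counters would
 * follow the same pattern.  Assumes an snprintf with the usual semantics is
 * available in this environment.
 */
#if 0	/* illustration only */
static int example_dump_privop_counts_bounded(char *buf, int len)
{
	char *s = buf, *end = buf + len;
	UINT64 sum = privcnt.rfi;	/* + the other counters, as above */

	s += snprintf(s, end - s, "Privop statistics: (Total privops: %lu)\r\n", sum);
	if (s > end) s = end;		/* snprintf reports, rather than writes, any overflow */
	if (privcnt.rfi) {
		s += snprintf(s, end - s, "%10lu rfi [%lu%%]\r\n",
		              privcnt.rfi, privcnt.rfi * 100UL / sum);
		if (s > end) s = end;
	}
	return s - buf;
}
#endif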
855 int zero_privop_counts(char *buf)
856 {
857 int i, j;
858 char *s = buf;
860 // this is ugly and should probably produce sorted output
861 // but it will have to do for now
862 privcnt.mov_to_ar_imm = 0; privcnt.mov_to_ar_reg = 0;
863 privcnt.mov_from_ar = 0;
864 privcnt.ssm = 0; privcnt.rsm = 0;
865 privcnt.rfi = 0; privcnt.bsw0 = 0;
866 privcnt.bsw1 = 0; privcnt.cover = 0;
867 for (i=0; i < 64; i++) privcnt.Mpriv_cnt[i] = 0;
868 for (j=0; j < 128; j++) from_cr_cnt[j] = 0;
869 for (j=0; j < 128; j++) to_cr_cnt[j] = 0;
870 s += sprintf(s,"All privop statistics zeroed\r\n");
871 return s - buf;
872 }
874 #define TMPBUFLEN 8*1024
875 int dump_privop_counts_to_user(char __user *ubuf, int len)
876 {
877 char buf[TMPBUFLEN];
878 int n = dump_privop_counts(buf);
880 if (len < TMPBUFLEN) return -1;
881 if (__copy_to_user(ubuf,buf,n)) return -1;
882 return n;
883 }
885 int zero_privop_counts_to_user(char __user *ubuf, int len)
886 {
887 char buf[TMPBUFLEN];
888 int n = zero_privop_counts(buf);
890 if (len < TMPBUFLEN) return -1;
891 if (__copy_to_user(ubuf,buf,n)) return -1;
892 return n;
893 }