debuggers.hg: view xen/arch/ia64/tools/privify/privify.c @ 0:7d21f7218375

Exact replica of unstable on 051908 + README-this
author: Mukesh Rathor
date:   Mon May 19 15:34:57 2008 -0700

/*
 * Binary translate privilege-sensitive ops to privileged
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 * Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include "privify.h"
#include <stdio.h>    /* printf() is used by verbose() and privify_memory() */

typedef unsigned long long u64;
typedef unsigned long long IA64_INST;

typedef union U_IA64_BUNDLE {
    u64 i64[2];
    struct { u64 template:5, slot0:41, slot1a:18, slot1b:23, slot2:41; };
    // NOTE: following doesn't work because bitfields can't cross natural
    // size boundaries
    //struct { u64 template:5, slot0:41, slot1:41, slot2:41; };
} IA64_BUNDLE;
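
/*
 * Note added for clarity (not in the original source): the three 41-bit
 * instruction slots follow the 5-bit template, so slot1 occupies bundle
 * bits 46..86 and straddles the two 64-bit words: its low 18 bits live in
 * i64[0] (field slot1a) and its high 23 bits in i64[1] (field slot1b).
 * read_slot1()/write_slot1() near the end of this file reassemble and
 * split it accordingly.
 */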

typedef enum E_IA64_SLOT_TYPE { I, M, F, B, L, ILLEGAL } IA64_SLOT_TYPE;

typedef union U_INST64_A5 {
    IA64_INST inst;
    struct { u64 qp:6, r1:7, imm7b:7, r3:2, imm5c:5, imm9d:9, s:1, major:4; };
} INST64_A5;

typedef union U_INST64_B4 {
    IA64_INST inst;
    struct { u64 qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6, wh:2, d:1, un1:1, major:4; };
} INST64_B4;

typedef union U_INST64_B8 {
    IA64_INST inst;
    struct { u64 qp:6, un21:21, x6:6, un4:4, major:4; };
} INST64_B8;

typedef union U_INST64_B9 {
    IA64_INST inst;
    struct { u64 qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
} INST64_B9;

typedef union U_INST64_I19 {
    IA64_INST inst;
    struct { u64 qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
} INST64_I19;

typedef union U_INST64_I26 {
    IA64_INST inst;
    struct { u64 qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_I26;

typedef union U_INST64_I27 {
    IA64_INST inst;
    struct { u64 qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; };
} INST64_I27;

typedef union U_INST64_I28 { // not privileged (mov from AR)
    IA64_INST inst;
    struct { u64 qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_I28;

typedef union U_INST64_M28 {
    IA64_INST inst;
    struct { u64 qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M28;

typedef union U_INST64_M29 {
    IA64_INST inst;
    struct { u64 qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_M29;

typedef union U_INST64_M30 {
    IA64_INST inst;
    struct { u64 qp:6, :7, imm:7, ar3:7, x4:4, x2:2, x3:3, s:1, major:4; };
} INST64_M30;

typedef union U_INST64_M31 {
    IA64_INST inst;
    struct { u64 qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_M31;

typedef union U_INST64_M32 {
    IA64_INST inst;
    struct { u64 qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; };
} INST64_M32;

typedef union U_INST64_M33 {
    IA64_INST inst;
    struct { u64 qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
} INST64_M33;

typedef union U_INST64_M35 {
    IA64_INST inst;
    struct { u64 qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
} INST64_M35;

typedef union U_INST64_M36 {
    IA64_INST inst;
    struct { u64 qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
} INST64_M36;

typedef union U_INST64_M41 {
    IA64_INST inst;
    struct { u64 qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
} INST64_M41;

typedef union U_INST64_M42 {
    IA64_INST inst;
    struct { u64 qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M42;

typedef union U_INST64_M43 {
    IA64_INST inst;
    struct { u64 qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M43;

typedef union U_INST64_M44 {
    IA64_INST inst;
    struct { u64 qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
} INST64_M44;

typedef union U_INST64_M45 {
    IA64_INST inst;
    struct { u64 qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M45;

typedef union U_INST64_M46 {
    IA64_INST inst;
    struct { u64 qp:6, r1:7, un7:7, r3:7, x6:6, x3:3, un1:1, major:4; };
} INST64_M46;

typedef union U_INST64 {
    IA64_INST inst;
    struct { u64 :37, major:4; } generic;
    INST64_A5 A5;    // used in build_hypercall_bundle only
    INST64_B4 B4;    // used in build_hypercall_bundle only
    INST64_B8 B8;    // rfi, bsw.[01]
    INST64_B9 B9;    // break.b
    INST64_I19 I19;  // used in build_hypercall_bundle only
    INST64_I26 I26;  // mov register to ar (I unit)
    INST64_I27 I27;  // mov immediate to ar (I unit)
    INST64_I28 I28;  // mov from ar (I unit)
    INST64_M28 M28;  // purge translation cache entry
    INST64_M29 M29;  // mov register to ar (M unit)
    INST64_M30 M30;  // mov immediate to ar (M unit)
    INST64_M31 M31;  // mov from ar (M unit)
    INST64_M32 M32;  // mov reg to cr
    INST64_M33 M33;  // mov from cr
    INST64_M35 M35;  // mov to psr
    INST64_M36 M36;  // mov from psr
    INST64_M41 M41;  // translation cache insert
    INST64_M42 M42;  // mov to indirect reg/translation reg insert
    INST64_M43 M43;  // mov from indirect reg
    INST64_M44 M44;  // set/reset system mask
    INST64_M45 M45;  // translation purge
    INST64_M46 M46;  // translation access (tpa,tak)
} INST64;
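
/*
 * Illustrative sketch (not part of the original source): the union lets one
 * 41-bit instruction word be viewed through whichever format applies, after
 * dispatching on the 4-bit major opcode.  For example, recognizing "fc r3"
 * (which privify_inst() below turns into a ptc on r3+64) could look like:
 *
 *    static int is_fc(IA64_INST raw)
 *    {
 *        INST64 i = *(INST64 *)&raw;
 *        return i.generic.major == 1 && i.M28.x3 == 0 && i.M28.x6 == 0x30;
 *    }
 */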

#define MASK_41 ((u64)0x1ffffffffff)

long priv_verbose = 0;
#define verbose(a...) do { if (priv_verbose) printf(a); } while(0)

/*
 * privify_inst
 *
 * Replaces privilege-sensitive instructions (and reads from write-trapping
 * registers) with privileged/trapping instructions as follows:
 *    mov rx=ar.cflg    -> mov ar.cflg=r(x+64)  [**]
 *    mov rx=ar.ky      -> mov ar.ky=r(x+64)
 *    fc rx             -> ptc r(x+64)
 *    thash rx=ry       -> tak rx=r(y+64)
 *    ttag rx=ry        -> tpa rx=r(y+64)
 *    mov rx=cpuid[ry]  -> mov r(x+64)=rr[ry]
 *    mov rx=pmd[ry]    -> mov r(x+64)=pmc[ry]  [**]
 *    cover             -> break.b 0x1fffff
 *
 * [**] not currently implemented
 */
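
/*
 * Note added for clarity (not in the original source): each rewrite above
 * keeps the original operand recoverable by adding 64 to the register
 * number.  The register fields in these encodings are 7 bits wide, so guest
 * code that only names r0-r63 can be translated; the privileged replacement
 * then carries reg+64, and the hypervisor's trap handler can presumably
 * detect the set high bit and recover the original register, e.g.:
 *
 *    // hypothetical recovery on the trap side
 *    int orig_reg = priv_reg & 0x3f;    // strip the +64 marker
 *
 * This is also why privify_inst() refuses to translate instructions that
 * already name r64-r127 (the "w/rN>63" messages below).
 */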
IA64_INST privify_inst(IA64_INST inst_val,
        IA64_SLOT_TYPE slot_type, IA64_BUNDLE *bp, char **msg)
{
    INST64 inst = *(INST64 *)&inst_val;

    *msg = 0;
    switch (slot_type) {
        case M:
        // FIXME: Also use for mov_to/from_ar.cflag (M29/M30) (IA32 only)
        if (inst.generic.major != 1) break;
        if (inst.M46.x3 != 0) break;
        if (inst.M31.x6 == 0x22 && inst.M31.ar3 < 8) {
            // mov r1=kr -> mov kr=r1+64
            verbose("privify_inst: privified mov r1=kr @%p\n",bp);
            if (inst.M31.r1 >= 64) *msg = "mov r1=kr w/r1>63";
            else privify_mov_from_kr_m(inst);
            break;
        }
        if (inst.M29.x6 == 0x2a && inst.M29.ar3 < 8) { // mov kr=r1
            if (inst.M29.r2 >= 64) *msg = "mov kr=r2 w/r2>63";
            break;
        }
        if (inst.M28.x6 == 0x30) {
            // fc r3 -> ptc r3+64
            verbose("privify_inst: privified fc r3 @%p\n",bp);
            if (inst.M28.r3 >= 64) *msg = "fc r3 w/r3>63";
            else privify_fc(inst);
            break;
        }
        if (inst.M28.x6 == 0x34) {
            if (inst.M28.r3 >= 64) *msg = "ptc.e w/r3>63";
            break;
        }
        if (inst.M46.un7 != 0) break;
        if (inst.M46.un1 != 0) break;
        if (inst.M46.x6 == 0x1a) { // thash -> tak r1=r3+64
            verbose("privify_inst: privified thash @%p\n",bp);
            if (inst.M46.r3 >= 64) *msg = "thash w/r3>63";
            else privify_thash(inst);
        }
        else if (inst.M46.x6 == 0x1b) { // ttag -> tpa r1=r3+64
            verbose("privify_inst: privified ttag @%p\n",bp);
            if (inst.M46.r3 >= 64) *msg = "ttag w/r3>63";
            else privify_ttag(inst);
        }
        else if (inst.M43.x6 == 0x17) {
            verbose("privify_inst: privified mov_from_cpuid @%p\n",bp);
            if (inst.M43.r1 >= 64) *msg = "mov_from_cpuid w/r1>63";
            else privify_mov_from_cpuid(inst);
        }
        else if (inst.M46.x6 == 0x1e) { // tpa
            if (inst.M46.r3 >= 64) *msg = "tpa w/r3>63";
        }
        else if (inst.M46.x6 == 0x1f) { // tak
            if (inst.M46.r3 >= 64) *msg = "tak w/r3>63";
        }
        else if (inst.M43.x6 == 0x10) {
            if (inst.M43.r1 >= 64) *msg = "mov_to_rr w/r1>63";
        }
        break;
        case B:
        if (inst.generic.major != 0) break;
        if (inst.B8.x6 == 0x2) { // cover -> break.b 0x1fffff
            if (inst.B8.un21 != 0) break;
            if (inst.B8.un4 != 0) break;
            privify_cover(inst);
            verbose("privify_inst: privified cover @%p\n",bp);
        }
        if (inst.B9.x6 == 0x0) { // (p15) break.b 0x1fffff -> cover
            if (inst.B9.qp != 15) break;
            if (inst.B9.imm20 != 0xfffff) break;
            if (inst.B9.i != 1) break;
            inst.B8.x6 = 0x2;
            inst.B8.un21 = 0;
            inst.B8.un4 = 0;
            inst.B8.qp = 0;
            verbose("privify_inst: unprivified pseudo-cover @%p\n",
                bp);
        }
        break;
        case I: // only used for privifying mov_from_ar
        // FIXME: Also use for mov_to/from_ar.cflag (I26/I27) (IA32 only)
        if (inst.generic.major != 0) break;
        if (inst.I28.x6 == 0x32 && !inst.I28.x3 && inst.I28.ar3 < 8) {
            // mov r1=kr -> mov kr=r1+64
            verbose("privify_inst: privified mov r1=kr @%p\n",bp);
            if (inst.I28.r1 >= 64) *msg = "mov r1=kr w/r1>63";
            else privify_mov_from_kr_i(inst);
        }
        else if (inst.I26.x6 == 0x2a && !inst.I26.x3 &&
                 inst.I26.ar3 < 8) { // mov kr=r1
            if (inst.I26.r2 >= 64) *msg = "mov kr=r2 w/r2>63";
        }
        break;
        case F: case L: case ILLEGAL:
        break;
    }
    return *(IA64_INST *)&inst;
}

#define read_slot1(b) (((b.i64[0]>>46L) | (b.i64[1]<<18UL)) & MASK_41)
// Not sure why, but this more obvious definition of read_slot1 doesn't work
// because the compiler treats (b.slot1b<<18UL) as a signed 32-bit integer
// so not enough bits get used and it gets sign extended to boot!
//#define read_slot1(b) ((b.slot1a | (b.slot1b<<18UL)) & MASK_41)
#define write_slot1(b,inst) do { b.slot1a=inst; b.slot1b=inst>>18UL; } while (0)
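
/*
 * Note added for clarity (not in the original source): the commented-out
 * definition fails presumably because a bit-field narrower than int is
 * promoted to a 32-bit signed int before the shift, so the 23-bit slot1b
 * shifted left by 18 loses its high bits and is sign-extended when widened.
 * Casting the fields up to u64 first would avoid both problems, e.g.
 * (untested sketch):
 *
 *    //#define read_slot1(b) \
 *    //    (((u64)(b).slot1a | ((u64)(b).slot1b << 18)) & MASK_41)
 */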

void privify_memory(void *start, unsigned long len)
{
    IA64_BUNDLE bundle, *bp = (IA64_BUNDLE *)start;
    IA64_INST tmp;
    char *msg;

    printf("privifying %lu bytes of memory at %p\n",len,start);
    if ((unsigned long)start & 0xfL) {
        printf("unaligned memory block in privify_memory\n");
    }
    len &= ~0xf;
    for (bundle = *bp; len; len -= 16) {
        msg = 0;    // no stale diagnostics carried over from the previous bundle
        switch (bundle.template) {
            case 0x06: case 0x07: case 0x14: case 0x15:
            case 0x1a: case 0x1b: case 0x1e: case 0x1f:
            break;
            case 0x16: case 0x17:
            // may be B in slot0/1 but cover can only be slot2
            bundle.slot2 = privify_inst(bundle.slot2,B,bp,&msg);
            break;
            case 0x00: case 0x01: case 0x02: case 0x03:
            tmp = privify_inst(read_slot1(bundle),I,bp,&msg);
            write_slot1(bundle,tmp);
            // fall through
            case 0x0c: case 0x0d:
            bundle.slot2 = privify_inst(bundle.slot2,I,bp,&msg);
            // fall through
            case 0x04: case 0x05:
            // could a privified cover be in slot2 here?
            bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
            break;
            case 0x08: case 0x09: case 0x0a: case 0x0b:
            bundle.slot2 = privify_inst(bundle.slot2,I,bp,&msg);
            // fall through
            case 0x0e: case 0x0f:
            bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
            if (msg) break;
            tmp = privify_inst(read_slot1(bundle),M,bp,&msg);
            write_slot1(bundle,tmp);
            break;
            case 0x10: case 0x11:
            tmp = privify_inst(read_slot1(bundle),I,bp,&msg);
            write_slot1(bundle,tmp);
            // fall through
            case 0x12: case 0x13:
            // may be B in slot1 but cover can only be slot2
            // fall through
            case 0x1c: case 0x1d:
            bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
            if (msg) break;
            bundle.slot2 = privify_inst(bundle.slot2,B,bp,&msg);
            break;
            case 0x18: case 0x19:
            bundle.slot0 = privify_inst(bundle.slot0,M,bp,&msg);
            if (msg) break;
            tmp = privify_inst(read_slot1(bundle),M,bp,&msg);
            write_slot1(bundle,tmp);
            if (msg) break;
            bundle.slot2 = privify_inst(bundle.slot2,B,bp,&msg);
            break;
        }
        if (msg) {
            if (bundle.slot2)
                printf("privify_memory: %s @%p\n",msg,bp);
            else
                printf("privify_memory: %s @%p probably not insts\n",
                    msg,bp);
            printf("privify_memory: bundle=%llx,%llx\n",
                bundle.i64[1],bundle.i64[0]);
        }
        *bp = bundle;
        bundle = *++bp;
    }
}
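
/*
 * Usage sketch (not part of the original source; load_guest_text is a
 * hypothetical helper): the translator is run in place over a 16-byte-aligned
 * block of guest text, e.g. after loading a kernel image into a buffer:
 *
 *    extern void *load_guest_text(const char *path, unsigned long *len);
 *
 *    void privify_image(const char *path)
 *    {
 *        unsigned long len;
 *        void *text = load_guest_text(path, &len);  // assumed helper
 *        privify_memory(text, len);                 // rewrites bundles in place
 *    }
 *
 * privify_memory() truncates len to a multiple of 16 and warns (but still
 * proceeds) if the start address is not bundle-aligned.
 */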