xen-vtx-unstable

view xen/include/asm-x86/shadow_64.h @ 6774:4d899a738d59

merge?
author cl349@firebug.cl.cam.ac.uk
date Tue Sep 13 15:05:49 2005 +0000 (2005-09-13)
parents 3feb7fa331ed 291e816acbf4
children e7c7196fa329 8ca0f98ba8e2
line source
/******************************************************************************
 * include/asm-x86/shadow_64.h
 *
 * Copyright (c) 2005 Michael A Fetterman
 * Based on an earlier implementation by Ian Pratt et al
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * Jun Nakajima <jun.nakajima@intel.com>
 * Chengyuan Li <chengyuan.li@intel.com>
 *
 * Extended to support 64-bit guests.
 */
#ifndef _XEN_SHADOW_64_H
#define _XEN_SHADOW_64_H
#include <asm/shadow.h>
#include <asm/shadow_ops.h>

#define READ_FAULT  0
#define WRITE_FAULT 1

#define ERROR_W 2
#define ERROR_U 4

#define X86_64_SHADOW_DEBUG 0

#if X86_64_SHADOW_DEBUG
#define ESH_LOG(_f, _a...)              \
        printk(_f, ##_a)
#else
#define ESH_LOG(_f, _a...) ((void)0)
#endif

#define PAGING_L4      4UL
#define PAGING_L3      3UL
#define PAGING_L2      2UL
#define PAGING_L1      1UL
#define L_MASK         0xff

#define ROOT_LEVEL_64  PAGING_L4
#define ROOT_LEVEL_32  PAGING_L2

#define SHADOW_ENTRY   (2UL << 16)
#define GUEST_ENTRY    (1UL << 16)

#define GET_ENTRY      (2UL << 8)
#define SET_ENTRY      (1UL << 8)

#define PAGETABLE_ENTRIES  (1 << PAGETABLE_ORDER)

/* For a 32-bit VMX guest, to allocate shadow L1 & L2 tables. */
#define SL1_ORDER   1
#define SL2_ORDER   2
typedef struct { intpte_t lo; } pgentry_64_t;

/* The shadow page type encodes the paging level in its top bits. */
#define shadow_level_to_type(l)    ((l) << 29)
#define shadow_type_to_level(t)    ((t) >> 29)

#define entry_get_value(_x)        ((_x).lo)
#define entry_get_pfn(_x)          \
    (((_x).lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
#define entry_get_paddr(_x)        (((_x).lo & (PADDR_MASK&PAGE_MASK)))
#define entry_get_flags(_x)        (get_pte_flags((_x).lo))

#define entry_empty()              ((pgentry_64_t) { 0 })
#define entry_from_pfn(pfn, flags) \
    ((pgentry_64_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define entry_add_flags(x, flags)    ((x).lo |= put_pte_flags(flags))
#define entry_remove_flags(x, flags) ((x).lo &= ~put_pte_flags(flags))
#define entry_has_changed(x,y,flags) \
    ( !!(((x).lo ^ (y).lo) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )
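
/*
 * Example (illustrative sketch, not part of the original header): a
 * pgentry_64_t is an opaque page-table entry usable at any level, and the
 * entry_*() helpers build and inspect it.  Given some machine frame
 * number <mfn> supplied by the caller:
 *
 *     pgentry_64_t e = entry_from_pfn(mfn, __PAGE_HYPERVISOR);
 *     if ( entry_get_flags(e) & _PAGE_PRESENT )
 *         entry_remove_flags(e, _PAGE_RW);      // make the mapping read-only
 *     ASSERT(entry_get_pfn(e) == mfn);
 */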
#define PAE_SHADOW_SELF_ENTRY   259
#define PDP_ENTRIES             4

/*
 * Return the page-table index of <va> at the given paging level (for a
 * GUEST_PGENTRY_32 build, level 4 returns the PAE self-mapping slot), or
 * -1 for an unsupported level.
 */
static inline int table_offset_64(unsigned long va, int level)
{
    switch(level) {
        case 1:
            return (((va) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1));
        case 2:
            return (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
        case 3:
            return (((va) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1));
#if CONFIG_PAGING_LEVELS >= 4
#ifndef GUEST_PGENTRY_32
        case 4:
            return (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
#else
        case 4:
            return PAE_SHADOW_SELF_ENTRY;
#endif
#endif
        default:
            //printk("<table_offset_64> level %d is too big\n", level);
            return -1;
    }
}
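
/*
 * Example (illustrative sketch, not part of the original header): with
 * 4KB pages and 512-entry tables, each level above the 12-bit page offset
 * contributes 9 index bits of the virtual address:
 *
 *     unsigned long va = 0xffff8300bffdf000UL;   // arbitrary example VA
 *     int l1 = table_offset_64(va, 1);   // bits 20:12 of va
 *     int l2 = table_offset_64(va, 2);   // bits 29:21
 *     int l3 = table_offset_64(va, 3);   // bits 38:30
 *     int l4 = table_offset_64(va, 4);   // bits 47:39 (non-GUEST_PGENTRY_32)
 */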
static inline void free_out_of_sync_state(struct domain *d)
{
    struct out_of_sync_entry *entry;

    // NB: Be careful not to call something that manipulates this list
    //     while walking it.  Remove one item at a time, and always
    //     restart from start of list.
    //
    while ( (entry = d->arch.out_of_sync) )
    {
        d->arch.out_of_sync = entry->next;
        release_out_of_sync_entry(d, entry);

        entry->next = d->arch.out_of_sync_free;
        d->arch.out_of_sync_free = entry;
    }
}

/*
 * Walk from the root table (shadow or guest, selected by <flag>) down to
 * the paging level encoded in the low bits of <flag>, and return a pointer
 * to the entry for <va> at that level, or NULL if an intermediate entry is
 * not present.
 */
static inline pgentry_64_t *__entry(
    struct vcpu *v, u64 va, u32 flag)
{
    int i;
    pgentry_64_t *le_e;
    pgentry_64_t *le_p;
    unsigned long mfn;
    int index;
    u32 level = flag & L_MASK;
    struct domain *d = v->domain;

    index = table_offset_64(va, ROOT_LEVEL_64);
    if (flag & SHADOW_ENTRY)
        le_e = (pgentry_64_t *)&v->arch.shadow_vtable[index];
    else
        le_e = (pgentry_64_t *)&v->arch.guest_vtable[index];

    /*
     * If it's not external mode, then mfn should be machine physical.
     */
    for (i = ROOT_LEVEL_64 - level; i > 0; i--) {
        if (unlikely(!(entry_get_flags(*le_e) & _PAGE_PRESENT)))
            return NULL;
        mfn = entry_get_value(*le_e) >> PAGE_SHIFT;
        if ((flag & GUEST_ENTRY) && shadow_mode_translate(d))
            mfn = get_mfn_from_pfn(mfn);
        le_p = (pgentry_64_t *)phys_to_virt(mfn << PAGE_SHIFT);
        index = table_offset_64(va, (level + i - 1));
        le_e = &le_p[index];
    }

    return le_e;
}
/*
 * Locate the entry for <va> via __entry() and, if e_p is non-NULL, copy
 * *e_p into the table (SET_ENTRY) or the table entry into *e_p (GET_ENTRY).
 * Returns the entry pointer, or NULL if the walk failed.
 */
static inline pgentry_64_t *__rw_entry(
    struct vcpu *ed, u64 va, void *e_p, u32 flag)
{
    pgentry_64_t *le_e = __entry(ed, va, flag);
    pgentry_64_t *e = (pgentry_64_t *)e_p;
    if (le_e == NULL)
        return NULL;

    if (e) {
        if (flag & SET_ENTRY)
            *le_e = *e;
        else
            *e = *le_e;
    }
    return le_e;
}
#define __shadow_set_l4e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L4)
#define __shadow_get_l4e(v, va, sl4e) \
  __rw_entry(v, va, sl4e, SHADOW_ENTRY | GET_ENTRY | PAGING_L4)
#define __shadow_set_l3e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L3)
#define __shadow_get_l3e(v, va, sl3e) \
  __rw_entry(v, va, sl3e, SHADOW_ENTRY | GET_ENTRY | PAGING_L3)
#define __shadow_set_l2e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L2)
#define __shadow_get_l2e(v, va, sl2e) \
  __rw_entry(v, va, sl2e, SHADOW_ENTRY | GET_ENTRY | PAGING_L2)
#define __shadow_set_l1e(v, va, value) \
  __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L1)
#define __shadow_get_l1e(v, va, sl1e) \
  __rw_entry(v, va, sl1e, SHADOW_ENTRY | GET_ENTRY | PAGING_L1)

#define __guest_set_l4e(v, va, value) \
  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L4)
#define __guest_get_l4e(v, va, gl4e) \
  __rw_entry(v, va, gl4e, GUEST_ENTRY | GET_ENTRY | PAGING_L4)
#define __guest_set_l3e(v, va, value) \
  __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L3)
#define __guest_get_l3e(v, va, gl3e) \
  __rw_entry(v, va, gl3e, GUEST_ENTRY | GET_ENTRY | PAGING_L3)
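
/*
 * Example (illustrative sketch, not part of the original header): these
 * wrappers read or write one shadow or guest table entry for <va>.
 * Checking whether a shadow L2 entry already exists might look like this:
 *
 *     pgentry_64_t sl2e;
 *     if ( !__shadow_get_l2e(v, va, &sl2e) ||
 *          !(entry_get_flags(sl2e) & _PAGE_PRESENT) )
 *     {
 *         // no shadow L2 entry for this va yet; build and install one
 *     }
 */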
static inline void *__guest_set_l2e(
    struct vcpu *v, u64 va, void *value, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t *l2va;

                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
                if (value)
                    l2va[l2_table_offset_32(va)] = *(l2_pgentry_32_t *)value;
                return &l2va[l2_table_offset_32(va)];
            }
        case 8:
            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L2);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_set_l2e(v, va, value) \
  ( __typeof__(value) )__guest_set_l2e(v, (u64)va, value, sizeof(*value))

static inline void *__guest_get_l2e(
    struct vcpu *v, u64 va, void *gl2e, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t *l2va;

                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
                if (gl2e)
                    *(l2_pgentry_32_t *)gl2e = l2va[l2_table_offset_32(va)];
                return &l2va[l2_table_offset_32(va)];
            }
        case 8:
            return __rw_entry(v, va, gl2e, GUEST_ENTRY | GET_ENTRY | PAGING_L2);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_get_l2e(v, va, gl2e) \
  ( __typeof__(gl2e) )__guest_get_l2e(v, (u64)va, gl2e, sizeof(*gl2e))
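
/*
 * Example (illustrative sketch, not part of the original header): the
 * macro wrapper above dispatches on the size of the entry the caller
 * passes in, so one call site serves both guest page-table formats:
 *
 *     // for a 32-bit guest vcpu:
 *     l2_pgentry_32_t gl2e32;
 *     __guest_get_l2e(v, va, &gl2e32);   // sizeof(*gl2e) == 4
 *
 *     // for a 64-bit guest vcpu:
 *     pgentry_64_t gl2e64;
 *     __guest_get_l2e(v, va, &gl2e64);   // sizeof(*gl2e) == 8
 */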
static inline void *__guest_set_l1e(
    struct vcpu *v, u64 va, void *value, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t gl2e;
                l1_pgentry_32_t *l1va;
                unsigned long l1mfn;

                if (!__guest_get_l2e(v, va, &gl2e))
                    return NULL;
                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                    return NULL;

                l1mfn = get_mfn_from_pfn(l2e_get_pfn(gl2e));

                l1va = (l1_pgentry_32_t *)
                    phys_to_virt(l1mfn << L1_PAGETABLE_SHIFT);
                if (value)
                    l1va[l1_table_offset_32(va)] = *(l1_pgentry_32_t *)value;

                return &l1va[l1_table_offset_32(va)];
            }
        case 8:
            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L1);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_set_l1e(v, va, value) \
  ( __typeof__(value) )__guest_set_l1e(v, (u64)va, value, sizeof(*value))

static inline void *__guest_get_l1e(
    struct vcpu *v, u64 va, void *gl1e, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t gl2e;
                l1_pgentry_32_t *l1va;
                unsigned long l1mfn;

                if (!(__guest_get_l2e(v, va, &gl2e)))
                    return NULL;

                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                    return NULL;

                l1mfn = get_mfn_from_pfn(l2e_get_pfn(gl2e));

                l1va = (l1_pgentry_32_t *)
                    phys_to_virt(l1mfn << L1_PAGETABLE_SHIFT);
                if (gl1e)
                    *(l1_pgentry_32_t *)gl1e = l1va[l1_table_offset_32(va)];

                return &l1va[l1_table_offset_32(va)];
            }
        case 8:
            // 64-bit guest
            return __rw_entry(v, va, gl1e, GUEST_ENTRY | GET_ENTRY | PAGING_L1);
        default:
            BUG();
            return NULL;
    }
    return NULL;
}

#define __guest_get_l1e(v, va, gl1e) \
  ( __typeof__(gl1e) )__guest_get_l1e(v, (u64)va, gl1e, sizeof(*gl1e))
/*
 * Build the shadow entry *sle_p corresponding to guest entry *gle_p,
 * pointing it at shadow page <smfn>.  A PSE guest L2 entry is shadowed by
 * an fl1 table, whose entries are write-protected when log-dirty mode is
 * enabled or the guest page is not yet dirty.
 */
static inline void entry_general(
    struct domain *d,
    pgentry_64_t *gle_p,
    pgentry_64_t *sle_p,
    unsigned long smfn, u32 level)
{
    pgentry_64_t gle = *gle_p;
    pgentry_64_t sle;

    sle = entry_empty();
    if ( (entry_get_flags(gle) & _PAGE_PRESENT) && (smfn != 0) )
    {
        if ((entry_get_flags(gle) & _PAGE_PSE) && level == PAGING_L2) {
            sle = entry_from_pfn(smfn, entry_get_flags(gle));
            entry_remove_flags(sle, _PAGE_PSE);

            if ( shadow_mode_log_dirty(d) ||
                 !(entry_get_flags(gle) & _PAGE_DIRTY) )
            {
                pgentry_64_t *l1_p;
                int i;

                l1_p = (pgentry_64_t *)map_domain_page(smfn);
                for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
                    entry_remove_flags(l1_p[i], _PAGE_RW);

                unmap_domain_page(l1_p);
            }
        } else {
            sle = entry_from_pfn(
                smfn,
                (entry_get_flags(gle) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL);
            entry_add_flags(gle, _PAGE_ACCESSED);
        }
        // XXX mafetter: Hmm...
        //     Shouldn't the dirty log be checked/updated here?
        //     Actually, it needs to be done in this function's callers.
        //
        *gle_p = gle;
    }

    if ( entry_get_value(sle) || entry_get_value(gle) )
        SH_VVLOG("%s: gpde=%lx, new spde=%lx", __func__,
                 entry_get_value(gle), entry_get_value(sle));

    *sle_p = sle;
}
/*
 * Look up the shadow page for the guest entry (an fl1 shadow for a PSE L2
 * entry, otherwise the shadow of the next-lower level) and propagate the
 * guest entry into *sle_p via entry_general().
 */
static inline void entry_propagate_from_guest(
    struct domain *d, pgentry_64_t *gle_p, pgentry_64_t *sle_p, u32 level)
{
    pgentry_64_t gle = *gle_p;
    unsigned long smfn = 0;

    if ( entry_get_flags(gle) & _PAGE_PRESENT ) {
        if ((entry_get_flags(gle) & _PAGE_PSE) && level == PAGING_L2) {
            smfn = __shadow_status(d, entry_get_value(gle) >> PAGE_SHIFT,
                                   PGT_fl1_shadow);
        } else {
            smfn = __shadow_status(d, entry_get_pfn(gle),
                                   shadow_level_to_type(level - 1));
        }
    }

    entry_general(d, gle_p, sle_p, smfn, level);
}
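
/*
 * Example (illustrative sketch, not part of the original header): a caller
 * that has just fetched a guest L3 entry for a faulting address could
 * derive and install the matching shadow entry like this (the guest entry
 * may come back with _PAGE_ACCESSED set, to be written back by the caller):
 *
 *     pgentry_64_t gl3e, sl3e;
 *     __guest_get_l3e(v, va, &gl3e);
 *     entry_propagate_from_guest(v->domain, &gl3e, &sl3e, PAGING_L3);
 *     __shadow_set_l3e(v, va, &sl3e);
 */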
/*
 * The guest has changed the entry at *new_gle_p: recompute the shadow
 * entry, adjust shadow refcounts if its _PAGE_PRESENT state changed, and
 * store the result in *shadow_le_p.
 */
static inline int
validate_entry_change(
    struct domain *d,
    pgentry_64_t *new_gle_p,
    pgentry_64_t *shadow_le_p,
    u32 level)
{
    pgentry_64_t old_sle, new_sle;
    pgentry_64_t new_gle = *new_gle_p;

    old_sle = *shadow_le_p;
    entry_propagate_from_guest(d, &new_gle, &new_sle, level);

    ESH_LOG("old_sle: %lx, new_gle: %lx, new_sle: %lx\n",
            entry_get_value(old_sle), entry_get_value(new_gle),
            entry_get_value(new_sle));

    if ( ((entry_get_value(old_sle) | entry_get_value(new_sle)) & _PAGE_PRESENT) &&
         entry_has_changed(old_sle, new_sle, _PAGE_PRESENT) )
    {
        perfc_incrc(validate_entry_changes);

        if ( (entry_get_flags(new_sle) & _PAGE_PRESENT) &&
             !get_shadow_ref(entry_get_pfn(new_sle)) )
            BUG();
        if ( entry_get_flags(old_sle) & _PAGE_PRESENT )
            put_shadow_ref(entry_get_pfn(old_sle));
    }

    *shadow_le_p = new_sle;

    return 1;
}
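
/*
 * Example (illustrative sketch, not part of the original header): when a
 * guest write to one of its page-table pages is trapped, the shadow copy
 * of the modified entry can be brought back in sync with something like:
 *
 *     pgentry_64_t new_gle;              // the value the guest just wrote
 *     pgentry_64_t *sle_p = __entry(v, va, SHADOW_ENTRY | PAGING_L3);
 *
 *     if ( sle_p != NULL )
 *         validate_entry_change(v->domain, &new_gle, sle_p, PAGING_L3);
 */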

#endif