Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/mm/shadow/types.h
Source (every instrumented line in this file has an execution count of 0)
/******************************************************************************
 * arch/x86/mm/shadow/types.h
 *
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _XEN_SHADOW_TYPES_H
#define _XEN_SHADOW_TYPES_H

/* The number of levels in the shadow pagetable is entirely determined
 * by the number of levels in the guest pagetable */
#if GUEST_PAGING_LEVELS == 4
#define SHADOW_PAGING_LEVELS 4
#else
#define SHADOW_PAGING_LEVELS 3
#endif

/*
 * Define various types for handling pagetables, based on these options:
 * SHADOW_PAGING_LEVELS : Number of levels of shadow pagetables
 * GUEST_PAGING_LEVELS  : Number of levels of guest pagetables
 */

#if SHADOW_PAGING_LEVELS == 3
#define SHADOW_L1_PAGETABLE_ENTRIES     512
#define SHADOW_L2_PAGETABLE_ENTRIES     512
#define SHADOW_L3_PAGETABLE_ENTRIES       4
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        21
#define SHADOW_L3_PAGETABLE_SHIFT        30
#else /* SHADOW_PAGING_LEVELS == 4 */
#define SHADOW_L1_PAGETABLE_ENTRIES     512
#define SHADOW_L2_PAGETABLE_ENTRIES     512
#define SHADOW_L3_PAGETABLE_ENTRIES     512
#define SHADOW_L4_PAGETABLE_ENTRIES     512
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        21
#define SHADOW_L3_PAGETABLE_SHIFT        30
#define SHADOW_L4_PAGETABLE_SHIFT        39
#endif
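
/* Illustrative sketch (not part of the original file): how these constants
 * decompose a virtual address.  Each level-N entry covers
 * 2^SHADOW_LN_PAGETABLE_SHIFT bytes, and masking with (ENTRIES - 1) selects
 * the index within one table.  The function name is hypothetical. */
static inline unsigned int example_shadow_l2_index(unsigned long va)
{
    return (va >> SHADOW_L2_PAGETABLE_SHIFT) &
           (SHADOW_L2_PAGETABLE_ENTRIES - 1);
}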

/* Types of the shadow page tables */
typedef l1_pgentry_t shadow_l1e_t;
typedef l2_pgentry_t shadow_l2e_t;
typedef l3_pgentry_t shadow_l3e_t;
#if SHADOW_PAGING_LEVELS >= 4
typedef l4_pgentry_t shadow_l4e_t;
#endif

/* Access functions for them */
static inline paddr_t shadow_l1e_get_paddr(shadow_l1e_t sl1e)
{ return l1e_get_paddr(sl1e); }
static inline paddr_t shadow_l2e_get_paddr(shadow_l2e_t sl2e)
{ return l2e_get_paddr(sl2e); }
static inline paddr_t shadow_l3e_get_paddr(shadow_l3e_t sl3e)
{ return l3e_get_paddr(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline paddr_t shadow_l4e_get_paddr(shadow_l4e_t sl4e)
{ return l4e_get_paddr(sl4e); }
#endif

static inline mfn_t shadow_l1e_get_mfn(shadow_l1e_t sl1e)
{ return l1e_get_mfn(sl1e); }
static inline mfn_t shadow_l2e_get_mfn(shadow_l2e_t sl2e)
{ return l2e_get_mfn(sl2e); }
static inline mfn_t shadow_l3e_get_mfn(shadow_l3e_t sl3e)
{ return l3e_get_mfn(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline mfn_t shadow_l4e_get_mfn(shadow_l4e_t sl4e)
{ return l4e_get_mfn(sl4e); }
#endif

static inline u32 shadow_l1e_get_flags(shadow_l1e_t sl1e)
{ return l1e_get_flags(sl1e); }
static inline u32 shadow_l2e_get_flags(shadow_l2e_t sl2e)
{ return l2e_get_flags(sl2e); }
static inline u32 shadow_l3e_get_flags(shadow_l3e_t sl3e)
{ return l3e_get_flags(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline u32 shadow_l4e_get_flags(shadow_l4e_t sl4e)
{ return l4e_get_flags(sl4e); }
#endif

static inline shadow_l1e_t
shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
{ l1e_remove_flags(sl1e, flags); return sl1e; }
static inline shadow_l1e_t
shadow_l1e_flip_flags(shadow_l1e_t sl1e, u32 flags)
{ l1e_flip_flags(sl1e, flags); return sl1e; }

static inline shadow_l1e_t shadow_l1e_empty(void)
{ return l1e_empty(); }
static inline shadow_l2e_t shadow_l2e_empty(void)
{ return l2e_empty(); }
static inline shadow_l3e_t shadow_l3e_empty(void)
{ return l3e_empty(); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_empty(void)
{ return l4e_empty(); }
#endif

static inline shadow_l1e_t shadow_l1e_from_mfn(mfn_t mfn, u32 flags)
{ return l1e_from_mfn(mfn, flags); }
static inline shadow_l2e_t shadow_l2e_from_mfn(mfn_t mfn, u32 flags)
{ return l2e_from_mfn(mfn, flags); }
static inline shadow_l3e_t shadow_l3e_from_mfn(mfn_t mfn, u32 flags)
{ return l3e_from_mfn(mfn, flags); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
{ return l4e_from_mfn(mfn, flags); }
#endif

#define shadow_l1_table_offset(a) l1_table_offset(a)
#define shadow_l2_table_offset(a) l2_table_offset(a)
#define shadow_l3_table_offset(a) l3_table_offset(a)
#define shadow_l4_table_offset(a) l4_table_offset(a)

/**************************************************************************/
/* Access to the linear mapping of shadow page tables. */

/* Offsets into each level of the linear mapping for a virtual address. */
#define shadow_l1_linear_offset(_a)                                           \
        (((_a) & VADDR_MASK) >> SHADOW_L1_PAGETABLE_SHIFT)
#define shadow_l2_linear_offset(_a)                                           \
        (((_a) & VADDR_MASK) >> SHADOW_L2_PAGETABLE_SHIFT)
#define shadow_l3_linear_offset(_a)                                           \
        (((_a) & VADDR_MASK) >> SHADOW_L3_PAGETABLE_SHIFT)
#define shadow_l4_linear_offset(_a)                                           \
        (((_a) & VADDR_MASK) >> SHADOW_L4_PAGETABLE_SHIFT)

/* Where to find each level of the linear mapping.  For PV guests, we use
 * the shadow linear-map self-entry as many times as we need.  For HVM
 * guests, the shadow doesn't have a linear-map self-entry so we must use
 * the monitor-table's linear-map entry N-1 times and then the shadow-map
 * entry once. */
#define __sh_linear_l1_table ((shadow_l1e_t *)(SH_LINEAR_PT_VIRT_START))
#define __sh_linear_l2_table ((shadow_l2e_t *)                               \
    (__sh_linear_l1_table + shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)))

// shadow linear L3 and L4 tables only exist in 4 level paging...
#if SHADOW_PAGING_LEVELS == 4
#define __sh_linear_l3_table ((shadow_l3e_t *)                               \
    (__sh_linear_l2_table + shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)))
#define __sh_linear_l4_table ((shadow_l4e_t *)                               \
    (__sh_linear_l3_table + shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)))
#endif

#define sh_linear_l1_table(v) ({ \
    ASSERT(current == (v)); \
    __sh_linear_l1_table; \
})

// XXX -- these should not be conditional on is_hvm_vcpu(v), but rather on
//        shadow_mode_external(d)...
//
#define sh_linear_l2_table(v) ({ \
    ASSERT(current == (v)); \
    ((shadow_l2e_t *) \
     (is_hvm_vcpu(v) ? __linear_l1_table : __sh_linear_l1_table) + \
     shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})

#if SHADOW_PAGING_LEVELS >= 4
#define sh_linear_l3_table(v) ({ \
    ASSERT(current == (v)); \
    ((shadow_l3e_t *) \
     (is_hvm_vcpu(v) ? __linear_l2_table : __sh_linear_l2_table) + \
      shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})

// we use l4_pgentry_t instead of shadow_l4e_t below because shadow_l4e_t is
// not defined for when xen_levels==4 & shadow_levels==3...
#define sh_linear_l4_table(v) ({ \
    ASSERT(current == (v)); \
    ((l4_pgentry_t *) \
     (is_hvm_vcpu(v) ? __linear_l3_table : __sh_linear_l3_table) + \
      shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})
#endif
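
/* Illustrative sketch (not part of the original file): reading the shadow
 * L1 entry that maps a given virtual address through the linear mapping.
 * Mirrors sh_linear_l1_table(v): valid only for 'current', with that
 * vcpu's shadow (or monitor) tables loaded.  The name is hypothetical. */
static inline shadow_l1e_t example_read_sl1e(unsigned long va)
{
    return __sh_linear_l1_table[shadow_l1_linear_offset(va)];
}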

/* Override get_gfn_query to work with gfn_t */
#undef get_gfn_query
#define get_gfn_query(d, g, t) get_gfn_type((d), gfn_x(g), (t), 0)

/* The shadow types needed for the various levels. */

#if GUEST_PAGING_LEVELS == 2
#define SH_type_l1_shadow  SH_type_l1_32_shadow
#define SH_type_l2_shadow  SH_type_l2_32_shadow
#define SH_type_fl1_shadow SH_type_fl1_32_shadow
#elif GUEST_PAGING_LEVELS == 3
#define SH_type_l1_shadow  SH_type_l1_pae_shadow
#define SH_type_fl1_shadow SH_type_fl1_pae_shadow
#define SH_type_l2_shadow  SH_type_l2_pae_shadow
#define SH_type_l2h_shadow SH_type_l2h_pae_shadow
#else
#define SH_type_l1_shadow  SH_type_l1_64_shadow
#define SH_type_fl1_shadow SH_type_fl1_64_shadow
#define SH_type_l2_shadow  SH_type_l2_64_shadow
#define SH_type_l2h_shadow SH_type_l2h_64_shadow
#define SH_type_l3_shadow  SH_type_l3_64_shadow
#define SH_type_l4_shadow  SH_type_l4_64_shadow
#endif

/* Macro for generating the per-level internal names of the shadow code's
 * external entry points.
 */
#define INTERNAL_NAME(name) SHADOW_INTERNAL_NAME(name, GUEST_PAGING_LEVELS)
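
/* For example, with GUEST_PAGING_LEVELS == 4, INTERNAL_NAME(sh_page_fault)
 * expands (via the token-pasting SHADOW_INTERNAL_NAME, which lives in
 * private.h, not this file) to a per-level symbol along the lines of
 * sh_page_fault__guest_4, so each compiled instance of the function shows
 * up as a distinct symbol. */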

/* Macros for renaming the primary entry points, so that they are more
 * easily distinguished in a debugger.
 */
#define sh_page_fault              INTERNAL_NAME(sh_page_fault)
#define sh_invlpg                  INTERNAL_NAME(sh_invlpg)
#define sh_gva_to_gfn              INTERNAL_NAME(sh_gva_to_gfn)
#define sh_update_cr3              INTERNAL_NAME(sh_update_cr3)
#define sh_rm_write_access_from_l1 INTERNAL_NAME(sh_rm_write_access_from_l1)
#define sh_rm_mappings_from_l1     INTERNAL_NAME(sh_rm_mappings_from_l1)
#define sh_remove_l1_shadow        INTERNAL_NAME(sh_remove_l1_shadow)
#define sh_remove_l2_shadow        INTERNAL_NAME(sh_remove_l2_shadow)
#define sh_remove_l3_shadow        INTERNAL_NAME(sh_remove_l3_shadow)
#define sh_map_and_validate_gl4e   INTERNAL_NAME(sh_map_and_validate_gl4e)
#define sh_map_and_validate_gl3e   INTERNAL_NAME(sh_map_and_validate_gl3e)
#define sh_map_and_validate_gl2e   INTERNAL_NAME(sh_map_and_validate_gl2e)
#define sh_map_and_validate_gl2he  INTERNAL_NAME(sh_map_and_validate_gl2he)
#define sh_map_and_validate_gl1e   INTERNAL_NAME(sh_map_and_validate_gl1e)
#define sh_destroy_l4_shadow       INTERNAL_NAME(sh_destroy_l4_shadow)
#define sh_destroy_l3_shadow       INTERNAL_NAME(sh_destroy_l3_shadow)
#define sh_destroy_l2_shadow       INTERNAL_NAME(sh_destroy_l2_shadow)
#define sh_destroy_l1_shadow       INTERNAL_NAME(sh_destroy_l1_shadow)
#define sh_unhook_32b_mappings     INTERNAL_NAME(sh_unhook_32b_mappings)
#define sh_unhook_pae_mappings     INTERNAL_NAME(sh_unhook_pae_mappings)
#define sh_unhook_64b_mappings     INTERNAL_NAME(sh_unhook_64b_mappings)
#define sh_paging_mode             INTERNAL_NAME(sh_paging_mode)
#define sh_detach_old_tables       INTERNAL_NAME(sh_detach_old_tables)
#define sh_x86_emulate_write       INTERNAL_NAME(sh_x86_emulate_write)
#define sh_x86_emulate_cmpxchg     INTERNAL_NAME(sh_x86_emulate_cmpxchg)
#define sh_audit_l1_table          INTERNAL_NAME(sh_audit_l1_table)
#define sh_audit_fl1_table         INTERNAL_NAME(sh_audit_fl1_table)
#define sh_audit_l2_table          INTERNAL_NAME(sh_audit_l2_table)
#define sh_audit_l3_table          INTERNAL_NAME(sh_audit_l3_table)
#define sh_audit_l4_table          INTERNAL_NAME(sh_audit_l4_table)
#define sh_guess_wrmap             INTERNAL_NAME(sh_guess_wrmap)
#define sh_clear_shadow_entry      INTERNAL_NAME(sh_clear_shadow_entry)

#if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
#define sh_resync_l1               INTERNAL_NAME(sh_resync_l1)
#define sh_safe_not_to_sync        INTERNAL_NAME(sh_safe_not_to_sync)
#define sh_rm_write_access_from_sl1p INTERNAL_NAME(sh_rm_write_access_from_sl1p)
#endif

/* sh_make_monitor_table depends only on the number of shadow levels */
#define sh_make_monitor_table \
        SHADOW_SH_NAME(sh_make_monitor_table, SHADOW_PAGING_LEVELS)
#define sh_destroy_monitor_table \
        SHADOW_SH_NAME(sh_destroy_monitor_table, SHADOW_PAGING_LEVELS)

mfn_t sh_make_monitor_table(struct vcpu *v);
void sh_destroy_monitor_table(struct vcpu *v, mfn_t mmfn);

#if SHADOW_PAGING_LEVELS == 3
#define MFN_FITS_IN_HVM_CR3(_MFN) !(mfn_x(_MFN) >> 20)
#endif
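
/* Rationale (added note): a PAE CR3 only holds a 32-bit physical address,
 * so the top-level shadow for a 3-level-shadowed HVM guest must sit below
 * 4GB.  An MFN qualifies iff its frame number fits in 20 bits, since
 * 20 + PAGE_SHIFT (12) = 32. */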

#define SH_PRI_pte  PRIpte
#define SH_PRI_gpte PRI_gpte
#define SH_PRI_gfn  PRI_gfn


#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
/******************************************************************************
 * We implement a "fast path" for two special cases: faults that require
 * MMIO emulation, and faults where the guest PTE is not present.  We
 * record these as shadow l1 entries that have reserved bits set in
 * them, so we can spot them immediately in the fault handler and handle
 * them without needing to hold the paging lock or walk the guest
 * pagetables.
 *
 * This is only feasible for PAE and 64-bit Xen: 32-bit non-PAE PTEs don't
 * have reserved bits that we can use for this.
 */

#define SH_L1E_MAGIC 0xffffffff00000001ULL
static inline int sh_l1e_is_magic(shadow_l1e_t sl1e)
{
    return ((sl1e.l1 & SH_L1E_MAGIC) == SH_L1E_MAGIC);
}

/* Guest not present: a single magic value */
static inline shadow_l1e_t sh_l1e_gnp(void)
{
    return (shadow_l1e_t){ -1ULL };
}

static inline int sh_l1e_is_gnp(shadow_l1e_t sl1e)
{
    return (sl1e.l1 == sh_l1e_gnp().l1);
}
/* MMIO: an invalid PTE that contains the GFN of the equivalent guest l1e.
 * We store 28 bits of GFN in bits 4:31 of the entry.
 * The present bit is set, and the U/S and R/W bits are taken from the guest.
 * Bit 3 is always 0, to differentiate from gnp above.  */
#define SH_L1E_MMIO_MAGIC       0xffffffff00000001ULL
#define SH_L1E_MMIO_MAGIC_MASK  0xffffffff00000009ULL
#define SH_L1E_MMIO_GFN_MASK    0x00000000fffffff0ULL
#define SH_L1E_MMIO_GFN_SHIFT   4

static inline shadow_l1e_t sh_l1e_mmio(gfn_t gfn, u32 gflags)
{
    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC
                             | (gfn_x(gfn) << SH_L1E_MMIO_GFN_SHIFT)
                             | (gflags & (_PAGE_USER|_PAGE_RW))) };
}

static inline int sh_l1e_is_mmio(shadow_l1e_t sl1e)
{
    return ((sl1e.l1 & SH_L1E_MMIO_MAGIC_MASK) == SH_L1E_MMIO_MAGIC);
}

static inline gfn_t sh_l1e_mmio_get_gfn(shadow_l1e_t sl1e)
{
    return _gfn((sl1e.l1 & SH_L1E_MMIO_GFN_MASK) >> SH_L1E_MMIO_GFN_SHIFT);
}

static inline u32 sh_l1e_mmio_get_flags(shadow_l1e_t sl1e)
{
    return (u32)((sl1e.l1 & (_PAGE_USER|_PAGE_RW)));
}
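
/* Illustrative sketch (not part of the original file): a round-trip
 * through the magic MMIO encoding.  The gfn must fit in the 28-bit field
 * for the encode/decode to be lossless.  The name is hypothetical. */
static inline void example_mmio_roundtrip(void)
{
    shadow_l1e_t sl1e = sh_l1e_mmio(_gfn(0x1234), _PAGE_RW);

    ASSERT(sh_l1e_is_magic(sl1e));
    ASSERT(sh_l1e_is_mmio(sl1e));
    ASSERT(!sh_l1e_is_gnp(sl1e));  /* bit 3 clear distinguishes from gnp */
    ASSERT(gfn_x(sh_l1e_mmio_get_gfn(sl1e)) == 0x1234);
    ASSERT(sh_l1e_mmio_get_flags(sl1e) == _PAGE_RW);
}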

#else

#define sh_l1e_gnp() shadow_l1e_empty()
#define sh_l1e_mmio(_gfn, _flags) shadow_l1e_empty()
#define sh_l1e_is_magic(_e) (0)

#endif /* SHOPT_FAST_FAULT_PATH */


#endif /* _XEN_SHADOW_TYPES_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */