Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/mm/shadow/private.h
Note: every instrumented line in this header has an execution count of 0; the "Unexecuted instantiation" notes below mark inline functions never instantiated by common.c or multi.c.
/******************************************************************************
 * arch/x86/mm/shadow/private.h
 *
 * Shadow code that is private, and does not need to be multiply compiled.
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _XEN_SHADOW_PRIVATE_H
#define _XEN_SHADOW_PRIVATE_H

// In order to override the definition of mfn_to_page, we make sure page.h has
// been included...
#include <asm/page.h>
#include <xen/domain_page.h>
#include <asm/x86_emulate.h>
#include <asm/hvm/support.h>
#include <asm/atomic.h>

#include "../mm-locks.h"

/******************************************************************************
 * Levels of self-test and paranoia
 */

#define SHADOW_AUDIT_HASH           0x01  /* Check current hash bucket */
#define SHADOW_AUDIT_HASH_FULL      0x02  /* Check every hash bucket */
#define SHADOW_AUDIT_ENTRIES        0x04  /* Check this walk's shadows */
#define SHADOW_AUDIT_ENTRIES_FULL   0x08  /* Check every shadow */
#define SHADOW_AUDIT_ENTRIES_MFNS   0x10  /* Check gfn-mfn map in shadows */

#ifdef NDEBUG
#define SHADOW_AUDIT                   0
#define SHADOW_AUDIT_ENABLE            0
#else
#define SHADOW_AUDIT                0x15  /* Basic audit of all */
#define SHADOW_AUDIT_ENABLE         shadow_audit_enable
extern int shadow_audit_enable;
#endif
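/* For reference, the default of 0x15 decomposes as SHADOW_AUDIT_HASH (0x01)
 * | SHADOW_AUDIT_ENTRIES (0x04) | SHADOW_AUDIT_ENTRIES_MFNS (0x10), i.e. all
 * of the per-operation checks but neither of the _FULL sweeps. */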

/******************************************************************************
 * Levels of optimization
 */

#define SHOPT_WRITABLE_HEURISTIC  0x01  /* Guess at RW PTEs via linear maps */
#define SHOPT_EARLY_UNSHADOW      0x02  /* Unshadow l1s on fork or exit */
#define SHOPT_FAST_FAULT_PATH     0x04  /* Fast-path MMIO and not-present */
#define SHOPT_PREFETCH            0x08  /* Shadow multiple entries per fault */
#define SHOPT_LINUX_L3_TOPLEVEL   0x10  /* Pin l3es on early 64bit linux */
#define SHOPT_SKIP_VERIFY         0x20  /* Skip PTE v'fy when safe to do so */
#define SHOPT_VIRTUAL_TLB         0x40  /* Cache guest v->p translations */
#define SHOPT_FAST_EMULATION      0x80  /* Fast write emulation */
#define SHOPT_OUT_OF_SYNC        0x100  /* Allow guest writes to L1 PTs */

#define SHADOW_OPTIMIZATIONS     0x1ff


/******************************************************************************
 * Debug and error-message output
 */

#define SHADOW_PRINTK(_f, _a...)                                     \
    debugtrace_printk("sh: %s(): " _f, __func__, ##_a)
#define SHADOW_ERROR(_f, _a...)                                      \
    printk("sh error: %s(): " _f, __func__, ##_a)
#define SHADOW_DEBUG(flag, _f, _a...)                                \
    do {                                                             \
        if (SHADOW_DEBUG_ ## flag)                                   \
            debugtrace_printk("shdebug: %s(): " _f, __func__, ##_a); \
    } while (0)

// The flags for use with SHADOW_DEBUG:
#define SHADOW_DEBUG_PROPAGATE         1
#define SHADOW_DEBUG_MAKE_SHADOW       1
#define SHADOW_DEBUG_DESTROY_SHADOW    1
#define SHADOW_DEBUG_A_AND_D           1
#define SHADOW_DEBUG_EMULATE           1
#define SHADOW_DEBUG_P2M               1
#define SHADOW_DEBUG_LOGDIRTY          0
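/* Illustrative use (not part of this header): because the flag is tested
 * with an ordinary 'if', a call under a disabled flag compiles away while
 * still being syntax- and type-checked, e.g.:
 *
 *     SHADOW_DEBUG(LOGDIRTY, "gfn=%lx\n", gfn);   // no-op: flag is 0
 */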
/******************************************************************************
 * Tracing
 */
DECLARE_PER_CPU(uint32_t,trace_shadow_path_flags);

#define TRACE_SHADOW_PATH_FLAG(_x)                      \
    do {                                                \
        this_cpu(trace_shadow_path_flags) |= (1<<(_x)); \
    } while(0)

#define TRACE_CLEAR_PATH_FLAGS                  \
    this_cpu(trace_shadow_path_flags) = 0

enum {
    TRCE_SFLAG_SET_AD,
    TRCE_SFLAG_SET_A,
    TRCE_SFLAG_SHADOW_L1_GET_REF,
    TRCE_SFLAG_SHADOW_L1_PUT_REF,
    TRCE_SFLAG_L2_PROPAGATE,
    TRCE_SFLAG_SET_CHANGED,
    TRCE_SFLAG_SET_FLUSH,
    TRCE_SFLAG_SET_ERROR,
    TRCE_SFLAG_DEMOTE,
    TRCE_SFLAG_PROMOTE,
    TRCE_SFLAG_WRMAP,
    TRCE_SFLAG_WRMAP_GUESS_FOUND,
    TRCE_SFLAG_WRMAP_BRUTE_FORCE,
    TRCE_SFLAG_EARLY_UNSHADOW,
    TRCE_SFLAG_EMULATION_2ND_PT_WRITTEN,
    TRCE_SFLAG_EMULATION_LAST_FAILED,
    TRCE_SFLAG_EMULATE_FULL_PT,
    TRCE_SFLAG_PREALLOC_UNHOOK,
    TRCE_SFLAG_UNSYNC,
    TRCE_SFLAG_OOS_FIXUP_ADD,
    TRCE_SFLAG_OOS_FIXUP_EVICT,
};


/* Size (in bytes) of a guest PTE */
#if GUEST_PAGING_LEVELS >= 3
# define GUEST_PTE_SIZE 8
#else
# define GUEST_PTE_SIZE 4
#endif

/******************************************************************************
 * Auditing routines
 */

#if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_FULL
extern void shadow_audit_tables(struct vcpu *v);
#else
#define shadow_audit_tables(_v) do {} while(0)
#endif

/******************************************************************************
 * Macros for constructing the internal names of the shadow code's
 * external entry points.
 */
#define SHADOW_INTERNAL_NAME_(name, kind, value)        \
    name ## __ ## kind ## _ ## value
#define SHADOW_INTERNAL_NAME(name, guest_levels)        \
    SHADOW_INTERNAL_NAME_(name, guest, guest_levels)
#define SHADOW_SH_NAME(name, shadow_levels)             \
    SHADOW_INTERNAL_NAME_(name, sh, shadow_levels)
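/* Expansion example: SHADOW_INTERNAL_NAME(sh_page_fault, 3) pastes to
 * sh_page_fault__guest_3, so each per-GUEST_LEVELS compile of multi.c gets
 * its own symbol for the same entry point. */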

#define GUEST_LEVELS  2
#include "multi.h"
#undef GUEST_LEVELS

#define GUEST_LEVELS  3
#include "multi.h"
#undef GUEST_LEVELS

#define GUEST_LEVELS  4
#include "multi.h"
#undef GUEST_LEVELS

/* Shadow type codes */
#define SH_type_none           (0U) /* on the shadow free list */
#define SH_type_min_shadow     (1U)
#define SH_type_l1_32_shadow   (1U) /* shadowing a 32-bit L1 guest page */
#define SH_type_fl1_32_shadow  (2U) /* L1 shadow for a 32b 4M superpage */
#define SH_type_l2_32_shadow   (3U) /* shadowing a 32-bit L2 guest page */
#define SH_type_l1_pae_shadow  (4U) /* shadowing a pae L1 page */
#define SH_type_fl1_pae_shadow (5U) /* L1 shadow for pae 2M superpg */
#define SH_type_l2_pae_shadow  (6U) /* shadowing a pae L2-low page */
#define SH_type_l2h_pae_shadow (7U) /* shadowing a pae L2-high page */
#define SH_type_l1_64_shadow   (8U) /* shadowing a 64-bit L1 page */
#define SH_type_fl1_64_shadow  (9U) /* L1 shadow for 64-bit 2M superpg */
#define SH_type_l2_64_shadow  (10U) /* shadowing a 64-bit L2 page */
#define SH_type_l2h_64_shadow (11U) /* shadowing a compat PAE L2 high page */
#define SH_type_l3_64_shadow  (12U) /* shadowing a 64-bit L3 page */
#define SH_type_l4_64_shadow  (13U) /* shadowing a 64-bit L4 page */
#define SH_type_max_shadow    (13U)
#define SH_type_p2m_table     (14U) /* in use as the p2m table */
#define SH_type_monitor_table (15U) /* in use as a monitor table */
#define SH_type_oos_snapshot  (16U) /* in use as OOS snapshot */
#define SH_type_unused        (17U)

/*
 * What counts as a pinnable shadow?
 */

static inline int sh_type_is_pinnable(struct domain *d, unsigned int t)
{
    /* Top-level shadow types in each mode can be pinned, so that they
     * persist even when not currently in use in a guest CR3 */
    if ( t == SH_type_l2_32_shadow
         || t == SH_type_l2_pae_shadow
         || t == SH_type_l2h_pae_shadow
         || t == SH_type_l4_64_shadow )
        return 1;

#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
    /* Early 64-bit linux used three levels of pagetables for the guest
     * and context switched by changing one l4 entry in a per-cpu l4
     * page.  When we're shadowing those kernels, we have to pin l3
     * shadows so they don't just evaporate on every context switch.
     * For all other guests, we'd rather use the up-pointer field in l3s. */
    if ( unlikely((d->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL)
                  && t == SH_type_l3_64_shadow) )
        return 1;
#endif

    /* Everything else is not pinnable, and can use the "up" pointer */
    return 0;
}
Unexecuted instantiation: common.c:sh_type_is_pinnable
Unexecuted instantiation: multi.c:sh_type_is_pinnable

static inline int sh_type_has_up_pointer(struct domain *d, unsigned int t)
{
    /* Multi-page shadows don't have up-pointers */
    if ( t == SH_type_l1_32_shadow
         || t == SH_type_fl1_32_shadow
         || t == SH_type_l2_32_shadow )
        return 0;
    /* Pinnable shadows don't have up-pointers either */
    return !sh_type_is_pinnable(d, t);
}
Unexecuted instantiation: common.c:sh_type_has_up_pointer
Unexecuted instantiation: multi.c:sh_type_has_up_pointer

static inline void sh_terminate_list(struct page_list_head *tmp_list)
{
#ifndef PAGE_LIST_NULL
    /* The temporary list-head is on our stack.  Invalidate the
     * pointers to it in the shadows, just to get a clean failure if
     * we accidentally follow them. */
    tmp_list->prev->next = LIST_POISON1;
    tmp_list->next->prev = LIST_POISON2;
#endif
}
Unexecuted instantiation: multi.c:sh_terminate_list
Unexecuted instantiation: common.c:sh_terminate_list

/*
 * Definitions for the shadow_flags field in page_info.
 * These flags are stored on *guest* pages...
 * Bits 1-13 are encodings for the shadow types.
 */
#define SHF_page_type_mask \
    (((1u << (SH_type_max_shadow + 1u)) - 1u) - \
     ((1u << SH_type_min_shadow) - 1u))
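/* With SH_type_min_shadow == 1 and SH_type_max_shadow == 13 this evaluates
 * to ((1u << 14) - 1) - ((1u << 1) - 1) == 0x3ffe, i.e. bits 1-13 set. */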

#define SHF_L1_32   (1u << SH_type_l1_32_shadow)
#define SHF_FL1_32  (1u << SH_type_fl1_32_shadow)
#define SHF_L2_32   (1u << SH_type_l2_32_shadow)
#define SHF_L1_PAE  (1u << SH_type_l1_pae_shadow)
#define SHF_FL1_PAE (1u << SH_type_fl1_pae_shadow)
#define SHF_L2_PAE  (1u << SH_type_l2_pae_shadow)
#define SHF_L2H_PAE (1u << SH_type_l2h_pae_shadow)
#define SHF_L1_64   (1u << SH_type_l1_64_shadow)
#define SHF_FL1_64  (1u << SH_type_fl1_64_shadow)
#define SHF_L2_64   (1u << SH_type_l2_64_shadow)
#define SHF_L2H_64  (1u << SH_type_l2h_64_shadow)
#define SHF_L3_64   (1u << SH_type_l3_64_shadow)
#define SHF_L4_64   (1u << SH_type_l4_64_shadow)

#define SHF_32  (SHF_L1_32|SHF_FL1_32|SHF_L2_32)
#define SHF_PAE (SHF_L1_PAE|SHF_FL1_PAE|SHF_L2_PAE|SHF_L2H_PAE)
#define SHF_64  (SHF_L1_64|SHF_FL1_64|SHF_L2_64|SHF_L2H_64|SHF_L3_64|SHF_L4_64)

#define SHF_L1_ANY  (SHF_L1_32|SHF_L1_PAE|SHF_L1_64)

#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Marks a guest L1 page table which is shadowed but not write-protected.
 * If set, then *only* L1 shadows (SHF_L1_*) are allowed.
 *
 * out_of_sync indicates that the shadow tables may not reflect the
 * guest tables.  If it is clear, then the shadow tables *must* reflect
 * the guest tables.
 *
 * oos_may_write indicates that a page may have writable mappings.
 *
 * Most of the time the flags are synonymous.  There is a short period
 * during resync when oos_may_write is clear but out_of_sync is not.  If a
 * codepath is called during that time and is sensitive to oos issues, it may
 * need to use the second flag.
 */
#define SHF_out_of_sync (1u<<30)
#define SHF_oos_may_write (1u<<29)

#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */

#define SHF_pagetable_dying (1u<<31)

static inline int sh_page_has_multiple_shadows(struct page_info *pg)
{
    u32 shadows;
    if ( !(pg->count_info & PGC_page_table) )
        return 0;
    shadows = pg->shadow_flags & SHF_page_type_mask;
    /* More than one type bit set in shadow-flags? */
    return ( (shadows & ~(1UL << find_first_set_bit(shadows))) != 0 );
}
Unexecuted instantiation: common.c:sh_page_has_multiple_shadows
Unexecuted instantiation: multi.c:sh_page_has_multiple_shadows
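/* Reading of the test above: masking off the lowest set type bit must leave
 * something behind.  E.g. shadows == (SHF_L1_PAE|SHF_L2_PAE) leaves SHF_L2_PAE
 * (nonzero), while a single-bit value yields 0. */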

#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* The caller must verify this is reasonable to call; i.e., valid mfn,
 * domain is translated, &c */
static inline int page_is_out_of_sync(struct page_info *p)
{
    return (p->count_info & PGC_page_table)
        && (p->shadow_flags & SHF_out_of_sync);
}
Unexecuted instantiation: common.c:page_is_out_of_sync
Unexecuted instantiation: multi.c:page_is_out_of_sync

static inline int mfn_is_out_of_sync(mfn_t gmfn)
{
    return page_is_out_of_sync(mfn_to_page(mfn_x(gmfn)));
}
Unexecuted instantiation: multi.c:mfn_is_out_of_sync
Unexecuted instantiation: common.c:mfn_is_out_of_sync

static inline int page_oos_may_write(struct page_info *p)
{
    return (p->count_info & PGC_page_table)
        && (p->shadow_flags & SHF_oos_may_write);
}
Unexecuted instantiation: common.c:page_oos_may_write
Unexecuted instantiation: multi.c:page_oos_may_write

static inline int mfn_oos_may_write(mfn_t gmfn)
{
    return page_oos_may_write(mfn_to_page(mfn_x(gmfn)));
}
Unexecuted instantiation: multi.c:mfn_oos_may_write
Unexecuted instantiation: common.c:mfn_oos_may_write
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */

/* Figure out the size (in pages) of a given shadow type */
extern const u8 sh_type_to_size[SH_type_unused];
static inline unsigned int
shadow_size(unsigned int shadow_type)
{
    ASSERT(shadow_type < ARRAY_SIZE(sh_type_to_size));
    return sh_type_to_size[shadow_type];
}
Unexecuted instantiation: common.c:shadow_size
Unexecuted instantiation: multi.c:shadow_size

/******************************************************************************
 * Various function declarations
 */

/* Hash table functions */
mfn_t shadow_hash_lookup(struct domain *d, unsigned long n, unsigned int t);
void  shadow_hash_insert(struct domain *d,
                         unsigned long n, unsigned int t, mfn_t smfn);
void  shadow_hash_delete(struct domain *d,
                         unsigned long n, unsigned int t, mfn_t smfn);

/* shadow promotion */
void shadow_promote(struct domain *d, mfn_t gmfn, u32 type);
void shadow_demote(struct domain *d, mfn_t gmfn, u32 type);

/* Shadow page allocation functions */
void  shadow_prealloc(struct domain *d, u32 shadow_type, unsigned int count);
mfn_t shadow_alloc(struct domain *d,
                    u32 shadow_type,
                    unsigned long backpointer);
void  shadow_free(struct domain *d, mfn_t smfn);

/* Install the xen mappings in various flavours of shadow */
void sh_install_xen_entries_in_l4(struct domain *, mfn_t gl4mfn, mfn_t sl4mfn);

/* Update the shadows in response to a pagetable write from Xen */
int sh_validate_guest_entry(struct vcpu *v, mfn_t gmfn, void *entry, u32 size);

/* Update the shadows in response to a pagetable write from an HVM guest */
void sh_validate_guest_pt_write(struct vcpu *v, mfn_t gmfn,
                                void *entry, u32 size);

/* Remove all writeable mappings of a guest frame from the shadows.
 * Returns non-zero if we need to flush TLBs.
 * level and fault_addr describe how we found this to be a pagetable;
 * level==0 means we have some other reason for revoking write access. */
extern int sh_remove_write_access(struct domain *d, mfn_t readonly_mfn,
                                  unsigned int level,
                                  unsigned long fault_addr);

/* Functions that atomically write PT/P2M entries and update state */
void shadow_write_p2m_entry(struct domain *d, unsigned long gfn,
                            l1_pgentry_t *p, l1_pgentry_t new,
                            unsigned int level);

/* Update all the things that are derived from the guest's CR0/CR3/CR4.
 * Called to initialize paging structures if the paging mode
 * has changed, and when bringing up a VCPU for the first time. */
void shadow_update_paging_modes(struct vcpu *v);

/* Unhook the non-Xen mappings in this top-level shadow mfn.
 * With user_only == 1, unhooks only the user-mode mappings. */
void shadow_unhook_mappings(struct domain *d, mfn_t smfn, int user_only);

/* Returns a mapped pointer to write to, or one of the following error
 * indicators. */
#define MAPPING_UNHANDLEABLE ERR_PTR(~(long)X86EMUL_UNHANDLEABLE)
#define MAPPING_EXCEPTION    ERR_PTR(~(long)X86EMUL_EXCEPTION)
#define MAPPING_SILENT_FAIL  ERR_PTR(~(long)X86EMUL_OKAY)
void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
                          unsigned int bytes, struct sh_emulate_ctxt *sh_ctxt);
void sh_emulate_unmap_dest(struct vcpu *v, void *addr, unsigned int bytes,
                           struct sh_emulate_ctxt *sh_ctxt);
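/* A minimal caller sketch (modelled on the write-emulation path; the real
 * code in multi.c differs in detail).  The ERR_PTR() encoding above lets a
 * caller recover the X86EMUL_* code with ~PTR_ERR():
 *
 *     void *addr = sh_emulate_map_dest(v, vaddr, bytes, sh_ctxt);
 *     if ( IS_ERR(addr) )
 *         return ~PTR_ERR(addr);
 *     memcpy(addr, src, bytes);
 *     sh_emulate_unmap_dest(v, addr, bytes, sh_ctxt);
 *     return X86EMUL_OKAY;
 */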

#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Allow a shadowed page to go out of sync */
int sh_unsync(struct vcpu *v, mfn_t gmfn);

/* Pull an out-of-sync page back into sync. */
void sh_resync(struct domain *d, mfn_t gmfn);

void oos_fixup_add(struct domain *d, mfn_t gmfn, mfn_t smfn, unsigned long off);

int sh_remove_write_access_from_sl1p(struct domain *d, mfn_t gmfn,
                                     mfn_t smfn, unsigned long offset);

/* Pull all out-of-sync shadows back into sync.  If skip != 0, we try
 * to avoid resyncing where we think we can get away with it. */

void sh_resync_all(struct vcpu *v, int skip, int this, int others);

static inline void
shadow_resync_all(struct vcpu *v)
{
    sh_resync_all(v, 0 /* skip */, 1 /* this */, 1 /* others */);
}
Unexecuted instantiation: common.c:shadow_resync_all
Unexecuted instantiation: multi.c:shadow_resync_all

static inline void
shadow_resync_current_vcpu(struct vcpu *v)
{
    sh_resync_all(v, 0 /* skip */, 1 /* this */, 0 /* others */);
}
Unexecuted instantiation: multi.c:shadow_resync_current_vcpu
Unexecuted instantiation: common.c:shadow_resync_current_vcpu

static inline void
shadow_sync_other_vcpus(struct vcpu *v)
{
    sh_resync_all(v, 1 /* skip */, 0 /* this */, 1 /* others */);
}
Unexecuted instantiation: common.c:shadow_sync_other_vcpus
Unexecuted instantiation: multi.c:shadow_sync_other_vcpus

void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn);
mfn_t oos_snapshot_lookup(struct domain *d, mfn_t gmfn);

#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */


/* Reset the up-pointers of every L3 shadow to 0.
 * This is called when l3 shadows stop being pinnable, to clear out all
 * the list-head bits so the up-pointer field is properly initialised. */
void sh_reset_l3_up_pointers(struct vcpu *v);

/******************************************************************************
 * Flags used in the return value of the shadow_set_lXe() functions...
 */

/* We actually wrote something new to the shadow */
#define SHADOW_SET_CHANGED            0x1
/* Caller should flush TLBs to clear the old entry */
#define SHADOW_SET_FLUSH              0x2
/* Something went wrong: the shadow entry was invalid or refcount failed */
#define SHADOW_SET_ERROR              0x4


/******************************************************************************
 * MFN/page-info handling
 */

/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))

/* Override pagetable_t <-> struct page_info conversions to work with mfn_t */
#undef pagetable_get_page
#define pagetable_get_page(x)   mfn_to_page(pagetable_get_mfn(x))
#undef pagetable_from_page
#define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg))

#define backpointer(sp) _mfn(pdx_to_pfn((unsigned long)(sp)->v.sh.back))
static inline unsigned long __backpointer(const struct page_info *sp)
{
    switch (sp->u.sh.type)
    {
    case SH_type_fl1_32_shadow:
    case SH_type_fl1_pae_shadow:
    case SH_type_fl1_64_shadow:
        return sp->v.sh.back;
    }
    return pdx_to_pfn(sp->v.sh.back);
}
Unexecuted instantiation: common.c:__backpointer
Unexecuted instantiation: multi.c:__backpointer

static inline int
sh_mfn_is_a_page_table(mfn_t gmfn)
{
    struct page_info *page = mfn_to_page(gmfn);
    struct domain *owner;
    unsigned long type_info;

    if ( !mfn_valid(gmfn) )
        return 0;

    owner = page_get_owner(page);
    if ( owner && shadow_mode_refcounts(owner)
         && (page->count_info & PGC_page_table) )
        return 1;

    type_info = page->u.inuse.type_info & PGT_type_mask;
    return type_info && (type_info <= PGT_l4_page_table);
}
Unexecuted instantiation: multi.c:sh_mfn_is_a_page_table
Unexecuted instantiation: common.c:sh_mfn_is_a_page_table

/**************************************************************************/
/* Shadow-page refcounting. */

void sh_destroy_shadow(struct domain *d, mfn_t smfn);

/* Increase the refcount of a shadow page.  Arguments are the mfn to refcount,
 * and the physical address of the shadow entry that holds the ref (or zero
 * if the ref is held by something else).
 * Returns 0 for failure, 1 for success. */
static inline int sh_get_ref(struct domain *d, mfn_t smfn, paddr_t entry_pa)
{
    u32 x, nx;
    struct page_info *sp = mfn_to_page(smfn);

    ASSERT(mfn_valid(smfn));
    ASSERT(sp->u.sh.head);

    x = sp->u.sh.count;
    nx = x + 1;

    if ( unlikely(nx >= 1U<<26) )
    {
        SHADOW_PRINTK("shadow ref overflow, gmfn=%lx smfn=%lx\n",
                       __backpointer(sp), mfn_x(smfn));
        return 0;
    }

    /* Guarded by the paging lock, so no need for atomic update */
    sp->u.sh.count = nx;

    /* We remember the first shadow entry that points to each shadow. */
    if ( entry_pa != 0
         && sh_type_has_up_pointer(d, sp->u.sh.type)
         && sp->up == 0 )
        sp->up = entry_pa;

    return 1;
}
Unexecuted instantiation: common.c:sh_get_ref
Unexecuted instantiation: multi.c:sh_get_ref


/* Decrease the refcount of a shadow page.  As for get_ref, takes the
 * physical address of the shadow entry that held this reference. */
static inline void sh_put_ref(struct domain *d, mfn_t smfn, paddr_t entry_pa)
{
    u32 x, nx;
    struct page_info *sp = mfn_to_page(smfn);

    ASSERT(mfn_valid(smfn));
    ASSERT(sp->u.sh.head);
    ASSERT(!(sp->count_info & PGC_count_mask));

    /* If this is the entry in the up-pointer, remove it */
    if ( entry_pa != 0
         && sh_type_has_up_pointer(d, sp->u.sh.type)
         && sp->up == entry_pa )
        sp->up = 0;

    x = sp->u.sh.count;
    nx = x - 1;

    if ( unlikely(x == 0) )
    {
        SHADOW_ERROR("shadow ref underflow, smfn=%lx oc=%08x t=%#x\n",
                     mfn_x(smfn), sp->u.sh.count, sp->u.sh.type);
        BUG();
    }

    /* Guarded by the paging lock, so no need for atomic update */
    sp->u.sh.count = nx;

    if ( unlikely(nx == 0) )
        sh_destroy_shadow(d, smfn);
}
Unexecuted instantiation: common.c:sh_put_ref
Unexecuted instantiation: multi.c:sh_put_ref
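/* Usage sketch (assumed caller; the real ones are the shadow_set_lXe()
 * paths in multi.c): a shadow entry that points at a lower-level shadow
 * holds a ref tagged with the entry's physical address:
 *
 *     if ( !sh_get_ref(d, sl1mfn, sl2e_pa) )    // can fail at 2^26 refs
 *         return SHADOW_SET_ERROR;
 *     ...
 *     sh_put_ref(d, sl1mfn, sl2e_pa);           // destroys shadow at count 0
 *
 * sl2e_pa is an illustrative name for the physical address of the shadow
 * entry holding the reference. */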


/* Walk the list of pinned shadows, from the tail forwards,
 * skipping the non-head-page entries */
static inline struct page_info *
prev_pinned_shadow(struct page_info *page,
                   const struct domain *d)
{
    struct page_info *p;
    const struct page_list_head *pin_list;

    pin_list = &d->arch.paging.shadow.pinned_shadows;

    if ( page_list_empty(pin_list) || page == page_list_first(pin_list) )
        return NULL;

    if ( page == NULL ) /* If no current place, start at the tail */
        p = page_list_last(pin_list);
    else
        p = page_list_prev(page, pin_list);
    /* Skip over the non-tail parts of multi-page shadows */
    if ( p && p->u.sh.type == SH_type_l2_32_shadow )
    {
        p = page_list_prev(p, pin_list);
        ASSERT(p && p->u.sh.type == SH_type_l2_32_shadow);
        p = page_list_prev(p, pin_list);
        ASSERT(p && p->u.sh.type == SH_type_l2_32_shadow);
        p = page_list_prev(p, pin_list);
        ASSERT(p && p->u.sh.type == SH_type_l2_32_shadow);
    }
    ASSERT(!p || p->u.sh.head);
    return p;
}
Unexecuted instantiation: multi.c:prev_pinned_shadow
Unexecuted instantiation: common.c:prev_pinned_shadow

#define foreach_pinned_shadow(dom, pos, tmp)                    \
    for ( pos = prev_pinned_shadow(NULL, (dom));                \
          pos ? (tmp = prev_pinned_shadow(pos, (dom)), 1) : 0;  \
          pos = tmp )
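/* Usage sketch (assumed caller): 'tmp' is advanced before the body runs,
 * so the body may unpin, and thereby free, 'pos':
 *
 *     struct page_info *sp, *t;
 *     foreach_pinned_shadow(d, sp, t)
 *         sh_unpin(d, page_to_mfn(sp));
 */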

/*
 * Pin a shadow page: take an extra refcount, set the pin bit,
 * and put the shadow at the head of the list of pinned shadows.
 * Returns false for failure, true for success.
 */
static inline bool sh_pin(struct domain *d, mfn_t smfn)
{
    struct page_info *sp[4];
    struct page_list_head *pin_list;
    unsigned int i, pages;
    bool already_pinned;

    ASSERT(mfn_valid(smfn));
    sp[0] = mfn_to_page(smfn);
    pages = shadow_size(sp[0]->u.sh.type);
    already_pinned = sp[0]->u.sh.pinned;
    ASSERT(sh_type_is_pinnable(d, sp[0]->u.sh.type));
    ASSERT(sp[0]->u.sh.head);

    pin_list = &d->arch.paging.shadow.pinned_shadows;
    if ( already_pinned && sp[0] == page_list_first(pin_list) )
        return true;

    /* Treat the up-to-four pages of the shadow as a unit in the list ops */
    for ( i = 1; i < pages; i++ )
    {
        sp[i] = page_list_next(sp[i - 1], pin_list);
        ASSERT(sp[i]->u.sh.type == sp[0]->u.sh.type);
        ASSERT(!sp[i]->u.sh.head);
    }

    if ( already_pinned )
    {
        /* Take it out of the pinned-list so it can go at the front */
        for ( i = 0; i < pages; i++ )
            page_list_del(sp[i], pin_list);
    }
    else
    {
        /* Not pinned: pin it! */
        if ( !sh_get_ref(d, smfn, 0) )
            return false;
        sp[0]->u.sh.pinned = 1;
    }

    /* Put it at the head of the list of pinned shadows */
    for ( i = pages; i > 0; i-- )
        page_list_add(sp[i - 1], pin_list);

    return true;
}
Unexecuted instantiation: multi.c:sh_pin
Unexecuted instantiation: common.c:sh_pin

/* Unpin a shadow page: unset the pin bit, take the shadow off the list
 * of pinned shadows, and release the extra ref. */
static inline void sh_unpin(struct domain *d, mfn_t smfn)
{
    struct page_list_head tmp_list, *pin_list;
    struct page_info *sp, *next;
    unsigned int i, head_type;

    ASSERT(mfn_valid(smfn));
    sp = mfn_to_page(smfn);
    head_type = sp->u.sh.type;
    ASSERT(sh_type_is_pinnable(d, sp->u.sh.type));
    ASSERT(sp->u.sh.head);

    if ( !sp->u.sh.pinned )
        return;
    sp->u.sh.pinned = 0;

    /* Cut the sub-list out of the list of pinned shadows,
     * stitching it back into a list fragment of its own. */
    pin_list = &d->arch.paging.shadow.pinned_shadows;
    INIT_PAGE_LIST_HEAD(&tmp_list);
    for ( i = 0; i < shadow_size(head_type); i++ )
    {
        ASSERT(sp->u.sh.type == head_type);
        ASSERT(!i || !sp->u.sh.head);
        next = page_list_next(sp, pin_list);
        page_list_del(sp, pin_list);
        page_list_add_tail(sp, &tmp_list);
        sp = next;
    }
    sh_terminate_list(&tmp_list);

    sh_put_ref(d, smfn, 0);
}
Unexecuted instantiation: multi.c:sh_unpin
Unexecuted instantiation: common.c:sh_unpin


/**************************************************************************/
/* PTE-write emulation. */

struct sh_emulate_ctxt {
    struct x86_emulate_ctxt ctxt;

    /* Cache of up to 31 bytes of instruction. */
    uint8_t insn_buf[31];
    uint8_t insn_buf_bytes;
    unsigned long insn_buf_eip;

    /* Cache of segment registers already gathered for this emulation. */
    unsigned int valid_seg_regs;
    struct segment_register seg_reg[6];

    /* MFNs being written to in write/cmpxchg callbacks */
    mfn_t mfn[2];

#if (SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY)
    /* Special case for avoiding having to verify writes: remember
     * whether the old value had its low bit (_PAGE_PRESENT) clear. */
    int low_bit_was_clear:1;
#endif
};

const struct x86_emulate_ops *shadow_init_emulation(
    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
void shadow_continue_emulation(
    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
743
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
744
/**************************************************************************/
745
/* Virtual TLB entries
746
 *
747
 * We keep a cache of virtual-to-physical translations that we have seen
748
 * since the last TLB flush.  This is safe to use for frame translations,
749
 * but callers need to re-check the actual guest tables if the lookup fails.
750
 *
751
 * Lookups and updates are protected by a per-vTLB (and hence per-vcpu)
752
 * lock.  This lock is held *only* while reading or writing the table,
753
 * so it is safe to take in any non-interrupt context.  Most lookups
754
 * happen with v==current, so we expect contention to be low.
755
 */
756
757
0
#define VTLB_ENTRIES 13
758
759
struct shadow_vtlb {
760
    unsigned long page_number;      /* Guest virtual address >> PAGE_SHIFT  */
761
    unsigned long frame_number;     /* Guest physical address >> PAGE_SHIFT */
762
    uint32_t pfec;     /* PF error code of the lookup that filled this
763
                        * entry.  A pfec of zero means the slot is empty
764
                        * (since that would require us to re-try anyway) */
765
};
766
767
/* Call whenever the guest flushes hit actual TLB */
768
static inline void vtlb_flush(struct vcpu *v)
769
0
{
770
0
    spin_lock(&v->arch.paging.vtlb_lock);
771
0
    memset(v->arch.paging.vtlb, 0, VTLB_ENTRIES * sizeof (struct shadow_vtlb));
772
0
    spin_unlock(&v->arch.paging.vtlb_lock);
773
0
}
Unexecuted instantiation: multi.c:vtlb_flush
Unexecuted instantiation: common.c:vtlb_flush
774
775
static inline int vtlb_hash(unsigned long page_number)
776
0
{
777
0
    return page_number % VTLB_ENTRIES;
778
0
}
Unexecuted instantiation: multi.c:vtlb_hash
Unexecuted instantiation: common.c:vtlb_hash
779
780
/* Put a translation into the vTLB, potentially clobbering an old one */
781
static inline void vtlb_insert(struct vcpu *v, unsigned long page,
782
                               unsigned long frame, uint32_t pfec)
783
0
{
784
0
    struct shadow_vtlb entry =
785
0
        { .page_number = page, .frame_number = frame, .pfec = pfec };
786
0
    spin_lock(&v->arch.paging.vtlb_lock);
787
0
    v->arch.paging.vtlb[vtlb_hash(page)] = entry;
788
0
    spin_unlock(&v->arch.paging.vtlb_lock);
789
0
}
Unexecuted instantiation: common.c:vtlb_insert
Unexecuted instantiation: multi.c:vtlb_insert
790
791
/* Look a translation up in the vTLB.  Returns INVALID_GFN if not found. */
792
static inline unsigned long vtlb_lookup(struct vcpu *v,
793
                                        unsigned long va, uint32_t pfec)
794
0
{
795
0
    unsigned long page_number = va >> PAGE_SHIFT;
796
0
    unsigned long frame_number = gfn_x(INVALID_GFN);
797
0
    int i = vtlb_hash(page_number);
798
0
799
0
    spin_lock(&v->arch.paging.vtlb_lock);
800
0
    if ( v->arch.paging.vtlb[i].pfec != 0
801
0
         && v->arch.paging.vtlb[i].page_number == page_number
802
0
         /* Any successful walk that had at least these pfec bits is OK */
803
0
         && (v->arch.paging.vtlb[i].pfec & pfec) == pfec )
804
0
    {
805
0
        frame_number = v->arch.paging.vtlb[i].frame_number;
806
0
    }
807
0
    spin_unlock(&v->arch.paging.vtlb_lock);
808
0
    return frame_number;
809
0
}
Unexecuted instantiation: multi.c:vtlb_lookup
Unexecuted instantiation: common.c:vtlb_lookup
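/* Usage sketch (assumed caller; modelled on the gva-to-gfn path): consult
 * the vTLB first, fall back to a full guest walk on miss, and cache the
 * result for next time:
 *
 *     frame = vtlb_lookup(v, va, pfec);
 *     if ( frame == gfn_x(INVALID_GFN) )
 *     {
 *         frame = walk_guest_tables(v, va, pfec);   // illustrative helper
 *         if ( frame != gfn_x(INVALID_GFN) )
 *             vtlb_insert(v, va >> PAGE_SHIFT, frame, pfec);
 *     }
 */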
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */

static inline int sh_check_page_has_no_refs(struct page_info *page)
{
    unsigned long count = read_atomic(&page->count_info);
    return ( (count & PGC_count_mask) ==
             ((count & PGC_allocated) ? 1 : 0) );
}
Unexecuted instantiation: multi.c:sh_check_page_has_no_refs
Unexecuted instantiation: common.c:sh_check_page_has_no_refs
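/* Reading of the check above: PGC_allocated itself accounts for one general
 * reference, so an allocated page with no other users has a count of exactly
 * 1; any extra mapping or foreign ref pushes it higher and the test fails. */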

#endif /* _XEN_SHADOW_PRIVATE_H */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */