Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/include/xen/tmem_xen.h
Line |  Count | Source
   1 |        | /******************************************************************************
   2 |        |  * tmem_xen.h
   3 |        |  *
   4 |        |  * Xen-specific Transcendent memory
   5 |        |  *
   6 |        |  * Copyright (c) 2009, Dan Magenheimer, Oracle Corp.
   7 |        |  */
   8 |        |
   9 |        | #ifndef __XEN_TMEM_XEN_H__
  10 |        | #define __XEN_TMEM_XEN_H__
  11 |        |
  12 |        | #include <xen/mm.h> /* heap alloc/free */
  13 |        | #include <xen/pfn.h>
  14 |        | #include <xen/xmalloc.h> /* xmalloc/xfree */
  15 |        | #include <xen/sched.h>  /* struct domain */
  16 |        | #include <xen/guest_access.h> /* copy_from_guest */
  17 |        | #include <xen/hash.h> /* hash_long */
  18 |        | #include <xen/domain_page.h> /* __map_domain_page */
  19 |        | #include <xen/rbtree.h> /* struct rb_root */
  20 |        | #include <xsm/xsm.h> /* xsm_tmem_control */
  21 |        | #include <public/tmem.h>
  22 |        | #ifdef CONFIG_COMPAT
  23 |        | #include <compat/tmem.h>
  24 |        | #endif
  25 |        | typedef uint32_t pagesize_t;  /* like size_t, must handle largest PAGE_SIZE */
  26 |        |
  27 |        | #define IS_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
  28 |        | #define IS_VALID_PAGE(_pi)    mfn_valid(_mfn(page_to_mfn(_pi)))
  29 |        |
  30 |        | extern struct page_list_head tmem_page_list;
  31 |        | extern spinlock_t tmem_page_list_lock;
  32 |        | extern unsigned long tmem_page_list_pages;
  33 |        | extern atomic_t freeable_page_count;
  34 |        |
  35 |        | extern int tmem_init(void);
  36 |      0 | #define tmem_hash hash_long
  37 |        |
  38 |        | extern bool opt_tmem_compress;
  39 |        | static inline bool tmem_compression_enabled(void)
  40 |      0 | {
  41 |      0 |     return opt_tmem_compress;
  42 |      0 | }
Unexecuted instantiations of tmem_compression_enabled: memory.c, page_alloc.c, tmem.c, tmem_xen.c, tmem_control.c, setup.c
  43 |        |
  44 |        | #ifdef CONFIG_TMEM
  45 |        | extern bool opt_tmem;
  46 |        | static inline bool tmem_enabled(void)
  47 |  4.23M | {
  48 |  4.23M |     return opt_tmem;
  49 |  4.23M | }
Instantiations of tmem_enabled: page_alloc.c (4.14M), tmem.c (86.5k); unexecuted: setup.c, tmem_control.c, tmem_xen.c, memory.c
  50 |        |
  51 |        | static inline void tmem_disable(void)
  52 |      0 | {
  53 |      0 |     opt_tmem = false;
  54 |      0 | }
Unexecuted instantiations of tmem_disable: memory.c, setup.c, tmem_control.c, tmem_xen.c, tmem.c, page_alloc.c
  55 |        | #else
  56 |        | static inline bool tmem_enabled(void)
  57 |        | {
  58 |        |     return false;
  59 |        | }
  60 |        |
  61 |        | static inline void tmem_disable(void)
  62 |        | {
  63 |        | }
  64 |        | #endif /* CONFIG_TMEM */
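
The counts above bear out how this header is used: tmem_enabled() is the only routine here with nonzero coverage, and 4.14M of its 4.23M calls come from the page_alloc.c instantiation, i.e. from hot allocation paths. That is also why the #else stubs matter: with CONFIG_TMEM off, tmem_enabled() is a compile-time false and guarded branches vanish entirely. A minimal sketch of the caller-side pattern (the helper name is hypothetical, not code from this header):

    /* Hot-path guard: a single flag load when CONFIG_TMEM=y; dead code
     * eliminated by the constant-false stub when CONFIG_TMEM is off. */
    if ( tmem_enabled() )
        try_reclaim_from_tmem();    /* hypothetical helper */
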
  65 |        |
  66 |        | /*
  67 |        |  * Memory free page list management
  68 |        |  */
  69 |        |
  70 |        | static inline struct page_info *tmem_page_list_get(void)
  71 |      0 | {
  72 |      0 |     struct page_info *pi;
  73 |      0 |
  74 |      0 |     spin_lock(&tmem_page_list_lock);
  75 |      0 |     if ( (pi = page_list_remove_head(&tmem_page_list)) != NULL )
  76 |      0 |         tmem_page_list_pages--;
  77 |      0 |     spin_unlock(&tmem_page_list_lock);
  78 |      0 |     ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
  79 |      0 |     return pi;
  80 |      0 | }
Unexecuted instantiations of tmem_page_list_get: setup.c, tmem_control.c, tmem_xen.c, tmem.c, page_alloc.c, memory.c
  81 |        |
  82 |        | static inline void tmem_page_list_put(struct page_info *pi)
  83 |      0 | {
  84 |      0 |     ASSERT(IS_VALID_PAGE(pi));
  85 |      0 |     spin_lock(&tmem_page_list_lock);
  86 |      0 |     page_list_add(pi, &tmem_page_list);
  87 |      0 |     tmem_page_list_pages++;
  88 |      0 |     spin_unlock(&tmem_page_list_lock);
  89 |      0 | }
Unexecuted instantiations of tmem_page_list_put: tmem_control.c, tmem_xen.c, tmem.c, page_alloc.c, memory.c, setup.c
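
Taken together, the two helpers keep tmem_page_list_pages equal to the length of tmem_page_list, with every update made under tmem_page_list_lock; page_list_add() pushes at the same head that page_list_remove_head() pops, so the free list recycles pages LIFO. A minimal usage sketch (hypothetical caller; the helpers do their own locking):

    struct page_info *pi = tmem_page_list_get();    /* NULL if the list is empty */

    if ( pi != NULL )
        tmem_page_list_put(pi);                     /* push it back onto the head */
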
  90 |        |
  91 |        | /*
  92 |        |  * Memory allocation for persistent data
  93 |        |  */
  94 |        | static inline struct page_info *__tmem_alloc_page_thispool(struct domain *d)
  95 |      0 | {
  96 |      0 |     struct page_info *pi;
  97 |      0 |
  98 |      0 |     /* Note that this tot_pages check is not protected by d->page_alloc_lock,
  99 |      0 |      * so it may race and periodically fail in donate_page or alloc_domheap_pages.
 100 |      0 |      * That's OK... neither is a problem, though chatty if log_lvl is set. */
 101 |      0 |     if ( d->tot_pages >= d->max_pages )
 102 |      0 |         return NULL;
 103 |      0 |
 104 |      0 |     if ( tmem_page_list_pages )
 105 |      0 |     {
 106 |      0 |         if ( (pi = tmem_page_list_get()) != NULL )
 107 |      0 |         {
 108 |      0 |             if ( donate_page(d, pi, 0) == 0 )
 109 |      0 |                 goto out;
 110 |      0 |             else
 111 |      0 |                 tmem_page_list_put(pi);
 112 |      0 |         }
 113 |      0 |     }
 114 |      0 |
 115 |      0 |     pi = alloc_domheap_pages(d, 0, MEMF_tmem);
 116 |      0 |
 117 |      0 | out:
 118 |      0 |     ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
 119 |      0 |     return pi;
 120 |      0 | }
Unexecuted instantiations of __tmem_alloc_page_thispool: page_alloc.c, memory.c, setup.c, tmem_control.c, tmem_xen.c, tmem.c
 121 |        |
 122 |        | static inline void __tmem_free_page_thispool(struct page_info *pi)
 123 |      0 | {
 124 |      0 |     struct domain *d = page_get_owner(pi);
 125 |      0 |
 126 |      0 |     ASSERT(IS_VALID_PAGE(pi));
 127 |      0 |     if ( (d == NULL) || steal_page(d, pi, 0) == 0 )
 128 |      0 |         tmem_page_list_put(pi);
 129 |      0 |     else
 130 |      0 |     {
 131 |      0 |         scrub_one_page(pi);
 132 |      0 |         ASSERT((pi->count_info & ~(PGC_allocated | 1)) == 0);
 133 |      0 |         free_domheap_pages(pi, 0);
 134 |      0 |     }
 135 |      0 | }
Unexecuted instantiations of __tmem_free_page_thispool: memory.c, setup.c, page_alloc.c, tmem.c, tmem_xen.c, tmem_control.c
 136 |        |
 137 |        | /*
 138 |        |  * Memory allocation for ephemeral (non-persistent) data
 139 |        |  */
 140 |        | static inline struct page_info *__tmem_alloc_page(void)
 141 |      0 | {
 142 |      0 |     struct page_info *pi = tmem_page_list_get();
 143 |      0 |
 144 |      0 |     if ( pi == NULL )
 145 |      0 |         pi = alloc_domheap_pages(NULL, 0, MEMF_tmem);
 146 |      0 |
 147 |      0 |     if ( pi )
 148 |      0 |         atomic_inc(&freeable_page_count);
 149 |      0 |     ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
 150 |      0 |     return pi;
 151 |      0 | }
Unexecuted instantiations of __tmem_alloc_page: page_alloc.c, setup.c, tmem_control.c, memory.c, tmem.c, tmem_xen.c
 152 |        |
 153 |        | static inline void __tmem_free_page(struct page_info *pi)
 154 |      0 | {
 155 |      0 |     ASSERT(IS_VALID_PAGE(pi));
 156 |      0 |     tmem_page_list_put(pi);
 157 |      0 |     atomic_dec(&freeable_page_count);
 158 |      0 | }
Unexecuted instantiations of __tmem_free_page: setup.c, tmem_control.c, page_alloc.c, tmem.c, tmem_xen.c, memory.c
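
The freeable_page_count updates bracket an ephemeral page's lifetime: __tmem_alloc_page() increments the counter on success and __tmem_free_page() decrements it, so at any instant it is the number of pages tmem could hand back to the rest of Xen on demand. A minimal sketch of the pairing (hypothetical caller):

    struct page_info *pi = __tmem_alloc_page();     /* freeable_page_count++ on success */

    if ( pi != NULL )
        __tmem_free_page(pi);                       /* freeable_page_count--, page back on tmem_page_list */
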
 159 |        |
 160 |        | /* "Client" (== domain) abstraction. */
 161 |        | static inline struct client *tmem_client_from_cli_id(domid_t cli_id)
 162 |      0 | {
 163 |      0 |     struct client *c;
 164 |      0 |     struct domain *d = rcu_lock_domain_by_id(cli_id);
 165 |      0 |     if ( d == NULL )
 166 |      0 |         return NULL;
 167 |      0 |     c = d->tmem_client;
 168 |      0 |     rcu_unlock_domain(d);
 169 |      0 |     return c;
 170 |      0 | }
Unexecuted instantiations of tmem_client_from_cli_id: memory.c, page_alloc.c, setup.c, tmem.c, tmem_xen.c, tmem_control.c
 171 |        |
 172 |        | /* These typedefs are in the public/tmem.h interface:
 173 |        | typedef XEN_GUEST_HANDLE(void) cli_mfn_t;
 174 |        | typedef XEN_GUEST_HANDLE(char) cli_va_t;
 175 |        | */
 176 |        | typedef XEN_GUEST_HANDLE_PARAM(tmem_op_t) tmem_cli_op_t;
 177 |        | typedef XEN_GUEST_HANDLE_PARAM(char) tmem_cli_va_param_t;
 178 |        |
 179 |        | static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
 180 |      0 | {
 181 |      0 | #ifdef CONFIG_COMPAT
 182 |      0 |     if ( is_hvm_vcpu(current) ? hvm_guest_x86_mode(current) != 8
 183 |      0 |                               : is_pv_32bit_vcpu(current) )
 184 |      0 |     {
 185 |      0 |         int rc;
 186 |      0 |         enum XLAT_tmem_op_u u;
 187 |      0 |         tmem_op_compat_t cop;
 188 |      0 |
 189 |      0 |         rc = copy_from_guest(&cop, guest_handle_cast(uops, void), 1);
 190 |      0 |         if ( rc )
 191 |      0 |             return rc;
 192 |      0 |         switch ( cop.cmd )
 193 |      0 |         {
 194 |      0 |         case TMEM_NEW_POOL:   u = XLAT_tmem_op_u_creat; break;
 195 |      0 |         default:              u = XLAT_tmem_op_u_gen;   break;
 196 |      0 |         }
 197 |      0 |         XLAT_tmem_op(op, &cop);
 198 |      0 |         return 0;
 199 |      0 |     }
 200 |      0 | #endif
 201 |      0 |     return copy_from_guest(op, uops, 1);
 202 |      0 | }
Unexecuted instantiations of tmem_get_tmemop_from_client: memory.c, page_alloc.c, tmem.c, tmem_control.c, setup.c, tmem_xen.c
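
The CONFIG_COMPAT branch exists because a 32-bit guest's tmem_op_t does not have the same layout as the hypervisor's native one, so the request is first read as a tmem_op_compat_t and then translated field by field via the generated XLAT_tmem_op() helper. A hypothetical hypercall-entry sketch showing how this routine would be consumed (do_tmem_op_sketch is illustrative only, not code from this header):

    long do_tmem_op_sketch(tmem_cli_op_t uops)
    {
        tmem_op_t op;

        /* A nonzero return means the guest buffer could not be copied. */
        if ( tmem_get_tmemop_from_client(&op, uops) != 0 )
            return -EFAULT;
        /* ... dispatch on op.cmd ... */
        return 0;
    }
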
 203 |        |
 204 |      0 | #define tmem_cli_buf_null guest_handle_from_ptr(NULL, char)
 205 |      0 | #define TMEM_CLI_ID_NULL ((domid_t)((domid_t)-1L))
 206 |      0 | #define tmem_cli_id_str "domid"
 207 |      0 | #define tmem_client_str "domain"
 208 |        |
 209 |        | int tmem_decompress_to_client(xen_pfn_t, void *, size_t,
 210 |        |                               tmem_cli_va_param_t);
 211 |        | int tmem_compress_from_client(xen_pfn_t, void **, size_t *,
 212 |        |                               tmem_cli_va_param_t);
 213 |        |
 214 |        | int tmem_copy_from_client(struct page_info *, xen_pfn_t, tmem_cli_va_param_t);
 215 |        | int tmem_copy_to_client(xen_pfn_t, struct page_info *, tmem_cli_va_param_t);
 216 |        |
 217 |      0 | #define tmem_client_err(fmt, args...)  printk(XENLOG_G_ERR fmt, ##args)
 218 |      0 | #define tmem_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args)
 219 |      0 | #define tmem_client_info(fmt, args...) printk(XENLOG_G_INFO fmt, ##args)
 220 |        |
 221 |        | /* Global statistics (none need to be locked). */
 222 |        | struct tmem_statistics {
 223 |        |     unsigned long total_tmem_ops;
 224 |        |     unsigned long errored_tmem_ops;
 225 |        |     unsigned long total_flush_pool;
 226 |        |     unsigned long alloc_failed;
 227 |        |     unsigned long alloc_page_failed;
 228 |        |     unsigned long evicted_pgs;
 229 |        |     unsigned long evict_attempts;
 230 |        |     unsigned long relinq_pgs;
 231 |        |     unsigned long relinq_attempts;
 232 |        |     unsigned long max_evicts_per_relinq;
 233 |        |     unsigned long low_on_memory;
 234 |        |     unsigned long deduped_puts;
 235 |        |     unsigned long tot_good_eph_puts;
 236 |        |     int global_obj_count_max;
 237 |        |     int global_pgp_count_max;
 238 |        |     int global_pcd_count_max;
 239 |        |     int global_page_count_max;
 240 |        |     int global_rtree_node_count_max;
 241 |        |     long global_eph_count_max;
 242 |        |     unsigned long failed_copies;
 243 |        |     unsigned long pcd_tot_tze_size;
 244 |        |     unsigned long pcd_tot_csize;
 245 |        |     /* Global counters (should use long_atomic_t access). */
 246 |        |     atomic_t global_obj_count;
 247 |        |     atomic_t global_pgp_count;
 248 |        |     atomic_t global_pcd_count;
 249 |        |     atomic_t global_page_count;
 250 |        |     atomic_t global_rtree_node_count;
 251 |        | };
 252 |        |
 253 |      0 | #define atomic_inc_and_max(_c) do { \
 254 |      0 |     atomic_inc(&tmem_stats._c); \
 255 |      0 |     if ( _atomic_read(tmem_stats._c) > tmem_stats._c##_max ) \
 256 |      0 |         tmem_stats._c##_max = _atomic_read(tmem_stats._c); \
 257 |      0 | } while (0)
 258 |        |
 259 |      0 | #define atomic_dec_and_assert(_c) do { \
 260 |      0 |     atomic_dec(&tmem_stats._c); \
 261 |      0 |     ASSERT(_atomic_read(tmem_stats._c) >= 0); \
 262 |      0 | } while (0)
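
Both macros use token pasting to pair an atomic_t counter in struct tmem_statistics with its _max high-water field. For example, atomic_inc_and_max(global_obj_count) expands (given the global struct tmem_statistics instance named tmem_stats that the macros assume) to:

    do {
        atomic_inc(&tmem_stats.global_obj_count);
        if ( _atomic_read(tmem_stats.global_obj_count) >
             tmem_stats.global_obj_count_max )
            tmem_stats.global_obj_count_max =
                _atomic_read(tmem_stats.global_obj_count);
    } while (0);

The read-back and the max update are not atomic with the increment, so the recorded maximum can lag slightly under concurrency; for statistics gathering that is acceptable.
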
 263 |        |
 264 |      0 | #define MAX_GLOBAL_SHARED_POOLS  16
 265 |        | struct tmem_global {
 266 |        |     struct list_head ephemeral_page_list;  /* All pages in ephemeral pools. */
 267 |        |     struct list_head client_list;
 268 |        |     struct tmem_pool *shared_pools[MAX_GLOBAL_SHARED_POOLS];
 269 |        |     bool shared_auth;
 270 |        |     long eph_count;  /* Atomicity depends on eph_lists_spinlock. */
 271 |        |     atomic_t client_weight_total;
 272 |        | };
 273 |        |
 274 |      0 | #define MAX_POOLS_PER_DOMAIN 16
 275 |        |
 276 |        | struct tmem_pool;
 277 |        | struct tmem_page_descriptor;
 278 |        | struct tmem_page_content_descriptor;
 279 |        | struct client {
 280 |        |     struct list_head client_list;
 281 |        |     struct tmem_pool *pools[MAX_POOLS_PER_DOMAIN];
 282 |        |     struct domain *domain;
 283 |        |     struct xmem_pool *persistent_pool;
 284 |        |     struct list_head ephemeral_page_list;
 285 |        |     long eph_count, eph_count_max;
 286 |        |     domid_t cli_id;
 287 |        |     xen_tmem_client_t info;
 288 |        |     /* For save/restore/migration. */
 289 |        |     bool was_frozen;
 290 |        |     struct list_head persistent_invalidated_list;
 291 |        |     struct tmem_page_descriptor *cur_pgp;
 292 |        |     /* Statistics collection. */
 293 |        |     unsigned long compress_poor, compress_nomem;
 294 |        |     unsigned long compressed_pages;
 295 |        |     uint64_t compressed_sum_size;
 296 |        |     uint64_t total_cycles;
 297 |        |     unsigned long succ_pers_puts, succ_eph_gets, succ_pers_gets;
 298 |        |     /* Shared pool authentication. */
 299 |        |     uint64_t shared_auth_uuid[MAX_GLOBAL_SHARED_POOLS][2];
 300 |        | };
 301 |        |
 302 |      0 | #define POOL_PAGESHIFT (PAGE_SHIFT - 12)
 303 |      0 | #define OBJ_HASH_BUCKETS 256 /* Must be a power of two. */
 304 |      0 | #define OBJ_HASH_BUCKETS_MASK (OBJ_HASH_BUCKETS - 1)
 305 |        |
 306 |      0 | #define is_persistent(_p)  (_p->persistent)
 307 |      0 | #define is_shared(_p)      (_p->shared)
309
struct tmem_pool {
310
    bool shared;
311
    bool persistent;
312
    bool is_dying;
313
    struct client *client;
314
    uint64_t uuid[2]; /* 0 for private, non-zero for shared. */
315
    uint32_t pool_id;
316
    rwlock_t pool_rwlock;
317
    struct rb_root obj_rb_root[OBJ_HASH_BUCKETS]; /* Protected by pool_rwlock. */
318
    struct list_head share_list; /* Valid if shared. */
319
    int shared_count; /* Valid if shared. */
320
    /* For save/restore/migration. */
321
    struct list_head persistent_page_list;
322
    struct tmem_page_descriptor *cur_pgp;
323
    /* Statistics collection. */
324
    atomic_t pgp_count;
325
    int pgp_count_max;
326
    long obj_count;  /* Atomicity depends on pool_rwlock held for write. */
327
    long obj_count_max;
328
    unsigned long objnode_count, objnode_count_max;
329
    uint64_t sum_life_cycles;
330
    uint64_t sum_evicted_cycles;
331
    unsigned long puts, good_puts, no_mem_puts;
332
    unsigned long dup_puts_flushed, dup_puts_replaced;
333
    unsigned long gets, found_gets;
334
    unsigned long flushs, flushs_found;
335
    unsigned long flush_objs, flush_objs_found;
336
};
337
338
struct share_list {
339
    struct list_head share_list;
340
    struct client *client;
341
};
342
343
#endif /* __XEN_TMEM_XEN_H__ */