Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/common/tmem_xen.c
Every instrumented line in this file has an execution count of 0; the file is entirely uncovered.
/******************************************************************************
 * tmem_xen.c
 *
 * Xen-specific Transcendent memory
 *
 * Copyright (c) 2009, Dan Magenheimer, Oracle Corp.
 */

#include <xen/tmem.h>
#include <xen/tmem_xen.h>
#include <xen/lzo.h> /* compression code */
#include <xen/paging.h>
#include <xen/domain_page.h>
#include <xen/cpu.h>
#include <xen/init.h>

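/*
 * Command-line switches: "tmem" enables Transcendent Memory support, and
 * "tmem_compress" additionally enables compression of pages stored via tmem.
 */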
bool __read_mostly opt_tmem;
boolean_param("tmem", opt_tmem);

bool __read_mostly opt_tmem_compress;
boolean_param("tmem_compress", opt_tmem_compress);

atomic_t freeable_page_count = ATOMIC_INIT(0);

/*
 * These are a concurrency bottleneck; they could be per-CPU and dynamically
 * allocated iff opt_tmem_compress.
 */
#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
#define LZO_DSTMEM_PAGES 2
static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, workmem);
static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, dstmem);
static DEFINE_PER_CPU_READ_MOSTLY(void *, scratch_page);

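/*
 * tmem's client-page helpers are stubbed out on ARM: they assert that they
 * are never reached.  The real implementations live in the #else branch.
 */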
#if defined(CONFIG_ARM)
static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
                                 struct page_info **pcli_pfp, bool cli_write)
{
    ASSERT_UNREACHABLE();
    return NULL;
}

static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
                                unsigned long cli_mfn, bool mark_dirty)
{
    ASSERT_UNREACHABLE();
}
#else
#include <asm/p2m.h>

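/*
 * Map a client (guest) page so tmem can access it: look up gfn cmfn in the
 * p2m, take a general reference (plus a writable type reference when
 * cli_write is set), and return a hypervisor mapping of the page, or NULL
 * on failure.  The MFN and page_info are handed back through
 * pcli_mfn/pcli_pfp for the matching cli_put_page().
 */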
static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
                                 struct page_info **pcli_pfp, bool cli_write)
{
    p2m_type_t t;
    struct page_info *page;

    page = get_page_from_gfn(current->domain, cmfn, &t, P2M_ALLOC);
    if ( !page || t != p2m_ram_rw )
    {
        if ( page )
            put_page(page);
        return NULL;
    }

    if ( cli_write && !get_page_type(page, PGT_writable_page) )
    {
        put_page(page);
        return NULL;
    }

    *pcli_mfn = page_to_mfn(page);
    *pcli_pfp = page;
    return map_domain_page(_mfn(*pcli_mfn));
}

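/*
 * Undo cli_get_page(): drop the references taken above (including the
 * writable type reference when the page was written), optionally mark the
 * page dirty for log-dirty tracking, and tear down the mapping.
 */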
static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
                                unsigned long cli_mfn, bool mark_dirty)
{
    if ( mark_dirty )
    {
        put_page_and_type(cli_pfp);
        paging_mark_dirty(current->domain, _mfn(cli_mfn));
    }
    else
        put_page(cli_pfp);
    unmap_domain_page(cli_va);
}
#endif

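/*
 * Copy one page from the client into tmem page pfp.  Only the cmfn path
 * (null clibuf handle) is implemented: returns 1 on success, -EFAULT if the
 * client page cannot be mapped, and -EINVAL when a non-null clibuf is given.
 */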
int tmem_copy_from_client(struct page_info *pfp,
    xen_pfn_t cmfn, tmem_cli_va_param_t clibuf)
{
    unsigned long tmem_mfn, cli_mfn = 0;
    char *tmem_va, *cli_va = NULL;
    struct page_info *cli_pfp = NULL;
    int rc = 1;

    ASSERT(pfp != NULL);
    tmem_mfn = page_to_mfn(pfp);
    tmem_va = map_domain_page(_mfn(tmem_mfn));
    if ( guest_handle_is_null(clibuf) )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0);
        if ( cli_va == NULL )
        {
            unmap_domain_page(tmem_va);
            return -EFAULT;
        }
    }
    smp_mb();
    if ( cli_va )
    {
        memcpy(tmem_va, cli_va, PAGE_SIZE);
        cli_put_page(cli_va, cli_pfp, cli_mfn, 0);
    }
    else
        rc = -EINVAL;
    unmap_domain_page(tmem_va);
    return rc;
}

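/*
 * Compress one client page with LZO1X-1 into this CPU's dstmem buffer,
 * reading either directly from the mapped guest frame cmfn or from the
 * guest buffer clibuf staged through the per-CPU scratch page.  Returns 1
 * on success (with *out_va/*out_len describing the result), 0 if the
 * per-CPU buffers are unavailable, or -EFAULT on a bad client reference.
 */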
int tmem_compress_from_client(xen_pfn_t cmfn,
    void **out_va, size_t *out_len, tmem_cli_va_param_t clibuf)
{
    int ret = 0;
    unsigned char *dmem = this_cpu(dstmem);
    unsigned char *wmem = this_cpu(workmem);
    char *scratch = this_cpu(scratch_page);
    struct page_info *cli_pfp = NULL;
    unsigned long cli_mfn = 0;
    void *cli_va = NULL;

    if ( dmem == NULL || wmem == NULL )
        return 0;  /* no buffer, so can't compress */
    if ( guest_handle_is_null(clibuf) )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0);
        if ( cli_va == NULL )
            return -EFAULT;
    }
    else if ( !scratch )
        return 0;
    else if ( copy_from_guest(scratch, clibuf, PAGE_SIZE) )
        return -EFAULT;
    smp_mb();
    ret = lzo1x_1_compress(cli_va ?: scratch, PAGE_SIZE, dmem, out_len, wmem);
    ASSERT(ret == LZO_E_OK);
    *out_va = dmem;
    if ( cli_va )
        cli_put_page(cli_va, cli_pfp, cli_mfn, 0);
    return 1;
}

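/*
 * Copy tmem page pfp back to the client.  The mirror of
 * tmem_copy_from_client(): again only the cmfn path is implemented, and the
 * client page is mapped writably and marked dirty on release.
 */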
int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
    tmem_cli_va_param_t clibuf)
{
    unsigned long tmem_mfn, cli_mfn = 0;
    char *tmem_va, *cli_va = NULL;
    struct page_info *cli_pfp = NULL;
    int rc = 1;

    ASSERT(pfp != NULL);
    if ( guest_handle_is_null(clibuf) )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 1);
        if ( cli_va == NULL )
            return -EFAULT;
    }
    tmem_mfn = page_to_mfn(pfp);
    tmem_va = map_domain_page(_mfn(tmem_mfn));
    if ( cli_va )
    {
        memcpy(cli_va, tmem_va, PAGE_SIZE);
        cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
    }
    else
        rc = -EINVAL;
    unmap_domain_page(tmem_va);
    smp_mb();
    return rc;
}

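/*
 * Decompress size bytes of LZO1X data at tmem_va into a full client page,
 * either directly into the mapped guest frame cmfn or via the per-CPU
 * scratch page and copy_to_guest().  Returns 1 on success, 0 if no scratch
 * page is available, or -EFAULT on a bad client reference or failed copy.
 */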
int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va,
                              size_t size, tmem_cli_va_param_t clibuf)
{
    unsigned long cli_mfn = 0;
    struct page_info *cli_pfp = NULL;
    void *cli_va = NULL;
    char *scratch = this_cpu(scratch_page);
    size_t out_len = PAGE_SIZE;
    int ret;

    if ( guest_handle_is_null(clibuf) )
    {
        cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 1);
        if ( cli_va == NULL )
            return -EFAULT;
    }
    else if ( !scratch )
        return 0;
    ret = lzo1x_decompress_safe(tmem_va, size, cli_va ?: scratch, &out_len);
    ASSERT(ret == LZO_E_OK);
    ASSERT(out_len == PAGE_SIZE);
    if ( cli_va )
        cli_put_page(cli_va, cli_pfp, cli_mfn, 1);
    else if ( copy_to_guest(clibuf, scratch, PAGE_SIZE) )
        return -EFAULT;
    smp_mb();
    return 1;
}

/******************  XEN-SPECIFIC HOST INITIALIZATION ********************/
static int dstmem_order, workmem_order;

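/*
 * CPU notifier: allocate a CPU's compression buffers (dstmem, workmem and
 * scratch_page) as it comes up, and free them when it dies or its bring-up
 * is cancelled.
 */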
static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;

    switch ( action )
    {
    case CPU_UP_PREPARE: {
        if ( per_cpu(dstmem, cpu) == NULL )
            per_cpu(dstmem, cpu) = alloc_xenheap_pages(dstmem_order, 0);
        if ( per_cpu(workmem, cpu) == NULL )
            per_cpu(workmem, cpu) = alloc_xenheap_pages(workmem_order, 0);
        if ( per_cpu(scratch_page, cpu) == NULL )
            per_cpu(scratch_page, cpu) = alloc_xenheap_page();
        break;
    }
    case CPU_DEAD:
    case CPU_UP_CANCELED: {
        if ( per_cpu(dstmem, cpu) != NULL )
        {
            free_xenheap_pages(per_cpu(dstmem, cpu), dstmem_order);
            per_cpu(dstmem, cpu) = NULL;
        }
        if ( per_cpu(workmem, cpu) != NULL )
        {
            free_xenheap_pages(per_cpu(workmem, cpu), workmem_order);
            per_cpu(workmem, cpu) = NULL;
        }
        if ( per_cpu(scratch_page, cpu) != NULL )
        {
            free_xenheap_page(per_cpu(scratch_page, cpu));
            per_cpu(scratch_page, cpu) = NULL;
        }
        break;
    }
    default:
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback
};

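/*
 * Host-side initialisation: size the per-CPU buffers, allocate them for
 * every CPU already online, then register the notifier so hotplugged CPUs
 * get theirs too.  Allocation failures are not checked here; the copy and
 * compress paths above tolerate missing buffers at run time.
 */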
int __init tmem_init(void)
{
    unsigned int cpu;

    dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES);
    workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS);

    for_each_online_cpu ( cpu )
    {
        void *hcpu = (void *)(long)cpu;
        cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
    }

    register_cpu_notifier(&cpu_nfb);

    return 1;
}
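
The two-page dstmem buffer above is sized for LZO1X's documented worst case: incompressible input can grow to in + in/16 + 64 + 3 bytes, i.e. beyond a single page, so compressing into a one-page buffer would not be safe. Below is a minimal stand-alone sketch of that sizing rule and of the compress/decompress round trip, written against the reference minilzo API rather than Xen's internal <xen/lzo.h>; the harness and buffer names are illustrative, not part of this file.

#include <stdio.h>
#include <string.h>
#include "minilzo.h"

#define PAGE_SIZE 4096
/* LZO1X worst case: in + in/16 + 64 + 3 bytes -- more than PAGE_SIZE,
 * which is consistent with the hypervisor reserving two pages for dstmem. */
#define DST_BYTES (PAGE_SIZE + PAGE_SIZE / 16 + 64 + 3)

static unsigned char src[PAGE_SIZE], dst[DST_BYTES], out[PAGE_SIZE];
/* Compression work area, aligned as minilzo requires (cf. workmem above). */
static lzo_align_t wrkmem[(LZO1X_1_MEM_COMPRESS + sizeof(lzo_align_t) - 1)
                          / sizeof(lzo_align_t)];

int main(void)
{
    lzo_uint dst_len = sizeof(dst), out_len = sizeof(out);

    if ( lzo_init() != LZO_E_OK )
        return 1;
    memset(src, 0xA5, sizeof(src));          /* stand-in for a guest page */
    if ( lzo1x_1_compress(src, PAGE_SIZE, dst, &dst_len, wrkmem) != LZO_E_OK )
        return 1;
    if ( lzo1x_decompress_safe(dst, dst_len, out, &out_len, NULL) != LZO_E_OK )
        return 1;
    printf("page: %d -> %lu -> %lu bytes, round-trip ok: %d\n",
           PAGE_SIZE, (unsigned long)dst_len, (unsigned long)out_len,
           !memcmp(src, out, PAGE_SIZE));
    return 0;
}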