Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/common/vmap.c
Line  Count  Source
   1         #ifdef VMAP_VIRT_START
   2         #include <xen/bitmap.h>
   3         #include <xen/cache.h>
   4         #include <xen/init.h>
   5         #include <xen/mm.h>
   6         #include <xen/pfn.h>
   7         #include <xen/spinlock.h>
   8         #include <xen/types.h>
   9         #include <xen/vmap.h>
  10         #include <asm/page.h>
  11
  12         static DEFINE_SPINLOCK(vm_lock);
  13         static void *__read_mostly vm_base[VMAP_REGION_NR];
  14      2  #define vm_bitmap(x) ((unsigned long *)vm_base[x])
  15         /* highest allocated bit in the bitmap */
  16         static unsigned int __read_mostly vm_top[VMAP_REGION_NR];
  17         /* total number of bits in the bitmap */
  18         static unsigned int __read_mostly vm_end[VMAP_REGION_NR];
  19         /* lowest known clear bit in the bitmap */
  20         static unsigned int vm_low[VMAP_REGION_NR];
  21
  22         void __init vm_init_type(enum vmap_region type, void *start, void *end)
  23      1  {
  24      1      unsigned int i, nr;
  25      1      unsigned long va;
  26      1
  27      1      ASSERT(!vm_base[type]);
  28      1
  29      1      vm_base[type] = start;
  30      1      vm_end[type] = PFN_DOWN(end - start);
  31      1      vm_low[type] = PFN_UP((vm_end[type] + 7) / 8);
  32      1      nr = PFN_UP((vm_low[type] + 7) / 8);
  33      1      vm_top[type] = nr * PAGE_SIZE * 8;
  34      1
  35      2      for ( i = 0, va = (unsigned long)vm_bitmap(type); i < nr; ++i, va += PAGE_SIZE )
  36      1      {
  37      1          struct page_info *pg = alloc_domheap_page(NULL, 0);
  38      1
  39      1          map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR);
  40      1          clear_page((void *)va);
  41      1      }
  42      1      bitmap_fill(vm_bitmap(type), vm_low[type]);
  43      1
  44      1      /* Populate page tables for the bitmap if necessary. */
  45      1      populate_pt_range(va, 0, vm_low[type] - nr);
  46      1  }
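The sizing math on report lines 29-33 is dense: vm_end[] holds the number of page slots in the region, vm_low[] the number of slots consumed by the allocation bitmap itself, and vm_top[] the number of slots the initially mapped bitmap pages can track. A standalone sketch of the same arithmetic, assuming an illustrative 64MB region and 4KiB pages (the harness and constants are not from the report):

    /* Illustrative only: replays vm_init_type()'s sizing for an assumed
     * 64MB region; PFN_UP/PFN_DOWN round byte counts to whole pages. */
    #include <stdio.h>

    #define PAGE_SIZE   4096UL
    #define PFN_DOWN(x) ((x) / PAGE_SIZE)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

    int main(void)
    {
        unsigned long region = 64UL << 20;               /* assumed region size */
        unsigned long vm_end = PFN_DOWN(region);         /* 16384 page slots */
        unsigned long vm_low = PFN_UP((vm_end + 7) / 8); /* 1 slot holds the bitmap */
        unsigned long nr     = PFN_UP((vm_low + 7) / 8); /* 1 bitmap page mapped now */
        unsigned long vm_top = nr * PAGE_SIZE * 8;       /* 32768 bits trackable */

        printf("end=%lu low=%lu nr=%lu top=%lu\n", vm_end, vm_low, nr, vm_top);
        return 0;
    }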
  47
  48         static void *vm_alloc(unsigned int nr, unsigned int align,
  49                               enum vmap_region t)
  50     48  {
  51     48      unsigned int start, bit;
  52     48
  53     48      if ( !align )
  54      0          align = 1;
  55     48      else if ( align & (align - 1) )
  56      0          align &= -align;
  57     48
  58     48      ASSERT((t >= VMAP_DEFAULT) && (t < VMAP_REGION_NR));
  59     48      if ( !vm_base[t] )
  60      0          return NULL;
  61     48
  62     48      spin_lock(&vm_lock);
  63     48      for ( ; ; )
  64     48      {
  65     48          struct page_info *pg;
  66     48
  67     48          ASSERT(vm_low[t] == vm_top[t] || !test_bit(vm_low[t], vm_bitmap(t)));
  68     48          for ( start = vm_low[t]; start < vm_top[t]; )
  69     48          {
  70     48              bit = find_next_bit(vm_bitmap(t), vm_top[t], start + 1);
  71     48              if ( bit > vm_top[t] )
  72      0                  bit = vm_top[t];
  73     48              /*
  74     48               * Note that this skips the first bit, making the
  75     48               * corresponding page a guard one.
  76     48               */
  77     48              start = (start + align) & ~(align - 1);
  78     48              if ( bit < vm_top[t] )
  79      0              {
  80      0                  if ( start + nr < bit )
  81      0                      break;
  82      0                  start = find_next_zero_bit(vm_bitmap(t), vm_top[t], bit + 1);
  83      0              }
  84     48              else
  85     48              {
  86     48                  if ( start + nr <= bit )
  87     48                      break;
  88      0                  start = bit;
  89      0              }
  90     48          }
  91     48
  92     48          if ( start < vm_top[t] )
  93     48              break;
  94     48
  95      0          spin_unlock(&vm_lock);
  96      0
  97      0          if ( vm_top[t] >= vm_end[t] )
  98      0              return NULL;
  99      0
 100      0          pg = alloc_domheap_page(NULL, 0);
 101      0          if ( !pg )
 102      0              return NULL;
 103      0
 104      0          spin_lock(&vm_lock);
 105      0
 106      0          if ( start >= vm_top[t] )
 107      0          {
 108      0              unsigned long va = (unsigned long)vm_bitmap(t) + vm_top[t] / 8;
 109      0
 110      0              if ( !map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR) )
 111      0              {
 112      0                  clear_page((void *)va);
 113      0                  vm_top[t] += PAGE_SIZE * 8;
 114      0                  if ( vm_top[t] > vm_end[t] )
 115      0                      vm_top[t] = vm_end[t];
 116      0                  continue;
 117      0              }
 118      0          }
 119      0
 120      0          free_domheap_page(pg);
 121      0
 122      0          if ( start >= vm_top[t] )
 123      0          {
 124      0              spin_unlock(&vm_lock);
 125      0              return NULL;
 126      0          }
 127      0      }
 128     48
 129     97      for ( bit = start; bit < start + nr; ++bit )
 130     49          __set_bit(bit, vm_bitmap(t));
 131     48      if ( bit < vm_top[t] )
 132     48          ASSERT(!test_bit(bit, vm_bitmap(t)));
 133     48      else
 134      0          ASSERT(bit == vm_top[t]);
 135     48      if ( start <= vm_low[t] + 2 )
 136     48          vm_low[t] = bit;
 137     48      spin_unlock(&vm_lock);
 138     48
 139     48      return vm_base[t] + start * PAGE_SIZE;
 140     48  }
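The allocation scan (report lines 68-90) deliberately skips the first clear bit it finds, so every allocation is preceded by an unmapped guard page, and the zero counts on lines 95-127 show the bitmap-growth path was never taken in this run. A minimal sketch of the same first-fit-behind-a-guard idea, heavily simplified relative to vm_alloc() (no locking, no alignment, no on-demand growth; the byte-per-bit array and all names are illustrative):

    /* Illustrative first-fit scan with a leading guard slot. */
    #include <stdio.h>

    #define TOP 64                  /* hypothetical bitmap size in slots */
    static unsigned char bits[TOP]; /* 1 = page slot in use */

    /* Find [start, start + nr) all free with slot start - 1 also free
     * (the guard page); mark it used and return start, or TOP on failure. */
    static unsigned int alloc_slots(unsigned int nr)
    {
        for ( unsigned int start = 1; start + nr <= TOP; start++ )
        {
            unsigned int n = 0;

            if ( bits[start - 1] )  /* guard slot must stay clear */
                continue;
            while ( n < nr && !bits[start + n] )
                n++;
            if ( n == nr )
            {
                for ( n = 0; n < nr; n++ )
                    bits[start + n] = 1;
                return start;
            }
        }
        return TOP;
    }

    int main(void)
    {
        printf("%u\n", alloc_slots(4)); /* 1: slot 0 is the guard */
        printf("%u\n", alloc_slots(4)); /* 6: slot 5 is the new guard */
        return 0;
    }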
 141
 142         static unsigned int vm_index(const void *va, enum vmap_region type)
 143      2  {
 144      2      unsigned long addr = (unsigned long)va & ~(PAGE_SIZE - 1);
 145      2      unsigned int idx;
 146      2      unsigned long start = (unsigned long)vm_base[type];
 147      2
 148      2      if ( !start )
 149      0          return 0;
 150      2
 151      2      if ( addr < start + (vm_end[type] / 8) ||
 152      2           addr >= start + vm_top[type] * PAGE_SIZE )
 153      0          return 0;
 154      2
 155      2      idx = PFN_DOWN(va - vm_base[type]);
 156      2      return !test_bit(idx - 1, vm_bitmap(type)) &&
 157      2             test_bit(idx, vm_bitmap(type)) ? idx : 0;
 158      2  }
 159
 160         static unsigned int vm_size(const void *va, enum vmap_region type)
 161      1  {
 162      1      unsigned int start = vm_index(va, type), end;
 163      1
 164      1      if ( !start )
 165      0          return 0;
 166      1
 167      1      end = find_next_zero_bit(vm_bitmap(type), vm_top[type], start + 1);
 168      1
 169      1      return min(end, vm_top[type]) - start;
 170      1  }
 171
 172         static void vm_free(const void *va)
 173      1  {
 174      1      enum vmap_region type = VMAP_DEFAULT;
 175      1      unsigned int bit = vm_index(va, type);
 176      1
 177      1      if ( !bit )
 178      0      {
 179      0          type = VMAP_XEN;
 180      0          bit = vm_index(va, type);
 181      0      }
 182      1
 183      1      if ( !bit )
 184      0      {
 185      0          WARN_ON(va != NULL);
 186      0          return;
 187      0      }
 188      1
 189      1      spin_lock(&vm_lock);
 190      1      if ( bit < vm_low[type] )
 191      1      {
 192      1          vm_low[type] = bit - 1;
 193      1          while ( !test_bit(vm_low[type] - 1, vm_bitmap(type)) )
 194      0              --vm_low[type];
 195      1      }
 196      2      while ( __test_and_clear_bit(bit, vm_bitmap(type)) )
 197      1          if ( ++bit == vm_top[type] )
 198      0              break;
 199      1      spin_unlock(&vm_lock);
 200      1  }
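The release loop (report lines 196-198) is why no allocation size needs to be stored anywhere: __test_and_clear_bit() keeps clearing consecutive set bits from the allocation's first slot and stops at the first already-clear bit, i.e. the guard slot of whatever comes next. A tiny standalone trace of that behaviour, with a hypothetical bitmap value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int map = 0x76;    /* hypothetical bitmap 0b1110110 */
        unsigned int bit = 1;       /* allocation started at slot 1 */

        while ( map & (1u << bit) ) /* mirrors the test-and-clear loop */
        {
            map &= ~(1u << bit);
            ++bit;
        }
        printf("%#x\n", map); /* 0x70: slots 1-2 freed, clear slot 3 ended the loop */
        return 0;
    }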
 201
 202         void *__vmap(const mfn_t *mfn, unsigned int granularity,
 203                      unsigned int nr, unsigned int align, unsigned int flags,
 204                      enum vmap_region type)
 205     48  {
 206     48      void *va = vm_alloc(nr * granularity, align, type);
 207     48      unsigned long cur = (unsigned long)va;
 208     48
 209     97      for ( ; va && nr--; ++mfn, cur += PAGE_SIZE * granularity )
 210     49      {
 211     49          if ( map_pages_to_xen(cur, mfn_x(*mfn), granularity, flags) )
 212      0          {
 213      0              vunmap(va);
 214      0              va = NULL;
 215      0          }
 216     49      }
 217     48
 218     48      return va;
 219     48  }
 220
 221         void *vmap(const mfn_t *mfn, unsigned int nr)
 222     24  {
 223     24      return __vmap(mfn, 1, nr, 1, PAGE_HYPERVISOR, VMAP_DEFAULT);
 224     24  }
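vmap() is the main entry point exercised in this run (24 calls, each delegating to __vmap() with granularity 1, alignment 1 and PAGE_HYPERVISOR). A hypothetical caller, for orientation only (example_map_one, its MFN argument and the error handling are not part of vmap.c):

    /* Hypothetical: map one machine frame through the vmap area, touch
     * it, then tear the mapping down again. */
    static int example_map_one(unsigned long raw_mfn)
    {
        mfn_t mfn = _mfn(raw_mfn);
        void *va = vmap(&mfn, 1); /* one page, VMAP_DEFAULT region */

        if ( !va )
            return -ENOMEM;

        memset(va, 0, PAGE_SIZE); /* frame is now accessible through va */
        vunmap(va);               /* unmaps and releases the VA range */
        return 0;
    }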
 225
 226         void vunmap(const void *va)
 227      1  {
 228      1      unsigned long addr = (unsigned long)va;
 229      1      unsigned int pages = vm_size(va, VMAP_DEFAULT);
 230      1
 231      1      if ( !pages )
 232      0          pages = vm_size(va, VMAP_XEN);
 233      1
 234      1  #ifndef _PAGE_NONE
 235             destroy_xen_mappings(addr, addr + PAGE_SIZE * pages);
 236         #else /* Avoid tearing down intermediate page tables. */
 237      1      map_pages_to_xen(addr, 0, pages, _PAGE_NONE);
 238      1  #endif
 239      1      vm_free(va);
 240      1  }
 241
 242         static void *vmalloc_type(size_t size, enum vmap_region type)
 243      1  {
 244      1      mfn_t *mfn;
 245      1      size_t pages, i;
 246      1      struct page_info *pg;
 247      1      void *va;
 248      1
 249      1      ASSERT(size);
 250      1
 251      1      pages = PFN_UP(size);
 252      1      mfn = xmalloc_array(mfn_t, pages);
 253      1      if ( mfn == NULL )
 254      0          return NULL;
 255      1
 256      3      for ( i = 0; i < pages; i++ )
 257      2      {
 258      2          pg = alloc_domheap_page(NULL, 0);
 259      2          if ( pg == NULL )
 260      0              goto error;
 261      2          mfn[i] = _mfn(page_to_mfn(pg));
 262      2      }
 263      1
 264      1      va = __vmap(mfn, 1, pages, 1, PAGE_HYPERVISOR, type);
 265      1      if ( va == NULL )
 266      0          goto error;
 267      1
 268      1      xfree(mfn);
 269      1      return va;
 270      1
 271      0   error:
 272      0      while ( i-- )
 273      0          free_domheap_page(mfn_to_page(mfn_x(mfn[i])));
 274      0      xfree(mfn);
 275      0      return NULL;
 276      1  }
 277
 278         void *vmalloc(size_t size)
 279      0  {
 280      0      return vmalloc_type(size, VMAP_DEFAULT);
 281      0  }
 282
 283         void *vmalloc_xen(size_t size)
 284      0  {
 285      0      return vmalloc_type(size, VMAP_XEN);
 286      0  }
 287
 288         void *vzalloc(size_t size)
 289      1  {
 290      1      void *p = vmalloc_type(size, VMAP_DEFAULT);
 291      1      int i;
 292      1
 293      1      if ( p == NULL )
 294      0          return NULL;
 295      1
 296      3      for ( i = 0; i < size; i += PAGE_SIZE )
 297      2          clear_page(p + i);
 298      1
 299      1      return p;
 300      1  }
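vzalloc() clears its pages one clear_page() at a time (report lines 296-297: two pages cleared across the single covered call), while vfree() below, its matching release path, went completely unexercised in this run. A hypothetical paired use (example_scratch and the buffer size are illustrative):

    /* Hypothetical: allocate a zeroed, page-granular scratch buffer,
     * use it, and hand the pages back to the domheap. */
    static void example_scratch(void)
    {
        unsigned int *scratch = vzalloc(3 * PAGE_SIZE);

        if ( !scratch )
            return;

        scratch[0] = 1; /* memory arrives zeroed and page-aligned */
        vfree(scratch); /* frees the pages and the VA range */
    }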
 301
 302         void vfree(void *va)
 303      0  {
 304      0      unsigned int i, pages;
 305      0      struct page_info *pg;
 306      0      PAGE_LIST_HEAD(pg_list);
 307      0      enum vmap_region type = VMAP_DEFAULT;
 308      0
 309      0      if ( !va )
 310      0          return;
 311      0
 312      0      pages = vm_size(va, type);
 313      0      if ( !pages )
 314      0      {
 315      0          type = VMAP_XEN;
 316      0          pages = vm_size(va, type);
 317      0      }
 318      0      ASSERT(pages);
 319      0
 320      0      for ( i = 0; i < pages; i++ )
 321      0      {
 322      0          struct page_info *page = vmap_to_page(va + i * PAGE_SIZE);
 323      0
 324      0          ASSERT(page);
 325      0          page_list_add(page, &pg_list);
 326      0      }
 327      0      vunmap(va);
 328      0
 329      0      while ( (pg = page_list_remove_head(&pg_list)) != NULL )
 330      0          free_domheap_page(pg);
 331      0  }
 332         #endif