Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/pv/mm.c
All executable lines in this file have an execution count of 0; the file is entirely uncovered. Source follows.
/*
 * pv/mm.c
 *
 * Memory management code for PV guests
 *
 * Copyright (c) 2002-2005 K A Fraser
 * Copyright (c) 2004 Christian Limpach
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms and conditions of the GNU General Public
 * License, version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/guest_access.h>

#include <asm/current.h>
#include <asm/p2m.h>

#include "mm.h"

/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
#undef page_to_mfn
#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))

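These overrides let the rest of the file work with the typesafe mfn_t wrapper instead of raw unsigned long frame numbers. A minimal sketch of the effect, using a hypothetical page pointer pg (not from this file):

/* Hypothetical illustration: round-trip a page through the overridden macros. */
mfn_t mfn = page_to_mfn(pg);               /* yields a typesafe mfn_t */
struct page_info *same = mfn_to_page(mfn); /* accepts the mfn_t directly */
ASSERT(same == pg);
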
/*
 * Get a mapping of a PV guest's l1e for this linear address.  The returned
 * pointer should be unmapped using unmap_domain_page().
 */
l1_pgentry_t *map_guest_l1e(unsigned long linear, mfn_t *gl1mfn)
{
    l2_pgentry_t l2e;

    ASSERT(!paging_mode_translate(current->domain));
    ASSERT(!paging_mode_external(current->domain));

    if ( unlikely(!__addr_ok(linear)) )
        return NULL;

    /* Find this l1e and its enclosing l1mfn in the linear map. */
    if ( __copy_from_user(&l2e,
                          &__linear_l2_table[l2_linear_offset(linear)],
                          sizeof(l2_pgentry_t)) )
        return NULL;

    /* Check the flags to confirm it is safe to read the l1e. */
    if ( (l2e_get_flags(l2e) & (_PAGE_PRESENT | _PAGE_PSE)) != _PAGE_PRESENT )
        return NULL;

    *gl1mfn = l2e_get_mfn(l2e);

    return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(linear);
}
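
As a sketch of how a caller might consume this function while honoring the unmap requirement above (hypothetical caller code, not part of this file):

/* Hypothetical caller: look up the guest l1e for 'linear', copy it, unmap. */
mfn_t gl1mfn;
l1_pgentry_t *pl1e = map_guest_l1e(linear, &gl1mfn);

if ( pl1e )
{
    l1_pgentry_t l1e = *pl1e;   /* take a local copy of the entry */

    unmap_domain_page(pl1e);    /* required by the contract above */
    /* ... inspect l1e and gl1mfn ... */
}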

/*
 * Read the guest's l1e that maps this address, from the kernel-mode
 * page tables.
 */
static l1_pgentry_t guest_get_eff_kern_l1e(unsigned long linear)
{
    struct vcpu *curr = current;
    const bool user_mode = !(curr->arch.flags & TF_kernel_mode);
    l1_pgentry_t l1e;

    if ( user_mode )
        toggle_guest_mode(curr);

    l1e = guest_get_eff_l1e(linear);

    if ( user_mode )
        toggle_guest_mode(curr);

    return l1e;
}
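
The double toggle is needed because 64-bit PV guests run user and kernel mode on separate page tables, and guest_get_eff_l1e() reads through whichever set is currently active; the function therefore briefly switches to the kernel view when called from user mode. A sketch of a hypothetical caller:

/* Hypothetical use: fetch the kernel-view l1e for an arbitrary address. */
l1_pgentry_t l1e = guest_get_eff_kern_l1e(linear);

if ( l1e_get_flags(l1e) & _PAGE_PRESENT )
    /* ... the address is mapped in the guest's kernel page tables ... */;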

/*
 * Map a guest's LDT page (covering the byte at @offset from the start of
 * the LDT) into Xen's virtual range.  Returns true if the mapping changed,
 * false otherwise.
 */
bool pv_map_ldt_shadow_page(unsigned int offset)
{
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;
    struct page_info *page;
    l1_pgentry_t gl1e, *pl1e;
    unsigned long linear = curr->arch.pv_vcpu.ldt_base + offset;

    BUG_ON(unlikely(in_irq()));

    /*
     * Hardware limit checking should guarantee this property.  NB. This is
     * safe as updates to the LDT can only be made by MMUEXT_SET_LDT to the
     * current vcpu, and vcpu_reset() will block until this vcpu has been
     * descheduled before continuing.
     */
    ASSERT((offset >> 3) <= curr->arch.pv_vcpu.ldt_ents);

    if ( is_pv_32bit_domain(currd) )
        linear = (uint32_t)linear;

    gl1e = guest_get_eff_kern_l1e(linear);
    if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) )
        return false;

    page = get_page_from_gfn(currd, l1e_get_pfn(gl1e), NULL, P2M_ALLOC);
    if ( unlikely(!page) )
        return false;

    if ( unlikely(!get_page_type(page, PGT_seg_desc_page)) )
    {
        put_page(page);
        return false;
    }

    pl1e = &pv_ldt_ptes(curr)[offset >> PAGE_SHIFT];
    l1e_add_flags(gl1e, _PAGE_RW);

    spin_lock(&curr->arch.pv_vcpu.shadow_ldt_lock);
    l1e_write(pl1e, gl1e);
    curr->arch.pv_vcpu.shadow_ldt_mapcnt++;
    spin_unlock(&curr->arch.pv_vcpu.shadow_ldt_lock);

    return true;
}
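
For context, a condensed sketch of the kind of demand-mapping fault path that would call this function (hypothetical shape; the real caller lives in the x86 trap-handling code):

/* Hypothetical fault-path caller: try to demand-map the touched LDT page. */
if ( pv_map_ldt_shadow_page(offset) )
    return;    /* mapping installed; retry the faulting access */

/* Otherwise the guest's LDT is not validly mapped: reflect a fault. */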

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */