Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/hvm/domain.c
Line : Count : Source
   1 :       : /*
   2 :       :  * HVM domain specific functions.
   3 :       :  *
   4 :       :  * Copyright (C) 2017 Citrix Systems R&D
   5 :       :  *
   6 :       :  * This program is free software; you can redistribute it and/or
   7 :       :  * modify it under the terms and conditions of the GNU General Public
   8 :       :  * License, version 2, as published by the Free Software Foundation.
   9 :       :  *
  10 :       :  * This program is distributed in the hope that it will be useful,
  11 :       :  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 :       :  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 :       :  * General Public License for more details.
  14 :       :  *
  15 :       :  * You should have received a copy of the GNU General Public
  16 :       :  * License along with this program; If not, see <http://www.gnu.org/licenses/>.
  17 :       :  */
  18 :       :
  19 :       : #include <xen/domain_page.h>
  20 :       : #include <xen/errno.h>
  21 :       : #include <xen/lib.h>
  22 :       : #include <xen/paging.h>
  23 :       : #include <xen/sched.h>
  24 :       :
  25 :       : #include <public/hvm/hvm_vcpu.h>
  26 :       :
  27 :       : static int check_segment(struct segment_register *reg, enum x86_segment seg)
  28 :     5 : {
  29 :     5 :
  30 :     5 :     if ( reg->pad != 0 )
  31 :     0 :     {
  32 :     0 :         gprintk(XENLOG_ERR, "Segment attribute bits 12-15 are not zero\n");
  33 :     0 :         return -EINVAL;
  34 :     0 :     }
  35 :     5 :
  36 :     5 :     if ( reg->attr == 0 )
  37 :     1 :     {
  38 :     1 :         if ( seg != x86_seg_ds && seg != x86_seg_es )
  39 :     0 :         {
  40 :     0 :             gprintk(XENLOG_ERR, "Null selector provided for CS, SS or TR\n");
  41 :     0 :             return -EINVAL;
  42 :     0 :         }
  43 :     1 :         return 0;
  44 :     1 :     }
  45 :     5 :
  46 :     4 :     if ( seg == x86_seg_tr )
  47 :     1 :     {
  48 :     1 :         if ( reg->s )
  49 :     0 :         {
  50 :     0 :             gprintk(XENLOG_ERR, "Code or data segment provided for TR\n");
  51 :     0 :             return -EINVAL;
  52 :     0 :         }
  53 :     1 :
  54 :     1 :         if ( reg->type != SYS_DESC_tss_busy )
  55 :     0 :         {
  56 :     0 :             gprintk(XENLOG_ERR, "Non-32-bit-TSS segment provided for TR\n");
  57 :     0 :             return -EINVAL;
  58 :     0 :         }
  59 :     1 :     }
  60 :     3 :     else if ( !reg->s )
  61 :     0 :     {
  62 :     0 :         gprintk(XENLOG_ERR,
  63 :     0 :                 "System segment provided for a code or data segment\n");
  64 :     0 :         return -EINVAL;
  65 :     0 :     }
  66 :     4 :
  67 :     4 :     if ( !reg->p )
  68 :     0 :     {
  69 :     0 :         gprintk(XENLOG_ERR, "Non-present segment provided\n");
  70 :     0 :         return -EINVAL;
  71 :     0 :     }
  72 :     4 :
  73 :     4 :     switch ( seg )
  74 :     4 :     {
  75 :     1 :     case x86_seg_cs:
  76 :     1 :         if ( !(reg->type & 0x8) )
  77 :     0 :         {
  78 :     0 :             gprintk(XENLOG_ERR, "Non-code segment provided for CS\n");
  79 :     0 :             return -EINVAL;
  80 :     0 :         }
  81 :     1 :         break;
  82 :     1 :
  83 :     1 :     case x86_seg_ss:
  84 :     1 :         if ( (reg->type & 0x8) || !(reg->type & 0x2) )
  85 :     0 :         {
  86 :     0 :             gprintk(XENLOG_ERR, "Non-writeable segment provided for SS\n");
  87 :     0 :             return -EINVAL;
  88 :     0 :         }
  89 :     1 :         break;
  90 :     1 :
  91 :     1 :     case x86_seg_ds:
  92 :     1 :     case x86_seg_es:
  93 :     1 :         if ( (reg->type & 0x8) && !(reg->type & 0x2) )
  94 :     0 :         {
  95 :     0 :             gprintk(XENLOG_ERR, "Non-readable segment provided for DS or ES\n");
  96 :     0 :             return -EINVAL;
  97 :     0 :         }
  98 :     1 :         break;
  99 :     1 :
 100 :     1 :     case x86_seg_tr:
 101 :     1 :         break;
 102 :     1 :
 103 :     0 :     default:
 104 :     0 :         ASSERT_UNREACHABLE();
 105 :     0 :         return -EINVAL;
 106 :     4 :     }
 107 :     4 :
 108 :     4 :     return 0;
 109 :     4 : }
 110 :       :
 111 :       : /* Called by VCPUOP_initialise for HVM guests. */
 112 :       : int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
 113 :     1 : {
 114 :     1 :     struct cpu_user_regs *uregs = &v->arch.user_regs;
 115 :     1 :     struct segment_register cs, ds, ss, es, tr;
 116 :     1 :     const char *errstr;
 117 :     1 :     int rc;
 118 :     1 :
 119 :     1 :     if ( ctx->pad != 0 )
 120 :     0 :         return -EINVAL;
 121 :     1 :
 122 :     1 :     switch ( ctx->mode )
 123 :     1 :     {
 124 :     0 :     default:
 125 :     0 :         return -EINVAL;
 126 :     0 :
 127 :     1 :     case VCPU_HVM_MODE_32B:
 128 :     1 :     {
 129 :     1 :         const struct vcpu_hvm_x86_32 *regs = &ctx->cpu_regs.x86_32;
 130 :     1 :         uint32_t limit;
 131 :     1 :
 132 :     1 :         if ( ctx->cpu_regs.x86_32.pad1 != 0 ||
 133 :     1 :              ctx->cpu_regs.x86_32.pad2[0] != 0 ||
 134 :     1 :              ctx->cpu_regs.x86_32.pad2[1] != 0 ||
 135 :     1 :              ctx->cpu_regs.x86_32.pad2[2] != 0 )
 136 :     0 :             return -EINVAL;
 137 :     1 :
 138 :     5 : #define SEG(s, r) ({                                                        \
 139 :     5 :     s = (struct segment_register)                                           \
 140 :     5 :         { 0, { (r)->s ## _ar }, (r)->s ## _limit, (r)->s ## _base };        \
 141 :     5 :     /* Set accessed / busy bit for present segments. */                     \
 142 :     5 :     if ( s.p )                                                              \
 143 :     4 :         s.type |= (x86_seg_##s != x86_seg_tr ? 1 : 2);                      \
 144 :     5 :     check_segment(&s, x86_seg_ ## s); })
 145 :     1 :
 146 :     1 :         rc = SEG(cs, regs);
 147 :     1 :         rc |= SEG(ds, regs);
 148 :     1 :         rc |= SEG(ss, regs);
 149 :     1 :         rc |= SEG(es, regs);
 150 :     1 :         rc |= SEG(tr, regs);
 151 :     1 : #undef SEG
 152 :     1 :
 153 :     1 :         if ( rc != 0 )
 154 :     0 :             return rc;
 155 :     1 :
 156 :     1 :         /* Basic sanity checks. */
 157 :     1 :         limit = cs.limit;
 158 :     1 :         if ( cs.g )
 159 :     1 :             limit = (limit << 12) | 0xfff;
 160 :     1 :         if ( regs->eip > limit )
 161 :     0 :         {
 162 :     0 :             gprintk(XENLOG_ERR, "EIP (%#08x) outside CS limit (%#08x)\n",
 163 :     0 :                     regs->eip, limit);
 164 :     0 :             return -EINVAL;
 165 :     0 :         }
 166 :     1 :
 167 :     1 :         if ( ss.dpl != cs.dpl )
 168 :     0 :         {
 169 :     0 :             gprintk(XENLOG_ERR, "SS.DPL (%u) is different than CS.DPL (%u)\n",
 170 :     0 :                     ss.dpl, cs.dpl);
 171 :     0 :             return -EINVAL;
 172 :     0 :         }
 173 :     1 :
 174 :     1 :         if ( ds.p && ds.dpl > cs.dpl )
 175 :     0 :         {
 176 :     0 :             gprintk(XENLOG_ERR, "DS.DPL (%u) is greater than CS.DPL (%u)\n",
 177 :     0 :                     ds.dpl, cs.dpl);
 178 :     0 :             return -EINVAL;
 179 :     0 :         }
 180 :     1 :
 181 :     1 :         if ( es.p && es.dpl > cs.dpl )
 182 :     0 :         {
 183 :     0 :             gprintk(XENLOG_ERR, "ES.DPL (%u) is greater than CS.DPL (%u)\n",
 184 :     0 :                     es.dpl, cs.dpl);
 185 :     0 :             return -EINVAL;
 186 :     0 :         }
 187 :     1 :
 188 :     1 :         if ( (regs->efer & EFER_LMA) && !(regs->efer & EFER_LME) )
 189 :     0 :         {
 190 :     0 :             gprintk(XENLOG_ERR, "EFER.LMA set without EFER.LME (%#016lx)\n",
 191 :     0 :                     regs->efer);
 192 :     0 :             return -EINVAL;
 193 :     0 :         }
 194 :     1 :
 195 :     1 :         uregs->rax    = regs->eax;
 196 :     1 :         uregs->rcx    = regs->ecx;
 197 :     1 :         uregs->rdx    = regs->edx;
 198 :     1 :         uregs->rbx    = regs->ebx;
 199 :     1 :         uregs->rsp    = regs->esp;
 200 :     1 :         uregs->rbp    = regs->ebp;
 201 :     1 :         uregs->rsi    = regs->esi;
 202 :     1 :         uregs->rdi    = regs->edi;
 203 :     1 :         uregs->rip    = regs->eip;
 204 :     1 :         uregs->rflags = regs->eflags;
 205 :     1 :
 206 :     1 :         v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
 207 :     1 :         v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
 208 :     1 :         v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
 209 :     1 :         v->arch.hvm_vcpu.guest_efer  = regs->efer;
 210 :     1 :     }
 211 :     1 :     break;
 212 :     1 :
 213 :     0 :     case VCPU_HVM_MODE_64B:
 214 :     0 :     {
 215 :     0 :         const struct vcpu_hvm_x86_64 *regs = &ctx->cpu_regs.x86_64;
 216 :     0 :
 217 :     0 :         /* Basic sanity checks. */
 218 :     0 :         if ( !is_canonical_address(regs->rip) )
 219 :     0 :         {
 220 :     0 :             gprintk(XENLOG_ERR, "RIP contains a non-canonical address (%#lx)\n",
 221 :     0 :                     regs->rip);
 222 :     0 :             return -EINVAL;
 223 :     0 :         }
 224 :     0 :
 225 :     0 :         if ( !(regs->cr0 & X86_CR0_PG) )
 226 :     0 :         {
 227 :     0 :             gprintk(XENLOG_ERR, "CR0 doesn't have paging enabled (%#016lx)\n",
 228 :     0 :                     regs->cr0);
 229 :     0 :             return -EINVAL;
 230 :     0 :         }
 231 :     0 :
 232 :     0 :         if ( !(regs->cr4 & X86_CR4_PAE) )
 233 :     0 :         {
 234 :     0 :             gprintk(XENLOG_ERR, "CR4 doesn't have PAE enabled (%#016lx)\n",
 235 :     0 :                     regs->cr4);
 236 :     0 :             return -EINVAL;
 237 :     0 :         }
 238 :     0 :
 239 :     0 :         if ( !(regs->efer & EFER_LME) )
 240 :     0 :         {
 241 :     0 :             gprintk(XENLOG_ERR, "EFER doesn't have LME enabled (%#016lx)\n",
 242 :     0 :                     regs->efer);
 243 :     0 :             return -EINVAL;
 244 :     0 :         }
 245 :     0 :
 246 :     0 :         uregs->rax    = regs->rax;
 247 :     0 :         uregs->rcx    = regs->rcx;
 248 :     0 :         uregs->rdx    = regs->rdx;
 249 :     0 :         uregs->rbx    = regs->rbx;
 250 :     0 :         uregs->rsp    = regs->rsp;
 251 :     0 :         uregs->rbp    = regs->rbp;
 252 :     0 :         uregs->rsi    = regs->rsi;
 253 :     0 :         uregs->rdi    = regs->rdi;
 254 :     0 :         uregs->rip    = regs->rip;
 255 :     0 :         uregs->rflags = regs->rflags;
 256 :     0 :
 257 :     0 :         v->arch.hvm_vcpu.guest_cr[0] = regs->cr0;
 258 :     0 :         v->arch.hvm_vcpu.guest_cr[3] = regs->cr3;
 259 :     0 :         v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
 260 :     0 :         v->arch.hvm_vcpu.guest_efer  = regs->efer;
 261 :     0 :
 262 :     0 : #define SEG(l, a) (struct segment_register){ 0, { a }, l, 0 }
 263 :     0 :         cs = SEG(~0u, 0xa9b); /* 64bit code segment. */
 264 :     0 :         ds = ss = es = SEG(~0u, 0xc93);
 265 :     0 :         tr = SEG(0x67, 0x8b); /* 64bit TSS (busy). */
 266 :     0 : #undef SEG
 267 :     0 :     }
 268 :     0 :     break;
 269 :     1 :
 270 :     1 :     }
 271 :     1 :
 272 :     1 :     if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
 273 :     0 :         v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
 274 :     1 :
 275 :     1 :     if ( v->arch.hvm_vcpu.guest_cr[4] & ~hvm_cr4_guest_valid_bits(v, 0) )
 276 :     0 :     {
 277 :     0 :         gprintk(XENLOG_ERR, "Bad CR4 value: %#016lx\n",
 278 :     0 :                 v->arch.hvm_vcpu.guest_cr[4]);
 279 :     0 :         return -EINVAL;
 280 :     0 :     }
 281 :     1 :
 282 :     1 :     errstr = hvm_efer_valid(v, v->arch.hvm_vcpu.guest_efer, -1);
 283 :     1 :     if ( errstr )
 284 :     0 :     {
 285 :     0 :         gprintk(XENLOG_ERR, "Bad EFER value (%#016lx): %s\n",
 286 :     0 :                v->arch.hvm_vcpu.guest_efer, errstr);
 287 :     0 :         return -EINVAL;
 288 :     0 :     }
 289 :     1 :
 290 :     1 :     hvm_update_guest_cr(v, 0);
 291 :     1 :     hvm_update_guest_cr(v, 3);
 292 :     1 :     hvm_update_guest_cr(v, 4);
 293 :     1 :     hvm_update_guest_efer(v);
 294 :     1 :
 295 :     1 :     if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
 296 :     0 :     {
 297 :     0 :         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
 298 :     0 :         struct page_info *page = get_page_from_gfn(v->domain,
 299 :     0 :                                  v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT,
 300 :     0 :                                  NULL, P2M_ALLOC);
 301 :     0 :         if ( !page )
 302 :     0 :         {
 303 :     0 :             gprintk(XENLOG_ERR, "Invalid CR3: %#lx\n",
 304 :     0 :                     v->arch.hvm_vcpu.guest_cr[3]);
 305 :     0 :             return -EINVAL;
 306 :     0 :         }
 307 :     0 :
 308 :     0 :         v->arch.guest_table = pagetable_from_page(page);
 309 :     0 :     }
 310 :     1 :
 311 :     1 :     hvm_set_segment_register(v, x86_seg_cs, &cs);
 312 :     1 :     hvm_set_segment_register(v, x86_seg_ds, &ds);
 313 :     1 :     hvm_set_segment_register(v, x86_seg_ss, &ss);
 314 :     1 :     hvm_set_segment_register(v, x86_seg_es, &es);
 315 :     1 :     hvm_set_segment_register(v, x86_seg_tr, &tr);
 316 :     1 :
 317 :     1 :     /* Sync AP's TSC with BSP's. */
 318 :     1 :     v->arch.hvm_vcpu.cache_tsc_offset =
 319 :     1 :         v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
 320 :     1 :     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset,
 321 :     1 :                              v->domain->arch.hvm_domain.sync_tsc);
 322 :     1 :
 323 :     1 :     paging_update_paging_modes(v);
 324 :     1 :
 325 :     1 :     v->is_initialised = 1;
 326 :     1 :     set_bit(_VPF_down, &v->pause_flags);
 327 :     1 :
 328 :     1 :     return 0;
 329 :     1 : }
 330 :       :
 331 :       : /*
 332 :       :  * Local variables:
 333 :       :  * mode: C
 334 :       :  * c-file-style: "BSD"
 335 :       :  * c-basic-offset: 4
 336 :       :  * tab-width: 4
 337 :       :  * indent-tabs-mode: nil
 338 :       :  * End:
 339 :       :  */
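
Note: the VCPU_HVM_MODE_64B branch of arch_set_info_hvm_guest (lines 213-268) shows zero hits, i.e. this run only exercised 32-bit vCPU bring-up via VCPUOP_initialise. For reference, a minimal sketch of guest-side code that would reach the 64-bit path, assuming Xen's public headers are reachable as <xen/vcpu.h> and <xen/hvm/hvm_vcpu.h> (include paths vary by guest environment) and that a HYPERVISOR_vcpu_op() hypercall wrapper is available; the function name, entry point, stack, and page-table values are illustrative, not taken from the report:

#include <stdint.h>
#include <xen/vcpu.h>          /* VCPUOP_initialise, VCPUOP_up */
#include <xen/hvm/hvm_vcpu.h>  /* struct vcpu_hvm_context, VCPU_HVM_MODE_64B */

/*
 * Bring up AP 'vcpu_id' directly in long mode.  arch_set_info_hvm_guest()
 * validates this state (canonical RIP, CR0.PG, CR4.PAE, EFER.LME) and loads
 * flat 64-bit segments itself (lines 262-265), so no selectors are passed.
 */
static int bringup_ap_64(unsigned int vcpu_id, uint64_t entry,
                         uint64_t stack, uint64_t pgtable)
{
    struct vcpu_hvm_context ctx = {
        .mode = VCPU_HVM_MODE_64B,      /* .pad stays zero, as line 119 requires */
        .cpu_regs.x86_64 = {
            .rip  = entry,              /* must be a canonical address */
            .rsp  = stack,
            .cr0  = 0x80000001UL,       /* PG | PE */
            .cr3  = pgtable,            /* page tables must map 'entry' */
            .cr4  = 0x00000020UL,       /* PAE */
            .efer = 0x00000100UL,       /* LME; Xen sets LMA itself (line 273) */
        },
    };

    return HYPERVISOR_vcpu_op(VCPUOP_initialise, vcpu_id, &ctx);
}

The initialised vCPU is left paused (line 326 sets _VPF_down), so a subsequent VCPUOP_up is still needed before it starts running.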