Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/pv/hypercall.c
Source (every instrumented line in this file has an execution count of 0; nothing in the file was executed during this run)
/******************************************************************************
 * arch/x86/pv/hypercall.c
 *
 * PV hypercall dispatching routines
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (c) 2017 Citrix Systems Ltd.
 */

#include <xen/compiler.h>
#include <xen/hypercall.h>
#include <xen/trace.h>

#define HYPERCALL(x)                                                \
    [ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x,         \
                               (hypercall_fn_t *) do_ ## x }
#define COMPAT_CALL(x)                                              \
    [ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x,         \
                               (hypercall_fn_t *) compat_ ## x }

#define do_arch_1             paging_domctl_continuation

static const hypercall_table_t pv_hypercall_table[] = {
    COMPAT_CALL(set_trap_table),
    HYPERCALL(mmu_update),
    COMPAT_CALL(set_gdt),
    HYPERCALL(stack_switch),
    COMPAT_CALL(set_callbacks),
    HYPERCALL(fpu_taskswitch),
    HYPERCALL(sched_op_compat),
    COMPAT_CALL(platform_op),
    HYPERCALL(set_debugreg),
    HYPERCALL(get_debugreg),
    COMPAT_CALL(update_descriptor),
    COMPAT_CALL(memory_op),
    COMPAT_CALL(multicall),
    COMPAT_CALL(update_va_mapping),
    COMPAT_CALL(set_timer_op),
    HYPERCALL(event_channel_op_compat),
    COMPAT_CALL(xen_version),
    HYPERCALL(console_io),
    COMPAT_CALL(physdev_op_compat),
    COMPAT_CALL(grant_table_op),
    COMPAT_CALL(vm_assist),
    COMPAT_CALL(update_va_mapping_otherdomain),
    COMPAT_CALL(iret),
    COMPAT_CALL(vcpu_op),
    HYPERCALL(set_segment_base),
    COMPAT_CALL(mmuext_op),
    COMPAT_CALL(xsm_op),
    COMPAT_CALL(nmi_op),
    COMPAT_CALL(sched_op),
    COMPAT_CALL(callback_op),
#ifdef CONFIG_XENOPROF
    COMPAT_CALL(xenoprof_op),
#endif
    HYPERCALL(event_channel_op),
    COMPAT_CALL(physdev_op),
    HYPERCALL(hvm_op),
    HYPERCALL(sysctl),
    HYPERCALL(domctl),
#ifdef CONFIG_KEXEC
    COMPAT_CALL(kexec_op),
#endif
#ifdef CONFIG_TMEM
    HYPERCALL(tmem_op),
#endif
    HYPERCALL(xenpmu_op),
    COMPAT_CALL(dm_op),
    HYPERCALL(mca),
    HYPERCALL(arch_1),
};

#undef do_arch_1
#undef COMPAT_CALL
#undef HYPERCALL

void pv_hypercall(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    unsigned long eax;

    ASSERT(guest_kernel_mode(curr, regs));

    eax = is_pv_32bit_vcpu(curr) ? regs->eax : regs->rax;

    BUILD_BUG_ON(ARRAY_SIZE(pv_hypercall_table) >
                 ARRAY_SIZE(hypercall_args_table));

    if ( (eax >= ARRAY_SIZE(pv_hypercall_table)) ||
         !pv_hypercall_table[eax].native )
    {
        regs->rax = -ENOSYS;
        return;
    }

    curr->hcall_preempted = false;

    if ( !is_pv_32bit_vcpu(curr) )
    {
        unsigned long rdi = regs->rdi;
        unsigned long rsi = regs->rsi;
        unsigned long rdx = regs->rdx;
        unsigned long r10 = regs->r10;
        unsigned long r8 = regs->r8;
        unsigned long r9 = regs->r9;

#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
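        /* Note: each case deliberately falls through. */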
        switch ( hypercall_args_table[eax].native )
        {
        case 0: rdi = 0xdeadbeefdeadf00dUL;
        case 1: rsi = 0xdeadbeefdeadf00dUL;
        case 2: rdx = 0xdeadbeefdeadf00dUL;
        case 3: r10 = 0xdeadbeefdeadf00dUL;
        case 4: r8 = 0xdeadbeefdeadf00dUL;
        case 5: r9 = 0xdeadbeefdeadf00dUL;
        }
#endif
        if ( unlikely(tb_init_done) )
        {
            unsigned long args[6] = { rdi, rsi, rdx, r10, r8, r9 };

            __trace_hypercall(TRC_PV_HYPERCALL_V2, eax, args);
        }

        regs->rax = pv_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8, r9);

#ifndef NDEBUG
        if ( !curr->hcall_preempted )
        {
            /* Deliberately corrupt parameter regs used by this hypercall. */
            switch ( hypercall_args_table[eax].native )
            {
            case 6: regs->r9  = 0xdeadbeefdeadf00dUL;
            case 5: regs->r8  = 0xdeadbeefdeadf00dUL;
            case 4: regs->r10 = 0xdeadbeefdeadf00dUL;
            case 3: regs->rdx = 0xdeadbeefdeadf00dUL;
            case 2: regs->rsi = 0xdeadbeefdeadf00dUL;
            case 1: regs->rdi = 0xdeadbeefdeadf00dUL;
            }
        }
#endif
    }
    else
    {
        unsigned int ebx = regs->ebx;
        unsigned int ecx = regs->ecx;
        unsigned int edx = regs->edx;
        unsigned int esi = regs->esi;
        unsigned int edi = regs->edi;
        unsigned int ebp = regs->ebp;

#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        switch ( hypercall_args_table[eax].compat )
        {
        case 0: ebx = 0xdeadf00d;
        case 1: ecx = 0xdeadf00d;
        case 2: edx = 0xdeadf00d;
        case 3: esi = 0xdeadf00d;
        case 4: edi = 0xdeadf00d;
        case 5: ebp = 0xdeadf00d;
        }
#endif

        if ( unlikely(tb_init_done) )
        {
            unsigned long args[6] = { ebx, ecx, edx, esi, edi, ebp };

            __trace_hypercall(TRC_PV_HYPERCALL_V2, eax, args);
        }

        curr->hcall_compat = true;
        regs->eax = pv_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi, ebp);
        curr->hcall_compat = false;

#ifndef NDEBUG
        if ( !curr->hcall_preempted )
        {
            /* Deliberately corrupt parameter regs used by this hypercall. */
            switch ( hypercall_args_table[eax].compat )
            {
            case 6: regs->ebp = 0xdeadf00d;
            case 5: regs->edi = 0xdeadf00d;
            case 4: regs->esi = 0xdeadf00d;
            case 3: regs->edx = 0xdeadf00d;
            case 2: regs->ecx = 0xdeadf00d;
            case 1: regs->ebx = 0xdeadf00d;
            }
        }
#endif
    }

    /*
     * PV guests use SYSCALL or INT $0x82 to make a hypercall, both of which
     * have trap semantics.  If the hypercall has been preempted, rewind the
     * instruction pointer to reexecute the instruction.
     */
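    /* Both SYSCALL and INT $0x82 are two-byte instructions, hence -2. */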
    if ( curr->hcall_preempted )
        regs->rip -= 2;

    perfc_incr(hypercalls);
}

enum mc_disposition arch_do_multicall_call(struct mc_state *state)
{
    struct vcpu *curr = current;
    unsigned long op;

    if ( !is_pv_32bit_vcpu(curr) )
    {
        struct multicall_entry *call = &state->call;

        op = call->op;
        if ( (op < ARRAY_SIZE(pv_hypercall_table)) &&
             pv_hypercall_table[op].native )
            call->result = pv_hypercall_table[op].native(
                call->args[0], call->args[1], call->args[2],
                call->args[3], call->args[4], call->args[5]);
        else
            call->result = -ENOSYS;
    }
#ifdef CONFIG_COMPAT
    else
    {
        struct compat_multicall_entry *call = &state->compat_call;

        op = call->op;
        if ( (op < ARRAY_SIZE(pv_hypercall_table)) &&
             pv_hypercall_table[op].compat )
            call->result = pv_hypercall_table[op].compat(
                call->args[0], call->args[1], call->args[2],
                call->args[3], call->args[4], call->args[5]);
        else
            call->result = -ENOSYS;
    }
#endif

    return unlikely(op == __HYPERVISOR_iret)
           ? mc_exit
           : likely(guest_kernel_mode(curr, guest_cpu_user_regs()))
             ? mc_continue : mc_preempt;
}

void hypercall_page_initialise_ring3_kernel(void *hypercall_page)
{
    void *p = hypercall_page;
    unsigned int i;

    /* Fill in all the transfer points with template machine code. */
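    /* Each transfer point is a 32-byte stub, indexed by hypercall number. */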
    for ( i = 0; i < (PAGE_SIZE / 32); i++, p += 32 )
    {
        if ( i == __HYPERVISOR_iret )
            continue;

        *(u8  *)(p+ 0) = 0x51;    /* push %rcx */
        *(u16 *)(p+ 1) = 0x5341;  /* push %r11 */
        *(u8  *)(p+ 3) = 0xb8;    /* mov  $<i>,%eax */
        *(u32 *)(p+ 4) = i;
        *(u16 *)(p+ 8) = 0x050f;  /* syscall */
        *(u16 *)(p+10) = 0x5b41;  /* pop  %r11 */
        *(u8  *)(p+12) = 0x59;    /* pop  %rcx */
        *(u8  *)(p+13) = 0xc3;    /* ret */
    }

    /*
     * HYPERVISOR_iret is special because it doesn't return and expects a
     * special stack frame. Guests jump at this transfer point instead of
     * calling it.
     */
    p = hypercall_page + (__HYPERVISOR_iret * 32);
    *(u8  *)(p+ 0) = 0x51;    /* push %rcx */
    *(u16 *)(p+ 1) = 0x5341;  /* push %r11 */
    *(u8  *)(p+ 3) = 0x50;    /* push %rax */
    *(u8  *)(p+ 4) = 0xb8;    /* mov  $__HYPERVISOR_iret,%eax */
    *(u32 *)(p+ 5) = __HYPERVISOR_iret;
    *(u16 *)(p+ 9) = 0x050f;  /* syscall */
}

void hypercall_page_initialise_ring1_kernel(void *hypercall_page)
{
    void *p = hypercall_page;
    unsigned int i;

    /* Fill in all the transfer points with template machine code. */

    for ( i = 0; i < (PAGE_SIZE / 32); i++, p += 32 )
    {
        if ( i == __HYPERVISOR_iret )
            continue;

        *(u8  *)(p+ 0) = 0xb8;    /* mov  $<i>,%eax */
        *(u32 *)(p+ 1) = i;
        *(u16 *)(p+ 5) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int  $xx */
        *(u8  *)(p+ 7) = 0xc3;    /* ret */
    }

    /*
     * HYPERVISOR_iret is special because it doesn't return and expects a
     * special stack frame. Guests jump at this transfer point instead of
     * calling it.
     */
    p = hypercall_page + (__HYPERVISOR_iret * 32);
    *(u8  *)(p+ 0) = 0x50;    /* push %eax */
    *(u8  *)(p+ 1) = 0xb8;    /* mov  $__HYPERVISOR_iret,%eax */
    *(u32 *)(p+ 2) = __HYPERVISOR_iret;
    *(u16 *)(p+ 6) = (HYPERCALL_VECTOR << 8) | 0xcd; /* int  $xx */
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */