Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/pv/iret.c
Every instrumented line in this file has an execution count of 0: neither
do_iret() nor compat_iret() was executed during the measured run. The source
is reproduced below.

/*
 * pv/iret.c
 *
 * iret hypercall handling code
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms and conditions of the GNU General Public
 * License, version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; If not, see
 * <http://www.gnu.org/licenses/>.
 */

#include <xen/guest_access.h>
#include <xen/lib.h>
#include <xen/sched.h>

#include <asm/current.h>
#include <asm/traps.h>

/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
#undef page_to_mfn
#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))

unsigned long do_iret(void)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct iret_context iret_saved;
    struct vcpu *v = current;

    if ( unlikely(copy_from_user(&iret_saved, (void *)regs->rsp,
                                 sizeof(iret_saved))) )
    {
        gprintk(XENLOG_ERR,
                "Fault while reading IRET context from guest stack\n");
        goto exit_and_crash;
    }

    /* Returning to user mode? */
    if ( (iret_saved.cs & 3) == 3 )
    {
        if ( unlikely(pagetable_is_null(v->arch.guest_table_user)) )
        {
            gprintk(XENLOG_ERR,
                    "Guest switching to user mode with no user page tables\n");
            goto exit_and_crash;
        }
        toggle_guest_mode(v);
    }

    if ( VM_ASSIST(v->domain, architectural_iopl) )
        v->arch.pv_vcpu.iopl = iret_saved.rflags & X86_EFLAGS_IOPL;

    regs->rip    = iret_saved.rip;
    regs->cs     = iret_saved.cs | 3; /* force guest privilege */
    regs->rflags = ((iret_saved.rflags & ~(X86_EFLAGS_IOPL|X86_EFLAGS_VM))
                    | X86_EFLAGS_IF);
    regs->rsp    = iret_saved.rsp;
    regs->ss     = iret_saved.ss | 3; /* force guest privilege */

    if ( !(iret_saved.flags & VGCF_in_syscall) )
    {
        regs->entry_vector &= ~TRAP_syscall;
        regs->r11 = iret_saved.r11;
        regs->rcx = iret_saved.rcx;
    }

    /* Restore upcall mask from supplied EFLAGS.IF. */
    vcpu_info(v, evtchn_upcall_mask) = !(iret_saved.rflags & X86_EFLAGS_IF);

    async_exception_cleanup(v);

    /* Saved %rax gets written back to regs->rax in entry.S. */
    return iret_saved.rax;

 exit_and_crash:
    domain_crash(v->domain);
    return 0;
}
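
For reference, the frame that do_iret() copies from the guest stack is the
9-slot structure a 64-bit PV guest pushes before issuing the iret hypercall.
The sketch below is an assumption based on Xen's public x86-64 ABI (struct
iret_context) and on the fields do_iret() actually consumes; it is not part
of this file.

#include <stdint.h>

/*
 * Sketch of the guest-stack frame read by do_iret() (assumed layout,
 * following Xen's public struct iret_context for 64-bit PV guests).
 */
struct iret_context_sketch {
    /* Top of stack (%rsp at the point of the hypercall). */
    uint64_t rax;                      /* written back to regs->rax in entry.S      */
    uint64_t r11, rcx;                 /* restored only if VGCF_in_syscall is clear */
    uint64_t flags;                    /* VGCF_in_syscall and similar flags         */
    uint64_t rip, cs, rflags, rsp, ss; /* the architectural IRET frame              */
    /* Bottom of the iret stack frame. */
};
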
unsigned int compat_iret(void)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct vcpu *v = current;
    u32 eflags;

    /* Trim stack pointer to 32 bits. */
    regs->rsp = (u32)regs->rsp;

    /* Restore EAX (clobbered by hypercall). */
    if ( unlikely(__get_user(regs->eax, (u32 *)regs->rsp)) )
    {
        domain_crash(v->domain);
        return 0;
    }

    /* Restore CS and EIP. */
    if ( unlikely(__get_user(regs->eip, (u32 *)regs->rsp + 1)) ||
         unlikely(__get_user(regs->cs, (u32 *)regs->rsp + 2)) )
    {
        domain_crash(v->domain);
        return 0;
    }

    /*
     * Fix up and restore EFLAGS. We fix up in a local staging area
     * to avoid firing the BUG_ON(IOPL) check in arch_get_info_guest.
     */
    if ( unlikely(__get_user(eflags, (u32 *)regs->rsp + 3)) )
    {
        domain_crash(v->domain);
        return 0;
    }

    if ( VM_ASSIST(v->domain, architectural_iopl) )
        v->arch.pv_vcpu.iopl = eflags & X86_EFLAGS_IOPL;

    regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;

    if ( unlikely(eflags & X86_EFLAGS_VM) )
    {
        /*
         * Cannot return to VM86 mode: inject a GP fault instead. Note that
         * the GP fault is reported on the first VM86 mode instruction, not on
         * the IRET (which is why we can simply leave the stack frame as-is
         * (except for perhaps having to copy it), which in turn seems better
         * than teaching create_bounce_frame() to needlessly deal with vm86
         * mode frames).
         */
        const struct trap_info *ti;
        u32 x, ksp = v->arch.pv_vcpu.kernel_sp - 40;
        unsigned int i;
        int rc = 0;

        gdprintk(XENLOG_ERR, "VM86 mode unavailable (ksp:%08X->%08X)\n",
                 regs->esp, ksp);
        if ( ksp < regs->esp )
        {
            for (i = 1; i < 10; ++i)
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        else if ( ksp > regs->esp )
        {
            for ( i = 9; i > 0; --i )
            {
                rc |= __get_user(x, (u32 *)regs->rsp + i);
                rc |= __put_user(x, (u32 *)(unsigned long)ksp + i);
            }
        }
        if ( rc )
        {
            domain_crash(v->domain);
            return 0;
        }
        regs->esp = ksp;
        regs->ss = v->arch.pv_vcpu.kernel_ss;

        ti = &v->arch.pv_vcpu.trap_ctxt[TRAP_gp_fault];
        if ( TI_GET_IF(ti) )
            eflags &= ~X86_EFLAGS_IF;
        regs->eflags &= ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
                          X86_EFLAGS_NT|X86_EFLAGS_TF);
        if ( unlikely(__put_user(0, (u32 *)regs->rsp)) )
        {
            domain_crash(v->domain);
            return 0;
        }
        regs->eip = ti->address;
        regs->cs = ti->cs;
    }
    else if ( unlikely(ring_0(regs)) )
    {
        domain_crash(v->domain);
        return 0;
    }
    else if ( ring_1(regs) )
        regs->esp += 16;
    /* Return to ring 2/3: restore ESP and SS. */
    else if ( __get_user(regs->ss, (u32 *)regs->rsp + 5) ||
              __get_user(regs->esp, (u32 *)regs->rsp + 4) )
    {
        domain_crash(v->domain);
        return 0;
    }

    /* Restore upcall mask from supplied EFLAGS.IF. */
    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);

    async_exception_cleanup(v);

    /*
     * The hypercall exit path will overwrite EAX with this return
     * value.
     */
    return regs->eax;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
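
compat_iret() above walks the equivalent 32-bit frame one slot at a time with
__get_user()/__put_user(). The offsets it uses ((u32 *)regs->rsp + 0 through
+ 5, plus the 40-byte copy in the VM86 path) imply the layout sketched below;
this is an illustration derived from the code, not a structure quoted from a
Xen header.

#include <stdint.h>

/* Assumed view of the compat (32-bit) iret frame on the guest stack. */
struct compat_iret_frame_sketch {
    uint32_t eax;      /* +0:  restored first (clobbered by the hypercall) */
    uint32_t eip;      /* +4                                               */
    uint32_t cs;       /* +8                                               */
    uint32_t eflags;   /* +12: staged in a local so IOPL can be filtered   */
    uint32_t esp;      /* +16: only read when returning to ring 2/3        */
    uint32_t ss;       /* +20: only read when returning to ring 2/3        */
    uint32_t vm86[4];  /* +24..+36: extra slots copied by the VM86 path    */
};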