Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/hvm/monitor.c
 Line | Count | Source
    1 |       | /*
    2 |       |  * arch/x86/hvm/monitor.c
    3 |       |  *
    4 |       |  * Arch-specific hardware virtual machine event abstractions.
    5 |       |  *
    6 |       |  * Copyright (c) 2004, Intel Corporation.
    7 |       |  * Copyright (c) 2005, International Business Machines Corporation.
    8 |       |  * Copyright (c) 2008, Citrix Systems, Inc.
    9 |       |  * Copyright (c) 2016, Bitdefender S.R.L.
   10 |       |  * Copyright (c) 2016, Tamas K Lengyel (tamas@tklengyel.com)
   11 |       |  *
   12 |       |  * This program is free software; you can redistribute it and/or modify it
   13 |       |  * under the terms and conditions of the GNU General Public License,
   14 |       |  * version 2, as published by the Free Software Foundation.
   15 |       |  *
   16 |       |  * This program is distributed in the hope it will be useful, but WITHOUT
   17 |       |  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   18 |       |  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
   19 |       |  * more details.
   20 |       |  *
   21 |       |  * You should have received a copy of the GNU General Public License along with
   22 |       |  * this program; If not, see <http://www.gnu.org/licenses/>.
   23 |       |  */
   24 |       |
   25 |       | #include <xen/vm_event.h>
   26 |       | #include <xen/monitor.h>
   27 |       | #include <asm/hvm/monitor.h>
   28 |       | #include <asm/monitor.h>
   29 |       | #include <asm/paging.h>
   30 |       | #include <asm/vm_event.h>
   31 |       | #include <public/vm_event.h>
   32 |       |
   33 |       | bool hvm_monitor_cr(unsigned int index, unsigned long value, unsigned long old)
   34 | 3.80k | {
   35 | 3.80k |     struct vcpu *curr = current;
   36 | 3.80k |     struct arch_domain *ad = &curr->domain->arch;
   37 | 3.80k |     unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);
   38 | 3.80k |
   39 | 3.80k |     if ( (ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
   40 |     0 |          (!(ad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
   41 |     0 |           value != old) &&
   42 |     0 |          (!((value ^ old) & ad->monitor.write_ctrlreg_mask[index])) )
   43 |     0 |     {
   44 |     0 |         bool sync = ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask;
   45 |     0 |
   46 |     0 |         vm_event_request_t req = {
   47 |     0 |             .reason = VM_EVENT_REASON_WRITE_CTRLREG,
   48 |     0 |             .u.write_ctrlreg.index = index,
   49 |     0 |             .u.write_ctrlreg.new_value = value,
   50 |     0 |             .u.write_ctrlreg.old_value = old
   51 |     0 |         };
   52 |     0 |
   53 |     0 |         if ( monitor_traps(curr, sync, &req) >= 0 )
   54 |     0 |             return 1;
   55 |     0 |     }
   56 | 3.80k |
   57 | 3.80k |     return 0;
   58 | 3.80k | }
   59 |       |
   60 |       | bool hvm_monitor_emul_unimplemented(void)
   61 |     0 | {
   62 |     0 |     struct vcpu *curr = current;
   63 |     0 |
   64 |     0 |     /*
   65 |     0 |      * Send a vm_event to the monitor to signal that the current
   66 |     0 |      * instruction couldn't be emulated.
   67 |     0 |      */
   68 |     0 |     vm_event_request_t req = {
   69 |     0 |         .reason = VM_EVENT_REASON_EMUL_UNIMPLEMENTED,
   70 |     0 |         .vcpu_id  = curr->vcpu_id,
   71 |     0 |     };
   72 |     0 |
   73 |     0 |     return curr->domain->arch.monitor.emul_unimplemented_enabled &&
   74 |     0 |         monitor_traps(curr, true, &req) == 1;
   75 |     0 | }
   76 |       |
   77 |       | void hvm_monitor_msr(unsigned int msr, uint64_t value)
   78 |     0 | {
   79 |     0 |     struct vcpu *curr = current;
   80 |     0 |
   81 |     0 |     if ( monitored_msr(curr->domain, msr) )
   82 |     0 |     {
   83 |     0 |         vm_event_request_t req = {
   84 |     0 |             .reason = VM_EVENT_REASON_MOV_TO_MSR,
   85 |     0 |             .u.mov_to_msr.msr = msr,
   86 |     0 |             .u.mov_to_msr.value = value,
   87 |     0 |         };
   88 |     0 |
   89 |     0 |         monitor_traps(curr, 1, &req);
   90 |     0 |     }
   91 |     0 | }
   92 |       |
   93 |       | void hvm_monitor_descriptor_access(uint64_t exit_info,
   94 |       |                                    uint64_t vmx_exit_qualification,
   95 |       |                                    uint8_t descriptor, bool is_write)
   96 |     0 | {
   97 |     0 |     vm_event_request_t req = {
   98 |     0 |         .reason = VM_EVENT_REASON_DESCRIPTOR_ACCESS,
   99 |     0 |         .u.desc_access.descriptor = descriptor,
  100 |     0 |         .u.desc_access.is_write = is_write,
  101 |     0 |     };
  102 |     0 |
  103 |     0 |     if ( cpu_has_vmx )
  104 |     0 |     {
  105 |     0 |         req.u.desc_access.arch.vmx.instr_info = exit_info;
  106 |     0 |         req.u.desc_access.arch.vmx.exit_qualification = vmx_exit_qualification;
  107 |     0 |     }
  108 |     0 |     else
  109 |     0 |     {
  110 |     0 |         req.u.desc_access.arch.svm.exitinfo = exit_info;
  111 |     0 |     }
  112 |     0 |
  113 |     0 |     monitor_traps(current, true, &req);
  114 |     0 | }
  115 |       |
  116 |       | static inline unsigned long gfn_of_rip(unsigned long rip)
  117 |     0 | {
  118 |     0 |     struct vcpu *curr = current;
  119 |     0 |     struct segment_register sreg;
  120 |     0 |     uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
  121 |     0 |
  122 |     0 |     if ( hvm_get_cpl(curr) == 3 )
  123 |     0 |         pfec |= PFEC_user_mode;
  124 |     0 |
  125 |     0 |     hvm_get_segment_register(curr, x86_seg_cs, &sreg);
  126 |     0 |
  127 |     0 |     return paging_gva_to_gfn(curr, sreg.base + rip, &pfec);
  128 |     0 | }
  129 |       |
  130 |       | int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
  131 |       |                       unsigned long trap_type, unsigned long insn_length)
  132 |     0 | {
  133 |     0 |     struct vcpu *curr = current;
  134 |     0 |     struct arch_domain *ad = &curr->domain->arch;
  135 |     0 |     vm_event_request_t req = {};
  136 |     0 |     bool sync;
  137 |     0 |
  138 |     0 |     switch ( type )
  139 |     0 |     {
  140 |     0 |     case HVM_MONITOR_SOFTWARE_BREAKPOINT:
  141 |     0 |         if ( !ad->monitor.software_breakpoint_enabled )
  142 |     0 |             return 0;
  143 |     0 |         req.reason = VM_EVENT_REASON_SOFTWARE_BREAKPOINT;
  144 |     0 |         req.u.software_breakpoint.gfn = gfn_of_rip(rip);
  145 |     0 |         req.u.software_breakpoint.type = trap_type;
  146 |     0 |         req.u.software_breakpoint.insn_length = insn_length;
  147 |     0 |         sync = true;
  148 |     0 |         break;
  149 |     0 |
  150 |     0 |     case HVM_MONITOR_SINGLESTEP_BREAKPOINT:
  151 |     0 |         if ( !ad->monitor.singlestep_enabled )
  152 |     0 |             return 0;
  153 |     0 |         req.reason = VM_EVENT_REASON_SINGLESTEP;
  154 |     0 |         req.u.singlestep.gfn = gfn_of_rip(rip);
  155 |     0 |         sync = true;
  156 |     0 |         break;
  157 |     0 |
  158 |     0 |     case HVM_MONITOR_DEBUG_EXCEPTION:
  159 |     0 |         if ( !ad->monitor.debug_exception_enabled )
  160 |     0 |             return 0;
  161 |     0 |         req.reason = VM_EVENT_REASON_DEBUG_EXCEPTION;
  162 |     0 |         req.u.debug_exception.gfn = gfn_of_rip(rip);
  163 |     0 |         req.u.debug_exception.type = trap_type;
  164 |     0 |         req.u.debug_exception.insn_length = insn_length;
  165 |     0 |         sync = !!ad->monitor.debug_exception_sync;
  166 |     0 |         break;
  167 |     0 |
  168 |     0 |     default:
  169 |     0 |         return -EOPNOTSUPP;
  170 |     0 |     }
  171 |     0 |
  172 |     0 |     return monitor_traps(curr, sync, &req);
  173 |     0 | }
  174 |       |
  175 |       | int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf,
  176 |       |                       unsigned int subleaf)
  177 | 2.66k | {
  178 | 2.66k |     struct vcpu *curr = current;
  179 | 2.66k |     struct arch_domain *ad = &curr->domain->arch;
  180 | 2.66k |     vm_event_request_t req = {};
  181 | 2.66k |
  182 | 2.66k |     if ( !ad->monitor.cpuid_enabled )
  183 | 2.66k |         return 0;
  184 | 2.66k |
  185 |     0 |     req.reason = VM_EVENT_REASON_CPUID;
  186 |     0 |     req.u.cpuid.insn_length = insn_length;
  187 |     0 |     req.u.cpuid.leaf = leaf;
  188 |     0 |     req.u.cpuid.subleaf = subleaf;
  189 |     0 |
  190 |     0 |     return monitor_traps(curr, 1, &req);
  191 | 2.66k | }
  192 |       |
  193 |       | void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
  194 |       |                            unsigned int err, uint64_t cr2)
  195 |     0 | {
  196 |     0 |     vm_event_request_t req = {
  197 |     0 |         .reason = VM_EVENT_REASON_INTERRUPT,
  198 |     0 |         .u.interrupt.x86.vector = vector,
  199 |     0 |         .u.interrupt.x86.type = type,
  200 |     0 |         .u.interrupt.x86.error_code = err,
  201 |     0 |         .u.interrupt.x86.cr2 = cr2,
  202 |     0 |     };
  203 |     0 |
  204 |     0 |     monitor_traps(current, 1, &req);
  205 |     0 | }
  206 |       |
  207 |       | /*
  208 |       |  * Local variables:
  209 |       |  * mode: C
  210 |       |  * c-file-style: "BSD"
  211 |       |  * c-basic-offset: 4
  212 |       |  * tab-width: 4
  213 |       |  * indent-tabs-mode: nil
  214 |       |  * End:
  215 |       |  */