Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/include/public/vm_event.h
 Line   Count  Source
    1          /******************************************************************************
    2           * vm_event.h
    3           *
    4           * Memory event common structures.
    5           *
    6           * Copyright (c) 2009 by Citrix Systems, Inc. (Patrick Colp)
    7           *
    8           * Permission is hereby granted, free of charge, to any person obtaining a copy
    9           * of this software and associated documentation files (the "Software"), to
   10           * deal in the Software without restriction, including without limitation the
   11           * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
   12           * sell copies of the Software, and to permit persons to whom the Software is
   13           * furnished to do so, subject to the following conditions:
   14           *
   15           * The above copyright notice and this permission notice shall be included in
   16           * all copies or substantial portions of the Software.
   17           *
   18           * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   19           * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   20           * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   21           * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   22           * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   23           * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   24           * DEALINGS IN THE SOFTWARE.
   25           */
   26
   27          #ifndef _XEN_PUBLIC_VM_EVENT_H
   28          #define _XEN_PUBLIC_VM_EVENT_H
   29
   30          #include "xen.h"
   31
   32       0  #define VM_EVENT_INTERFACE_VERSION 0x00000002
   33
   34          #if defined(__XEN__) || defined(__XEN_TOOLS__)
   35
   36          #include "io/ring.h"
   37
   38          /*
   39           * Memory event flags
   40           */
   41
   42          /*
   43           * VCPU_PAUSED in a request signals that the vCPU triggering the event has been
   44           *  paused
   45           * VCPU_PAUSED in a response signals to unpause the vCPU
   46           */
   47       0  #define VM_EVENT_FLAG_VCPU_PAUSED        (1 << 0)
   48          /* Flags to aid debugging vm_event */
   49       0  #define VM_EVENT_FLAG_FOREIGN            (1 << 1)
   50          /*
   51           * The following flags can be set in response to a mem_access event.
   52           *
   53           * Emulate the fault-causing instruction (if set in the event response flags).
   54           * This will allow the guest to continue execution without lifting the page
   55           * access restrictions.
   56           */
   57       0  #define VM_EVENT_FLAG_EMULATE            (1 << 2)
   58          /*
   59           * Same as VM_EVENT_FLAG_EMULATE, but with write operations or operations
   60           * potentially having side effects (like memory mapped or port I/O) disabled.
   61           */
   62       0  #define VM_EVENT_FLAG_EMULATE_NOWRITE    (1 << 3)
   63          /*
   64           * Toggle singlestepping on vm_event response.
   65           * Requires the vCPU to be paused already (synchronous events only).
   66           */
   67       0  #define VM_EVENT_FLAG_TOGGLE_SINGLESTEP  (1 << 4)
   68          /*
   69           * Data is being sent back to the hypervisor in the event response, to be
   70           * returned by the read function when emulating an instruction.
   71           * This flag is only useful when combined with VM_EVENT_FLAG_EMULATE
   72           * and takes precedence if combined with VM_EVENT_FLAG_EMULATE_NOWRITE
   73           * (i.e. if both VM_EVENT_FLAG_EMULATE_NOWRITE and
   74           * VM_EVENT_FLAG_SET_EMUL_READ_DATA are set, only the latter will be honored).
   75           */
   76       0  #define VM_EVENT_FLAG_SET_EMUL_READ_DATA (1 << 5)
   77          /*
   78           * Deny completion of the operation that triggered the event.
   79           * Currently only useful for MSR and control-register write events.
   80           * Requires the vCPU to be paused already (synchronous events only).
   81           */
   82       0  #define VM_EVENT_FLAG_DENY               (1 << 6)
   83          /*
   84           * This flag can be set in a request or a response
   85           *
   86           * On a request, indicates that the event occurred in the alternate p2m
   87           * specified by the altp2m_idx request field.
   88           *
   89           * On a response, indicates that the VCPU should resume in the alternate p2m
   90           * specified by the altp2m_idx response field if possible.
   91           */
0
#define VM_EVENT_FLAG_ALTERNATE_P2M      (1 << 7)
93
/*
94
 * Set the vCPU registers to the values in the  vm_event response.
95
 * At the moment x86-only, applies to EAX-EDX, ESP, EBP, ESI, EDI, R8-R15,
96
 * EFLAGS, and EIP.
97
 * Requires the vCPU to be paused already (synchronous events only).
98
 */
99
0
#define VM_EVENT_FLAG_SET_REGISTERS      (1 << 8)
100
/*
101
 * Instruction cache is being sent back to the hypervisor in the event response
102
 * to be used by the emulator. This flag is only useful when combined with
103
 * VM_EVENT_FLAG_EMULATE and does not take presedence if combined with
104
 * VM_EVENT_FLAG_EMULATE_NOWRITE or VM_EVENT_FLAG_SET_EMUL_READ_DATA, (i.e.
105
 * if any of those flags are set, only those will be honored).
106
 */
107
0
#define VM_EVENT_FLAG_SET_EMUL_INSN_DATA (1 << 9)
108
/*
109
 * Have a one-shot VM_EVENT_REASON_INTERRUPT event sent for the first
110
 * interrupt pending after resuming the VCPU.
111
 */
112
0
#define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
113
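The flags above are OR'd together in the flags field of a request or response. As a rough illustration only (not part of this header; it assumes a vm_event_request_t named req already copied off the ring), a monitoring application replying to a paused mem_access event and asking for the faulting instruction to be emulated might build its response like this:

    /* Sketch only: "req" is a vm_event_request_t already read from the ring;
     * memset() needs <string.h>. */
    vm_event_response_t rsp;

    memset(&rsp, 0, sizeof(rsp));
    rsp.version = VM_EVENT_INTERFACE_VERSION;
    rsp.vcpu_id = req.vcpu_id;
    rsp.reason  = req.reason;

    /* Unpause the reporting vCPU and emulate the faulting instruction
     * without lifting the page access restrictions. */
    rsp.flags = VM_EVENT_FLAG_VCPU_PAUSED | VM_EVENT_FLAG_EMULATE;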
  114          /*
  115           * Reasons for the vm event request
  116           */
  117
  118          /* Default case */
  119          #define VM_EVENT_REASON_UNKNOWN                 0
  120          /* Memory access violation */
  121       0  #define VM_EVENT_REASON_MEM_ACCESS              1
  122          /* Memory sharing event */
  123       0  #define VM_EVENT_REASON_MEM_SHARING             2
  124          /* Memory paging event */
  125       0  #define VM_EVENT_REASON_MEM_PAGING              3
  126          /* A control register was updated */
  127       0  #define VM_EVENT_REASON_WRITE_CTRLREG           4
  128          /* An MSR was updated. */
  129       0  #define VM_EVENT_REASON_MOV_TO_MSR              5
  130          /* Debug operation executed (e.g. int3) */
  131       0  #define VM_EVENT_REASON_SOFTWARE_BREAKPOINT     6
  132          /* Single-step (e.g. MTF) */
  133       0  #define VM_EVENT_REASON_SINGLESTEP              7
  134          /* An event has been requested via HVMOP_guest_request_vm_event. */
  135       0  #define VM_EVENT_REASON_GUEST_REQUEST           8
  136          /* A debug exception was caught */
  137       0  #define VM_EVENT_REASON_DEBUG_EXCEPTION         9
  138          /* CPUID executed */
  139       0  #define VM_EVENT_REASON_CPUID                   10
  140          /*
  141           * Privileged call executed (e.g. SMC).
  142           * Note: event may be generated even if SMC condition check fails on some CPUs.
  143           *       As this behavior is CPU-specific, users are advised to not rely on it.
  144           *       These kinds of events will be filtered out in future versions.
  145           */
  146          #define VM_EVENT_REASON_PRIVILEGED_CALL         11
  147          /* An interrupt has been delivered. */
  148       0  #define VM_EVENT_REASON_INTERRUPT               12
  149          /* A descriptor table register was accessed. */
  150       0  #define VM_EVENT_REASON_DESCRIPTOR_ACCESS       13
  151          /* Current instruction is not implemented by the emulator */
  152       0  #define VM_EVENT_REASON_EMUL_UNIMPLEMENTED      14
  153
  154          /* Supported values for the vm_event_write_ctrlreg index. */
  155   3.79k  #define VM_EVENT_X86_CR0    0
  156       0  #define VM_EVENT_X86_CR3    1
  157       0  #define VM_EVENT_X86_CR4    2
  158      11  #define VM_EVENT_X86_XCR0   3
  159
  160          /*
  161           * Using custom vCPU structs (i.e. not hvm_hw_cpu) for both x86 and ARM
  162           * so as to not fill the vm_event ring buffer too quickly.
  163           */
  164          struct vm_event_regs_x86 {
  165              uint64_t rax;
  166              uint64_t rcx;
  167              uint64_t rdx;
  168              uint64_t rbx;
  169              uint64_t rsp;
  170              uint64_t rbp;
  171              uint64_t rsi;
  172              uint64_t rdi;
  173              uint64_t r8;
  174              uint64_t r9;
  175              uint64_t r10;
  176              uint64_t r11;
  177              uint64_t r12;
  178              uint64_t r13;
  179              uint64_t r14;
  180              uint64_t r15;
  181              uint64_t rflags;
  182              uint64_t dr7;
  183              uint64_t rip;
  184              uint64_t cr0;
  185              uint64_t cr2;
  186              uint64_t cr3;
  187              uint64_t cr4;
  188              uint64_t sysenter_cs;
  189              uint64_t sysenter_esp;
  190              uint64_t sysenter_eip;
  191              uint64_t msr_efer;
  192              uint64_t msr_star;
  193              uint64_t msr_lstar;
  194              uint64_t fs_base;
  195              uint64_t gs_base;
  196              uint32_t cs_arbytes;
  197              uint32_t _pad;
  198          };
  199
  200          /*
  201           * Only the register 'pc' can be set on a vm_event response using the
  202           * VM_EVENT_FLAG_SET_REGISTERS flag.
  203           */
  204          struct vm_event_regs_arm {
  205              uint64_t ttbr0;
  206              uint64_t ttbr1;
  207              uint64_t ttbcr;
  208              uint64_t pc;
  209              uint32_t cpsr;
  210              uint32_t _pad;
  211          };
  212
  213          /*
  214           * mem_access flag definitions
  215           *
  216           * These flags are set only as part of a mem_event request.
  217           *
  218           * R/W/X: Defines the type of violation that has triggered the event
  219           *        Multiple types can be set in a single violation!
  220           * GLA_VALID: If the gla field holds a guest VA associated with the event
  221           * FAULT_WITH_GLA: If the violation was triggered by accessing gla
  222           * FAULT_IN_GPT: If the violation was triggered during translating gla
  223           */
  224       0  #define MEM_ACCESS_R                (1 << 0)
  225       0  #define MEM_ACCESS_W                (1 << 1)
  226       0  #define MEM_ACCESS_X                (1 << 2)
  227       0  #define MEM_ACCESS_RWX              (MEM_ACCESS_R | MEM_ACCESS_W | MEM_ACCESS_X)
  228       0  #define MEM_ACCESS_RW               (MEM_ACCESS_R | MEM_ACCESS_W)
  229       0  #define MEM_ACCESS_RX               (MEM_ACCESS_R | MEM_ACCESS_X)
  230       0  #define MEM_ACCESS_WX               (MEM_ACCESS_W | MEM_ACCESS_X)
  231       0  #define MEM_ACCESS_GLA_VALID        (1 << 3)
  232       0  #define MEM_ACCESS_FAULT_WITH_GLA   (1 << 4)
  233       0  #define MEM_ACCESS_FAULT_IN_GPT     (1 << 5)
  234
  235          struct vm_event_mem_access {
  236              uint64_t gfn;
  237              uint64_t offset;
  238              uint64_t gla;   /* if flags has MEM_ACCESS_GLA_VALID set */
  239              uint32_t flags; /* MEM_ACCESS_* */
  240              uint32_t _pad;
  241          };
  242
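For illustration, a handler might decode the MEM_ACCESS_* bits of a mem_access request as follows. This is a sketch only, not part of this header: req is assumed to be a vm_event_request_t whose reason is VM_EVENT_REASON_MEM_ACCESS, and the printing assumes <stdio.h> and <inttypes.h>.

    /* Decode the access-type and GLA bits of a mem_access event. */
    const struct vm_event_mem_access *ma = &req.u.mem_access;
    int r = !!(ma->flags & MEM_ACCESS_R);
    int w = !!(ma->flags & MEM_ACCESS_W);
    int x = !!(ma->flags & MEM_ACCESS_X);

    if ( ma->flags & MEM_ACCESS_GLA_VALID )
        /* gla holds the guest virtual address involved in the violation. */
        printf("%c%c%c violation at gfn 0x%" PRIx64 ", gla 0x%" PRIx64 "\n",
               r ? 'r' : '-', w ? 'w' : '-', x ? 'x' : '-', ma->gfn, ma->gla);
    else
        printf("%c%c%c violation at gfn 0x%" PRIx64 "\n",
               r ? 'r' : '-', w ? 'w' : '-', x ? 'x' : '-', ma->gfn);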
  243          struct vm_event_write_ctrlreg {
  244              uint32_t index;
  245              uint32_t _pad;
  246              uint64_t new_value;
  247              uint64_t old_value;
  248          };
  249
  250          struct vm_event_singlestep {
  251              uint64_t gfn;
  252          };
  253
  254          struct vm_event_debug {
  255              uint64_t gfn;
  256              uint32_t insn_length;
  257              uint8_t type;        /* HVMOP_TRAP_* */
  258              uint8_t _pad[3];
  259          };
  260
  261          struct vm_event_mov_to_msr {
  262              uint64_t msr;
  263              uint64_t value;
  264          };
  265
  266       0  #define VM_EVENT_DESC_IDTR           1
  267       0  #define VM_EVENT_DESC_GDTR           2
  268       0  #define VM_EVENT_DESC_LDTR           3
  269       0  #define VM_EVENT_DESC_TR             4
  270
  271          struct vm_event_desc_access {
  272              union {
  273                  struct {
  274                      uint32_t instr_info;         /* VMX: VMCS Instruction-Information */
  275                      uint32_t _pad1;
  276                      uint64_t exit_qualification; /* VMX: VMCS Exit Qualification */
  277                  } vmx;
  278                  struct {
  279                      uint64_t exitinfo;           /* SVM: VMCB EXITINFO */
  280                      uint64_t _pad2;
  281                  } svm;
  282              } arch;
  283              uint8_t descriptor;                  /* VM_EVENT_DESC_* */
  284              uint8_t is_write;
  285              uint8_t _pad[6];
  286          };
  287
  288          struct vm_event_cpuid {
  289              uint32_t insn_length;
  290              uint32_t leaf;
  291              uint32_t subleaf;
  292              uint32_t _pad;
  293          };
  294
  295          struct vm_event_interrupt_x86 {
  296              uint32_t vector;
  297              uint32_t type;
  298              uint32_t error_code;
  299              uint32_t _pad;
  300              uint64_t cr2;
  301          };
  302
  303       0  #define MEM_PAGING_DROP_PAGE       (1 << 0)
  304       0  #define MEM_PAGING_EVICT_FAIL      (1 << 1)
  305
  306          struct vm_event_paging {
  307              uint64_t gfn;
  308              uint32_t p2mt;
  309              uint32_t flags;
  310          };
  311
  312          struct vm_event_sharing {
  313              uint64_t gfn;
  314              uint32_t p2mt;
  315              uint32_t _pad;
  316          };
  317
  318          struct vm_event_emul_read_data {
  319              uint32_t size;
  320              /* The struct is used in a union with vm_event_regs_x86. */
  321              uint8_t  data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
  322          };
  323
  324          struct vm_event_emul_insn_data {
  325              uint8_t data[16]; /* Has to be completely filled */
  326          };
  327
  328          typedef struct vm_event_st {
  329              uint32_t version;   /* VM_EVENT_INTERFACE_VERSION */
  330              uint32_t flags;     /* VM_EVENT_FLAG_* */
  331              uint32_t reason;    /* VM_EVENT_REASON_* */
  332              uint32_t vcpu_id;
  333              uint16_t altp2m_idx; /* may be used during request and response */
  334              uint16_t _pad[3];
  335
  336              union {
  337                  struct vm_event_paging                mem_paging;
  338                  struct vm_event_sharing               mem_sharing;
  339                  struct vm_event_mem_access            mem_access;
  340                  struct vm_event_write_ctrlreg         write_ctrlreg;
  341                  struct vm_event_mov_to_msr            mov_to_msr;
  342                  struct vm_event_desc_access           desc_access;
  343                  struct vm_event_singlestep            singlestep;
  344                  struct vm_event_debug                 software_breakpoint;
  345                  struct vm_event_debug                 debug_exception;
  346                  struct vm_event_cpuid                 cpuid;
  347                  union {
  348                      struct vm_event_interrupt_x86     x86;
  349                  } interrupt;
  350              } u;
  351
  352              union {
  353                  union {
  354                      struct vm_event_regs_x86 x86;
  355                      struct vm_event_regs_arm arm;
  356                  } regs;
  357
  358                  union {
  359                      struct vm_event_emul_read_data read;
  360                      struct vm_event_emul_insn_data insn;
  361                  } emul;
  362              } data;
  363          } vm_event_request_t, vm_event_response_t;
  364
  365          DEFINE_RING_TYPES(vm_event, vm_event_request_t, vm_event_response_t);
  366
  367          #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
  368          #endif /* _XEN_PUBLIC_VM_EVENT_H */
  369
  370          /*
  371           * Local variables:
  372           * mode: C
  373           * c-file-style: "BSD"
  374           * c-basic-offset: 4
  375           * tab-width: 4
  376           * indent-tabs-mode: nil
  377           * End:
  378           */
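For context on how the request/response structures and the ring defined by DEFINE_RING_TYPES above are consumed, the sketch below is modeled on the pattern used by Xen's xen-access test tool. It is not part of this header: ring_page (the shared ring page), the event-channel signalling, and the process_request() handler are all assumed to be provided by the application.

    /* Back-end (toolstack) side of the vm_event ring; Xen is the front end. */
    vm_event_back_ring_t back_ring;

    SHARED_RING_INIT((vm_event_sring_t *)ring_page);
    BACK_RING_INIT(&back_ring, (vm_event_sring_t *)ring_page, 4096); /* page size assumed */

    while ( RING_HAS_UNCONSUMED_REQUESTS(&back_ring) )
    {
        vm_event_request_t req;
        vm_event_response_t rsp;
        RING_IDX idx = back_ring.req_cons;

        /* Copy the request off the ring and advance the consumer index. */
        memcpy(&req, RING_GET_REQUEST(&back_ring, idx), sizeof(req));
        back_ring.req_cons = ++idx;
        back_ring.sring->req_event = idx + 1;

        if ( req.version != VM_EVENT_INTERFACE_VERSION )
            break;                        /* interface version mismatch */

        process_request(&req, &rsp);      /* application-specific handler */

        /* Publish the response and push it so Xen can see it. */
        memcpy(RING_GET_RESPONSE(&back_ring, back_ring.rsp_prod_pvt),
               &rsp, sizeof(rsp));
        back_ring.rsp_prod_pvt++;
        RING_PUSH_RESPONSES(&back_ring);
        /* Notifying Xen via the event channel is omitted here. */
    }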