Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/include/asm/hvm/support.h
  Line|  Count|Source
     1|       |/*
     2|       | * support.h: HVM support routines used by VT-x and SVM.
     3|       | *
     4|       | * Leendert van Doorn, leendert@watson.ibm.com
     5|       | * Copyright (c) 2005, International Business Machines Corporation.
     6|       | *
     7|       | * This program is free software; you can redistribute it and/or modify it
     8|       | * under the terms and conditions of the GNU General Public License,
     9|       | * version 2, as published by the Free Software Foundation.
    10|       | *
    11|       | * This program is distributed in the hope it will be useful, but WITHOUT
    12|       | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    13|       | * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    14|       | * more details.
    15|       | *
    16|       | * You should have received a copy of the GNU General Public License along with
    17|       | * this program; If not, see <http://www.gnu.org/licenses/>.
    18|       | */
    19|       |
    20|       |#ifndef __ASM_X86_HVM_SUPPORT_H__
    21|       |#define __ASM_X86_HVM_SUPPORT_H__
    22|       |
    23|       |#include <xen/types.h>
    24|       |#include <xen/sched.h>
    25|       |#include <asm/hvm/save.h>
    26|       |#include <asm/processor.h>
    27|       |#include <asm/p2m.h>
    28|       |
    29|       |#ifndef NDEBUG
    30|       |#define DBG_LEVEL_0                 (1 << 0)
    31|       |#define DBG_LEVEL_1                 (1 << 1)
    32|       |#define DBG_LEVEL_2                 (1 << 2)
    33|       |#define DBG_LEVEL_3                 (1 << 3)
    34|       |#define DBG_LEVEL_IO                (1 << 4)
    35|       |#define DBG_LEVEL_VMMU              (1 << 5)
    36|       |#define DBG_LEVEL_VLAPIC            (1 << 6)
    37|       |#define DBG_LEVEL_VLAPIC_TIMER      (1 << 7)
    38|       |#define DBG_LEVEL_VLAPIC_INTERRUPT  (1 << 8)
    39|       |#define DBG_LEVEL_IOAPIC            (1 << 9)
    40|       |#define DBG_LEVEL_HCALL             (1 << 10)
    41|       |#define DBG_LEVEL_MSR               (1 << 11)
    42|       |
    43|       |extern unsigned int opt_hvm_debug_level;
    44|       |#define HVM_DBG_LOG(level, _f, _a...)                                         \
    45|   676k|    do {                                                                      \
    46|   676k|        if ( unlikely((level) & opt_hvm_debug_level) )                        \
    47|      0|            printk("[HVM:%d.%d] <%s> " _f "\n",                               \
    48|      0|                   current->domain->domain_id, current->vcpu_id, __func__,    \
    49|      0|                   ## _a);                                                    \
    50|   676k|    } while (0)
    51|       |#else
    52|       |#define HVM_DBG_LOG(level, _f, _a...) do {} while (0)
    53|       |#endif
    54|       |
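
A usage sketch (not part of the header): HVM_DBG_LOG is called from HVM code paths with one of the DBG_LEVEL_* masks and only prints when that bit is set in opt_hvm_debug_level. The wrapper function and its parameters are hypothetical.

    /* Hypothetical call site: trace an emulated port-I/O access. */
    static void hypothetical_trace_pio(unsigned int port, unsigned int bytes,
                                       bool is_read)
    {
        HVM_DBG_LOG(DBG_LEVEL_IO, "port %#x, bytes %u, %s",
                    port, bytes, is_read ? "read" : "write");
    }

The counts above fit this pattern: the do/while wrapper of the macro was reached roughly 676k times during the measured run, while the printk body stayed at zero, i.e. no debug level bit was enabled.
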
    55|       |extern unsigned long hvm_io_bitmap[];
    56|       |
    57|       |enum hvm_translation_result {
    58|       |    HVMTRANS_okay,
    59|       |    HVMTRANS_bad_linear_to_gfn,
    60|       |    HVMTRANS_bad_gfn_to_mfn,
    61|       |    HVMTRANS_unhandleable,
    62|       |    HVMTRANS_gfn_paged_out,
    63|       |    HVMTRANS_gfn_shared,
    64|       |};
    65|       |
    66|       |/*
    67|       | * Copy to/from a guest physical address.
    68|       | * Returns HVMTRANS_okay, else HVMTRANS_bad_gfn_to_mfn if the given physical
    69|       | * address range does not map entirely onto ordinary machine memory.
    70|       | */
    71|       |enum hvm_translation_result hvm_copy_to_guest_phys(
    72|       |    paddr_t paddr, void *buf, int size, struct vcpu *v);
    73|       |enum hvm_translation_result hvm_copy_from_guest_phys(
    74|       |    void *buf, paddr_t paddr, int size);
    75|       |
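
A hedged sketch of the guest-physical copy helpers declared above; the structure, function name, and error value are invented for illustration.

    /* Write a small, caller-defined structure into guest RAM at 'gpa'. */
    static int hypothetical_write_info(paddr_t gpa)
    {
        struct hypothetical_info { uint32_t version; } info = { .version = 1 };

        if ( hvm_copy_to_guest_phys(gpa, &info, sizeof(info), current) !=
             HVMTRANS_okay )
            return -EFAULT;   /* range not entirely backed by ordinary RAM */

        return 0;
    }
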
    76|       |/*
    77|       | * Copy to/from a guest linear address. @pfec should include PFEC_user_mode
    78|       | * if emulating a user-mode access (CPL=3). All other flags in @pfec are
    79|       | * managed by the called function: it is therefore optional for the caller
    80|       | * to set them.
    81|       | *
    82|       | * Returns:
    83|       | *  HVMTRANS_okay: Copy was entirely successful.
    84|       | *  HVMTRANS_bad_gfn_to_mfn: Some guest physical address did not map to
    85|       | *                           ordinary machine memory.
    86|       | *  HVMTRANS_bad_linear_to_gfn: Some guest linear address did not have a
    87|       | *                              valid mapping to a guest physical address.
    88|       | *                              The pagefault_info_t structure will be filled
    89|       | *                              in if provided.
    90|       | */
    91|       |typedef struct pagefault_info
    92|       |{
    93|       |    unsigned long linear;
    94|       |    int ec;
    95|       |} pagefault_info_t;
    96|       |
    97|       |enum hvm_translation_result hvm_copy_to_guest_linear(
    98|       |    unsigned long addr, void *buf, int size, uint32_t pfec,
    99|       |    pagefault_info_t *pfinfo);
   100|       |enum hvm_translation_result hvm_copy_from_guest_linear(
   101|       |    void *buf, unsigned long addr, int size, uint32_t pfec,
   102|       |    pagefault_info_t *pfinfo);
   103|       |enum hvm_translation_result hvm_fetch_from_guest_linear(
   104|       |    void *buf, unsigned long addr, int size, uint32_t pfec,
   105|       |    pagefault_info_t *pfinfo);
   106|       |
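
A sketch of the linear-copy pattern the comment above describes. It assumes the usual Xen helpers hvm_inject_page_fault() and the X86EMUL_* return codes are available to the caller; the mapping of the remaining HVMTRANS_* values to X86EMUL_* codes is illustrative, and the wrapper function is hypothetical.

    static int hypothetical_read_guest_u32(unsigned long addr, uint32_t *val)
    {
        pagefault_info_t pfinfo;

        /* User-mode (CPL=3) access being emulated, hence PFEC_user_mode. */
        switch ( hvm_copy_from_guest_linear(val, addr, sizeof(*val),
                                            PFEC_user_mode, &pfinfo) )
        {
        case HVMTRANS_okay:
            return X86EMUL_OKAY;
        case HVMTRANS_bad_linear_to_gfn:
            /* The pagewalk failed: reflect it back to the guest as a #PF. */
            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
            return X86EMUL_EXCEPTION;
        case HVMTRANS_gfn_paged_out:
        case HVMTRANS_gfn_shared:
            return X86EMUL_RETRY;
        default:
            return X86EMUL_UNHANDLEABLE;
        }
    }
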
   107|       |/*
   108|       | * Get a reference on the page under an HVM physical or linear address.  If
   109|       | * linear, a pagewalk is performed using pfec (fault details optionally in
   110|       | * pfinfo).
   111|       | * On success, returns HVMTRANS_okay with a reference taken on **_page.
   112|       | */
   113|       |enum hvm_translation_result hvm_translate_get_page(
   114|       |    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
   115|       |    pagefault_info_t *pfinfo, struct page_info **page_p,
   116|       |    gfn_t *gfn_p, p2m_type_t *p2mt_p);
   117|       |
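
A sketch of taking and then dropping the page reference that hvm_translate_get_page() hands back on success. The pfec choice, error value, and wrapper function are illustrative assumptions.

    static int hypothetical_touch_linear(unsigned long addr)
    {
        struct page_info *page;
        gfn_t gfn;
        p2m_type_t p2mt;
        pagefault_info_t pfinfo;

        /* Resolve the page behind guest linear 'addr' (linear == true). */
        if ( hvm_translate_get_page(current, addr, true, PFEC_page_present,
                                    &pfinfo, &page, &gfn, &p2mt) !=
             HVMTRANS_okay )
            return -EINVAL;

        /* ... map and use the page, e.g. via __map_domain_page(page) ... */

        put_page(page);   /* drop the reference taken on success */
        return 0;
    }
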
   118|   609k|#define HVM_HCALL_completed  0 /* hypercall completed - no further action */
   119|      0|#define HVM_HCALL_preempted  1 /* hypercall preempted - re-execute VMCALL */
   120|       |int hvm_hypercall(struct cpu_user_regs *regs);
   121|       |
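
A sketch of how a VMCALL/VMMCALL exit handler might consume the two return values above; advance_guest_ip() is a stand-in for the vendor-specific helper that steps RIP past the instruction, and the handler itself is hypothetical.

    static void hypothetical_vmcall_exit(struct cpu_user_regs *regs)
    {
        switch ( hvm_hypercall(regs) )
        {
        case HVM_HCALL_completed:
            advance_guest_ip();   /* done: step RIP past the VMCALL */
            break;
        case HVM_HCALL_preempted:
            /* Leave RIP alone so the guest re-executes VMCALL on resume. */
            break;
        }
    }
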
   122|       |void hvm_hlt(unsigned int eflags);
   123|       |void hvm_triple_fault(void);
   124|       |
   125|      0|#define VM86_TSS_UPDATED (1ULL << 63)
   126|       |void hvm_prepare_vm86_tss(struct vcpu *v, uint32_t base, uint32_t limit);
   127|       |
   128|       |void hvm_rdtsc_intercept(struct cpu_user_regs *regs);
   129|       |
   130|       |int __must_check hvm_handle_xsetbv(u32 index, u64 new_bv);
   131|       |
   132|       |void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
   133|       |
   134|       |/*
   135|       | * These functions all return X86EMUL return codes.  For hvm_set_*(), the
   136|       | * caller is responsible for injecting #GP[0] if X86EMUL_EXCEPTION is
   137|       | * returned.
   138|       | */
   139|       |int hvm_set_efer(uint64_t value);
   140|       |int hvm_set_cr0(unsigned long value, bool_t may_defer);
   141|       |int hvm_set_cr3(unsigned long value, bool_t may_defer);
   142|       |int hvm_set_cr4(unsigned long value, bool_t may_defer);
   143|       |int hvm_descriptor_access_intercept(uint64_t exit_info,
   144|       |                                    uint64_t vmx_exit_qualification,
   145|       |                                    unsigned int descriptor, bool is_write);
   146|       |int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
   147|       |int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
   148|       |void hvm_ud_intercept(struct cpu_user_regs *);
   149|       |
/*
151
 * May return X86EMUL_EXCEPTION, at which point the caller is responsible for
152
 * injecting a #GP fault.  Used to support speculative reads.
153
 */
154
int __must_check hvm_msr_read_intercept(
155
    unsigned int msr, uint64_t *msr_content);
156
int __must_check hvm_msr_write_intercept(
157
    unsigned int msr, uint64_t msr_content, bool_t may_defer);
158
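
A sketch of the "speculative read" usage the comment above permits: probe an MSR and simply discard the result if the read would fault, rather than injecting #GP into the guest. The wrapper function is hypothetical.

    static bool hypothetical_probe_msr(unsigned int msr, uint64_t *content)
    {
        /* Nothing is injected into the guest on failure, and *content is
         * only meaningful when X86EMUL_OKAY is returned. */
        return hvm_msr_read_intercept(msr, content) == X86EMUL_OKAY;
    }
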
   159|       |#endif /* __ASM_X86_HVM_SUPPORT_H__ */
   160|       |
   161|       |/*
   162|       | * Local variables:
   163|       | * mode: C
   164|       | * c-file-style: "BSD"
   165|       | * c-basic-offset: 4
   166|       | * tab-width: 4
   167|       | * indent-tabs-mode: nil
   168|       | * End:
   169|       | */