/root/src/xen/xen/arch/x86/hvm/nestedhvm.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Nested HVM |
3 | | * Copyright (c) 2011, Advanced Micro Devices, Inc. |
4 | | * Author: Christoph Egger <Christoph.Egger@amd.com> |
5 | | * |
6 | | * This program is free software; you can redistribute it and/or modify it |
7 | | * under the terms and conditions of the GNU General Public License, |
8 | | * version 2, as published by the Free Software Foundation. |
9 | | * |
10 | | * This program is distributed in the hope it will be useful, but WITHOUT |
11 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
12 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
13 | | * more details. |
14 | | * |
15 | | * You should have received a copy of the GNU General Public License along with |
16 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
17 | | */ |
18 | | |
19 | | #include <asm/msr.h> |
20 | | #include <asm/hvm/support.h> |
21 | | #include <asm/hvm/hvm.h> |
22 | | #include <asm/p2m.h> /* for struct p2m_domain */ |
23 | | #include <asm/hvm/nestedhvm.h> |
24 | | #include <asm/event.h> /* for local_event_delivery_(en|dis)able */ |
25 | | #include <asm/paging.h> /* for paging_mode_hap() */ |
26 | | |
27 | | static unsigned long *shadow_io_bitmap[3]; |
28 | | |
29 | | /* Nested HVM on/off per domain */ |
30 | | bool nestedhvm_enabled(const struct domain *d) |
31 | 2.99M | { |
32 | 2.99M | return is_hvm_domain(d) && d->arch.hvm_domain.params && |
33 | 2.86M | d->arch.hvm_domain.params[HVM_PARAM_NESTEDHVM]; |
34 | 2.99M | } |
35 | | |
36 | | /* Nested VCPU */ |
37 | | bool_t |
38 | | nestedhvm_vcpu_in_guestmode(struct vcpu *v) |
39 | 59.1M | { |
40 | 59.1M | return vcpu_nestedhvm(v).nv_guestmode; |
41 | 59.1M | } |
42 | | |
43 | | void |
44 | | nestedhvm_vcpu_reset(struct vcpu *v) |
45 | 0 | { |
46 | 0 | struct nestedvcpu *nv = &vcpu_nestedhvm(v); |
47 | 0 |
|
48 | 0 | nv->nv_vmentry_pending = 0; |
49 | 0 | nv->nv_vmexit_pending = 0; |
50 | 0 | nv->nv_vmswitch_in_progress = 0; |
51 | 0 | nv->nv_ioport80 = 0; |
52 | 0 | nv->nv_ioportED = 0; |
53 | 0 |
|
54 | 0 | hvm_unmap_guest_frame(nv->nv_vvmcx, 1); |
55 | 0 | nv->nv_vvmcx = NULL; |
56 | 0 | nv->nv_vvmcxaddr = INVALID_PADDR; |
57 | 0 | nv->nv_flushp2m = 0; |
58 | 0 | nv->nv_p2m = NULL; |
59 | 0 | nv->stale_np2m = false; |
60 | 0 | nv->np2m_generation = 0; |
61 | 0 |
|
62 | 0 | hvm_asid_flush_vcpu_asid(&nv->nv_n2asid); |
63 | 0 |
|
64 | 0 | if ( hvm_funcs.nhvm_vcpu_reset ) |
65 | 0 | hvm_funcs.nhvm_vcpu_reset(v); |
66 | 0 |
|
67 | 0 | /* vcpu is in host mode */ |
68 | 0 | nestedhvm_vcpu_exit_guestmode(v); |
69 | 0 | } |
70 | | |
71 | | int |
72 | | nestedhvm_vcpu_initialise(struct vcpu *v) |
73 | 0 | { |
74 | 0 | int rc = -EOPNOTSUPP; |
75 | 0 |
|
76 | 0 | if ( !shadow_io_bitmap[0] ) |
77 | 0 | return -ENOMEM; |
78 | 0 |
|
79 | 0 | if ( !hvm_funcs.nhvm_vcpu_initialise || |
80 | 0 | ((rc = hvm_funcs.nhvm_vcpu_initialise(v)) != 0) ) |
81 | 0 | return rc; |
82 | 0 |
|
83 | 0 | nestedhvm_vcpu_reset(v); |
84 | 0 | return 0; |
85 | 0 | } |
86 | | |
87 | | void |
88 | | nestedhvm_vcpu_destroy(struct vcpu *v) |
89 | 0 | { |
90 | 0 | if ( hvm_funcs.nhvm_vcpu_destroy ) |
91 | 0 | hvm_funcs.nhvm_vcpu_destroy(v); |
92 | 0 | } |
93 | | |
94 | | static void |
95 | | nestedhvm_flushtlb_ipi(void *info) |
96 | 0 | { |
97 | 0 | struct vcpu *v = current; |
98 | 0 | struct domain *d = info; |
99 | 0 |
|
100 | 0 | ASSERT(d != NULL); |
101 | 0 | if (v->domain != d) { |
102 | 0 | /* This cpu doesn't belong to the domain */ |
103 | 0 | return; |
104 | 0 | } |
105 | 0 |
|
106 | 0 | /* Just flush the ASID (or request a new one). |
107 | 0 | * This is cheaper than flush_tlb_local() and has |
108 | 0 | * the same desired effect. |
109 | 0 | */ |
110 | 0 | hvm_asid_flush_core(); |
111 | 0 | vcpu_nestedhvm(v).nv_p2m = NULL; |
112 | 0 | vcpu_nestedhvm(v).stale_np2m = true; |
113 | 0 | } |
114 | | |
115 | | void |
116 | | nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m) |
117 | 0 | { |
118 | 0 | on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi, |
119 | 0 | p2m->domain, 1); |
120 | 0 | cpumask_clear(p2m->dirty_cpumask); |
121 | 0 | } |
122 | | |
123 | | bool_t |
124 | | nestedhvm_is_n2(struct vcpu *v) |
125 | 364k | { |
126 | 364k | if (!nestedhvm_enabled(v->domain) |
127 | 0 | || nestedhvm_vmswitch_in_progress(v) |
128 | 0 | || !nestedhvm_paging_mode_hap(v)) |
129 | 364k | return 0; |
130 | 364k | |
131 | 18.4E | if (nestedhvm_vcpu_in_guestmode(v)) |
132 | 0 | return 1; |
133 | 18.4E | |
134 | 18.4E | return 0; |
135 | 18.4E | } |
136 | | |
137 | | /* Common shadow IO Permission bitmap */ |
138 | | |
/* There are four global patterns of io bitmap each guest can
140 | | * choose depending on interception of io port 0x80 and/or |
141 | | * 0xED (shown in table below). |
142 | | * The users of the bitmap patterns are in SVM/VMX specific code. |
143 | | * |
144 | | * bitmap port 0x80 port 0xed |
145 | | * hvm_io_bitmap cleared cleared |
146 | | * iomap[0] cleared set |
147 | | * iomap[1] set cleared |
148 | | * iomap[2] set set |
149 | | */ |
150 | | |
151 | | static int __init |
152 | | nestedhvm_setup(void) |
153 | 1 | { |
154 | 1 | /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */ |
155 | 1 | unsigned nr = cpu_has_vmx ? 2 : 3; |
156 | 1 | unsigned int i, order = get_order_from_pages(nr); |
157 | 1 | |
158 | 1 | if ( !hvm_funcs.name ) |
159 | 0 | return 0; |
160 | 1 | |
161 | 1 | /* shadow_io_bitmaps can't be declared static because |
162 | 1 | * they must fulfill hw requirements (page aligned section) |
163 | 1 | * and doing so triggers the ASSERT(va >= XEN_VIRT_START) |
164 | 1 | * in __virt_to_maddr() |
165 | 1 | * |
166 | 1 | * So as a compromise pre-allocate them when xen boots. |
167 | 1 | * This function must be called from within start_xen() when |
168 | 1 | * it is valid to use _xmalloc() |
169 | 1 | */ |
170 | 1 | |
171 | 4 | for ( i = 0; i < ARRAY_SIZE(shadow_io_bitmap); i++ ) |
172 | 3 | { |
173 | 3 | shadow_io_bitmap[i] = alloc_xenheap_pages(order, 0); |
174 | 3 | if ( !shadow_io_bitmap[i] ) |
175 | 0 | { |
176 | 0 | while ( i-- ) |
177 | 0 | { |
178 | 0 | free_xenheap_pages(shadow_io_bitmap[i], order); |
179 | 0 | shadow_io_bitmap[i] = NULL; |
180 | 0 | } |
181 | 0 | return -ENOMEM; |
182 | 0 | } |
183 | 3 | memset(shadow_io_bitmap[i], ~0U, nr << PAGE_SHIFT); |
184 | 3 | } |
185 | 1 | |
186 | 1 | __clear_bit(0x80, shadow_io_bitmap[0]); |
187 | 1 | __clear_bit(0xed, shadow_io_bitmap[1]); |
188 | 1 | |
189 | 1 | return 0; |
190 | 1 | } |
191 | | __initcall(nestedhvm_setup); |
192 | | |
193 | | unsigned long * |
194 | | nestedhvm_vcpu_iomap_get(bool_t port_80, bool_t port_ed) |
195 | 0 | { |
196 | 0 | int i; |
197 | 0 |
|
198 | 0 | if (!hvm_port80_allowed) |
199 | 0 | port_80 = 1; |
200 | 0 |
|
201 | 0 | if (port_80 == 0) { |
202 | 0 | if (port_ed == 0) |
203 | 0 | return hvm_io_bitmap; |
204 | 0 | i = 0; |
205 | 0 | } else { |
206 | 0 | if (port_ed == 0) |
207 | 0 | i = 1; |
208 | 0 | else |
209 | 0 | i = 2; |
210 | 0 | } |
211 | 0 |
|
212 | 0 | return shadow_io_bitmap[i]; |
213 | 0 | } |