/root/src/xen/xen/include/asm/guest_pt.h
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * xen/asm-x86/guest_pt.h |
3 | | * |
4 | | * Types and accessors for guest pagetable entries, as distinct from |
5 | | * Xen's pagetable types. |
6 | | * |
7 | | * Users must #define GUEST_PAGING_LEVELS to 2, 3 or 4 before including |
8 | | * this file. |
9 | | * |
10 | | * Parts of this code are Copyright (c) 2006 by XenSource Inc. |
11 | | * Parts of this code are Copyright (c) 2006 by Michael A Fetterman |
12 | | * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al. |
13 | | * |
14 | | * This program is free software; you can redistribute it and/or modify |
15 | | * it under the terms of the GNU General Public License as published by |
16 | | * the Free Software Foundation; either version 2 of the License, or |
17 | | * (at your option) any later version. |
18 | | * |
19 | | * This program is distributed in the hope that it will be useful, |
20 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
21 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
22 | | * GNU General Public License for more details. |
23 | | * |
24 | | * You should have received a copy of the GNU General Public License |
25 | | * along with this program; If not, see <http://www.gnu.org/licenses/>. |
26 | | */ |
27 | | |
28 | | #ifndef _XEN_ASM_GUEST_PT_H |
29 | | #define _XEN_ASM_GUEST_PT_H |
30 | | |
31 | | #if !defined(GUEST_PAGING_LEVELS) |
32 | | #error GUEST_PAGING_LEVELS not defined |
33 | | #endif |
34 | | |
35 | | static inline paddr_t |
36 | | gfn_to_paddr(gfn_t gfn) |
37 | 0 | { |
38 | 0 | return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT; |
39 | 0 | } Unexecuted instantiation: multi.c:gfn_to_paddr Unexecuted instantiation: guest_walk.c:gfn_to_paddr |
40 | | |
/* Override get_gfn to work with gfn_t */
#undef get_gfn
#define get_gfn(d, g, t) get_gfn_type((d), gfn_x(g), (t), P2M_ALLOC)

/*
 * Mask covering the reserved bits from superpage alignment.
 *
 * All bits below the superpage boundary ('bit') must be zero, except for
 * _PAGE_PSE_PAT and the regular flag bits below it.
 */
#define SUPERPAGE_RSVD(bit)                                             \
    (((1ul << (bit)) - 1) & ~(_PAGE_PSE_PAT | (_PAGE_PSE_PAT - 1ul)))
48 | | |
/*
 * Compress a 64-bit PSE36 superpage address into its 32-bit PDE
 * representation: address bits 32-40 are folded down into entry bits
 * 13-21, and the incoming bits 13-21 are discarded.
 */
static inline uint32_t fold_pse36(uint64_t val)
{
    uint32_t folded = val & ~(0x1fful << 13);

    folded |= (val >> (32 - 13)) & (0x1fful << 13);

    return folded;
}
/*
 * Expand a 32-bit PSE36 PDE back into the 64-bit address form: entry
 * bits 13-21 move up to address bits 32-40, leaving bits 13-21 clear.
 */
static inline uint64_t unfold_pse36(uint32_t val)
{
    uint64_t unfolded = val & ~(0x1fful << 13);

    unfolded |= (uint64_t)(val & (0x1fful << 13)) << (32 - 13);

    return unfolded;
}
57 | | |
58 | | /* Types of the guest's page tables and access functions for them */ |
59 | | |
#if GUEST_PAGING_LEVELS == 2

/* 2-level (non-PAE) guests: two levels of 1024 32-bit entries each. */
#define GUEST_L1_PAGETABLE_ENTRIES     1024
#define GUEST_L2_PAGETABLE_ENTRIES     1024

#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         22

/* 32-bit entries have no architecturally reserved bits. */
#define GUEST_L1_PAGETABLE_RSVD           0
#define GUEST_L2_PAGETABLE_RSVD           0

typedef uint32_t guest_intpte_t;
typedef struct { guest_intpte_t l1; } guest_l1e_t;
typedef struct { guest_intpte_t l2; } guest_l2e_t;

#define PRI_gpte "08x"

/* Extract the GFN an entry points at. */
static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(gl1e.l1 >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(gl2e.l2 >> PAGE_SHIFT); }

/* Extract the low 12 flag bits of an entry. */
static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return gl1e.l1 & 0xfff; }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return gl2e.l2 & 0xfff; }

/* 2-level pagetables have no protection-key field. */
static inline u32 guest_l1e_get_pkey(guest_l1e_t gl1e)
{ return 0; }
static inline u32 guest_l2e_get_pkey(guest_l2e_t gl2e)
{ return 0; }

/* Construct an entry from a GFN and flag bits. */
static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return (guest_l1e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return (guest_l2e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; }

/* Index of the entry mapping a virtual address, at each level. */
#define guest_l1_table_offset(_va)                                        \
    (((_va) >> GUEST_L1_PAGETABLE_SHIFT) & (GUEST_L1_PAGETABLE_ENTRIES - 1))
#define guest_l2_table_offset(_va)                                        \
    (((_va) >> GUEST_L2_PAGETABLE_SHIFT) & (GUEST_L2_PAGETABLE_ENTRIES - 1))
101 | | |
102 | | #else /* GUEST_PAGING_LEVELS != 2 */ |
103 | | |
#if GUEST_PAGING_LEVELS == 3

/* PAE guests: a 4-entry top level above two levels of 512 64-bit entries. */
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES        4

#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30

/* Bits 52-62 are reserved in PAE L1/L2 entries (bit 63 is NX). */
#define GUEST_L1_PAGETABLE_RSVD           0x7ff0000000000000ul
#define GUEST_L2_PAGETABLE_RSVD           0x7ff0000000000000ul
/* PDPTEs additionally treat most of the regular flag bits as reserved. */
#define GUEST_L3_PAGETABLE_RSVD                                         \
    (0xfff0000000000000ul | _PAGE_GLOBAL | _PAGE_PSE | _PAGE_DIRTY |    \
     _PAGE_ACCESSED | _PAGE_USER | _PAGE_RW)

#else /* GUEST_PAGING_LEVELS == 4 */

/* 4-level (long mode) guests: four levels of 512 64-bit entries each. */
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES      512
#define GUEST_L4_PAGETABLE_ENTRIES      512

#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#define GUEST_L4_PAGETABLE_SHIFT         39

#define GUEST_L1_PAGETABLE_RSVD           0
#define GUEST_L2_PAGETABLE_RSVD           0
#define GUEST_L3_PAGETABLE_RSVD           0
/* NB L4e._PAGE_GLOBAL is reserved for AMD, but ignored for Intel. */
#define GUEST_L4_PAGETABLE_RSVD           _PAGE_PSE

#endif
139 | | |
/* 3- and 4-level guests reuse Xen's own 64-bit pagetable entry types. */
typedef l1_pgentry_t guest_l1e_t;
typedef l2_pgentry_t guest_l2e_t;
typedef l3_pgentry_t guest_l3e_t;
#if GUEST_PAGING_LEVELS >= 4
typedef l4_pgentry_t guest_l4e_t;
#endif
typedef intpte_t guest_intpte_t;

#define PRI_gpte "016"PRIx64

/* Extract the GFN an entry points at. */
static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); }
static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e)
{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); }
#if GUEST_PAGING_LEVELS >= 4
static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e)
{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); }
#endif

/* Extract the flag bits of an entry. */
static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags(gl2e); }
static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e)
{ return l3e_get_flags(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e)
{ return l4e_get_flags(gl4e); }
#endif

/* Extract the protection-key field of an entry. */
static inline u32 guest_l1e_get_pkey(guest_l1e_t gl1e)
{ return l1e_get_pkey(gl1e); }
static inline u32 guest_l2e_get_pkey(guest_l2e_t gl2e)
{ return l2e_get_pkey(gl2e); }
static inline u32 guest_l3e_get_pkey(guest_l3e_t gl3e)
{ return l3e_get_pkey(gl3e); }

/* Construct an entry from a GFN and flag bits. */
static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags)
{ return l3e_from_pfn(gfn_x(gfn), flags); }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
{ return l4e_from_pfn(gfn_x(gfn), flags); }
#endif

/* Guest and host table layouts match, so reuse Xen's index helpers. */
#define guest_l1_table_offset(a) l1_table_offset(a)
#define guest_l2_table_offset(a) l2_table_offset(a)
#define guest_l3_table_offset(a) l3_table_offset(a)
#define guest_l4_table_offset(a) l4_table_offset(a)
194 | | |
195 | | #endif /* GUEST_PAGING_LEVELS != 2 */ |
196 | | |
/* Mask of the GFNs covered by an L2 or L3 superpage */
#define GUEST_L2_GFN_MASK (GUEST_L1_PAGETABLE_ENTRIES - 1)
#define GUEST_L3_GFN_MASK                                   \
    ((GUEST_L2_PAGETABLE_ENTRIES * GUEST_L1_PAGETABLE_ENTRIES) - 1)
201 | | |
202 | | |
203 | | /* Which pagetable features are supported on this vcpu? */ |
204 | | |
/* May this vcpu's pagetables legitimately contain 2M/4M L2 superpages? */
static inline bool guest_can_use_l2_superpages(const struct vcpu *v)
{
    /*
     * PV guests use Xen's paging settings.  Being 4-level, 2M
     * superpages are unconditionally supported.
     *
     * The L2 _PAGE_PSE bit must be honoured in HVM guests, whenever
     * CR4.PSE is set or the guest is in PAE or long mode.
     * It's also used in the dummy PT for vcpus with CR0.PG cleared.
     */
    return (is_pv_vcpu(v) ||
            GUEST_PAGING_LEVELS != 2 ||
            !hvm_paging_enabled(v) ||
            (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PSE));
}
220 | | |
/* May this domain's pagetables legitimately contain 1G L3 superpages? */
static inline bool guest_can_use_l3_superpages(const struct domain *d)
{
    /*
     * There are no control register settings for the hardware pagewalk on the
     * subject of 1G superpages.
     *
     * Shadow pagetables don't support 1GB superpages at all, and will always
     * treat L3 _PAGE_PSE as reserved.
     *
     * With HAP however, if the guest constructs a 1GB superpage on capable
     * hardware, it will function irrespective of whether the feature is
     * advertised.  Xen's model of performing a pagewalk should match.
     */
    return GUEST_PAGING_LEVELS >= 4 && paging_mode_hap(d) && cpu_has_page1gb;
}
236 | | |
/* May this domain's L2 superpage entries use the PSE36 address extension? */
static inline bool guest_can_use_pse36(const struct domain *d)
{
    /*
     * Only called in the context of 2-level guests, after
     * guest_can_use_l2_superpages() has indicated true.
     *
     * Shadow pagetables don't support PSE36 superpages at all, and will
     * always treat them as reserved.
     *
     * With HAP however, once L2 superpages are active, here are no control
     * register settings for the hardware pagewalk on the subject of PSE36.
     * If the guest constructs a PSE36 superpage on capable hardware, it will
     * function irrespective of whether the feature is advertised.  Xen's
     * model of performing a pagewalk should match.
     */
    return paging_mode_hap(d) && cpu_has_pse36;
}
254 | | |
/* Is the NX (no-execute) bit active in this vcpu's pagetables? */
static inline bool guest_nx_enabled(const struct vcpu *v)
{
    if ( GUEST_PAGING_LEVELS == 2 ) /* NX has no effect without CR4.PAE. */
        return false;

    /* PV guests can't control EFER.NX, and inherit Xen's choice. */
    return is_pv_vcpu(v) ? cpu_has_nx : hvm_nx_enabled(v);
}
263 | | |
/* Does CR0.WP apply to supervisor accesses for this vcpu? */
static inline bool guest_wp_enabled(const struct vcpu *v)
{
    /* PV guests can't control CR0.WP, and it is unconditionally set by Xen. */
    return is_pv_vcpu(v) || hvm_wp_enabled(v);
}
269 | | |
/* Is SMEP active for this vcpu?  (Never for PV guests.) */
static inline bool guest_smep_enabled(const struct vcpu *v)
{
    return !is_pv_vcpu(v) && hvm_smep_enabled(v);
}
274 | | |
/* Is SMAP active for this vcpu?  (Never for PV guests.) */
static inline bool guest_smap_enabled(const struct vcpu *v)
{
    return !is_pv_vcpu(v) && hvm_smap_enabled(v);
}
279 | | |
/* Are protection keys active for this vcpu?  (Never for PV guests.) */
static inline bool guest_pku_enabled(const struct vcpu *v)
{
    return !is_pv_vcpu(v) && hvm_pku_enabled(v);
}
284 | | |
/* Helpers for identifying whether guest entries have reserved bits set. */

/*
 * Bits reserved because of maxphysaddr, and (lack of) EFER.NX.
 *
 * Address bits at or above the guest's reported maxphysaddr are reserved
 * in every entry; the NX bit is additionally reserved when NX is disabled.
 */
static inline uint64_t guest_rsvd_bits(const struct vcpu *v)
{
    return ((PADDR_MASK &
             ~((1ul << v->domain->arch.cpuid->extd.maxphysaddr) - 1)) |
            (guest_nx_enabled(v) ? 0 : put_pte_flags(_PAGE_NX_BIT)));
}
294 | | |
/* Does this L1 entry have any reserved bits set? */
static inline bool guest_l1e_rsvd_bits(const struct vcpu *v, guest_l1e_t l1e)
{
    return l1e.l1 & (guest_rsvd_bits(v) | GUEST_L1_PAGETABLE_RSVD);
}
299 | | |
/*
 * Does this L2 entry have any reserved bits set?
 *
 * _PAGE_PSE itself is reserved when L2 superpages are unavailable; for a
 * superpage entry the alignment bits (or, for a PSE36 guest, the folded
 * high-address bits beyond 40 bits) are checked as well.
 */
static inline bool guest_l2e_rsvd_bits(const struct vcpu *v, guest_l2e_t l2e)
{
    uint64_t rsvd_bits = guest_rsvd_bits(v);

    return ((l2e.l2 & (rsvd_bits | GUEST_L2_PAGETABLE_RSVD |
                       (guest_can_use_l2_superpages(v) ? 0 : _PAGE_PSE))) ||
            ((l2e.l2 & _PAGE_PSE) &&
             (l2e.l2 & ((GUEST_PAGING_LEVELS == 2 && guest_can_use_pse36(v->domain))
                        /* PSE36 tops out at 40 bits of address width. */
                        ? (fold_pse36(rsvd_bits | (1ul << 40)))
                        : SUPERPAGE_RSVD(GUEST_L2_PAGETABLE_SHIFT)))));
}
312 | | |
#if GUEST_PAGING_LEVELS >= 3
/*
 * Does this L3 entry have any reserved bits set?  _PAGE_PSE is reserved
 * when 1GB superpages are unavailable; superpage entries also have their
 * alignment bits checked.
 */
static inline bool guest_l3e_rsvd_bits(const struct vcpu *v, guest_l3e_t l3e)
{
    return ((l3e.l3 & (guest_rsvd_bits(v) | GUEST_L3_PAGETABLE_RSVD |
                       (guest_can_use_l3_superpages(v->domain) ? 0 : _PAGE_PSE))) ||
            ((l3e.l3 & _PAGE_PSE) &&
             (l3e.l3 & SUPERPAGE_RSVD(GUEST_L3_PAGETABLE_SHIFT))));
}

#if GUEST_PAGING_LEVELS >= 4
/*
 * Does this L4 entry have any reserved bits set?  AMD additionally
 * treats _PAGE_GLOBAL as reserved at L4 (Intel ignores it).
 */
static inline bool guest_l4e_rsvd_bits(const struct vcpu *v, guest_l4e_t l4e)
{
    return l4e.l4 & (guest_rsvd_bits(v) | GUEST_L4_PAGETABLE_RSVD |
                     ((v->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD)
                      ? _PAGE_GLOBAL : 0));
}
#endif /* GUEST_PAGING_LEVELS >= 4 */
#endif /* GUEST_PAGING_LEVELS >= 3 */
331 | | |
/* Type used for recording a walk through guest pagetables.  It is
 * filled in by the pagetable walk function, and also used as a cache
 * for later walks.  When we encounter a superpage l2e, we fabricate an
 * l1e for propagation to the shadow (for splintering guest superpages
 * into many shadow l1 entries). */
typedef struct guest_pagetable_walk walk_t;
struct guest_pagetable_walk
{
    unsigned long va;           /* Address we were looking for */
#if GUEST_PAGING_LEVELS >= 3
#if GUEST_PAGING_LEVELS >= 4
    guest_l4e_t l4e;            /* Guest's level 4 entry */
#endif
    guest_l3e_t l3e;            /* Guest's level 3 entry */
#endif
    guest_l2e_t l2e;            /* Guest's level 2 entry */
    union
    {
        guest_l1e_t l1e;        /* Guest's level 1 entry (or fabrication). */
        uint64_t el1e;          /* L2 PSE36 superpages wider than 32 bits. */
    };
#if GUEST_PAGING_LEVELS >= 4
    mfn_t l4mfn;                /* MFN that the level 4 entry was in */
    mfn_t l3mfn;                /* MFN that the level 3 entry was in */
#endif
    mfn_t l2mfn;                /* MFN that the level 2 entry was in */
    mfn_t l1mfn;                /* MFN that the level 1 entry was in */

    uint32_t pfec;              /* Accumulated PFEC_* error code from walk. */
};
362 | | |
/* Given a walk_t, translate the gw->va into the guest's notion of the
 * corresponding frame number.  Returns INVALID_GFN if the leaf entry
 * is not present. */
static inline gfn_t guest_walk_to_gfn(const walk_t *gw)
{
    if ( !(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT) )
        return INVALID_GFN;
    /* 2-level PSE36 frames live in the extended el1e representation. */
    return (GUEST_PAGING_LEVELS == 2
            ? _gfn(gw->el1e >> PAGE_SHIFT)
            : guest_l1e_get_gfn(gw->l1e));
}
373 | | |
/* Given a walk_t, translate the gw->va into the guest's notion of the
 * corresponding physical address.  Returns INVALID_PADDR if the leaf
 * entry is not present. */
static inline paddr_t guest_walk_to_gpa(const walk_t *gw)
{
    gfn_t gfn = guest_walk_to_gfn(gw);

    if ( gfn_eq(gfn, INVALID_GFN) )
        return INVALID_PADDR;

    return (gfn_x(gfn) << PAGE_SHIFT) | (gw->va & ~PAGE_MASK);
}
385 | | |
/* Given a walk_t from a successful walk, return the page-order of the
 * page or superpage that the virtual address is in. */
static inline unsigned int guest_walk_to_page_order(const walk_t *gw)
{
    /* This is only valid for successful walks - otherwise the
     * PSE bits might be invalid. */
    ASSERT(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT);
#if GUEST_PAGING_LEVELS >= 3
    if ( guest_l3e_get_flags(gw->l3e) & _PAGE_PSE )
        return GUEST_L3_PAGETABLE_SHIFT - PAGE_SHIFT;
#endif
    if ( guest_l2e_get_flags(gw->l2e) & _PAGE_PSE )
        return GUEST_L2_PAGETABLE_SHIFT - PAGE_SHIFT;
    return GUEST_L1_PAGETABLE_SHIFT - PAGE_SHIFT;
}
401 | | |
402 | | |
/*
 * Walk the guest pagetables, after the manner of a hardware walker.
 *
 * Inputs: a vcpu, a virtual address, a walk_t to fill, a
 *         pointer to a pagefault code, the MFN of the guest's
 *         top-level pagetable, and a mapping of the
 *         guest's top-level pagetable.
 *
 * We walk the vcpu's guest pagetables, filling the walk_t with what we
 * see and adding any Accessed and Dirty bits that are needed in the
 * guest entries.  Using the pagefault code, we check the permissions as
 * we go.  For the purposes of reading pagetables we treat all non-RAM
 * memory as containing zeroes.
 *
 * Returns a boolean indicating success or failure.  walk_t.pfec contains
 * the accumulated error code on failure.
 */

/* Macro-fu so you can call guest_walk_tables() and get the right one. */
#define GPT_RENAME2(_n, _l) _n ## _ ## _l ## _levels
#define GPT_RENAME(_n, _l) GPT_RENAME2(_n, _l)
#define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS)

bool
guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, unsigned long va,
                  walk_t *gw, uint32_t pfec, mfn_t top_mfn, void *top_map);
429 | | |
/* Pretty-print the contents of a guest-walk */
static inline void print_gw(const walk_t *gw)
{
    gprintk(XENLOG_INFO, "GUEST WALK TO %p\n", _p(gw->va));
#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
    gprintk(XENLOG_INFO, "   l4e=%" PRI_gpte " l4mfn=%" PRI_mfn "\n",
            gw->l4e.l4, mfn_x(gw->l4mfn));
    gprintk(XENLOG_INFO, "   l3e=%" PRI_gpte " l3mfn=%" PRI_mfn "\n",
            gw->l3e.l3, mfn_x(gw->l3mfn));
#else  /* PAE only... */
    gprintk(XENLOG_INFO, "   l3e=%" PRI_gpte "\n", gw->l3e.l3);
#endif /* PAE or 64... */
#endif /* All levels... */
    gprintk(XENLOG_INFO, "   l2e=%" PRI_gpte " l2mfn=%" PRI_mfn "\n",
            gw->l2e.l2, mfn_x(gw->l2mfn));
#if GUEST_PAGING_LEVELS == 2
    gprintk(XENLOG_INFO, "  el1e=%08" PRIx64 " l1mfn=%" PRI_mfn "\n",
            gw->el1e, mfn_x(gw->l1mfn));
#else
    gprintk(XENLOG_INFO, "   l1e=%" PRI_gpte " l1mfn=%" PRI_mfn "\n",
            gw->l1e.l1, mfn_x(gw->l1mfn));
#endif
    /* Decode the accumulated pagefault error code, one flag per column. */
    gprintk(XENLOG_INFO, "   pfec=%02x[%c%c%c%c%c%c]\n", gw->pfec,
            gw->pfec & PFEC_prot_key     ? 'K' : '-',
            gw->pfec & PFEC_insn_fetch   ? 'I' : 'd',
            gw->pfec & PFEC_reserved_bit ? 'R' : '-',
            gw->pfec & PFEC_user_mode    ? 'U' : 's',
            gw->pfec & PFEC_write_access ? 'W' : 'r',
            gw->pfec & PFEC_page_present ? 'P' : '-'
            );
}
462 | | |
463 | | #endif /* _XEN_ASM_GUEST_PT_H */ |