debuggers.hg

view xen/arch/x86/hvm/mtrr.c @ 20940:9fc37faa25a0

x86: Intel EPT entry structure changes.

- Intel SDM defines bit 6 in an EPT page-table entry as "Ignore PAT
memory type", so change the abbreviation from "igmt" to "ipat".

- Change the mfn and avail2 fields according to the SDM definition
(see the layout sketch below).

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 04 09:04:33 2010 +0000 (2010-02-04)
parents 30bfa1d8895d
children 3ffdb094c2c0
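
A rough sketch of the 64-bit EPT entry layout referred to above, with bit 6
carrying the "Ignore PAT memory type" (ipat) flag. The field widths follow the
Intel SDM; the field names here are illustrative and need not match the exact
definitions in this changeset:

    typedef union {
        struct {
            uint64_t r      :  1,   /* bit 0: read access */
                     w      :  1,   /* bit 1: write access */
                     x      :  1,   /* bit 2: execute access */
                     emt    :  3,   /* bits 5:3: EPT memory type */
                     ipat   :  1,   /* bit 6: ignore PAT memory type (was "igmt") */
                     sp     :  1,   /* bit 7: superpage bit at PDE/PDPTE level */
                     avail1 :  4,   /* bits 11:8: available for software */
                     mfn    : 40,   /* bits 51:12: machine frame number */
                     avail2 : 12;   /* bits 63:52: available for software */
        };
        uint64_t epte;
    } ept_entry_sketch_t;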

/*
 * mtrr.c: MTRR/PAT virtualization
 *
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <public/hvm/e820.h>
#include <xen/types.h>
#include <asm/e820.h>
#include <asm/mm.h>
#include <asm/paging.h>
#include <asm/p2m.h>
#include <xen/domain_page.h>
#include <asm/mtrr.h>
#include <asm/hvm/support.h>
#include <asm/hvm/cacheattr.h>

extern struct mtrr_state mtrr_state;

static uint64_t phys_base_msr_mask;
static uint64_t phys_mask_msr_mask;
static uint32_t size_or_mask;
static uint32_t size_and_mask;

/* Get page attribute fields (PAn) from PAT MSR. */
#define pat_cr_2_paf(pat_cr,n) ((((uint64_t)pat_cr) >> ((n)<<3)) & 0xff)

/* PAT entry to PTE flags (PAT, PCD, PWT bits). */
static uint8_t pat_entry_2_pte_flags[8] = {
    0,         _PAGE_PWT,
    _PAGE_PCD, _PAGE_PCD | _PAGE_PWT,
    _PAGE_PAT, _PAGE_PAT | _PAGE_PWT,
    _PAGE_PAT | _PAGE_PCD, _PAGE_PAT | _PAGE_PCD | _PAGE_PWT };

/* Effective memory type lookup table, according to MTRR and PAT. */
static uint8_t mm_type_tbl[MTRR_NUM_TYPES][PAT_TYPE_NUMS] = {
/********PAT(UC,WC,RS,RS,WT,WP,WB,UC-)*/
/* RS means reserved types (2 and 3); the type values are hardcoded here. */
/*MTRR(UC):(UC,WC,RS,RS,UC,UC,UC,UC)*/
            {0, 1, 2, 2, 0, 0, 0, 0},
/*MTRR(WC):(UC,WC,RS,RS,UC,UC,WC,WC)*/
            {0, 1, 2, 2, 0, 0, 1, 1},
/*MTRR(RS):(RS,RS,RS,RS,RS,RS,RS,RS)*/
            {2, 2, 2, 2, 2, 2, 2, 2},
/*MTRR(RS):(RS,RS,RS,RS,RS,RS,RS,RS)*/
            {2, 2, 2, 2, 2, 2, 2, 2},
/*MTRR(WT):(UC,WC,RS,RS,WT,WP,WT,UC)*/
            {0, 1, 2, 2, 4, 5, 4, 0},
/*MTRR(WP):(UC,WC,RS,RS,WT,WP,WP,WC)*/
            {0, 1, 2, 2, 4, 5, 5, 1},
/*MTRR(WB):(UC,WC,RS,RS,WT,WP,WB,UC)*/
            {0, 1, 2, 2, 4, 5, 6, 0}
};
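/*
 * Example: a guest MTRR type of WB (6) combined with a PAT type of WT (4)
 * yields mm_type_tbl[6][4] == 4, i.e. an effective memory type of WT.
 */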

/*
 * Reverse lookup table, to find a pat type according to MTRR and effective
 * memory type. This table is dynamically generated.
 */
static uint8_t mtrr_epat_tbl[MTRR_NUM_TYPES][MEMORY_NUM_TYPES];

/* Lookup table for PAT entry of a given PAT value in host PAT. */
static uint8_t pat_entry_tbl[PAT_TYPE_NUMS];
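
/*
 * Note: get_mtrr_range() below returns *base and *end as page frame numbers,
 * not byte addresses; a disabled (invalid) range yields *base == *end == 0.
 */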
static void get_mtrr_range(uint64_t base_msr, uint64_t mask_msr,
                           uint64_t *base, uint64_t *end)
{
    uint32_t mask_lo = (uint32_t)mask_msr;
    uint32_t mask_hi = (uint32_t)(mask_msr >> 32);
    uint32_t base_lo = (uint32_t)base_msr;
    uint32_t base_hi = (uint32_t)(base_msr >> 32);
    uint32_t size;

    if ( (mask_lo & 0x800) == 0 )
    {
        /* Invalid (i.e. free) range */
        *base = 0;
        *end = 0;
        return;
    }

    /* Work out the shifted address mask. */
    mask_lo = (size_or_mask | (mask_hi << (32 - PAGE_SHIFT)) |
               (mask_lo >> PAGE_SHIFT));

    /* This works correctly if size is a power of two (a contiguous range). */
    size = -mask_lo;
    *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
    *end = *base + size - 1;
}

bool_t is_var_mtrr_overlapped(struct mtrr_state *m)
{
    int32_t seg, i;
    uint64_t phys_base, phys_mask, phys_base_pre, phys_mask_pre;
    uint64_t base_pre, end_pre, base, end;
    uint8_t num_var_ranges = (uint8_t)m->mtrr_cap;

    for ( i = 0; i < num_var_ranges; i++ )
    {
        phys_base_pre = ((uint64_t*)m->var_ranges)[i*2];
        phys_mask_pre = ((uint64_t*)m->var_ranges)[i*2 + 1];

        get_mtrr_range(phys_base_pre, phys_mask_pre,
                       &base_pre, &end_pre);

        for ( seg = i + 1; seg < num_var_ranges; seg++ )
        {
            phys_base = ((uint64_t*)m->var_ranges)[seg*2];
            phys_mask = ((uint64_t*)m->var_ranges)[seg*2 + 1];

            get_mtrr_range(phys_base, phys_mask,
                           &base, &end);

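            /*
             * Note: the first clause below flags any two valid multi-page
             * ranges as overlapping even when they are disjoint.  This errs
             * on the conservative side: it merely makes get_mtrr_type() take
             * its slower overlap-resolution path, which still yields correct
             * results.
             */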
            if ( ((base_pre != end_pre) && (base != end))
                 || ((base >= base_pre) && (base <= end_pre))
                 || ((end >= base_pre) && (end <= end_pre))
                 || ((base_pre >= base) && (base_pre <= end))
                 || ((end_pre >= base) && (end_pre <= end)) )
            {
                /* MTRRs overlap. */
                return 1;
            }
        }
    }
    return 0;
}

#define MTRR_PHYSMASK_VALID_BIT  11
#define MTRR_PHYSMASK_SHIFT      12

#define MTRR_PHYSBASE_TYPE_MASK  0xff   /* lowest 8 bits */
#define MTRR_PHYSBASE_SHIFT      12
#define MTRR_VCNT                8

#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)

static int hvm_mtrr_pat_init(void)
{
    unsigned int i, j, phys_addr;

    memset(&mtrr_epat_tbl, INVALID_MEM_TYPE, sizeof(mtrr_epat_tbl));
    for ( i = 0; i < MTRR_NUM_TYPES; i++ )
    {
        for ( j = 0; j < PAT_TYPE_NUMS; j++ )
        {
            int32_t tmp = mm_type_tbl[i][j];
            if ( (tmp >= 0) && (tmp < MEMORY_NUM_TYPES) )
                mtrr_epat_tbl[i][tmp] = j;
        }
    }

    memset(&pat_entry_tbl, INVALID_MEM_TYPE,
           PAT_TYPE_NUMS * sizeof(pat_entry_tbl[0]));
    for ( i = 0; i < PAT_TYPE_NUMS; i++ )
    {
        for ( j = 0; j < PAT_TYPE_NUMS; j++ )
        {
            if ( pat_cr_2_paf(host_pat, j) == i )
            {
                pat_entry_tbl[i] = j;
                break;
            }
        }
    }

    phys_addr = 36;
    if ( cpuid_eax(0x80000000) >= 0x80000008 )
        phys_addr = (uint8_t)cpuid_eax(0x80000008);

    phys_base_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0xf00UL;
    phys_mask_msr_mask = ~((((uint64_t)1) << phys_addr) - 1) | 0x7ffUL;

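    /*
     * e.g. with phys_addr == 36 and PAGE_SHIFT == 12 the following yields
     * size_or_mask == 0xff000000 and size_and_mask == 0x00f00000.
     */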
    size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
    size_and_mask = ~size_or_mask & 0xfff00000;

    return 0;
}
__initcall(hvm_mtrr_pat_init);

uint8_t pat_type_2_pte_flags(uint8_t pat_type)
{
    int32_t pat_entry = pat_entry_tbl[pat_type];

    /* INVALID_MEM_TYPE means that no entry in the host PAT matches the
     * given pat_type.  This cannot happen if the host PAT covers all the
     * PAT types.
     */
    if ( likely(pat_entry != INVALID_MEM_TYPE) )
        return pat_entry_2_pte_flags[pat_entry];

    return pat_entry_2_pte_flags[pat_entry_tbl[PAT_TYPE_UNCACHABLE]];
}

int hvm_vcpu_cacheattr_init(struct vcpu *v)
{
    struct mtrr_state *m = &v->arch.hvm_vcpu.mtrr;

    memset(m, 0, sizeof(*m));

    m->var_ranges = xmalloc_array(struct mtrr_var_range, MTRR_VCNT);
    if ( m->var_ranges == NULL )
        return -ENOMEM;
    memset(m->var_ranges, 0, MTRR_VCNT * sizeof(struct mtrr_var_range));

    m->mtrr_cap = (1u << 10) | (1u << 8) | MTRR_VCNT;
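    /*
     * The MTRRcap value above advertises WC support (bit 10), fixed-range
     * MTRRs (bit 8) and MTRR_VCNT variable ranges (bits 7:0).  The guest PAT
     * set below is the architectural power-on default, 0x0007040600070406.
     */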

    v->arch.hvm_vcpu.pat_cr =
        ((uint64_t)PAT_TYPE_WRBACK) |               /* PAT0: WB */
        ((uint64_t)PAT_TYPE_WRTHROUGH << 8) |       /* PAT1: WT */
        ((uint64_t)PAT_TYPE_UC_MINUS << 16) |       /* PAT2: UC- */
        ((uint64_t)PAT_TYPE_UNCACHABLE << 24) |     /* PAT3: UC */
        ((uint64_t)PAT_TYPE_WRBACK << 32) |         /* PAT4: WB */
        ((uint64_t)PAT_TYPE_WRTHROUGH << 40) |      /* PAT5: WT */
        ((uint64_t)PAT_TYPE_UC_MINUS << 48) |       /* PAT6: UC- */
        ((uint64_t)PAT_TYPE_UNCACHABLE << 56);      /* PAT7: UC */

    return 0;
}

void hvm_vcpu_cacheattr_destroy(struct vcpu *v)
{
    xfree(v->arch.hvm_vcpu.mtrr.var_ranges);
}

/*
 * Get MTRR memory type for physical address pa.
 */
static uint8_t get_mtrr_type(struct mtrr_state *m, paddr_t pa)
{
    int32_t addr, seg, index;
    uint8_t overlap_mtrr = 0;
    uint8_t overlap_mtrr_pos = 0;
    uint64_t phys_base;
    uint64_t phys_mask;
    uint8_t num_var_ranges = m->mtrr_cap & 0xff;

    if ( unlikely(!(m->enabled & 0x2)) )
        return MTRR_TYPE_UNCACHABLE;

    if ( (pa < 0x100000) && (m->enabled & 1) )
    {
        /* Fixed-range MTRRs take effect. */
        addr = (uint32_t) pa;
        if ( addr < 0x80000 )
        {
            seg = (addr >> 16);
            return m->fixed_ranges[seg];
        }
        else if ( addr < 0xc0000 )
        {
            seg = (addr - 0x80000) >> 14;
            index = (seg >> 3) + 1;
            seg &= 7;            /* select 0-7 segments */
            return m->fixed_ranges[index*8 + seg];
        }
        else
        {
            /* 0xC0000 --- 0x100000 */
            seg = (addr - 0xc0000) >> 12;
            index = (seg >> 3) + 3;
            seg &= 7;            /* select 0-7 segments */
            return m->fixed_ranges[index*8 + seg];
        }
    }
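
    /*
     * Fixed-range example: pa == 0xB8000 takes the middle branch above:
     * seg = 0x38000 >> 14 = 14, index = (14 >> 3) + 1 = 2, seg &= 7 -> 6,
     * so fixed_ranges[2*8 + 6] is returned, i.e. the IA32_MTRR_FIX16K_A0000
     * slot covering 0xB8000-0xBBFFF.
     */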

    /* Match with variable MTRRs. */
    for ( seg = 0; seg < num_var_ranges; seg++ )
    {
        phys_base = ((uint64_t*)m->var_ranges)[seg*2];
        phys_mask = ((uint64_t*)m->var_ranges)[seg*2 + 1];
        if ( phys_mask & (1 << MTRR_PHYSMASK_VALID_BIT) )
        {
            if ( ((uint64_t) pa & phys_mask) >> MTRR_PHYSMASK_SHIFT ==
                 (phys_base & phys_mask) >> MTRR_PHYSMASK_SHIFT )
            {
                if ( unlikely(m->overlapped) )
                {
                    overlap_mtrr |= 1 << (phys_base & MTRR_PHYSBASE_TYPE_MASK);
                    overlap_mtrr_pos = phys_base & MTRR_PHYSBASE_TYPE_MASK;
                }
                else
                {
                    /* If there is no overlap, return the type found. */
                    return (phys_base & MTRR_PHYSBASE_TYPE_MASK);
                }
            }
        }
    }

    /* Overlapped or not found. */
    if ( unlikely(overlap_mtrr == 0) )
        return m->def_type;

    if ( likely(!(overlap_mtrr & ~( ((uint8_t)1) << overlap_mtrr_pos ))) )
        /* Covers both the case of a single variable-range match and that
         * of two or more matches with the same memory type.
         */
        return overlap_mtrr_pos;

    if ( overlap_mtrr & 0x1 )
        /* Two or more ranges match and one of them is UC. */
        return MTRR_TYPE_UNCACHABLE;

    if ( !(overlap_mtrr & 0xaf) )
        /* Two or more ranges match; only WT and WB are involved. */
        return MTRR_TYPE_WRTHROUGH;

    /* Behaviour is undefined, but return the last overlapped type. */
    return overlap_mtrr_pos;
}

/*
 * Return the memory type derived from PAT.
 * NOTE: valid only when paging is enabled.
 * Only 4K page PTEs are supported now.
 */
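/*
 * Example: a PTE with PCD and PWT set but PAT clear selects PAT entry 3
 * (the PAT bit supplies bit 2, PCD bit 1 and PWT bit 0 of the entry index).
 */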
static uint8_t page_pat_type(uint64_t pat_cr, uint32_t pte_flags)
{
    int32_t pat_entry;

    /* PCD/PWT -> bit 1/0 of PAT entry */
    pat_entry = ( pte_flags >> 3 ) & 0x3;
    /* PAT bit as bit 2 of PAT entry */
    if ( pte_flags & _PAGE_PAT )
        pat_entry |= 4;

    return (uint8_t)pat_cr_2_paf(pat_cr, pat_entry);
}

/*
 * Effective memory type for leaf page.
 */
static uint8_t effective_mm_type(struct mtrr_state *m,
                                 uint64_t pat,
                                 paddr_t gpa,
                                 uint32_t pte_flags,
                                 uint8_t gmtrr_mtype)
{
    uint8_t mtrr_mtype, pat_value, effective;

    /* If get_pat_flags() gives a dedicated MTRR type, just use it. */
    if ( gmtrr_mtype == NO_HARDCODE_MEM_TYPE )
        mtrr_mtype = get_mtrr_type(m, gpa);
    else
        mtrr_mtype = gmtrr_mtype;

    pat_value = page_pat_type(pat, pte_flags);

    effective = mm_type_tbl[mtrr_mtype][pat_value];

    return effective;
}

uint32_t get_pat_flags(struct vcpu *v,
                       uint32_t gl1e_flags,
                       paddr_t gpaddr,
                       paddr_t spaddr,
                       uint8_t gmtrr_mtype)
{
    uint8_t guest_eff_mm_type;
    uint8_t shadow_mtrr_type;
    uint8_t pat_entry_value;
    uint64_t pat = v->arch.hvm_vcpu.pat_cr;
    struct mtrr_state *g = &v->arch.hvm_vcpu.mtrr;

    /* 1. Get the effective memory type of the guest physical address,
     * using the pair of guest MTRR and PAT.
     */
    guest_eff_mm_type = effective_mm_type(g, pat, gpaddr,
                                          gl1e_flags, gmtrr_mtype);
    /* 2. Get the memory type of the host physical address, using MTRR. */
    shadow_mtrr_type = get_mtrr_type(&mtrr_state, spaddr);

    /* 3. Find the memory type in PAT, from the host MTRR memory type
     * and the guest effective memory type.
     */
    pat_entry_value = mtrr_epat_tbl[shadow_mtrr_type][guest_eff_mm_type];
    /* If a conflict occurs (e.g. the host MTRR type is UC but the guest
     * memory type is WB), fall back to UC as the effective memory type.
     * Returning PAT_TYPE_UNCACHABLE here always makes the effective memory
     * type UC.
     */
    if ( pat_entry_value == INVALID_MEM_TYPE )
    {
        struct domain *d = v->domain;
        p2m_type_t p2mt;
        gfn_to_mfn(d, paddr_to_pfn(gpaddr), &p2mt);
        if ( p2m_is_ram(p2mt) )
            gdprintk(XENLOG_WARNING,
                     "Conflict occurs for a given guest l1e flags:%x "
                     "at %"PRIx64" (the effective mm type:%d), "
                     "because the host mtrr type is:%d\n",
                     gl1e_flags, (uint64_t)gpaddr, guest_eff_mm_type,
                     shadow_mtrr_type);
        pat_entry_value = PAT_TYPE_UNCACHABLE;
    }
    /* 4. Get the pte flags. */
    return pat_type_2_pte_flags(pat_entry_value);
}
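
/*
 * Example of the conflict path above: if the guest asks for WB (guest MTRR
 * WB, PAT WB) but the host MTRR type for spaddr is UC, then
 * mtrr_epat_tbl[UC][WB] is INVALID_MEM_TYPE and the page ends up mapped UC.
 */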

/* Helper functions for setting MTRR/PAT. */
bool_t pat_msr_set(uint64_t *pat, uint64_t msr_content)
{
    uint8_t *value = (uint8_t*)&msr_content;
    int32_t i;

    if ( *pat != msr_content )
    {
        for ( i = 0; i < 8; i++ )
            if ( unlikely(!(value[i] == 0 || value[i] == 1 ||
                            value[i] == 4 || value[i] == 5 ||
                            value[i] == 6 || value[i] == 7)) )
                return 0;

        *pat = msr_content;
    }

    return 1;
}
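
/*
 * Memory type encodings accepted above and below: 0 = UC, 1 = WC, 4 = WT,
 * 5 = WP, 6 = WB.  7 (UC-) is valid only in the PAT, not in MTRRs.
 */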

bool_t mtrr_def_type_msr_set(struct mtrr_state *m, uint64_t msr_content)
{
    uint8_t def_type = msr_content & 0xff;
    uint8_t enabled = (msr_content >> 10) & 0x3;

    if ( unlikely(!(def_type == 0 || def_type == 1 || def_type == 4 ||
                    def_type == 5 || def_type == 6)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid MTRR def type:%x\n", def_type);
        return 0;
    }

    if ( unlikely(msr_content && (msr_content & ~0xcffUL)) )
    {
        HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                    msr_content);
        return 0;
    }

    m->enabled = enabled;
    m->def_type = def_type;

    return 1;
}

bool_t mtrr_fix_range_msr_set(struct mtrr_state *m, uint32_t row,
                              uint64_t msr_content)
{
    uint64_t *fixed_range_base = (uint64_t *)m->fixed_ranges;

    if ( fixed_range_base[row] != msr_content )
    {
        uint8_t *range = (uint8_t*)&msr_content;
        int32_t i, type;

        for ( i = 0; i < 8; i++ )
        {
            type = range[i];
            if ( unlikely(!(type == 0 || type == 1 ||
                            type == 4 || type == 5 || type == 6)) )
                return 0;
        }

        fixed_range_base[row] = msr_content;
    }

    return 1;
}

bool_t mtrr_var_range_msr_set(struct mtrr_state *m, uint32_t msr,
                              uint64_t msr_content)
{
    uint32_t index;
    uint64_t msr_mask;
    uint64_t *var_range_base = (uint64_t*)m->var_ranges;

    index = msr - MSR_IA32_MTRR_PHYSBASE0;
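    /* Even indices refer to MTRRphysBase MSRs, odd ones to MTRRphysMask. */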

    if ( var_range_base[index] != msr_content )
    {
        uint32_t type = msr_content & 0xff;

        msr_mask = (index & 1) ? phys_mask_msr_mask : phys_base_msr_mask;

        if ( unlikely(!(type == 0 || type == 1 ||
                        type == 4 || type == 5 || type == 6)) )
            return 0;

        if ( unlikely(msr_content && (msr_content & msr_mask)) )
        {
            HVM_DBG_LOG(DBG_LEVEL_MSR, "invalid msr content:%"PRIx64"\n",
                        msr_content);
            return 0;
        }

        var_range_base[index] = msr_content;
    }

    m->overlapped = is_var_mtrr_overlapped(m);

    return 1;
}

bool_t mtrr_pat_not_equal(struct vcpu *vd, struct vcpu *vs)
{
    struct mtrr_state *md = &vd->arch.hvm_vcpu.mtrr;
    struct mtrr_state *ms = &vs->arch.hvm_vcpu.mtrr;
    int32_t res;
    uint8_t num_var_ranges = (uint8_t)md->mtrr_cap;

    /* Test fixed ranges. */
    res = memcmp(md->fixed_ranges, ms->fixed_ranges,
                 NUM_FIXED_RANGES*sizeof(mtrr_type));
    if ( res )
        return 1;

    /* Test var ranges. */
    res = memcmp(md->var_ranges, ms->var_ranges,
                 num_var_ranges*sizeof(struct mtrr_var_range));
    if ( res )
        return 1;

    /* Test default type MSR. */
    if ( (md->def_type != ms->def_type)
         || (md->enabled != ms->enabled) )
        return 1;

    /* Test PAT. */
    if ( vd->arch.hvm_vcpu.pat_cr != vs->arch.hvm_vcpu.pat_cr )
        return 1;

    return 0;
}

void hvm_init_cacheattr_region_list(
    struct domain *d)
{
    INIT_LIST_HEAD(&d->arch.hvm_domain.pinned_cacheattr_ranges);
}

void hvm_destroy_cacheattr_region_list(
    struct domain *d)
{
    struct list_head *head = &d->arch.hvm_domain.pinned_cacheattr_ranges;
    struct hvm_mem_pinned_cacheattr_range *range;

    while ( !list_empty(head) )
    {
        range = list_entry(head->next,
                           struct hvm_mem_pinned_cacheattr_range,
                           list);
        list_del(&range->list);
        xfree(range);
    }
}

int32_t hvm_get_mem_pinned_cacheattr(
    struct domain *d,
    uint64_t guest_fn,
    uint32_t *type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    *type = 0;

    if ( !is_hvm_domain(d) )
        return 0;

    list_for_each_entry_rcu ( range,
                              &d->arch.hvm_domain.pinned_cacheattr_ranges,
                              list )
    {
        if ( (guest_fn >= range->start) && (guest_fn <= range->end) )
        {
            *type = range->type;
            return 1;
        }
    }

    return 0;
}

int32_t hvm_set_mem_pinned_cacheattr(
    struct domain *d,
    uint64_t gfn_start,
    uint64_t gfn_end,
    uint32_t type)
{
    struct hvm_mem_pinned_cacheattr_range *range;

    if ( !((type == PAT_TYPE_UNCACHABLE) ||
           (type == PAT_TYPE_WRCOMB) ||
           (type == PAT_TYPE_WRTHROUGH) ||
           (type == PAT_TYPE_WRPROT) ||
           (type == PAT_TYPE_WRBACK) ||
           (type == PAT_TYPE_UC_MINUS)) ||
         !is_hvm_domain(d) )
        return -EINVAL;

    range = xmalloc(struct hvm_mem_pinned_cacheattr_range);
    if ( range == NULL )
        return -ENOMEM;

    memset(range, 0, sizeof(*range));

    range->start = gfn_start;
    range->end = gfn_end;
    range->type = type;

    list_add_rcu(&range->list, &d->arch.hvm_domain.pinned_cacheattr_ranges);

    return 0;
}

static int hvm_save_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
{
    int i;
    struct vcpu *v;
    struct hvm_hw_mtrr hw_mtrr;
    struct mtrr_state *mtrr_state;
    /* save mtrr&pat */
    for_each_vcpu(d, v)
    {
        mtrr_state = &v->arch.hvm_vcpu.mtrr;

        hw_mtrr.msr_pat_cr = v->arch.hvm_vcpu.pat_cr;

        hw_mtrr.msr_mtrr_def_type = mtrr_state->def_type
                                    | (mtrr_state->enabled << 10);
        hw_mtrr.msr_mtrr_cap = mtrr_state->mtrr_cap;

        for ( i = 0; i < MTRR_VCNT; i++ )
        {
            /* save physbase */
            hw_mtrr.msr_mtrr_var[i*2] =
                ((uint64_t*)mtrr_state->var_ranges)[i*2];
            /* save physmask */
            hw_mtrr.msr_mtrr_var[i*2+1] =
                ((uint64_t*)mtrr_state->var_ranges)[i*2+1];
        }

        for ( i = 0; i < NUM_FIXED_MSR; i++ )
            hw_mtrr.msr_mtrr_fixed[i] =
                ((uint64_t*)mtrr_state->fixed_ranges)[i];

        if ( hvm_save_entry(MTRR, v->vcpu_id, h, &hw_mtrr) != 0 )
            return 1;
    }
    return 0;
}

static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid, i;
    struct vcpu *v;
    struct mtrr_state *mtrr_state;
    struct hvm_hw_mtrr hw_mtrr;

    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
        return -EINVAL;
    }

    if ( hvm_load_entry(MTRR, h, &hw_mtrr) != 0 )
        return -EINVAL;

    mtrr_state = &v->arch.hvm_vcpu.mtrr;

    pat_msr_set(&v->arch.hvm_vcpu.pat_cr, hw_mtrr.msr_pat_cr);

    mtrr_state->mtrr_cap = hw_mtrr.msr_mtrr_cap;

    for ( i = 0; i < NUM_FIXED_MSR; i++ )
        mtrr_fix_range_msr_set(mtrr_state, i, hw_mtrr.msr_mtrr_fixed[i]);

    for ( i = 0; i < MTRR_VCNT; i++ )
    {
        mtrr_var_range_msr_set(mtrr_state,
                               MTRRphysBase_MSR(i), hw_mtrr.msr_mtrr_var[i*2]);
        mtrr_var_range_msr_set(mtrr_state,
                               MTRRphysMask_MSR(i), hw_mtrr.msr_mtrr_var[i*2+1]);
    }

    mtrr_def_type_msr_set(mtrr_state, hw_mtrr.msr_mtrr_def_type);

    return 0;
}

HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr,
                          1, HVMSR_PER_VCPU);
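
/*
 * Compute the EPT memory type (EMT field) for a gfn -> mfn mapping.
 * *ipat is set when the "ignore PAT memory type" bit should also be set in
 * the EPT entry, i.e. when the returned type must take effect regardless of
 * the guest PAT.
 */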
uint8_t epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
                           uint8_t *ipat, int direct_mmio)
{
    uint8_t gmtrr_mtype, hmtrr_mtype;
    uint32_t type;
    struct vcpu *v = current;

    *ipat = 0;

    if ( (current->domain != d) &&
         ((d->vcpu == NULL) || ((v = d->vcpu[0]) == NULL)) )
        return MTRR_TYPE_WRBACK;

    if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
        return MTRR_TYPE_WRBACK;

    if ( (v == current) && v->domain->arch.hvm_domain.is_in_uc_mode )
        return MTRR_TYPE_UNCACHABLE;

    if ( !mfn_valid(mfn_x(mfn)) )
        return MTRR_TYPE_UNCACHABLE;

    if ( hvm_get_mem_pinned_cacheattr(d, gfn, &type) )
        return type;

    if ( !iommu_enabled )
    {
        *ipat = 1;
        return MTRR_TYPE_WRBACK;
    }

    if ( direct_mmio )
        return MTRR_TYPE_UNCACHABLE;

    if ( iommu_snoop )
    {
        *ipat = 1;
        return MTRR_TYPE_WRBACK;
    }

    gmtrr_mtype = get_mtrr_type(&v->arch.hvm_vcpu.mtrr, (gfn << PAGE_SHIFT));
    hmtrr_mtype = get_mtrr_type(&mtrr_state, (mfn_x(mfn) << PAGE_SHIFT));
    return ((gmtrr_mtype <= hmtrr_mtype) ? gmtrr_mtype : hmtrr_mtype);
}