
xen/arch/x86/tboot.c @ 20931:39424ff0c91c

tboot: fix S3 issue for Intel Trusted Execution Technology.

Unmapped pages cause a page fault when they are MACed, which ultimately
causes S3 to fail.

Signed-off-by: Shane Wang <shane.wang@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Feb 03 09:44:12 2010 +0000 (2010-02-03)
parents 5a224e101cb3
children 102dca3f485b
#include <xen/config.h>
#include <xen/init.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/domain_page.h>
#include <xen/iommu.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/e820.h>
#include <asm/tboot.h>
#include <crypto/vmac.h>

/* tboot=<physical address of shared page> */
static char __initdata opt_tboot[20] = "";
string_param("tboot", opt_tboot);

/* Global pointer to shared data; NULL means no measured launch. */
tboot_shared_t *g_tboot_shared;

static vmac_t domain_mac;     /* MAC for all domains during S3 */
static vmac_t xenheap_mac;    /* MAC for xen heap during S3 */
static vmac_t frametable_mac; /* MAC for frame table during S3 */

static const uuid_t tboot_shared_uuid = TBOOT_SHARED_UUID;

/* used by tboot_protect_mem_regions() and/or tboot_parse_dmar_table() */
static uint64_t __initdata txt_heap_base, __initdata txt_heap_size;
static uint64_t __initdata sinit_base, __initdata sinit_size;

/*
 * TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE)
 */

#define TXT_PUB_CONFIG_REGS_BASE       0xfed30000
#define TXT_PRIV_CONFIG_REGS_BASE      0xfed20000

/* # pages for each config regs space - used by fixmap */
#define NR_TXT_CONFIG_PAGES            ((TXT_PUB_CONFIG_REGS_BASE -        \
                                         TXT_PRIV_CONFIG_REGS_BASE) >> PAGE_SHIFT)

/* offsets from pub/priv config space */
#define TXTCR_SINIT_BASE               0x0270
#define TXTCR_SINIT_SIZE               0x0278
#define TXTCR_HEAP_BASE                0x0300
#define TXTCR_HEAP_SIZE                0x0308

extern char __init_begin[], __per_cpu_start[], __bss_start[];

#define SHA1_SIZE 20
typedef uint8_t sha1_hash_t[SHA1_SIZE];

typedef struct __packed {
    uint32_t    version;           /* currently 6 */
    sha1_hash_t bios_acm_id;
    uint32_t    edx_senter_flags;
    uint64_t    mseg_valid;
    sha1_hash_t sinit_hash;
    sha1_hash_t mle_hash;
    sha1_hash_t stm_hash;
    sha1_hash_t lcp_policy_hash;
    uint32_t    lcp_policy_control;
    uint32_t    rlp_wakeup_addr;
    uint32_t    reserved;
    uint32_t    num_mdrs;
    uint32_t    mdrs_off;
    uint32_t    num_vtd_dmars;
    uint32_t    vtd_dmars_off;
} sinit_mle_data_t;
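
/*
 * Copy size bytes starting at physical address pa into va, mapping the
 * source one page at a time through the FIX_TBOOT_MAP_ADDRESS fixmap slot,
 * so tboot/TXT memory can be read without a permanent mapping.
 */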
static void tboot_copy_memory(unsigned char *va, uint32_t size,
                              unsigned long pa)
{
    unsigned long map_base = 0;
    unsigned char *map_addr = NULL;
    unsigned int i;

    for ( i = 0; i < size; i++ )
    {
        if ( map_base != PFN_DOWN(pa + i) )
        {
            map_base = PFN_DOWN(pa + i);
            set_fixmap(FIX_TBOOT_MAP_ADDRESS, map_base << PAGE_SHIFT);
            map_addr = (unsigned char *)fix_to_virt(FIX_TBOOT_MAP_ADDRESS);
        }
        va[i] = map_addr[pa + i - (map_base << PAGE_SHIFT)];
    }
}
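
/*
 * Parse the tboot= option, map the shared page and validate its UUID and
 * version, then cache the TXT heap and SINIT base/size registers for use by
 * tboot_protect_mem_regions() and tboot_parse_dmar_table().
 */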
void __init tboot_probe(void)
{
    tboot_shared_t *tboot_shared;
    unsigned long p_tboot_shared;

    /* Look for valid page-aligned address for shared page. */
    p_tboot_shared = simple_strtoul(opt_tboot, NULL, 0);
    if ( (p_tboot_shared == 0) || ((p_tboot_shared & ~PAGE_MASK) != 0) )
        return;

    /* Map and check for tboot UUID. */
    set_fixmap(FIX_TBOOT_SHARED_BASE, p_tboot_shared);
    tboot_shared = (tboot_shared_t *)fix_to_virt(FIX_TBOOT_SHARED_BASE);
    if ( tboot_shared == NULL )
        return;
    if ( memcmp(&tboot_shared_uuid, (uuid_t *)tboot_shared, sizeof(uuid_t)) )
        return;

    /* new tboot_shared (w/ GAS support, integrity, etc.) is not backwards
       compatible */
    if ( tboot_shared->version < 4 ) {
        printk("unsupported version of tboot (%u)\n", tboot_shared->version);
        return;
    }

    g_tboot_shared = tboot_shared;
    printk("TBOOT: found shared page at phys addr %lx:\n", p_tboot_shared);
    printk(" version: %d\n", tboot_shared->version);
    printk(" log_addr: 0x%08x\n", tboot_shared->log_addr);
    printk(" shutdown_entry: 0x%08x\n", tboot_shared->shutdown_entry);
    printk(" tboot_base: 0x%08x\n", tboot_shared->tboot_base);
    printk(" tboot_size: 0x%x\n", tboot_shared->tboot_size);

    /* these will be needed by tboot_protect_mem_regions() and/or
       tboot_parse_dmar_table(), so get them now */

    txt_heap_base = txt_heap_size = sinit_base = sinit_size = 0;
    /* TXT Heap */
    tboot_copy_memory((unsigned char *)&txt_heap_base, sizeof(txt_heap_base),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_BASE);
    tboot_copy_memory((unsigned char *)&txt_heap_size, sizeof(txt_heap_size),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_HEAP_SIZE);
    /* SINIT */
    tboot_copy_memory((unsigned char *)&sinit_base, sizeof(sinit_base),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_BASE);
    tboot_copy_memory((unsigned char *)&sinit_size, sizeof(sinit_size),
                      TXT_PUB_CONFIG_REGS_BASE + TXTCR_SINIT_SIZE);
}

/* definitions from xen/drivers/passthrough/vtd/iommu.h
 * used to walk through vtd page tables */
#define LEVEL_STRIDE       (9)
#define PTE_NUM            (1<<LEVEL_STRIDE)
#define dma_pte_present(p) (((p).val & 3) != 0)
#define dma_pte_addr(p)    ((p).val & PAGE_MASK_4K)
#define agaw_to_level(val) ((val)+2)
struct dma_pte {
    u64 val;
};
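
/*
 * Recursively MAC the VT-d page tables rooted at pt_maddr: each table page
 * is mapped, fed into the VMAC context, and its present entries are
 * followed down to the next level.
 */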
static void update_iommu_mac(vmac_ctx_t *ctx, uint64_t pt_maddr, int level)
{
    int i;
    struct dma_pte *pt_vaddr, *pte;
    int next_level = level - 1;

    if ( pt_maddr == 0 )
        return;

    pt_vaddr = (struct dma_pte *)map_domain_page(pt_maddr >> PAGE_SHIFT_4K);
    vmac_update((void *)pt_vaddr, PAGE_SIZE, ctx);

    for ( i = 0; i < PTE_NUM; i++ )
    {
        pte = &pt_vaddr[i];
        if ( !dma_pte_present(*pte) )
            continue;

        if ( next_level >= 1 )
            update_iommu_mac(ctx, dma_pte_addr(*pte), next_level);
    }

    unmap_domain_page(pt_vaddr);
}

#define is_page_in_use(page) \
    (page_state_is(page, inuse) || page_state_is(page, offlining))
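
/*
 * MAC every in-use, non-xenheap page whose count_info marks it as a page
 * table (PGC_page_table).
 */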
static void update_pagetable_mac(vmac_ctx_t *ctx)
{
    unsigned long mfn;

    for ( mfn = 0; mfn < max_page; mfn++ )
    {
        struct page_info *page = mfn_to_page(mfn);

        if ( !mfn_valid(mfn) )
            continue;
        if ( is_page_in_use(page) && !is_xen_heap_page(page) ) {
            if ( page->count_info & PGC_page_table ) {
                void *pg = map_domain_page(mfn);
                vmac_update(pg, PAGE_SIZE, ctx);
                unmap_domain_page(pg);
            }
        }
    }
}
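
/*
 * Generate a MAC over the memory of every domain that has S3 integrity
 * enabled: each page on the domain's page list plus its VT-d page tables,
 * followed by all page-table pages via update_pagetable_mac().  The
 * caller-supplied VMAC key is wiped from the context before returning.
 */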
static void tboot_gen_domain_integrity(const uint8_t key[TB_KEY_SIZE],
                                       vmac_t *mac)
{
    struct domain *d;
    struct page_info *page;
    uint8_t nonce[16] = {};
    vmac_ctx_t ctx;

    vmac_set_key((uint8_t *)key, &ctx);
    for_each_domain( d )
    {
        if ( !d->arch.s3_integrity )
            continue;
        printk("MACing Domain %u\n", d->domain_id);

        page_list_for_each(page, &d->page_list)
        {
            void *pg = __map_domain_page(page);
            vmac_update(pg, PAGE_SIZE, &ctx);
            unmap_domain_page(pg);
        }

        if ( !is_idle_domain(d) )
        {
            struct hvm_iommu *hd = domain_hvm_iommu(d);
            update_iommu_mac(&ctx, hd->pgd_maddr, agaw_to_level(hd->agaw));
        }
    }

    /* MAC all shadow page tables */
    update_pagetable_mac(&ctx);

    *mac = vmac(NULL, 0, nonce, NULL, &ctx);

    printk("MAC for domains is: 0x%08"PRIx64"\n", *mac);

    /* wipe ctx to ensure key is not left in memory */
    memset(&ctx, 0, sizeof(ctx));
}

/*
 * For stack overflow detection in debug build, a guard page is set up.
 * This fn is used to detect whether a page is in the guarded pages for
 * the above reason.
 */
static int mfn_in_guarded_stack(unsigned long mfn)
{
    extern void *stack_base[NR_CPUS];
    void *p;
    int i;

    for ( i = 0; i < NR_CPUS; i++ )
    {
        if ( !stack_base[i] )
            continue;
        p = (void *)((unsigned long)stack_base[i] + STACK_SIZE -
                     PRIMARY_STACK_SIZE - PAGE_SIZE);
        if ( mfn == virt_to_mfn(p) )
            return -1;
    }

    return 0;
}
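
/*
 * Generate a MAC over the in-use xenheap pages, skipping the Xen image
 * itself, the tboot range (and its page tables), and the per-CPU stack
 * guard pages detected by mfn_in_guarded_stack().
 */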
static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
                                        vmac_t *mac)
{
    unsigned long mfn;
    uint8_t nonce[16] = {};
    vmac_ctx_t ctx;

    vmac_set_key((uint8_t *)key, &ctx);
    for ( mfn = 0; mfn < max_page; mfn++ )
    {
        struct page_info *page = __mfn_to_page(mfn);

        if ( !mfn_valid(mfn) )
            continue;
        if ( (mfn << PAGE_SHIFT) < __pa(&_end) )
            continue; /* skip Xen */
        if ( (mfn >= PFN_DOWN(g_tboot_shared->tboot_base - 3 * PAGE_SIZE))
             && (mfn < PFN_UP(g_tboot_shared->tboot_base
                              + g_tboot_shared->tboot_size
                              + 3 * PAGE_SIZE)) )
            continue; /* skip tboot and its page tables */

        if ( is_page_in_use(page) && is_xen_heap_page(page) ) {
            void *pg;

            if ( mfn_in_guarded_stack(mfn) )
                continue; /* skip guard stack, see memguard_guard_stack() in mm.c */

            pg = mfn_to_virt(mfn);
            vmac_update((uint8_t *)pg, PAGE_SIZE, &ctx);
        }
    }
    *mac = vmac(NULL, 0, nonce, NULL, &ctx);

    printk("MAC for xenheap is: 0x%08"PRIx64"\n", *mac);

    /* wipe ctx to ensure key is not left in memory */
    memset(&ctx, 0, sizeof(ctx));
}
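
/*
 * Generate a MAC over the frame table, walking pdx_group_valid so that only
 * struct page_info ranges backed by valid pdx groups are fed to the VMAC
 * context.
 */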
static void tboot_gen_frametable_integrity(const uint8_t key[TB_KEY_SIZE],
                                           vmac_t *mac)
{
    unsigned int sidx, eidx, nidx;
    unsigned int max_idx = (max_pdx + PDX_GROUP_COUNT - 1)/PDX_GROUP_COUNT;
    uint8_t nonce[16] = {};
    vmac_ctx_t ctx;

    vmac_set_key((uint8_t *)key, &ctx);
    for ( sidx = 0; ; sidx = nidx )
    {
        eidx = find_next_zero_bit(pdx_group_valid, max_idx, sidx);
        nidx = find_next_bit(pdx_group_valid, max_idx, eidx);
        if ( nidx >= max_idx )
            break;
        vmac_update((uint8_t *)pdx_to_page(sidx * PDX_GROUP_COUNT),
                    pdx_to_page(eidx * PDX_GROUP_COUNT)
                    - pdx_to_page(sidx * PDX_GROUP_COUNT), &ctx);
    }
    vmac_update((uint8_t *)pdx_to_page(sidx * PDX_GROUP_COUNT),
                pdx_to_page(max_pdx - 1) + 1
                - pdx_to_page(sidx * PDX_GROUP_COUNT), &ctx);

    *mac = vmac(NULL, 0, nonce, NULL, &ctx);

    printk("MAC for frametable is: 0x%08"PRIx64"\n", *mac);

    /* wipe ctx to ensure key is not left in memory */
    memset(&ctx, 0, sizeof(ctx));
}
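
/*
 * Hand control to tboot for the requested shutdown type.  The tboot range
 * is identity-mapped first; for S3, the trampoline, hypervisor code+data,
 * per-CPU and bss regions are advertised for tboot to MAC, and the domain,
 * frame table and xenheap MACs are computed for checking in
 * tboot_s3_resume().
 */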
void tboot_shutdown(uint32_t shutdown_type)
{
    uint32_t map_base, map_size;
    int err;

    g_tboot_shared->shutdown_type = shutdown_type;

    local_irq_disable();

    /* we may be called from an interrupt context, so to prevent */
    /* 'ASSERT(!in_irq());' in alloc_domheap_pages(), decrease count */
    while ( in_irq() )
        irq_exit();

    /* Create identity map for tboot shutdown code. */
    /* do before S3 integrity because mapping tboot may change xenheap */
    map_base = PFN_DOWN(g_tboot_shared->tboot_base);
    map_size = PFN_UP(g_tboot_shared->tboot_size);

    err = map_pages_to_xen(map_base << PAGE_SHIFT, map_base, map_size,
                           __PAGE_HYPERVISOR);
    if ( err != 0 ) {
        printk("error (0x%x) mapping tboot pages (mfns) @ 0x%x, 0x%x\n", err,
               map_base, map_size);
        return;
    }

    /* if this is S3 then set regions to MAC */
    if ( shutdown_type == TB_SHUTDOWN_S3 ) {
        /*
         * Xen regions for tboot to MAC
         */
        g_tboot_shared->num_mac_regions = 4;
        /* S3 resume code (and other real mode trampoline code) */
        g_tboot_shared->mac_regions[0].start = bootsym_phys(trampoline_start);
        g_tboot_shared->mac_regions[0].size = bootsym_phys(trampoline_end) -
                                              bootsym_phys(trampoline_start);
        /* hypervisor code + data */
        g_tboot_shared->mac_regions[1].start = (uint64_t)__pa(&_stext);
        g_tboot_shared->mac_regions[1].size = __pa(&__init_begin) -
                                              __pa(&_stext);
        /* per-cpu data */
        g_tboot_shared->mac_regions[2].start = (uint64_t)__pa(&__per_cpu_start);
        g_tboot_shared->mac_regions[2].size =
            (((uint64_t)last_cpu(cpu_possible_map) + 1) << PERCPU_SHIFT);
        /* bss */
        g_tboot_shared->mac_regions[3].start = (uint64_t)__pa(&__bss_start);
        g_tboot_shared->mac_regions[3].size = __pa(&_end) - __pa(&__bss_start);

        /*
         * MAC domains and other Xen memory
         */
        /* Xen has no better entropy source for MAC key than tboot's */
        /* MAC domains first in case it perturbs xenheap */
        tboot_gen_domain_integrity(g_tboot_shared->s3_key, &domain_mac);
        tboot_gen_frametable_integrity(g_tboot_shared->s3_key, &frametable_mac);
        tboot_gen_xenheap_integrity(g_tboot_shared->s3_key, &xenheap_mac);
    }

    write_ptbase(idle_vcpu[0]);

    ((void(*)(void))(unsigned long)g_tboot_shared->shutdown_entry)();

    BUG(); /* should not reach here */
}

int tboot_in_measured_env(void)
{
    return (g_tboot_shared != NULL);
}
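
/*
 * Mark the TXT heap, the SINIT region and the TXT private configuration
 * space as E820_UNUSABLE so Xen will not use them.  Returns 1 on success
 * (or when not running in a measured environment), 0 otherwise.
 */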
int __init tboot_protect_mem_regions(void)
{
    int rc;

    if ( !tboot_in_measured_env() )
        return 1;

    /* TXT Heap */
    if ( txt_heap_base == 0 )
        return 0;
    rc = e820_change_range_type(
        &e820, txt_heap_base, txt_heap_base + txt_heap_size,
        E820_RESERVED, E820_UNUSABLE);
    if ( !rc )
        return 0;

    /* SINIT */
    if ( sinit_base == 0 )
        return 0;
    rc = e820_change_range_type(
        &e820, sinit_base, sinit_base + sinit_size,
        E820_RESERVED, E820_UNUSABLE);
    if ( !rc )
        return 0;

    /* TXT Private Space */
    rc = e820_change_range_type(
        &e820, TXT_PRIV_CONFIG_REGS_BASE,
        TXT_PRIV_CONFIG_REGS_BASE + NR_TXT_CONFIG_PAGES * PAGE_SIZE,
        E820_RESERVED, E820_UNUSABLE);
    if ( !rc )
        return 0;

    return 1;
}
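
/*
 * In a measured environment the firmware ACPI tables may not be DMA
 * protected, so run dmar_handler on the DMAR copy that SINIT saved in the
 * (DMA-protected) TXT heap, then blank the signature of the real DMAR
 * table so dom0 does not use it.
 */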
int __init tboot_parse_dmar_table(acpi_table_handler dmar_handler)
{
    struct acpi_table_header *dmar_table;
    int rc;
    uint64_t size;
    uint32_t dmar_table_length;
    unsigned long pa;
    sinit_mle_data_t sinit_mle_data;
    unsigned char *dmar_table_raw;

    if ( !tboot_in_measured_env() )
        return acpi_table_parse(ACPI_SIG_DMAR, dmar_handler);

    /* ACPI tables may not be DMA protected by tboot, so use DMAR copy */
    /* SINIT saved in SinitMleData in TXT heap (which is DMA protected) */

    if ( txt_heap_base == 0 )
        return 1;

    /* map TXT heap into Xen addr space */

    /* walk heap to SinitMleData */
    pa = txt_heap_base;
    /* skip BiosData */
    tboot_copy_memory((unsigned char *)&size, sizeof(size), pa);
    pa += size;
    /* skip OsMleData */
    tboot_copy_memory((unsigned char *)&size, sizeof(size), pa);
    pa += size;
    /* skip OsSinitData */
    tboot_copy_memory((unsigned char *)&size, sizeof(size), pa);
    pa += size;
    /* now points to SinitMleDataSize; set to SinitMleData */
    pa += sizeof(uint64_t);
    tboot_copy_memory((unsigned char *)&sinit_mle_data, sizeof(sinit_mle_data),
                      pa);
    /* get addr of DMAR table */
    pa += sinit_mle_data.vtd_dmars_off - sizeof(uint64_t);
    tboot_copy_memory((unsigned char *)&dmar_table_length,
                      sizeof(dmar_table_length),
                      pa + sizeof(char) * ACPI_NAME_SIZE);
    dmar_table_raw = xmalloc_array(unsigned char, dmar_table_length);
    tboot_copy_memory(dmar_table_raw, dmar_table_length, pa);
    dmar_table = (struct acpi_table_header *)dmar_table_raw;
    rc = dmar_handler(dmar_table);
    xfree(dmar_table_raw);

    /* acpi_parse_dmar() zaps ACPI DMAR signature in TXT heap table */
    /* but dom0 will read real table, so must zap it there too */
    dmar_table = NULL;
    acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_table);
    if ( dmar_table != NULL )
        ((struct acpi_table_dmar *)dmar_table)->header.signature[0] = '\0';

    return rc;
}
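
/*
 * On S3 resume, recompute the xenheap, frame table and domain MACs (in the
 * reverse order of tboot_shutdown()) and compare them against the values
 * saved before suspend; a non-zero return identifies the first mismatch.
 */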
int tboot_s3_resume(void)
{
    vmac_t mac;

    if ( !tboot_in_measured_env() )
        return 0;

    /* need to do these in reverse order of shutdown */
    tboot_gen_xenheap_integrity(g_tboot_shared->s3_key, &mac);
    if ( mac != xenheap_mac )
        return -1;

    tboot_gen_frametable_integrity(g_tboot_shared->s3_key, &mac);
    if ( mac != frametable_mac )
        return -2;

    tboot_gen_domain_integrity(g_tboot_shared->s3_key, &mac);
    if ( mac != domain_mac )
        return -3;

    return 0;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */