debuggers.hg
changeset 20931:39424ff0c91c
tboot: fix S3 issue for Intel Trusted Execution Technology.
Unmapped pages cause a page fault when they are MACed, and this ultimately causes the S3 failure.
Signed-off-by: Shane Wang <shane.wang@intel.com>
| author | Keir Fraser <keir.fraser@citrix.com> |
|---|---|
| date | Wed Feb 03 09:44:12 2010 +0000 (2010-02-03) |
| parents | 526af7ddb9bd |
| children | 2a775968c7a1 |
| files | xen/arch/x86/smpboot.c xen/arch/x86/tboot.c xen/common/page_alloc.c |
line diff
```diff
--- a/xen/arch/x86/smpboot.c	Wed Feb 03 09:42:45 2010 +0000
+++ b/xen/arch/x86/smpboot.c	Wed Feb 03 09:44:12 2010 +0000
@@ -103,7 +103,7 @@ static void map_cpu_to_logical_apicid(vo
 /* State of each CPU. */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-static void *stack_base[NR_CPUS];
+void *stack_base[NR_CPUS];
 DEFINE_SPINLOCK(cpu_add_remove_lock);
 
 /*
```
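Making `stack_base` non-static is what lets tboot.c (next hunk) find each CPU's stack and skip its guard page. Below is a minimal, self-contained sketch of that consumer pattern, assuming illustrative values for `NR_CPUS`, `STACK_SIZE`, `PRIMARY_STACK_SIZE` and `PAGE_SIZE` and a hypothetical `guard_page_of()` helper; neither the constants' values nor the helper come from the patch, only the guard-page offset does.

```c
#include <stdio.h>
#include <stdint.h>

/* Stand-ins for Xen's build-time constants (illustrative values only). */
#define NR_CPUS            8
#define PAGE_SIZE          4096UL
#define STACK_SIZE         (8 * PAGE_SIZE)
#define PRIMARY_STACK_SIZE (6 * PAGE_SIZE)

/* In Xen this array lives in xen/arch/x86/smpboot.c; once it is no longer
 * static, a consumer such as tboot.c can reach it with a plain extern
 * declaration. Defined locally here so the sketch links on its own. */
void *stack_base[NR_CPUS];

/* Hypothetical helper (not in Xen): address of the guard page inside CPU
 * `cpu`'s stack, using the same offset as mfn_in_guarded_stack() in the
 * tboot.c hunk below. */
static void *guard_page_of(unsigned int cpu)
{
    if ( cpu >= NR_CPUS || stack_base[cpu] == NULL )
        return NULL;
    return (void *)((uintptr_t)stack_base[cpu] + STACK_SIZE -
                    PRIMARY_STACK_SIZE - PAGE_SIZE);
}

int main(void)
{
    static unsigned char fake_stack[STACK_SIZE];  /* pretend CPU0's stack */

    stack_base[0] = fake_stack;
    printf("CPU0 guard page at %p\n", guard_page_of(0));
    return 0;
}
```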
```diff
--- a/xen/arch/x86/tboot.c	Wed Feb 03 09:42:45 2010 +0000
+++ b/xen/arch/x86/tboot.c	Wed Feb 03 09:44:12 2010 +0000
@@ -174,7 +174,7 @@ static void update_iommu_mac(vmac_ctx_t
 }
 
 #define is_page_in_use(page) \
-    ((page->count_info & PGC_count_mask) != 0 || page->count_info == 0)
+    (page_state_is(page, inuse) || page_state_is(page, offlining))
 
 static void update_pagetable_mac(vmac_ctx_t *ctx)
 {
@@ -236,6 +236,30 @@ static void tboot_gen_domain_integrity(c
     memset(&ctx, 0, sizeof(ctx));
 }
 
+/*
+ * For stack overflow detection in debug build, a guard page is set up.
+ * This fn is used to detect whether a page is in the guarded pages for
+ * the above reason.
+ */
+static int mfn_in_guarded_stack(unsigned long mfn)
+{
+    extern void *stack_base[NR_CPUS];
+    void *p;
+    int i;
+
+    for ( i = 0; i < NR_CPUS; i++ )
+    {
+        if ( !stack_base[i] )
+            continue;
+        p = (void *)((unsigned long)stack_base[i] + STACK_SIZE -
+                     PRIMARY_STACK_SIZE - PAGE_SIZE);
+        if ( mfn == virt_to_mfn(p) )
+            return -1;
+    }
+
+    return 0;
+}
+
 static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
                                         vmac_t *mac)
 {
@@ -250,8 +274,21 @@ static void tboot_gen_xenheap_integrity(
 
         if ( !mfn_valid(mfn) )
             continue;
+        if ( (mfn << PAGE_SHIFT) < __pa(&_end) )
+            continue; /* skip Xen */
+        if ( (mfn >= PFN_DOWN(g_tboot_shared->tboot_base - 3 * PAGE_SIZE))
+             && (mfn < PFN_UP(g_tboot_shared->tboot_base
+                              + g_tboot_shared->tboot_size
+                              + 3 * PAGE_SIZE)) )
+            continue; /* skip tboot and its page tables */
+
         if ( is_page_in_use(page) && is_xen_heap_page(page) ) {
-            void *pg = mfn_to_virt(mfn);
+            void *pg;
+
+            if ( mfn_in_guarded_stack(mfn) )
+                continue; /* skip guard stack, see memguard_guard_stack() in mm.c */
+
+            pg = mfn_to_virt(mfn);
             vmac_update((uint8_t *)pg, PAGE_SIZE, &ctx);
         }
     }
@@ -266,12 +303,27 @@ static void tboot_gen_xenheap_integrity(
 static void tboot_gen_frametable_integrity(const uint8_t key[TB_KEY_SIZE],
                                            vmac_t *mac)
 {
+    unsigned int sidx, eidx, nidx;
+    unsigned int max_idx = (max_pdx + PDX_GROUP_COUNT - 1)/PDX_GROUP_COUNT;
     uint8_t nonce[16] = {};
     vmac_ctx_t ctx;
 
     vmac_set_key((uint8_t *)key, &ctx);
-    *mac = vmac((uint8_t *)frame_table,
-                PFN_UP(max_pdx * sizeof(*frame_table)), nonce, NULL, &ctx);
+    for ( sidx = 0; ; sidx = nidx )
+    {
+        eidx = find_next_zero_bit(pdx_group_valid, max_idx, sidx);
+        nidx = find_next_bit(pdx_group_valid, max_idx, eidx);
+        if ( nidx >= max_idx )
+            break;
+        vmac_update((uint8_t *)pdx_to_page(sidx * PDX_GROUP_COUNT),
+                    pdx_to_page(eidx * PDX_GROUP_COUNT)
+                    - pdx_to_page(sidx * PDX_GROUP_COUNT), &ctx);
+    }
+    vmac_update((uint8_t *)pdx_to_page(sidx * PDX_GROUP_COUNT),
+                pdx_to_page(max_pdx - 1) + 1
+                - pdx_to_page(sidx * PDX_GROUP_COUNT), &ctx);
+
+    *mac = vmac(NULL, 0, nonce, NULL, &ctx);
 
     printk("MAC for frametable is: 0x%08"PRIx64"\n", *mac);
 
```
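The reworked `tboot_gen_frametable_integrity()` no longer MACs the frame table as one flat byte range: it walks `pdx_group_valid` and only feeds the runs of populated groups into the VMAC, so holes in the frame table are never touched. Below is a minimal sketch of that iteration pattern, assuming a byte-per-entry validity array, naive stand-ins for `find_next_bit()`/`find_next_zero_bit()` (the real ones are Xen bitops over `unsigned long` words), and an `accumulate()` placeholder for `vmac_update()`.

```c
#include <stdio.h>

#define MAX_IDX 16

/* Naive stand-in for Xen's find_next_zero_bit(): first hole at or after start. */
static unsigned int find_next_zero_bit(const unsigned char *map,
                                       unsigned int max, unsigned int start)
{
    while ( start < max && map[start] )
        start++;
    return start;
}

/* Naive stand-in for Xen's find_next_bit(): first valid group at or after start. */
static unsigned int find_next_bit(const unsigned char *map,
                                  unsigned int max, unsigned int start)
{
    while ( start < max && !map[start] )
        start++;
    return start;
}

/* Placeholder for vmac_update(): just report which group range would be MACed. */
static void accumulate(unsigned int sidx, unsigned int eidx)
{
    printf("MAC pdx groups [%u, %u)\n", sidx, eidx);
}

int main(void)
{
    /* 1 = pdx group backed by frame-table entries, 0 = hole. */
    unsigned char pdx_group_valid[MAX_IDX] =
        { 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1 };
    unsigned int sidx, eidx, nidx;

    for ( sidx = 0; ; sidx = nidx )
    {
        eidx = find_next_zero_bit(pdx_group_valid, MAX_IDX, sidx); /* end of this run */
        nidx = find_next_bit(pdx_group_valid, MAX_IDX, eidx);      /* start of next   */
        if ( nidx >= MAX_IDX )
            break;
        accumulate(sidx, eidx);
    }
    /* Final run, mirroring the tail vmac_update() above that runs up to max_pdx. */
    accumulate(sidx, MAX_IDX);

    return 0;
}
```

With the sample bitmap this prints the runs [0, 3), [5, 7), [8, 12) and [14, 16), i.e. exactly the valid stretches and nothing in the holes.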
```diff
--- a/xen/common/page_alloc.c	Wed Feb 03 09:42:45 2010 +0000
+++ b/xen/common/page_alloc.c	Wed Feb 03 09:44:12 2010 +0000
@@ -932,8 +932,6 @@ void init_xenheap_pages(paddr_t ps, padd
     if ( pe <= ps )
         return;
 
-    memguard_guard_range(maddr_to_virt(ps), pe - ps);
-
     /*
      * Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
      * prevent merging of power-of-two blocks across the zone boundary.
@@ -943,6 +941,8 @@ void init_xenheap_pages(paddr_t ps, padd
     if ( !is_xen_heap_mfn(paddr_to_pfn(pe)) )
         pe -= PAGE_SIZE;
 
+    memguard_guard_range(maddr_to_virt(ps), pe - ps);
+
     init_heap_pages(maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
 }
 
```
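Moving `memguard_guard_range()` below the boundary-page adjustment means the guard now covers exactly the range handed to `init_heap_pages()`. With the old ordering a trimmed buffer page could end up guarded (unmapped in debug builds) without ever entering the heap, and MACing it on the S3 path would then fault, which is presumably the failure the commit message describes. The small illustrative sketch below contrasts the two orderings; the addresses are made up and `guard_range()` is a stub standing in for the real `memguard_guard_range()`.

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

/* Stub for memguard_guard_range(): in debug Xen builds the real call unmaps
 * the range so stray accesses fault. */
static void guard_range(uint64_t ps, uint64_t pe)
{
    printf("guard (unmapped in debug builds): [%#llx, %#llx)\n",
           (unsigned long long)ps, (unsigned long long)pe);
}

int main(void)
{
    /* Made-up xenheap range handed to init_xenheap_pages(). */
    uint64_t ps = 0x100000, pe = 0x180000;

    /* Old ordering: guard before trimming ... */
    guard_range(ps, pe);              /* covers 0x100000-0x180000 */

    /* ... then drop a one-page buffer at each boundary that is not adjacent
     * to the xenheap (assume both ends are trimmed here; in Xen this is the
     * is_xen_heap_mfn() check). These pages never reach init_heap_pages(),
     * yet with the old ordering they were left unmapped. */
    ps += PAGE_SIZE;
    pe -= PAGE_SIZE;

    /* New ordering: guard only the range the heap will actually manage. */
    guard_range(ps, pe);              /* covers 0x101000-0x17f000 */

    return 0;
}
```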