# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1265190252 0
# Node ID 39424ff0c91ce95993b83aff919f08710c5c6c1c
# Parent 526af7ddb9bd82d49f0fe6aafe10761864d002e2
tboot: fix S3 issue for Intel Trusted Execution Technology.
Unmapped pages, such as the stack guard pages set up for overflow
detection in debug builds, cause a page fault when they are MACed
across S3 and ultimately make the S3 transition fail.  Skip them (and
tboot's own range) when generating the integrity MACs.
Signed-off-by: Shane Wang <shane.wang@xxxxxxxxx>
---
xen/arch/x86/smpboot.c | 2 -
xen/arch/x86/tboot.c | 60 ++++++++++++++++++++++++++++++++++++++++++++----
xen/common/page_alloc.c | 4 +--
3 files changed, 59 insertions(+), 7 deletions(-)
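As background for the mfn_in_guarded_stack() hunk below, here is a small
standalone model of the guard-page lookup it performs. This is not part of
the patch: PAGE_SIZE, STACK_SIZE, PRIMARY_STACK_SIZE, NR_CPUS and the
guard_page_of()/addr_in_guarded_stack() helpers are illustrative assumptions
that merely mirror the arithmetic used in the hunk.

/* Standalone sketch of the guard-page lookup done by mfn_in_guarded_stack().
 * Sizes below are assumptions for illustration, not taken from Xen. */
#define _POSIX_C_SOURCE 200112L
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT         12
#define PAGE_SIZE          (1UL << PAGE_SHIFT)
#define STACK_SIZE         (8 * PAGE_SIZE)   /* illustrative */
#define PRIMARY_STACK_SIZE (3 * PAGE_SIZE)   /* illustrative */
#define NR_CPUS            4

static void *stack_base[NR_CPUS];

/* Same arithmetic as the patch: the guard page sits immediately below the
 * primary stack, at the top of each CPU's stack allocation. */
static uintptr_t guard_page_of(int cpu)
{
    return (uintptr_t)stack_base[cpu] + STACK_SIZE
           - PRIMARY_STACK_SIZE - PAGE_SIZE;
}

static int addr_in_guarded_stack(uintptr_t addr)
{
    for ( int i = 0; i < NR_CPUS; i++ )
    {
        if ( !stack_base[i] )
            continue;
        if ( (addr & ~(PAGE_SIZE - 1)) == guard_page_of(i) )
            return 1;
    }
    return 0;
}

int main(void)
{
    for ( int i = 0; i < NR_CPUS; i++ )
    {
        if ( posix_memalign(&stack_base[i], PAGE_SIZE, STACK_SIZE) )
            return 1;
        printf("cpu%d stack %p guard page %#lx\n",
               i, stack_base[i], (unsigned long)guard_page_of(i));
    }
    /* An address inside CPU0's guard page is skipped; its stack base is not. */
    printf("guard hit: %d\n", addr_in_guarded_stack(guard_page_of(0) + 16));
    printf("base  hit: %d\n", addr_in_guarded_stack((uintptr_t)stack_base[0]));
    return 0;
}

The point of the layout: reading such a guard page (unmapped by
memguard_guard_stack() in debug builds) during MAC generation faults, which
is exactly what the skip in tboot_gen_xenheap_integrity() avoids.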
diff -r 526af7ddb9bd -r 39424ff0c91c xen/arch/x86/smpboot.c
--- a/xen/arch/x86/smpboot.c Wed Feb 03 09:42:45 2010 +0000
+++ b/xen/arch/x86/smpboot.c Wed Feb 03 09:44:12 2010 +0000
@@ -103,7 +103,7 @@ static void map_cpu_to_logical_apicid(vo
/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
-static void *stack_base[NR_CPUS];
+void *stack_base[NR_CPUS];
DEFINE_SPINLOCK(cpu_add_remove_lock);
/*
diff -r 526af7ddb9bd -r 39424ff0c91c xen/arch/x86/tboot.c
--- a/xen/arch/x86/tboot.c Wed Feb 03 09:42:45 2010 +0000
+++ b/xen/arch/x86/tboot.c Wed Feb 03 09:44:12 2010 +0000
@@ -174,7 +174,7 @@ static void update_iommu_mac(vmac_ctx_t
}
#define is_page_in_use(page) \
- ((page->count_info & PGC_count_mask) != 0 || page->count_info == 0)
+ (page_state_is(page, inuse) || page_state_is(page, offlining))
static void update_pagetable_mac(vmac_ctx_t *ctx)
{
@@ -236,6 +236,30 @@ static void tboot_gen_domain_integrity(c
memset(&ctx, 0, sizeof(ctx));
}
+/*
+ * For stack overflow detection in debug build, a guard page is set up.
+ * This fn is used to detect whether a page is in the guarded pages for
+ * the above reason.
+ */
+static int mfn_in_guarded_stack(unsigned long mfn)
+{
+ extern void *stack_base[NR_CPUS];
+ void *p;
+ int i;
+
+ for ( i = 0; i < NR_CPUS; i++ )
+ {
+ if ( !stack_base[i] )
+ continue;
+ p = (void *)((unsigned long)stack_base[i] + STACK_SIZE -
+ PRIMARY_STACK_SIZE - PAGE_SIZE);
+ if ( mfn == virt_to_mfn(p) )
+ return -1;
+ }
+
+ return 0;
+}
+
static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
vmac_t *mac)
{
@@ -250,8 +274,21 @@ static void tboot_gen_xenheap_integrity(
if ( !mfn_valid(mfn) )
continue;
+ if ( (mfn << PAGE_SHIFT) < __pa(&_end) )
+ continue; /* skip Xen */
+ if ( (mfn >= PFN_DOWN(g_tboot_shared->tboot_base - 3 * PAGE_SIZE))
+ && (mfn < PFN_UP(g_tboot_shared->tboot_base
+ + g_tboot_shared->tboot_size
+ + 3 * PAGE_SIZE)) )
+ continue; /* skip tboot and its page tables */
+
if ( is_page_in_use(page) && is_xen_heap_page(page) ) {
- void *pg = mfn_to_virt(mfn);
+ void *pg;
+
+ if ( mfn_in_guarded_stack(mfn) )
+ continue; /* skip guard stack, see memguard_guard_stack() in mm.c */
+
+ pg = mfn_to_virt(mfn);
vmac_update((uint8_t *)pg, PAGE_SIZE, &ctx);
}
}
@@ -266,12 +303,27 @@ static void tboot_gen_frametable_integri
static void tboot_gen_frametable_integrity(const uint8_t key[TB_KEY_SIZE],
vmac_t *mac)
{
+ unsigned int sidx, eidx, nidx;
+ unsigned int max_idx = (max_pdx + PDX_GROUP_COUNT - 1)/PDX_GROUP_COUNT;
uint8_t nonce[16] = {};
vmac_ctx_t ctx;
vmac_set_key((uint8_t *)key, &ctx);
- *mac = vmac((uint8_t *)frame_table,
- PFN_UP(max_pdx * sizeof(*frame_table)), nonce, NULL, &ctx);
+ for ( sidx = 0; ; sidx = nidx )
+ {
+ eidx = find_next_zero_bit(pdx_group_valid, max_idx, sidx);
+ nidx = find_next_bit(pdx_group_valid, max_idx, eidx);
+ if ( nidx >= max_idx )
+ break;
+ vmac_update((uint8_t *)pdx_to_page(sidx * PDX_GROUP_COUNT),
+ pdx_to_page(eidx * PDX_GROUP_COUNT)
+ - pdx_to_page(sidx * PDX_GROUP_COUNT), &ctx);
+ }
+ vmac_update((uint8_t *)pdx_to_page(sidx * PDX_GROUP_COUNT),
+ pdx_to_page(max_pdx - 1) + 1
+ - pdx_to_page(sidx * PDX_GROUP_COUNT), &ctx);
+
+ *mac = vmac(NULL, 0, nonce, NULL, &ctx);
printk("MAC for frametable is: 0x%08"PRIx64"\n", *mac);
diff -r 526af7ddb9bd -r 39424ff0c91c xen/common/page_alloc.c
--- a/xen/common/page_alloc.c Wed Feb 03 09:42:45 2010 +0000
+++ b/xen/common/page_alloc.c Wed Feb 03 09:44:12 2010 +0000
@@ -932,8 +932,6 @@ void init_xenheap_pages(paddr_t ps, padd
if ( pe <= ps )
return;
- memguard_guard_range(maddr_to_virt(ps), pe - ps);
-
/*
* Yuk! Ensure there is a one-page buffer between Xen and Dom zones, to
* prevent merging of power-of-two blocks across the zone boundary.
@@ -942,6 +940,8 @@ void init_xenheap_pages(paddr_t ps, padd
ps += PAGE_SIZE;
if ( !is_xen_heap_mfn(paddr_to_pfn(pe)) )
pe -= PAGE_SIZE;
+
+ memguard_guard_range(maddr_to_virt(ps), pe - ps);
init_heap_pages(maddr_to_page(ps), (pe - ps) >> PAGE_SHIFT);
}