diff -r cdff378e61eb xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Tue Oct 05 12:43:27 2010 +0200
+++ b/xen/arch/x86/hvm/hvm.c	Wed Oct 06 11:09:20 2010 +0200
@@ -1356,30 +1356,16 @@ int hvm_virtual_to_linear_addr(
     return 0;
 }
 
-static void *hvm_map_entry(unsigned long va)
+void *hvm_map_frame(paddr_t pa, bool_t writable)
 {
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
-    uint32_t pfec;
     struct vcpu *v = current;
     struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
 
-    if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE )
-    {
-        gdprintk(XENLOG_ERR, "Descriptor table entry "
-                 "straddles page boundary\n");
-        domain_crash(current->domain);
-        return NULL;
-    }
-
-    /* We're mapping on behalf of the segment-load logic, which might
-     * write the accessed flags in the descriptors (in 32-bit mode), but
-     * we still treat it as a kernel-mode read (i.e. no access checks). */
-    pfec = PFEC_page_present;
-    gfn = paging_gva_to_gfn(current, va, &pfec);
-    if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
-        return NULL;
-    mfn = mfn_x(gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0));
+    gfn = pa >> PAGE_SHIFT;
+    mfn = mfn_x(writable ? gfn_to_mfn_unshare(p2m, gfn, &p2mt, 0)
+                         : gfn_to_mfn(p2m, gfn, &p2mt));
     if ( p2m_is_paging(p2mt) )
     {
         p2m_mem_paging_populate(p2m, gfn);
@@ -1389,24 +1375,52 @@ static void *hvm_map_entry(unsigned long
         return NULL;
     if ( !p2m_is_ram(p2mt) )
     {
-        gdprintk(XENLOG_ERR, "Failed to look up descriptor table entry\n");
-        domain_crash(current->domain);
+        gdprintk(XENLOG_ERR, "Failed to look up frame\n");
         return NULL;
     }
 
     ASSERT(mfn_valid(mfn));
 
-    paging_mark_dirty(current->domain, mfn);
-
-    return (char *)map_domain_page(mfn) + (va & ~PAGE_MASK);
+    if ( writable )
+        paging_mark_dirty(v->domain, mfn);
+
+    return (char *)map_domain_page(mfn) + (pa & ~PAGE_MASK);
 }
 
-static void hvm_unmap_entry(void *p)
+void hvm_unmap_frame(void *p)
 {
     if ( p )
         unmap_domain_page(p);
 }
 
+static void *hvm_map_entry(unsigned long va, bool_t writable)
+{
+    unsigned long gfn;
+    uint32_t pfec;
+    paddr_t pa;
+    struct vcpu *v = current;
+
+    if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE )
+    {
+        gdprintk(XENLOG_ERR, "Descriptor table entry "
+                 "straddles page boundary\n");
+        domain_crash(v->domain);
+        return NULL;
+    }
+
+    /* We're mapping on behalf of the segment-load logic, which might
+     * write the accessed flags in the descriptors (in 32-bit mode), but
+     * we still treat it as a kernel-mode read (i.e. no access checks). */
+    pfec = PFEC_page_present;
+    gfn = paging_gva_to_gfn(current, va, &pfec);
+    if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
+        return NULL;
+    pa = (gfn << PAGE_SHIFT) + (va & ~PAGE_MASK);
+    return hvm_map_frame(pa, writable);
+}
+
+#define hvm_unmap_entry hvm_unmap_frame
+
 static int hvm_load_segment_selector(
     enum x86_segment seg, uint16_t sel)
 {
@@ -1449,7 +1463,7 @@ static int hvm_load_segment_selector(
     if ( ((sel & 0xfff8) + 7) > desctab.limit )
         goto fail;
 
-    pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8));
+    pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8), HVM_MAP_RW);
     if ( pdesc == NULL )
         goto hvm_map_fail;
 
@@ -1566,11 +1580,11 @@ void hvm_task_switch(
         goto out;
     }
 
-    optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8));
+    optss_desc = hvm_map_entry(gdt.base + (prev_tr.sel & 0xfff8), HVM_MAP_RW);
     if ( optss_desc == NULL )
         goto out;
 
-    nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8));
+    nptss_desc = hvm_map_entry(gdt.base + (tss_sel & 0xfff8), HVM_MAP_RW);
     if ( nptss_desc == NULL )
         goto out;
 
diff -r cdff378e61eb xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h	Tue Oct 05 12:43:27 2010 +0200
+++ b/xen/include/asm-x86/hvm/hvm.h	Wed Oct 06 11:09:20 2010 +0200
@@ -335,6 +335,7 @@ enum hvm_access_type {
     hvm_access_read,
     hvm_access_write
 };
+
 int hvm_virtual_to_linear_addr(
     enum x86_segment seg,
     struct segment_register *reg,
@@ -344,6 +345,11 @@ int hvm_virtual_to_linear_addr(
     unsigned int addr_size,
     unsigned long *linear_addr);
 
+#define HVM_MAP_RW 1
+#define HVM_MAP_RO 0
+void *hvm_map_frame(paddr_t pa, bool_t writable);
+void hvm_unmap_frame(void *p);
+
 static inline void hvm_set_info_guest(struct vcpu *v)
 {
     if ( hvm_funcs.set_info_guest )
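
For reference, here is a minimal sketch of how a caller might use the newly
exported interface. read_guest_frame() is a hypothetical helper invented for
illustration; only hvm_map_frame(), hvm_unmap_frame() and HVM_MAP_RO/HVM_MAP_RW
come from the patch above. Since hvm_map_frame() maps a single frame, a caller
must keep each access within one page:

/* Hypothetical helper, for illustration only: copy a few bytes out of
 * guest physical memory via the interface exported above. */
static int read_guest_frame(paddr_t gpa, void *buf, unsigned int len)
{
    void *va;

    /* hvm_map_frame() maps exactly one frame; refuse straddling reads. */
    if ( ((gpa & ~PAGE_MASK) + len) > PAGE_SIZE )
        return -EINVAL;

    /* Read-only mapping: does not unshare the page or mark it dirty. */
    va = hvm_map_frame(gpa, HVM_MAP_RO);
    if ( va == NULL )
        return -EINVAL; /* not RAM, or paging/sharing in progress */

    memcpy(buf, va, len);
    hvm_unmap_frame(va);
    return 0;
}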