[Xen-devel] [PATCH 15/17] [V3] PVH xen: Miscellaneous changes
This patch contains miscellaneous changes: the iopl and iobitmap physdev
calls are disallowed for PVH, 32-bit PVH guests are rejected, and several
"!is_hvm" checks are tightened to "is_pv" now that PVH is a distinct guest
type (see the note below the fold).

Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
---
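Note below the fold: with PVH added as a third guest type, "!is_hvm" no
longer implies "is_pv", which is why several !is_hvm_* tests become
is_pv_* in the hunks below. A minimal standalone sketch of the predicate
model these conversions assume; the types here are simplified for
illustration and are not the actual Xen definitions:

/* Simplified model for illustration; not the real Xen headers. */
#include <assert.h>
#include <stdbool.h>

enum guest_type { guest_type_pv, guest_type_pvh, guest_type_hvm };

struct domain { enum guest_type guest_type; };

static bool is_pv_domain(const struct domain *d)
    { return d->guest_type == guest_type_pv; }
static bool is_pvh_domain(const struct domain *d)
    { return d->guest_type == guest_type_pvh; }
static bool is_hvm_domain(const struct domain *d)
    { return d->guest_type == guest_type_hvm; }

int main(void)
{
    struct domain pvh = { .guest_type = guest_type_pvh };

    /* A PVH domain is not HVM, but it is not PV either ... */
    assert(!is_hvm_domain(&pvh) && !is_pv_domain(&pvh));
    /* ... so "!is_hvm" gates are too weak for PV-only paths such as
     * the mapcache, and are tightened to "is_pv" in this patch. */
    return 0;
}
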
 xen/arch/x86/domain.c      |    7 +++++++
 xen/arch/x86/domain_page.c |   10 +++++-----
 xen/arch/x86/domctl.c      |    5 +++++
 xen/arch/x86/mm.c          |    2 +-
 xen/arch/x86/physdev.c     |   13 +++++++++++++
 xen/common/grant_table.c   |    4 ++--
 xen/include/public/xen.h   |    5 ++++-
 7 files changed, 37 insertions(+), 9 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 4a24b8b..0b73d4c 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -341,6 +341,13 @@ int switch_compat(struct domain *d)
 
     if ( d == NULL )
         return -EINVAL;
+
+    if ( is_pvh_domain(d) )
+    {
+        dprintk(XENLOG_G_ERR,
+                "Xen does not currently support 32bit PVH guests\n");
+        return -EINVAL;
+    }
     if ( !may_switch_mode(d) )
         return -EACCES;
     if ( is_pv_32on64_domain(d) )
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 7421e03..49b3764 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -34,7 +34,7 @@ static inline struct vcpu *mapcache_current_vcpu(void)
      * then it means we are running on the idle domain's page table and must
      * therefore use its mapcache.
      */
-    if ( unlikely(pagetable_is_null(v->arch.guest_table)) && !is_hvm_vcpu(v) )
+    if ( unlikely(pagetable_is_null(v->arch.guest_table)) && is_pv_vcpu(v) )
     {
         /* If we really are idling, perform lazy context switch now. */
         if ( (v = idle_vcpu[smp_processor_id()]) == current )
@@ -71,7 +71,7 @@ void *map_domain_page(unsigned long mfn)
 #endif
 
     v = mapcache_current_vcpu();
-    if ( !v || is_hvm_vcpu(v) )
+    if ( !v || !is_pv_vcpu(v) )
         return mfn_to_virt(mfn);
 
     dcache = &v->domain->arch.pv_domain.mapcache;
@@ -175,7 +175,7 @@ void unmap_domain_page(const void *ptr)
     ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
 
     v = mapcache_current_vcpu();
-    ASSERT(v && !is_hvm_vcpu(v));
+    ASSERT(v && is_pv_vcpu(v));
 
     dcache = &v->domain->arch.pv_domain.mapcache;
     ASSERT(dcache->inuse);
@@ -242,7 +242,7 @@ int mapcache_domain_init(struct domain *d)
     struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
     unsigned int bitmap_pages;
 
-    if ( is_hvm_domain(d) || is_idle_domain(d) )
+    if ( !is_pv_domain(d) || is_idle_domain(d) )
         return 0;
 
 #ifdef NDEBUG
@@ -273,7 +273,7 @@ int mapcache_vcpu_init(struct vcpu *v)
     unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
     unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
 
-    if ( is_hvm_vcpu(v) || !dcache->inuse )
+    if ( !is_pv_vcpu(v) || !dcache->inuse )
         return 0;
 
     if ( ents > dcache->entries )
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index dc161c7..8f63a0b 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1308,6 +1308,11 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
             c.nat->gs_base_kernel = hvm_get_shadow_gs_base(v);
         }
     }
+    else if ( is_pvh_vcpu(v) )
+    {
+        /* pvh fixme: punt it to phase II */
+        dprintk(XENLOG_ERR, "PVH: fixme: arch_get_info_guest()\n");
+    }
     else
     {
         c(ldt_base = v->arch.pv_vcpu.ldt_base);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 06fa3fd..d872a15 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2656,7 +2656,7 @@ static struct domain *get_pg_owner(domid_t domid)
         goto out;
     }
 
-    if ( unlikely(paging_mode_translate(curr)) )
+    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
     {
         MEM_LOG("Cannot mix foreign mappings with translated domains");
         goto out;
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 876ac9d..78d9492 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -475,6 +475,13 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
     case PHYSDEVOP_set_iopl: {
         struct physdev_set_iopl set_iopl;
+
+        if ( is_pvh_vcpu(current) )
+        {
+            ret = -EINVAL;
+            break;
+        }
+
         ret = -EFAULT;
         if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
             break;
@@ -488,6 +495,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
     case PHYSDEVOP_set_iobitmap: {
         struct physdev_set_iobitmap set_iobitmap;
+
+        if ( is_pvh_vcpu(current) )
+        {
+            ret = -EINVAL;
+            break;
+        }
         ret = -EFAULT;
         if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
             break;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 3f97328..a2073d2 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -721,7 +721,7 @@ __gnttab_map_grant_ref(
 
     double_gt_lock(lgt, rgt);
 
-    if ( !is_hvm_domain(ld) && need_iommu(ld) )
+    if ( is_pv_domain(ld) && need_iommu(ld) )
     {
         unsigned int wrc, rdc;
         int err = 0;
@@ -932,7 +932,7 @@ __gnttab_unmap_common(
             act->pin -= GNTPIN_hstw_inc;
     }
 
-    if ( !is_hvm_domain(ld) && need_iommu(ld) )
+    if ( is_pv_domain(ld) && need_iommu(ld) )
     {
         unsigned int wrc, rdc;
         int err = 0;
diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
index e9431e2..02b0585 100644
--- a/xen/include/public/xen.h
+++ b/xen/include/public/xen.h
@@ -692,6 +692,8 @@ typedef struct shared_info shared_info_t;
 *      c. list of allocated page frames [mfn_list, nr_pages]
 *         (unless relocated due to XEN_ELFNOTE_INIT_P2M)
 *      d. start_info_t structure        [register ESI (x86)]
+*      d1. struct shared_info_t         [shared_info]
+*          (above if auto translated guest)
 *      e. bootstrap page tables         [pt_base, CR3 (x86)]
 *      f. bootstrap stack               [register ESP (x86)]
 * 4. Bootstrap elements are packed together, but each is 4kB-aligned.
@@ -711,7 +713,8 @@ struct start_info {
     /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME.    */
     char magic[32];             /* "xen-<version>-<platform>".            */
     unsigned long nr_pages;     /* Total pages allocated to this domain.  */
-    unsigned long shared_info;  /* MACHINE address of shared info struct. */
+    unsigned long shared_info;  /* MACHINE address of shared info struct. *
+                                 * Physical address if PVH                */
     uint32_t flags;             /* SIF_xxx flags.                         */
     xen_pfn_t store_mfn;        /* MACHINE page number of shared page.    */
     uint32_t store_evtchn;      /* Event channel for store communication. */
-- 
1.7.2.3

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel