[Xen-devel] [PATCH 16/20] PVH xen: Miscellaneous changes
This patch contains miscellaneous changes: reject 32-bit PVH guests in
switch_compat, disallow the iopl/iobitmap physdev ops for PVH vcpus, allow
PVH domains to make foreign mappings even though they run translated, quiet
the verbose memory_map add/remove printks for PVH, and switch several is_hvm
checks to the corresponding is_pv/is_pvh checks.
Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
---
xen/arch/x86/domain.c | 7 +++++++
xen/arch/x86/domain_page.c | 10 +++++-----
xen/arch/x86/domctl.c | 19 +++++++++++++------
xen/arch/x86/mm.c | 2 +-
xen/arch/x86/physdev.c | 13 +++++++++++++
xen/common/grant_table.c | 4 ++--
6 files changed, 41 insertions(+), 14 deletions(-)
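
Note for reviewers (not part of the patch): the is_hvm -> is_pv/is_pvh
conversions below rely on the guest-type predicates introduced earlier in
this series. A minimal, illustrative sketch of what those predicates look
like, assuming a per-domain guest type field (the names here are assumptions;
the exact definitions are in the earlier patches):

    /* Illustrative sketch only -- field and enum names are assumed. */
    enum guest_type { guest_type_pv, guest_type_pvh, guest_type_hvm };

    #define is_pv_domain(d)    ((d)->guest_type == guest_type_pv)
    #define is_pvh_domain(d)   ((d)->guest_type == guest_type_pvh)
    #define is_hvm_domain(d)   ((d)->guest_type == guest_type_hvm)

    #define is_pv_vcpu(v)      (is_pv_domain((v)->domain))
    #define is_pvh_vcpu(v)     (is_pvh_domain((v)->domain))
    #define is_hvm_vcpu(v)     (is_hvm_domain((v)->domain))

With three guest types, !is_hvm_*() no longer implies PV, so the mapcache and
grant-table paths switch to the positive is_pv_*() test, and PVH is handled
explicitly where it differs (32-bit support, iopl/iobitmap, foreign mappings).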
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 4883fd1..21382eb 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -339,6 +339,13 @@ int switch_compat(struct domain *d)
if ( d == NULL )
return -EINVAL;
+
+ if ( is_pvh_domain(d) )
+ {
+ dprintk(XENLOG_G_ERR,
+ "Xen does not currently support 32bit PVH guests\n");
+ return -EINVAL;
+ }
if ( !may_switch_mode(d) )
return -EACCES;
if ( is_pv_32on64_domain(d) )
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index efda6af..7685416 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -34,7 +34,7 @@ static inline struct vcpu *mapcache_current_vcpu(void)
* then it means we are running on the idle domain's page table and must
* therefore use its mapcache.
*/
- if ( unlikely(pagetable_is_null(v->arch.guest_table)) && !is_hvm_vcpu(v) )
+ if ( unlikely(pagetable_is_null(v->arch.guest_table)) && is_pv_vcpu(v) )
{
/* If we really are idling, perform lazy context switch now. */
if ( (v = idle_vcpu[smp_processor_id()]) == current )
@@ -71,7 +71,7 @@ void *map_domain_page(unsigned long mfn)
#endif
v = mapcache_current_vcpu();
- if ( !v || is_hvm_vcpu(v) )
+ if ( !v || !is_pv_vcpu(v) )
return mfn_to_virt(mfn);
dcache = &v->domain->arch.pv_domain.mapcache;
@@ -175,7 +175,7 @@ void unmap_domain_page(const void *ptr)
ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
v = mapcache_current_vcpu();
- ASSERT(v && !is_hvm_vcpu(v));
+ ASSERT(v && is_pv_vcpu(v));
dcache = &v->domain->arch.pv_domain.mapcache;
ASSERT(dcache->inuse);
@@ -242,7 +242,7 @@ int mapcache_domain_init(struct domain *d)
struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
unsigned int bitmap_pages;
- if ( is_hvm_domain(d) || is_idle_domain(d) )
+ if ( !is_pv_domain(d) || is_idle_domain(d) )
return 0;
#ifdef NDEBUG
@@ -273,7 +273,7 @@ int mapcache_vcpu_init(struct vcpu *v)
unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
- if ( is_hvm_vcpu(v) || !dcache->inuse )
+ if ( !is_pv_vcpu(v) || !dcache->inuse )
return 0;
if ( ents > dcache->entries )
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index c5a6f6f..3604816 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -64,9 +64,10 @@ long domctl_memory_mapping(struct domain *d, unsigned long gfn,
if ( add_map )
{
- printk(XENLOG_G_INFO
- "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
- d->domain_id, gfn, mfn, nr_mfns);
+ if ( !is_pvh_domain(d) ) /* PVH maps lots and lots */
+ printk(XENLOG_G_INFO
+ "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
+ d->domain_id, gfn, mfn, nr_mfns);
ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
if ( !ret && paging_mode_translate(d) )
@@ -91,9 +92,10 @@ long domctl_memory_mapping(struct domain *d, unsigned long gfn,
}
else
{
- printk(XENLOG_G_INFO
- "memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
- d->domain_id, gfn, mfn, nr_mfns);
+ if ( !is_pvh_domain(d) ) /* PVH unmaps lots and lots */
+ printk(XENLOG_G_INFO
+ "memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
+ d->domain_id, gfn, mfn, nr_mfns);
if ( paging_mode_translate(d) )
for ( i = 0; i < nr_mfns; i++ )
@@ -1304,6 +1306,11 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
c.nat->gs_base_kernel = hvm_get_shadow_gs_base(v);
}
}
+ else if ( is_pvh_vcpu(v) )
+ {
+ /* pvh fixme: punt it to phase II */
+ printk(XENLOG_WARNING "PVH: fixme: arch_get_info_guest()\n");
+ }
else
{
c(ldt_base = v->arch.pv_vcpu.ldt_base);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index ef37053..88c6f0c 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2805,7 +2805,7 @@ static struct domain *get_pg_owner(domid_t domid)
goto out;
}
- if ( unlikely(paging_mode_translate(curr)) )
+ if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
{
MEM_LOG("Cannot mix foreign mappings with translated domains");
goto out;
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index eb8a407..520824b 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -475,6 +475,13 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
case PHYSDEVOP_set_iopl: {
struct physdev_set_iopl set_iopl;
+
+ if ( is_pvh_vcpu(current) )
+ {
+ ret = -EINVAL;
+ break;
+ }
+
ret = -EFAULT;
if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
break;
@@ -488,6 +495,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
case PHYSDEVOP_set_iobitmap: {
struct physdev_set_iobitmap set_iobitmap;
+
+ if ( is_pvh_vcpu(current) )
+ {
+ ret = -EINVAL;
+ break;
+ }
ret = -EFAULT;
if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
break;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 3f97328..a2073d2 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -721,7 +721,7 @@ __gnttab_map_grant_ref(
double_gt_lock(lgt, rgt);
- if ( !is_hvm_domain(ld) && need_iommu(ld) )
+ if ( is_pv_domain(ld) && need_iommu(ld) )
{
unsigned int wrc, rdc;
int err = 0;
@@ -932,7 +932,7 @@ __gnttab_unmap_common(
act->pin -= GNTPIN_hstw_inc;
}
- if ( !is_hvm_domain(ld) && need_iommu(ld) )
+ if ( is_pv_domain(ld) && need_iommu(ld) )
{
unsigned int wrc, rdc;
int err = 0;
--
1.7.2.3