
Re: [Xen-devel] [PATCH v3 3/4] x86: remove has_hvm_container_{domain/vcpu}



On Fri, Mar 03, 2017 at 12:25:07PM +0000, Roger Pau Monne wrote:
> It is now useless since PVHv1 is removed and PVHv2 is an HVM domain from Xen's
> point of view.
> 
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Acked-by: Tim Deegan <tim@xxxxxxx>
> Reviewed-by: Kevin Tian <kevin.tian@xxxxxxxxx>
> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> Acked-by: George Dunlap <george.dunlap@xxxxxxxxxx>
> ---
> Cc: Christoph Egger <chegger@xxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
> Cc: Elena Ufimtseva <elena.ufimtseva@xxxxxxxxxx>

Hmm, I don't see the code I should ACK.
But here you go!

Acked-by: Elena Ufimtseva <elena.ufimtseva@xxxxxxxxxx>
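
For the archives: the reason this rename is safe falls out of the
xen/include/xen/sched.h hunk near the bottom of the patch. With PVHv1
gone, guest_type can only be guest_type_pv or guest_type_hvm, so the
test "!= guest_type_pv" is the same predicate as "== guest_type_hvm".
A minimal, self-contained sketch of that argument in plain C (names
borrowed from the patch, simplified for illustration; this is not Xen
code):

    #include <assert.h>

    /* Only two guest types remain once PVHv1 is removed. */
    enum guest_type { guest_type_pv, guest_type_hvm };

    /* "Has an HVM container" used to mean "anything not plain PV". */
    static int has_hvm_container(enum guest_type t)
    {
        return t != guest_type_pv;
    }

    static int is_hvm(enum guest_type t)
    {
        return t == guest_type_hvm;
    }

    int main(void)
    {
        int t;

        /* The two predicates agree on every remaining guest type. */
        for ( t = guest_type_pv; t <= guest_type_hvm; t++ )
            assert(has_hvm_container(t) == is_hvm(t));
        return 0;
    }
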

> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Tim Deegan <tim@xxxxxxx>
> Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
> ---
>  xen/arch/x86/cpu/mcheck/vmce.c      |  6 +++---
>  xen/arch/x86/cpu/vpmu.c             |  4 ++--
>  xen/arch/x86/cpu/vpmu_amd.c         | 12 ++++++------
>  xen/arch/x86/cpu/vpmu_intel.c       | 31 +++++++++++++++----------------
>  xen/arch/x86/cpuid.c                |  6 +++---
>  xen/arch/x86/debug.c                |  2 +-
>  xen/arch/x86/domain.c               | 28 ++++++++++++++--------------
>  xen/arch/x86/domain_build.c         |  5 ++---
>  xen/arch/x86/domctl.c               |  2 +-
>  xen/arch/x86/hvm/dm.c               |  2 +-
>  xen/arch/x86/hvm/hvm.c              |  6 +++---
>  xen/arch/x86/hvm/irq.c              |  2 +-
>  xen/arch/x86/hvm/mtrr.c             |  2 +-
>  xen/arch/x86/hvm/vmsi.c             |  3 +--
>  xen/arch/x86/hvm/vmx/vmcs.c         |  4 ++--
>  xen/arch/x86/hvm/vmx/vmx.c          |  4 ++--
>  xen/arch/x86/mm.c                   |  4 ++--
>  xen/arch/x86/mm/paging.c            |  2 +-
>  xen/arch/x86/mm/shadow/common.c     |  9 ++++-----
>  xen/arch/x86/setup.c                |  2 +-
>  xen/arch/x86/time.c                 | 11 +++++------
>  xen/arch/x86/traps.c                |  4 ++--
>  xen/arch/x86/x86_64/traps.c         |  4 ++--
>  xen/drivers/passthrough/x86/iommu.c |  2 +-
>  xen/include/asm-x86/domain.h        |  2 +-
>  xen/include/asm-x86/event.h         |  2 +-
>  xen/include/asm-x86/guest_access.h  | 12 ++++++------
>  xen/include/asm-x86/hvm/hvm.h       |  2 +-
>  xen/include/xen/sched.h             |  2 --
>  xen/include/xen/tmem_xen.h          |  5 ++---
>  30 files changed, 87 insertions(+), 95 deletions(-)
> 
> diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
> index 8b727b4..6fb7833 100644
> --- a/xen/arch/x86/cpu/mcheck/vmce.c
> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
> @@ -82,7 +82,7 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt)
>      {
>          dprintk(XENLOG_G_ERR, "%s restore: unsupported MCA capabilities"
>                  " %#" PRIx64 " for %pv (supported: %#Lx)\n",
> -                has_hvm_container_vcpu(v) ? "HVM" : "PV", ctxt->caps,
> +                is_hvm_vcpu(v) ? "HVM" : "PV", ctxt->caps,
>                  v, guest_mcg_cap & ~MCG_CAP_COUNT);
>          return -EPERM;
>      }
> @@ -364,7 +364,7 @@ int inject_vmce(struct domain *d, int vcpu)
>          if ( !v->is_initialised )
>              continue;
>  
> -        if ( (has_hvm_container_domain(d) ||
> +        if ( (is_hvm_domain(d) ||
>                guest_has_trap_callback(d, v->vcpu_id, TRAP_machine_check)) &&
>               !test_and_set_bool(v->mce_pending) )
>          {
> @@ -444,7 +444,7 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
>      if ( !mfn_valid(mfn) )
>          return -EINVAL;
>  
> -    if ( !has_hvm_container_domain(d) || !paging_mode_hap(d) )
> +    if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
>          return -EOPNOTSUPP;
>  
>      rc = -1;
> diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
> index a1e9f00..03401fd 100644
> --- a/xen/arch/x86/cpu/vpmu.c
> +++ b/xen/arch/x86/cpu/vpmu.c
> @@ -237,7 +237,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
>          vpmu->arch_vpmu_ops->arch_vpmu_save(sampling, 1);
>          vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
>  
> -        if ( has_hvm_container_vcpu(sampled) )
> +        if ( is_hvm_vcpu(sampled) )
>              *flags = 0;
>          else
>              *flags = PMU_SAMPLE_PV;
> @@ -288,7 +288,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
>              r->sp = cur_regs->rsp;
>              r->flags = cur_regs->rflags;
>  
> -            if ( !has_hvm_container_vcpu(sampled) )
> +            if ( !is_hvm_vcpu(sampled) )
>              {
>                  r->ss = cur_regs->ss;
>                  r->cs = cur_regs->cs;
> diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
> index e0acbf4..b3c3697 100644
> --- a/xen/arch/x86/cpu/vpmu_amd.c
> +++ b/xen/arch/x86/cpu/vpmu_amd.c
> @@ -305,8 +305,8 @@ static int amd_vpmu_save(struct vcpu *v,  bool_t to_guest)
>  
>      context_save(v);
>  
> -    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) &&
> -         has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
> +    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
> +         is_msr_bitmap_on(vpmu) )
>          amd_vpmu_unset_msr_bitmap(v);
>  
>      if ( to_guest )
> @@ -367,7 +367,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>          return -EINVAL;
>  
>      /* For all counters, enable guest only mode for HVM guest */
> -    if ( has_hvm_container_vcpu(v) && (type == MSR_TYPE_CTRL) &&
> +    if ( is_hvm_vcpu(v) && (type == MSR_TYPE_CTRL) &&
>           !is_guest_mode(msr_content) )
>      {
>          set_guest_mode(msr_content);
> @@ -381,7 +381,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>              return 0;
>          vpmu_set(vpmu, VPMU_RUNNING);
>  
> -        if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
> +        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
>               amd_vpmu_set_msr_bitmap(v);
>      }
>  
> @@ -390,7 +390,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>          (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
>      {
>          vpmu_reset(vpmu, VPMU_RUNNING);
> -        if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
> +        if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
>               amd_vpmu_unset_msr_bitmap(v);
>          release_pmu_ownership(PMU_OWNER_HVM);
>      }
> @@ -433,7 +433,7 @@ static void amd_vpmu_destroy(struct vcpu *v)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
>  
> -    if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
> +    if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
>          amd_vpmu_unset_msr_bitmap(v);
>  
>      xfree(vpmu->context);
> diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
> index 626bed5..0d66ecb 100644
> --- a/xen/arch/x86/cpu/vpmu_intel.c
> +++ b/xen/arch/x86/cpu/vpmu_intel.c
> @@ -306,7 +306,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
>      for ( i = 0; i < arch_pmc_cnt; i++ )
>          rdmsrl(MSR_IA32_PERFCTR0 + i, xen_pmu_cntr_pair[i].counter);
>  
> -    if ( !has_hvm_container_vcpu(v) )
> +    if ( !is_hvm_vcpu(v) )
>          rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
>  }
>  
> @@ -314,7 +314,7 @@ static int core2_vpmu_save(struct vcpu *v, bool_t to_guest)
>  {
>      struct vpmu_struct *vpmu = vcpu_vpmu(v);
>  
> -    if ( !has_hvm_container_vcpu(v) )
> +    if ( !is_hvm_vcpu(v) )
>          wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
>  
>      if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
> @@ -323,8 +323,8 @@ static int core2_vpmu_save(struct vcpu *v, bool_t to_guest)
>      __core2_vpmu_save(v);
>  
>      /* Unset PMU MSR bitmap to trap lazy load. */
> -    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) &&
> -         has_hvm_container_vcpu(v) && cpu_has_vmx_msr_bitmap )
> +    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
> +         cpu_has_vmx_msr_bitmap )
>          core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
>  
>      if ( to_guest )
> @@ -362,7 +362,7 @@ static inline void __core2_vpmu_load(struct vcpu *v)
>      if ( vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
>          wrmsrl(MSR_IA32_DS_AREA, core2_vpmu_cxt->ds_area);
>  
> -    if ( !has_hvm_container_vcpu(v) )
> +    if ( !is_hvm_vcpu(v) )
>      {
>          wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, core2_vpmu_cxt->global_ovf_ctrl);
>          core2_vpmu_cxt->global_ovf_ctrl = 0;
> @@ -413,7 +413,7 @@ static int core2_vpmu_verify(struct vcpu *v)
>      }
>  
>      if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) &&
> -         !(has_hvm_container_vcpu(v)
> +         !(is_hvm_vcpu(v)
>             ? is_canonical_address(core2_vpmu_cxt->ds_area)
>             : __addr_ok(core2_vpmu_cxt->ds_area)) )
>          return -EINVAL;
> @@ -474,7 +474,7 @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
>      if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
>          return 0;
>  
> -    if ( has_hvm_container_vcpu(v) )
> +    if ( is_hvm_vcpu(v) )
>      {
>          wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
>          if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
> @@ -539,7 +539,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
>      {
>          __core2_vpmu_load(current);
>          vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
> -        if ( has_hvm_container_vcpu(current) &&
> +        if ( is_hvm_vcpu(current) &&
>               cpu_has_vmx_msr_bitmap )
>              core2_vpmu_set_msr_bitmap(current->arch.hvm_vmx.msr_bitmap);
>      }
> @@ -612,9 +612,8 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>              return -EINVAL;
>          if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
>          {
> -            if ( !(has_hvm_container_vcpu(v)
> -                   ? is_canonical_address(msr_content)
> -                   : __addr_ok(msr_content)) )
> +            if ( !(is_hvm_vcpu(v) ? is_canonical_address(msr_content)
> +                                  : __addr_ok(msr_content)) )
>              {
>                  gdprintk(XENLOG_WARNING,
>                           "Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
> @@ -635,7 +634,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>          if ( msr_content & fixed_ctrl_mask )
>              return -EINVAL;
>  
> -        if ( has_hvm_container_vcpu(v) )
> +        if ( is_hvm_vcpu(v) )
>              vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
>                                 &core2_vpmu_cxt->global_ctrl);
>          else
> @@ -704,7 +703,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>              if ( blocked )
>                  return -EINVAL;
>  
> -            if ( has_hvm_container_vcpu(v) )
> +            if ( is_hvm_vcpu(v) )
>                  vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
>                                     &core2_vpmu_cxt->global_ctrl);
>              else
> @@ -723,7 +722,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>          wrmsrl(msr, msr_content);
>      else
>      {
> -        if ( has_hvm_container_vcpu(v) )
> +        if ( is_hvm_vcpu(v) )
>              vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
>          else
>              wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
> @@ -757,7 +756,7 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
>              *msr_content = core2_vpmu_cxt->global_status;
>              break;
>          case MSR_CORE_PERF_GLOBAL_CTRL:
> -            if ( has_hvm_container_vcpu(v) )
> +            if ( is_hvm_vcpu(v) )
>                  vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
>              else
>                  rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
> @@ -858,7 +857,7 @@ static void core2_vpmu_destroy(struct vcpu *v)
>      vpmu->context = NULL;
>      xfree(vpmu->priv_context);
>      vpmu->priv_context = NULL;
> -    if ( has_hvm_container_vcpu(v) && cpu_has_vmx_msr_bitmap )
> +    if ( is_hvm_vcpu(v) && cpu_has_vmx_msr_bitmap )
>          core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
>      release_pmu_ownership(PMU_OWNER_HVM);
>      vpmu_clear(vpmu);
> diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
> index 0dd35dc..f9bb98d 100644
> --- a/xen/arch/x86/cpuid.c
> +++ b/xen/arch/x86/cpuid.c
> @@ -762,7 +762,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
>                  res->c |= cpufeat_mask(X86_FEATURE_DSCPL);
>          }
>  
> -        if ( has_hvm_container_domain(d) )
> +        if ( is_hvm_domain(d) )
>          {
>              /* OSXSAVE clear in policy.  Fast-forward CR4 back in. */
>              if ( v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE )
> @@ -918,11 +918,11 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
>      case 0x80000001:
>          /* SYSCALL is hidden outside of long mode on Intel. */
>          if ( p->x86_vendor == X86_VENDOR_INTEL &&
> -             has_hvm_container_domain(d) && !hvm_long_mode_enabled(v) )
> +             is_hvm_domain(d) && !hvm_long_mode_enabled(v) )
>              res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
>  
>      common_leaf1_adjustments:
> -        if ( has_hvm_container_domain(d) )
> +        if ( is_hvm_domain(d) )
>          {
>              /* Fast-forward MSR_APIC_BASE.EN. */
>              if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
> diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
> index 499574e..2070077 100644
> --- a/xen/arch/x86/debug.c
> +++ b/xen/arch/x86/debug.c
> @@ -168,7 +168,7 @@ unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
>  
>          pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);
>  
> -        mfn = (has_hvm_container_domain(dp)
> +        mfn = (is_hvm_domain(dp)
>                 ? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn)
>                 : dbg_pv_va2mfn(addr, dp, pgd3));
>          if ( mfn_eq(mfn, INVALID_MFN) )
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index 84a289d..90e2b1f 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -188,7 +188,7 @@ void dump_pageframe_info(struct domain *d)
>          spin_unlock(&d->page_alloc_lock);
>      }
>  
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>          p2m_pod_dump_data(d);
>  
>      spin_lock(&d->page_alloc_lock);
> @@ -412,7 +412,7 @@ int vcpu_initialise(struct vcpu *v)
>  
>      spin_lock_init(&v->arch.vpmu.vpmu_lock);
>  
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>      {
>          rc = hvm_vcpu_initialise(v);
>          goto done;
> @@ -488,7 +488,7 @@ void vcpu_destroy(struct vcpu *v)
>      if ( !is_idle_domain(v->domain) )
>          vpmu_destroy(v);
>  
> -    if ( has_hvm_container_vcpu(v) )
> +    if ( is_hvm_vcpu(v) )
>          hvm_vcpu_destroy(v);
>      else
>          xfree(v->arch.pv_vcpu.trap_ctxt);
> @@ -575,7 +575,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
>          d->arch.emulation_flags = emflags;
>      }
>  
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>      {
>          d->arch.hvm_domain.hap_enabled =
>              hvm_funcs.hap_supported && (domcr_flags & DOMCRF_hap);
> @@ -649,7 +649,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
>      if ( (rc = psr_domain_init(d)) != 0 )
>          goto fail;
>  
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>      {
>          if ( (rc = hvm_domain_initialise(d)) != 0 )
>              goto fail;
> @@ -708,7 +708,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
>  
>  void arch_domain_destroy(struct domain *d)
>  {
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>          hvm_domain_destroy(d);
>  
>      xfree(d->arch.e820);
> @@ -760,8 +760,8 @@ int arch_domain_soft_reset(struct domain *d)
>      p2m_type_t p2mt;
>      unsigned int i;
>  
> -    /* Soft reset is supported for HVM/PVH domains only. */
> -    if ( !has_hvm_container_domain(d) )
> +    /* Soft reset is supported for HVM domains only. */
> +    if ( !is_hvm_domain(d) )
>          return -EINVAL;
>  
>      hvm_domain_soft_reset(d);
> @@ -951,7 +951,7 @@ int arch_set_info_guest(
>      v->fpu_initialised = !!(flags & VGCF_I387_VALID);
>  
>      v->arch.flags &= ~TF_kernel_mode;
> -    if ( (flags & VGCF_in_kernel) || has_hvm_container_domain(d)/*???*/ )
> +    if ( (flags & VGCF_in_kernel) || is_hvm_domain(d)/*???*/ )
>          v->arch.flags |= TF_kernel_mode;
>  
>      v->arch.vgc_flags = flags;
> @@ -996,7 +996,7 @@ int arch_set_info_guest(
>          }
>      }
>  
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>      {
>          for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
>              v->arch.debugreg[i] = c(debugreg[i]);
> @@ -2021,7 +2021,7 @@ static void __context_switch(void)
>              if ( xcr0 != get_xcr0() && !set_xcr0(xcr0) )
>                  BUG();
>  
> -            if ( cpu_has_xsaves && has_hvm_container_vcpu(n) )
> +            if ( cpu_has_xsaves && is_hvm_vcpu(n) )
>                  set_msr_xss(n->arch.hvm_vcpu.msr_xss);
>          }
>          vcpu_restore_fpu_eager(n);
> @@ -2111,7 +2111,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
>  
>          if ( is_pv_domain(nextd) &&
>               (is_idle_domain(prevd) ||
> -              has_hvm_container_domain(prevd) ||
> +              is_hvm_domain(prevd) ||
>                is_pv_32bit_domain(prevd) != is_pv_32bit_domain(nextd)) )
>          {
>              uint64_t efer = read_efer();
> @@ -2413,7 +2413,7 @@ int domain_relinquish_resources(struct domain *d)
>  
>      pit_deinit(d);
>  
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>          hvm_domain_relinquish_resources(d);
>  
>      return 0;
> @@ -2456,7 +2456,7 @@ void vcpu_mark_events_pending(struct vcpu *v)
>      if ( already_pending )
>          return;
>  
> -    if ( has_hvm_container_vcpu(v) )
> +    if ( is_hvm_vcpu(v) )
>          hvm_assert_evtchn_irq(v);
>      else
>          vcpu_kick(v);
> diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
> index 86055d1..a0d9ee0 100644
> --- a/xen/arch/x86/domain_build.c
> +++ b/xen/arch/x86/domain_build.c
> @@ -360,9 +360,8 @@ static unsigned long __init compute_dom0_nr_pages(
>              avail -= max_pdx >> s;
>      }
>  
> -    need_paging = has_hvm_container_domain(d)
> -                  ? !iommu_hap_pt_share || !paging_mode_hap(d)
> -                  : opt_dom0_shadow;
> +    need_paging = is_hvm_domain(d) ? !iommu_hap_pt_share || !paging_mode_hap(d)
> +                                   : opt_dom0_shadow;
>      for ( ; ; need_paging = 0 )
>      {
>          nr_pages = dom0_nrpages;
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index 944af39..dfe3d10 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -1538,7 +1538,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
>      for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
>          c(debugreg[i] = v->arch.debugreg[i]);
>  
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>      {
>          struct segment_register sreg;
>  
> diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
> index 2122c45..333c884 100644
> --- a/xen/arch/x86/hvm/dm.c
> +++ b/xen/arch/x86/hvm/dm.c
> @@ -283,7 +283,7 @@ static int dm_op(domid_t domid,
>      if ( rc )
>          return rc;
>  
> -    if ( !has_hvm_container_domain(d) )
> +    if ( !is_hvm_domain(d) )
>          goto out;
>  
>      rc = xsm_dm_op(XSM_DM_PRIV, d);
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index c1e2cd4..6fe6db0 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3052,7 +3052,7 @@ static enum hvm_copy_result __hvm_copy(
>      char *p;
>      int count, todo = size;
>  
> -    ASSERT(has_hvm_container_vcpu(v));
> +    ASSERT(is_hvm_vcpu(v));
>  
>      /*
>       * XXX Disable for 4.1.0: PV-on-HVM drivers will do grant-table ops
> @@ -3993,7 +3993,7 @@ static int hvmop_set_param(
>          return -ESRCH;
>  
>      rc = -EINVAL;
> -    if ( !has_hvm_container_domain(d) )
> +    if ( !is_hvm_domain(d) )
>          goto out;
>  
>      rc = hvm_allow_set_param(d, &a);
> @@ -4248,7 +4248,7 @@ static int hvmop_get_param(
>          return -ESRCH;
>  
>      rc = -EINVAL;
> -    if ( !has_hvm_container_domain(d) )
> +    if ( !is_hvm_domain(d) )
>          goto out;
>  
>      rc = hvm_allow_get_param(d, &a);
> diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
> index 760544b..a774ed7 100644
> --- a/xen/arch/x86/hvm/irq.c
> +++ b/xen/arch/x86/hvm/irq.c
> @@ -480,7 +480,7 @@ int hvm_local_events_need_delivery(struct vcpu *v)
>  
>  void arch_evtchn_inject(struct vcpu *v)
>  {
> -    if ( has_hvm_container_vcpu(v) )
> +    if ( is_hvm_vcpu(v) )
>          hvm_assert_evtchn_irq(v);
>  }
>  
> diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
> index c5c27cb..b721c63 100644
> --- a/xen/arch/x86/hvm/mtrr.c
> +++ b/xen/arch/x86/hvm/mtrr.c
> @@ -540,7 +540,7 @@ int hvm_get_mem_pinned_cacheattr(struct domain *d, gfn_t gfn,
>      uint64_t mask = ~(uint64_t)0 << order;
>      int rc = -ENXIO;
>  
> -    ASSERT(has_hvm_container_domain(d));
> +    ASSERT(is_hvm_domain(d));
>  
>      rcu_read_lock(&pinned_cacheattr_rcu_lock);
>      list_for_each_entry_rcu ( range,
> diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
> index 25f5756..a36692c 100644
> --- a/xen/arch/x86/hvm/vmsi.c
> +++ b/xen/arch/x86/hvm/vmsi.c
> @@ -560,8 +560,7 @@ void msixtbl_init(struct domain *d)
>  {
>      struct hvm_io_handler *handler;
>  
> -    if ( !has_hvm_container_domain(d) || !has_vlapic(d) ||
> -         msixtbl_initialised(d) )
> +    if ( !is_hvm_domain(d) || !has_vlapic(d) || msixtbl_initialised(d) )
>          return;
>  
>      INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index 9791756..934674c 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -768,7 +768,7 @@ void vmx_vmcs_exit(struct vcpu *v)
>      {
>          /* Don't confuse vmx_do_resume (for @v or @current!) */
>          vmx_clear_vmcs(v);
> -        if ( has_hvm_container_vcpu(current) )
> +        if ( is_hvm_vcpu(current) )
>              vmx_load_vmcs(current);
>  
>          spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
> @@ -1927,7 +1927,7 @@ static void vmcs_dump(unsigned char ch)
>  
>      for_each_domain ( d )
>      {
> -        if ( !has_hvm_container_domain(d) )
> +        if ( !is_hvm_domain(d) )
>              continue;
>          printk("\n>>> Domain %d <<<\n", d->domain_id);
>          for_each_vcpu ( d, v )
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 59df948..ee08de0 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -242,7 +242,7 @@ void vmx_pi_hooks_assign(struct domain *d)
>  {
>      struct vcpu *v;
>  
> -    if ( !iommu_intpost || !has_hvm_container_domain(d) )
> +    if ( !iommu_intpost || !is_hvm_domain(d) )
>          return;
>  
>      ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);
> @@ -279,7 +279,7 @@ void vmx_pi_hooks_assign(struct domain *d)
>  /* This function is called when pcidevs_lock is held */
>  void vmx_pi_hooks_deassign(struct domain *d)
>  {
> -    if ( !iommu_intpost || !has_hvm_container_domain(d) )
> +    if ( !iommu_intpost || !is_hvm_domain(d) )
>          return;
>  
>      ASSERT(d->arch.hvm_domain.pi_ops.vcpu_block);
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 12dabcf..92d0f7f 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -438,7 +438,7 @@ int page_is_ram_type(unsigned long mfn, unsigned long mem_type)
>  
>  unsigned long domain_get_maximum_gpfn(struct domain *d)
>  {
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>          return p2m_get_hostp2m(d)->max_mapped_pfn;
>      /* NB. PV guests specify nr_pfns rather than max_pfn so we adjust here. */
>      return (arch_get_max_pfn(d) ?: 1) - 1;
> @@ -3184,7 +3184,7 @@ long do_mmuext_op(
>              break;
>          }
>  
> -        if ( has_hvm_container_domain(d) )
> +        if ( is_hvm_domain(d) )
>          {
>              switch ( op.cmd )
>              {
> diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
> index 6327631..75f5fc0 100644
> --- a/xen/arch/x86/mm/paging.c
> +++ b/xen/arch/x86/mm/paging.c
> @@ -420,7 +420,7 @@ static int paging_log_dirty_op(struct domain *d,
>           * Mark dirty all currently write-mapped pages on e.g. the
>           * final iteration of a save operation.
>           */
> -        if ( has_hvm_container_domain(d) &&
> +        if ( is_hvm_domain(d) &&
>               (sc->mode & XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL) )
>              hvm_mapped_guest_frames_mark_dirty(d);
>  
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index d078d78..d93f2ab 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -319,7 +319,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
>      struct vcpu *v = current;
>      unsigned long addr;
>  
> -    ASSERT(has_hvm_container_vcpu(v));
> +    ASSERT(is_hvm_vcpu(v));
>  
>      memset(sh_ctxt, 0, sizeof(*sh_ctxt));
>  
> @@ -363,7 +363,7 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
>      struct vcpu *v = current;
>      unsigned long addr, diff;
>  
> -    ASSERT(has_hvm_container_vcpu(v));
> +    ASSERT(is_hvm_vcpu(v));
>  
>      /*
>       * We don't refetch the segment bases, because we don't emulate
> @@ -1700,9 +1700,8 @@ void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
>  
>  #ifndef NDEBUG
>      /* We don't emulate user-mode writes to page tables. */
> -    if ( has_hvm_container_domain(d)
> -         ? hvm_get_cpl(v) == 3
> -         : !guest_kernel_mode(v, guest_cpu_user_regs()) )
> +    if ( is_hvm_domain(d) ? hvm_get_cpl(v) == 3
> +                          : !guest_kernel_mode(v, guest_cpu_user_regs()) )
>      {
>          gdprintk(XENLOG_DEBUG, "User-mode write to pagetable reached "
>                   "emulate_map_dest(). This should never happen!\n");
> diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
> index 4a807b8..24ba103 100644
> --- a/xen/arch/x86/setup.c
> +++ b/xen/arch/x86/setup.c
> @@ -1723,7 +1723,7 @@ void __hwdom_init setup_io_bitmap(struct domain *d)
>  {
>      int rc;
>  
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>      {
>          bitmap_fill(d->arch.hvm_domain.io_bitmap, 0x10000);
>          rc = rangeset_report_ranges(d->arch.ioport_caps, 0, 0x10000,
> diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
> index fe683bd..b4988bf 100644
> --- a/xen/arch/x86/time.c
> +++ b/xen/arch/x86/time.c
> @@ -940,7 +940,7 @@ static void __update_vcpu_system_time(struct vcpu *v, int force)
>      }
>      else
>      {
> -        if ( has_hvm_container_domain(d) && hvm_tsc_scaling_supported )
> +        if ( is_hvm_domain(d) && hvm_tsc_scaling_supported )
>          {
>              tsc_stamp            = hvm_scale_tsc(d, t->stamp.local_tsc);
>              _u.tsc_to_system_mul = d->arch.vtsc_to_ns.mul_frac;
> @@ -1951,7 +1951,7 @@ void tsc_get_info(struct domain *d, uint32_t *tsc_mode,
>                    uint64_t *elapsed_nsec, uint32_t *gtsc_khz,
>                    uint32_t *incarnation)
>  {
> -    bool_t enable_tsc_scaling = has_hvm_container_domain(d) &&
> +    bool_t enable_tsc_scaling = is_hvm_domain(d) &&
>                                  hvm_tsc_scaling_supported && !d->arch.vtsc;
>  
>      *incarnation = d->arch.incarnation;
> @@ -2031,7 +2031,7 @@ void tsc_set_info(struct domain *d,
>           *  PV: guest has not migrated yet (and thus arch.tsc_khz == cpu_khz)
>           */
>          if ( tsc_mode == TSC_MODE_DEFAULT && host_tsc_is_safe() &&
> -             (has_hvm_container_domain(d) ?
> +             (is_hvm_domain(d) ?
>                (d->arch.tsc_khz == cpu_khz ||
>                 hvm_get_tsc_scaling_ratio(d->arch.tsc_khz)) :
>                incarnation == 0) )
> @@ -2046,8 +2046,7 @@ void tsc_set_info(struct domain *d,
>      case TSC_MODE_PVRDTSCP:
>          d->arch.vtsc = !boot_cpu_has(X86_FEATURE_RDTSCP) ||
>                         !host_tsc_is_safe();
> -        enable_tsc_scaling = has_hvm_container_domain(d) &&
> -                             !d->arch.vtsc &&
> +        enable_tsc_scaling = is_hvm_domain(d) && !d->arch.vtsc &&
>                               hvm_get_tsc_scaling_ratio(gtsc_khz ?: cpu_khz);
>          d->arch.tsc_khz = (enable_tsc_scaling && gtsc_khz) ? gtsc_khz : cpu_khz;
>          set_time_scale(&d->arch.vtsc_to_ns, d->arch.tsc_khz * 1000 );
> @@ -2064,7 +2063,7 @@ void tsc_set_info(struct domain *d,
>          break;
>      }
>      d->arch.incarnation = incarnation + 1;
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>      {
>          if ( hvm_tsc_scaling_supported && !d->arch.vtsc )
>              d->arch.hvm_domain.tsc_scaling_ratio =
> diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
> index 13a609b..0bf5061 100644
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -799,7 +799,7 @@ void do_trap(struct cpu_user_regs *regs)
>      }
>  
>      if ( ((trapnr == TRAP_copro_error) || (trapnr == TRAP_simd_error)) &&
> -         system_state >= SYS_STATE_active && has_hvm_container_vcpu(curr) &&
> +         system_state >= SYS_STATE_active && is_hvm_vcpu(curr) &&
>           curr->arch.hvm_vcpu.fpu_exception_callback )
>      {
>          curr->arch.hvm_vcpu.fpu_exception_callback(
> @@ -976,7 +976,7 @@ void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf,
>          break;
>  
>      case 4: /* HVM hypervisor leaf. */
> -        if ( !has_hvm_container_domain(d) || subleaf != 0 )
> +        if ( !is_hvm_domain(d) || subleaf != 0 )
>              break;
>  
>          if ( cpu_has_vmx_apic_reg_virt )
> diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
> index b66c24b..ad4d6c1 100644
> --- a/xen/arch/x86/x86_64/traps.c
> +++ b/xen/arch/x86/x86_64/traps.c
> @@ -88,7 +88,7 @@ void show_registers(const struct cpu_user_regs *regs)
>      enum context context;
>      struct vcpu *v = system_state >= SYS_STATE_smp_boot ? current : NULL;
>  
> -    if ( guest_mode(regs) && has_hvm_container_vcpu(v) )
> +    if ( guest_mode(regs) && is_hvm_vcpu(v) )
>      {
>          struct segment_register sreg;
>          context = CTXT_hvm_guest;
> @@ -623,7 +623,7 @@ static void hypercall_page_initialise_ring3_kernel(void *hypercall_page)
>  void hypercall_page_initialise(struct domain *d, void *hypercall_page)
>  {
>      memset(hypercall_page, 0xCC, PAGE_SIZE);
> -    if ( has_hvm_container_domain(d) )
> +    if ( is_hvm_domain(d) )
>          hvm_hypercall_page_initialise(d, hypercall_page);
>      else if ( !is_pv_32bit_domain(d) )
>          hypercall_page_initialise_ring3_kernel(hypercall_page);
> diff --git a/xen/drivers/passthrough/x86/iommu.c b/xen/drivers/passthrough/x86/iommu.c
> index 69cd6c5..750c663 100644
> --- a/xen/drivers/passthrough/x86/iommu.c
> +++ b/xen/drivers/passthrough/x86/iommu.c
> @@ -55,7 +55,7 @@ int arch_iommu_populate_page_table(struct domain *d)
>  
>      while ( !rc && (page = page_list_remove_head(&d->page_list)) )
>      {
> -        if ( has_hvm_container_domain(d) ||
> +        if ( is_hvm_domain(d) ||
>              (page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
>          {
>              unsigned long mfn = page_to_mfn(page);
> diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
> index d182d75..ec14cce 100644
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -16,7 +16,7 @@
>  #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
>  #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
>  
> -#define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
> +#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
>          d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
>  #define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
>  #define is_domain_direct_mapped(d) ((void)(d), 0)
> diff --git a/xen/include/asm-x86/event.h b/xen/include/asm-x86/event.h
> index d589d6f..a91599d 100644
> --- a/xen/include/asm-x86/event.h
> +++ b/xen/include/asm-x86/event.h
> @@ -26,7 +26,7 @@ static inline int local_events_need_delivery(void)
>  
>      ASSERT(!is_idle_vcpu(v));
>  
> -    return (has_hvm_container_vcpu(v) ? hvm_local_events_need_delivery(v) :
> +    return (is_hvm_vcpu(v) ? hvm_local_events_need_delivery(v) :
>              (vcpu_info(v, evtchn_upcall_pending) &&
>               !vcpu_info(v, evtchn_upcall_mask)));
>  }
> diff --git a/xen/include/asm-x86/guest_access.h b/xen/include/asm-x86/guest_access.h
> index 88edb3f..ca700c9 100644
> --- a/xen/include/asm-x86/guest_access.h
> +++ b/xen/include/asm-x86/guest_access.h
> @@ -14,27 +14,27 @@
>  
>  /* Raw access functions: no type checking. */
>  #define raw_copy_to_guest(dst, src, len)        \
> -    (has_hvm_container_vcpu(current) ?                     \
> +    (is_hvm_vcpu(current) ?                     \
>       copy_to_user_hvm((dst), (src), (len)) :    \
>       copy_to_user((dst), (src), (len)))
>  #define raw_copy_from_guest(dst, src, len)      \
> -    (has_hvm_container_vcpu(current) ?                     \
> +    (is_hvm_vcpu(current) ?                     \
>       copy_from_user_hvm((dst), (src), (len)) :  \
>       copy_from_user((dst), (src), (len)))
>  #define raw_clear_guest(dst,  len)              \
> -    (has_hvm_container_vcpu(current) ?                     \
> +    (is_hvm_vcpu(current) ?                     \
>       clear_user_hvm((dst), (len)) :             \
>       clear_user((dst), (len)))
>  #define __raw_copy_to_guest(dst, src, len)      \
> -    (has_hvm_container_vcpu(current) ?                     \
> +    (is_hvm_vcpu(current) ?                     \
>       copy_to_user_hvm((dst), (src), (len)) :    \
>       __copy_to_user((dst), (src), (len)))
>  #define __raw_copy_from_guest(dst, src, len)    \
> -    (has_hvm_container_vcpu(current) ?                     \
> +    (is_hvm_vcpu(current) ?                     \
>       copy_from_user_hvm((dst), (src), (len)) :  \
>       __copy_from_user((dst), (src), (len)))
>  #define __raw_clear_guest(dst,  len)            \
> -    (has_hvm_container_vcpu(current) ?                     \
> +    (is_hvm_vcpu(current) ?                     \
>       clear_user_hvm((dst), (len)) :             \
>       clear_user((dst), (len)))
>  
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index da0f6ba..2b4e328 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -623,7 +623,7 @@ unsigned long hvm_cr4_guest_valid_bits(const struct vcpu *v, bool restore);
>  #define arch_vcpu_block(v) ({                                   \
>      struct vcpu *v_ = (v);                                      \
>      struct domain *d_ = v_->domain;                             \
> -    if ( has_hvm_container_domain(d_) &&                        \
> +    if ( is_hvm_domain(d_) &&                               \
>           (d_->arch.hvm_domain.pi_ops.vcpu_block) )          \
>          d_->arch.hvm_domain.pi_ops.vcpu_block(v_);          \
>  })
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index cc11999..832352a 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -874,8 +874,6 @@ void watchdog_domain_destroy(struct domain *d);
>  #define is_pv_vcpu(v)   (is_pv_domain((v)->domain))
>  #define is_hvm_domain(d) ((d)->guest_type == guest_type_hvm)
>  #define is_hvm_vcpu(v)   (is_hvm_domain(v->domain))
> -#define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)
> -#define has_hvm_container_vcpu(v)   (has_hvm_container_domain((v)->domain))
>  #define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
>                             cpumask_weight((v)->cpu_hard_affinity) == 1)
>  #ifdef CONFIG_HAS_PASSTHROUGH
> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
> index a6cab00..13cf7bc 100644
> --- a/xen/include/xen/tmem_xen.h
> +++ b/xen/include/xen/tmem_xen.h
> @@ -185,9 +185,8 @@ typedef XEN_GUEST_HANDLE_PARAM(char) tmem_cli_va_param_t;
>  static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
>  {
>  #ifdef CONFIG_COMPAT
> -    if ( has_hvm_container_vcpu(current) ?
> -         hvm_guest_x86_mode(current) != 8 :
> -         is_pv_32bit_vcpu(current) )
> +    if ( is_hvm_vcpu(current) ? hvm_guest_x86_mode(current) != 8
> +                              : is_pv_32bit_vcpu(current) )
>      {
>          int rc;
>          enum XLAT_tmem_op_u u;
> -- 
> 2.10.1 (Apple Git-78)
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

