
Re: [Xen-devel] [PATCH RFC v12 07/21] pvh: Disable unneeded features of HVM containers



> From: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Date: Fri, Sep 13, 2013 at 9:36 AM
> Subject: Re: [Xen-devel] [PATCH RFC v12 07/21] pvh: Disable unneeded
> features of HVM containers
> To: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Keir Fraser <keir@xxxxxxx>, Tim Deegan <tim@xxxxxxx>, Jan Beulich
> <jan.beulich@xxxxxxxx>, xen-devel@xxxxxxxxxxxxx
> 
> 
> On 13/09/13 17:25, George Dunlap wrote:
> >
> > Things kept:
> > * cacheattr_region lists
> > * irq-related structures
> > * paging
> > * tm_list
> >
> > Things disabled for now:
> > * compat xlation
> >
> > Things disabled:
> > * Emulated timers and clock sources
> > * IO/MMIO emulation
> > * msix tables
> > * hvm params
> > * hvm_funcs
> > * nested HVM
> > * Fast-path for emulated lapic accesses
> >
> > Getting rid of the hvm_params struct required a couple other places to
> > check for its existence before attempting to read the params.
> >
> > Signed-off-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> > Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
> > CC: Jan Beulich <jan.beulich@xxxxxxxx>
> > CC: Tim Deegan <tim@xxxxxxx>
> > CC: Keir Fraser <keir@xxxxxxx>
> > ---
> >   xen/arch/x86/hvm/hvm.c      |   37 ++++++++++++++++++++++++++++++++++---
> >   xen/arch/x86/hvm/io.c       |    4 ++++
> >   xen/arch/x86/hvm/irq.c      |    3 +++
> >   xen/arch/x86/hvm/mtrr.c     |    3 ++-
> >   xen/arch/x86/hvm/vmx/intr.c |    3 ++-
> >   xen/arch/x86/hvm/vmx/vmcs.c |    5 +++--
> >   xen/arch/x86/hvm/vmx/vmx.c  |   10 ++++++++--
> >   7 files changed, 56 insertions(+), 9 deletions(-)
> >
> > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> > index 1764b78..6a7a006 100644
> > --- a/xen/arch/x86/hvm/hvm.c
> > +++ b/xen/arch/x86/hvm/hvm.c
> > @@ -301,6 +301,10 @@ u64 hvm_get_guest_tsc_adjust(struct vcpu *v)
> >     void hvm_migrate_timers(struct vcpu *v)
> >   {
> > +    /* PVH doesn't use rtc and emulated timers, it uses pvclock mechanism. */
> > +    if ( is_pvh_vcpu(v) )
> > +        return;
> > +
> >       rtc_migrate_timers(v);
> >       pt_migrate(v);
> >   }
> > @@ -342,10 +346,13 @@ void hvm_do_resume(struct vcpu *v)
> >   {
> >       ioreq_t *p;
> >   -    pt_restore_timer(v);
> > -
> >       check_wakeup_from_wait();
> >   +    if ( is_pvh_vcpu(v) )
> > +        goto check_inject_trap;
> > +
> > +    pt_restore_timer(v);
> > +
> >       /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
> >       p = get_ioreq(v);
> >       while ( p->state != STATE_IOREQ_NONE )
> > @@ -368,6 +375,7 @@ void hvm_do_resume(struct vcpu *v)
> >           }
> >       }
> >   +  check_inject_trap:
> >       /* Inject pending hw/sw trap */
> >       if ( v->arch.hvm_vcpu.inject_trap.vector != -1 )
> >       {
> > @@ -521,6 +529,7 @@ int hvm_domain_initialise(struct domain *d)
> >           return -EINVAL;
> >       }
> > +    /* PVH: pbuf_lock and uc_lock unused, but won't hurt */
> >       spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
> >       spin_lock_init(&d->arch.hvm_domain.irq_lock);
> >       spin_lock_init(&d->arch.hvm_domain.uc_lock);
> > @@ -531,6 +540,9 @@ int hvm_domain_initialise(struct domain *d)
> >       if ( rc != 0 )
> >           goto fail0;
> >   +    if ( is_pvh_domain(d) )
> > +        return 0;
> > +
> >       INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
> >       spin_lock_init(&d->arch.hvm_domain.msixtbl_list_lock);
> >   @@ -584,6 +596,9 @@ int hvm_domain_initialise(struct domain *d)
> >     void hvm_domain_relinquish_resources(struct domain *d)
> >   {
> > +    if ( is_pvh_domain(d) )
> > +        return;
> > +
> >       if ( hvm_funcs.nhvm_domain_relinquish_resources )
> >           hvm_funcs.nhvm_domain_relinquish_resources(d);
> > @@ -609,6 +624,10 @@ void hvm_domain_relinquish_resources(struct domain *d)
> >   void hvm_domain_destroy(struct domain *d)
> >   {
> >       hvm_destroy_cacheattr_region_list(d);
> > +
> > +    if ( is_pvh_domain(d) )
> > +        return;
> > +
> >       hvm_funcs.domain_destroy(d);
> >       rtc_deinit(d);
> >       stdvga_deinit(d);
> > @@ -1093,6 +1112,14 @@ int hvm_vcpu_initialise(struct vcpu *v)
> >         v->arch.hvm_vcpu.inject_trap.vector = -1;
> >   +    if ( is_pvh_vcpu(v) )
> > +    {
> > +        v->arch.hvm_vcpu.hcall_64bit = 1;    /* PVH 32bitfixme. */
> > +        /* This for hvm_long_mode_enabled(v). */
> > +        v->arch.hvm_vcpu.guest_efer = EFER_SCE | EFER_LMA | EFER_LME;
> > +        return 0;
> > +    }
> > +
> >       rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
> >       if ( rc != 0 )
> >           goto fail3;
> > @@ -1168,7 +1195,10 @@ void hvm_vcpu_destroy(struct vcpu *v)
> >         tasklet_kill(&v->arch.hvm_vcpu.assert_evtchn_irq_tasklet);
> >       hvm_vcpu_cacheattr_destroy(v);
> > -    vlapic_destroy(v);
> > +
> > +    if ( is_hvm_vcpu(v) )
> > +        vlapic_destroy(v);
> > +
> >       hvm_funcs.vcpu_destroy(v);
> >       /* Event channel is already freed by evtchn_destroy(). */
> > @@ -1369,6 +1399,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa,
> >       /* For the benefit of 32-bit WinXP (& older Windows) on AMD CPUs,
> >        * a fast path for LAPIC accesses, skipping the p2m lookup. */
> >       if ( !nestedhvm_vcpu_in_guestmode(v)
> > +         && is_hvm_vcpu(v)
> >            && gfn == PFN_DOWN(vlapic_base_address(vcpu_vlapic(v))) )
> >       {
> >           if ( !handle_mmio() )
> > diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> > index 4ae2c0c..3af4b34 100644
> > --- a/xen/arch/x86/hvm/io.c
> > +++ b/xen/arch/x86/hvm/io.c
> > @@ -175,6 +175,10 @@ int handle_mmio(void)
> >       struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
> >       int rc;
> >   +    /* No MMIO for PVH vcpus */
> > +    if ( is_pvh_vcpu(curr) )
> > +        return 0;
> > +
> >       hvm_emulate_prepare(&ctxt, guest_cpu_user_regs());
> >         rc = hvm_emulate_one(&ctxt);
> > diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
> > index 9eae5de..92fb245 100644
> > --- a/xen/arch/x86/hvm/irq.c
> > +++ b/xen/arch/x86/hvm/irq.c
> > @@ -405,6 +405,9 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
> >            && vcpu_info(v, evtchn_upcall_pending) )
> >           return hvm_intack_vector(plat->irq.callback_via.vector);
> >   +    if ( is_pvh_vcpu(v) )
> > +        return hvm_intack_none;
> > +
> >       if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
> >           return hvm_intack_pic(0);
> > diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
> > index ef51a8d..df888a6 100644
> > --- a/xen/arch/x86/hvm/mtrr.c
> > +++ b/xen/arch/x86/hvm/mtrr.c
> > @@ -693,7 +693,8 @@ uint8_t epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
> >            ((d->vcpu == NULL) || ((v = d->vcpu[0]) == NULL)) )
> >           return MTRR_TYPE_WRBACK;
> > -    if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
> > +    if ( v->domain->arch.hvm_domain.params
> > +         && !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
> 
> 
> This is one thing I want to discuss: I can see why it was initially decided
> to disable hvm_params, as at the moment PVH only uses one of them, and this
> saves allocating the array for PVH domains.  But the result is a lot of
> fairly ugly checks like this one.  There's also (in another patch) a hack
> that allows a guest to *set* the IRQ callback via hvmop hvm_param_set, but
> not to *read* it.
> 
> Additionally, as far as I can tell, the only reason we can't support mem
> events is that we don't have hvm_params.
> 
> Since I think at some point we will want to use mem events on PVH guests,
> we should probably just allocate the array now and avoid having things like
> this in the first place.
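
For what it's worth, the alternative suggested above might look roughly like
the sketch below in hvm_domain_initialise(): do the params allocation before
the PVH early return, so that readers such as epte_get_entry_emt() keep their
original, check-free form.  This is only a sketch; the xzalloc_array() /
HVM_NR_PARAMS call is my assumption about the surrounding code, not a line
taken from the series (the fail0 label is the one visible in the quoted hunk).

    /* Sketch: allocate hvm_params for PVH as well as HVM, so that readers
     * of the array never need a NULL check. */
    d->arch.hvm_domain.params = xzalloc_array(uint64_t, HVM_NR_PARAMS);
    if ( d->arch.hvm_domain.params == NULL )
        goto fail0;

    if ( is_pvh_domain(d) )
        return 0;    /* skip the rest of the emulated-device setup */

That trades one small per-domain allocation against dropping the NULL checks,
and it would also leave the door open for mem events / mem_access later.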

I definitely would like to use mem_access with PVH domains, so it would be
good if this is not disabled.  I am willing to test it and make it work once
these patches go in.

Thanks,
Aravindh  


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

