
Re: [Xen-devel] [PATCH 11/24] PVH xen: domain create, scheduler related code changes



>>> On 18.07.13 at 04:32, Mukesh Rathor <mukesh.rathor@xxxxxxxxxx> wrote:
> This patch mostly contains changes to arch/x86/domain.c to allow for PVH
> domain creation. The new function pvh_set_vcpu_info(), introduced in the
> previous patch, is called here to set some guest context in the VMCS.
> This patch also changes the context_switch code in the same file to follow
> HVM behaviour for PVH.
> 
> Changes in V2:
>   - changes to read_segment_register() moved to this patch.
>   - The other comment was to create NULL stubs for pvh_set_vcpu_info
>     and pvh_read_descriptor, which are implemented in a later patch; since
>     I disable PVH creation until all the patches are checked in, the stubs
>     are not strictly needed, but they do help with breaking the series
>     into smaller patches.
> 
> Changes in V3:
>   - Fix read_segment_register() macro to make sure args are evaluated once,
>     and use # instead of STR for name in the macro.
> 
> Changes in V4:
>   - Remove the pvh substruct from the hvm substruct, as vcpu_info_mfn has
>     been moved out of the pv_vcpu struct.
>   - Rename hvm_pvh_* functions to hvm_*.
> 
> Changes in V5:
>   - remove pvh_read_descriptor().
> 
> Changes in V7:
>   - remove hap_update_cr3() and read_segment_register changes from here.
> 
> Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>

Re the title: I don't see anything scheduler related in here.
Re the contents:
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
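
For readers following the series: the recurring is_hvm_*() to !is_pv_*()
conversions in this patch rely on the three-way guest type distinction
introduced earlier in the series. A rough, illustrative sketch of the
predicate relationship being assumed (the actual definitions in
xen/include/xen/sched.h may be spelled differently):

/* Illustration only, not the committed definitions. */
enum guest_type { guest_type_pv, guest_type_pvh, guest_type_hvm };

#define is_pv_domain(d)   ((d)->guest_type == guest_type_pv)
#define is_pvh_domain(d)  ((d)->guest_type == guest_type_pvh)
#define is_hvm_domain(d)  ((d)->guest_type == guest_type_hvm)

#define is_pv_vcpu(v)     (is_pv_domain((v)->domain))
#define is_pvh_vcpu(v)    (is_pvh_domain((v)->domain))
#define is_hvm_vcpu(v)    (is_hvm_domain((v)->domain))

/*
 * So "!is_pv_domain(d)" reads as "HVM or PVH": PVH follows the HVM
 * paths for vcpu initialise/destroy, HAP setup, context switch and
 * event delivery, while checks that must single out PVH use
 * is_pvh_*(), e.g. the CR3/VMCS handling in arch_set_info_guest()
 * and the early return added to destroy_gdt().
 */

(An illustrative sketch for the read_segment_register() item in the V3
changelog is appended below the quoted patch.)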

> ---
>  xen/arch/x86/domain.c |   56 ++++++++++++++++++++++++++++++++----------------
>  xen/arch/x86/mm.c     |    3 ++
>  2 files changed, 40 insertions(+), 19 deletions(-)
> 
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index c361abf..fccb4ee 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -385,7 +385,7 @@ int vcpu_initialise(struct vcpu *v)
>  
>      vmce_init_vcpu(v);
>  
> -    if ( is_hvm_domain(d) )
> +    if ( !is_pv_domain(d) )
>      {
>          rc = hvm_vcpu_initialise(v);
>          goto done;
> @@ -452,7 +452,7 @@ void vcpu_destroy(struct vcpu *v)
>  
>      vcpu_destroy_fpu(v);
>  
> -    if ( is_hvm_vcpu(v) )
> +    if ( !is_pv_vcpu(v) )
>          hvm_vcpu_destroy(v);
>      else
>          xfree(v->arch.pv_vcpu.trap_ctxt);
> @@ -464,7 +464,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
>      int rc = -ENOMEM;
>  
>      d->arch.hvm_domain.hap_enabled =
> -        is_hvm_domain(d) &&
> +        !is_pv_domain(d) &&
>          hvm_funcs.hap_supported &&
>          (domcr_flags & DOMCRF_hap);
>      d->arch.hvm_domain.mem_sharing_enabled = 0;
> @@ -512,7 +512,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
>      mapcache_domain_init(d);
>  
>      HYPERVISOR_COMPAT_VIRT_START(d) =
> -        is_hvm_domain(d) ? ~0u : __HYPERVISOR_COMPAT_VIRT_START;
> +        is_pv_domain(d) ? __HYPERVISOR_COMPAT_VIRT_START : ~0u;
>  
>      if ( (rc = paging_domain_init(d, domcr_flags)) != 0 )
>          goto fail;
> @@ -555,7 +555,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
>      }
>      spin_lock_init(&d->arch.e820_lock);
>  
> -    if ( is_hvm_domain(d) )
> +    if ( !is_pv_domain(d) )
>      {
>          if ( (rc = hvm_domain_initialise(d)) != 0 )
>          {
> @@ -651,7 +651,7 @@ int arch_set_info_guest(
>  #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
>      flags = c(flags);
>  
> -    if ( !is_hvm_vcpu(v) )
> +    if ( is_pv_vcpu(v) )
>      {
>          if ( !compat )
>          {
> @@ -704,7 +704,7 @@ int arch_set_info_guest(
>      v->fpu_initialised = !!(flags & VGCF_I387_VALID);
>  
>      v->arch.flags &= ~TF_kernel_mode;
> -    if ( (flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ )
> +    if ( (flags & VGCF_in_kernel) || !is_pv_vcpu(v)/*???*/ )
>          v->arch.flags |= TF_kernel_mode;
>  
>      v->arch.vgc_flags = flags;
> @@ -719,7 +719,7 @@ int arch_set_info_guest(
>      if ( !compat )
>      {
>          memcpy(&v->arch.user_regs, &c.nat->user_regs, sizeof(c.nat->user_regs));
> -        if ( !is_hvm_vcpu(v) )
> +        if ( is_pv_vcpu(v) )
>              memcpy(v->arch.pv_vcpu.trap_ctxt, c.nat->trap_ctxt,
>                     sizeof(c.nat->trap_ctxt));
>      }
> @@ -735,10 +735,13 @@ int arch_set_info_guest(
>  
>      v->arch.user_regs.eflags |= 2;
>  
> -    if ( is_hvm_vcpu(v) )
> +    if ( !is_pv_vcpu(v) )
>      {
>          hvm_set_info_guest(v);
> -        goto out;
> +        if ( is_hvm_vcpu(v) || v->is_initialised )
> +            goto out;
> +        else
> +            goto pvh_skip_pv_stuff;
>      }
>  
>      init_int80_direct_trap(v);
> @@ -853,6 +856,7 @@ int arch_set_info_guest(
>  
>      set_bit(_VPF_in_reset, &v->pause_flags);
>  
> + pvh_skip_pv_stuff:
>      if ( !compat )
>          cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[3]);
>      else
> @@ -861,7 +865,7 @@ int arch_set_info_guest(
>  
>      if ( !cr3_page )
>          rc = -EINVAL;
> -    else if ( paging_mode_refcounts(d) )
> +    else if ( paging_mode_refcounts(d) || is_pvh_vcpu(v) )
>          /* nothing */;
>      else if ( cr3_page == v->arch.old_guest_table )
>      {
> @@ -893,8 +897,15 @@ int arch_set_info_guest(
>          /* handled below */;
>      else if ( !compat )
>      {
> +        /* PVH 32bitfixme. */
> +        if ( is_pvh_vcpu(v) )
> +        {
> +            v->arch.cr3 = page_to_mfn(cr3_page);
> +            v->arch.hvm_vcpu.guest_cr[3] = c.nat->ctrlreg[3];
> +        }
> +
>          v->arch.guest_table = pagetable_from_page(cr3_page);
> -        if ( c.nat->ctrlreg[1] )
> +        if ( c.nat->ctrlreg[1] && !is_pvh_vcpu(v) )
>          {
>              cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[1]);
>              cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
> @@ -954,6 +965,13 @@ int arch_set_info_guest(
>  
>      update_cr3(v);
>  
> +    if ( is_pvh_vcpu(v) )
> +    {
> +        /* Set VMCS fields. */
> +        if ( (rc = pvh_set_vcpu_info(v, c.nat)) != 0 )
> +            return rc;
> +    }
> +
>   out:
>      if ( flags & VGCF_online )
>          clear_bit(_VPF_down, &v->pause_flags);
> @@ -1315,7 +1333,7 @@ static void update_runstate_area(struct vcpu *v)
>  
>  static inline int need_full_gdt(struct vcpu *v)
>  {
> -    return (!is_hvm_vcpu(v) && !is_idle_vcpu(v));
> +    return (is_pv_vcpu(v) && !is_idle_vcpu(v));
>  }
>  
>  static void __context_switch(void)
> @@ -1450,7 +1468,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
>          /* Re-enable interrupts before restoring state which may fault. */
>          local_irq_enable();
>  
> -        if ( !is_hvm_vcpu(next) )
> +        if ( is_pv_vcpu(next) )
>          {
>              load_LDT(next);
>              load_segments(next);
> @@ -1576,12 +1594,12 @@ unsigned long hypercall_create_continuation(
>          regs->eax  = op;
>  
>          /* Ensure the hypercall trap instruction is re-executed. */
> -        if ( !is_hvm_vcpu(current) )
> +        if ( is_pv_vcpu(current) )
>              regs->eip -= 2;  /* re-execute 'syscall' / 'int $xx' */
>          else
>              current->arch.hvm_vcpu.hcall_preempted = 1;
>  
> -        if ( !is_hvm_vcpu(current) ?
> +        if ( is_pv_vcpu(current) ?
>               !is_pv_32on64_vcpu(current) :
>               (hvm_guest_x86_mode(current) == 8) )
>          {
> @@ -1849,7 +1867,7 @@ int domain_relinquish_resources(struct domain *d)
>                  return ret;
>          }
>  
> -        if ( !is_hvm_domain(d) )
> +        if ( is_pv_domain(d) )
>          {
>              for_each_vcpu ( d, v )
>              {
> @@ -1922,7 +1940,7 @@ int domain_relinquish_resources(struct domain *d)
>          BUG();
>      }
>  
> -    if ( is_hvm_domain(d) )
> +    if ( !is_pv_domain(d) )
>          hvm_domain_relinquish_resources(d);
>  
>      return 0;
> @@ -2006,7 +2024,7 @@ void vcpu_mark_events_pending(struct vcpu *v)
>      if ( already_pending )
>          return;
>  
> -    if ( is_hvm_vcpu(v) )
> +    if ( !is_pv_vcpu(v) )
>          hvm_assert_evtchn_irq(v);
>      else
>          vcpu_kick(v);
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index e980431..de7ba45 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -4334,6 +4334,9 @@ void destroy_gdt(struct vcpu *v)
>      int i;
>      unsigned long pfn;
>  
> +    if ( is_pvh_vcpu(v) )
> +        return;
> +
>      v->arch.pv_vcpu.gdt_ents = 0;
>      pl1e = gdt_ldt_ptes(v->domain, v);
>      for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
> -- 
> 1.7.2.3
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx 
> http://lists.xen.org/xen-devel 
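
As an aside on the V3 changelog item (evaluate macro arguments exactly
once, and use '#' rather than a STR() helper for the register name):
below is a minimal, hypothetical sketch of such a macro. It is not the
actual Xen definition, whose rework was in any case moved out of this
patch in V7; the shape is shown only to illustrate the two points.

/*
 * Hypothetical sketch only.  The GCC statement expression lets the
 * arguments be copied into locals, so any side effects in 'vcpu' and
 * 'regs' happen exactly once, and '#name' stringizes the bare register
 * name directly in the asm template, with no separate STR() macro.
 */
#define read_segment_register(vcpu, regs, name) ({                    \
    unsigned int _sel;                                                 \
    struct vcpu *_v = (vcpu);                 /* evaluated once */     \
    const struct cpu_user_regs *_r = (regs);  /* evaluated once */     \
    if ( !is_pv_vcpu(_v) )                                             \
        _sel = _r->name;         /* PVH/HVM: use the saved selector */ \
    else                                                               \
        asm volatile ( "mov %%" #name ", %0" : "=r" (_sel) );          \
    _sel;                                                              \
})

The statement-expression form is what guarantees single evaluation of
the arguments; '#name' produces the register name string in place.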



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

