
Re: [Xen-devel] [PATCH 12/18] PVH xen: support hypercalls for PVH



On Fri, May 24, 2013 at 06:25:31PM -0700, Mukesh Rathor wrote:
> This patch replaces the old patch which created pvh.c. Instead, we modify
> hvm.c to add support for PVH also.

I think that is not a very helpful commit description.

In six months, if somebody looks at this, they will ask 'what old patch?'.
The 'we modify hvm.c to add support for PVH also' part is obvious, as
you are touching said file and the subject already mentions that.

Perhaps just describe this patch's limitations: enumerate the
hypercalls that are implemented and explain why the other ones
are not?
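
FWIW, the mechanics here boil down to a whitelist lookup - anything a
PVH guest asks for that is not in pvh_hypercall64_table comes back as
-ENOSYS. A rough, self-contained sketch of that idea (not the actual
Xen code; the table contents, the numbers and the names such as
pvh_dispatch() are made up purely for illustration):

/*
 * Illustrative only - not Xen code.  Shows the "NULL entry means
 * -ENOSYS" whitelist pattern the patch uses for PVH guests.
 */
#include <stdio.h>
#include <errno.h>

#define NR_CALLS        64
#define HC_VCPU_OP      24      /* made-up numbers, not the real ABI */
#define HC_PLATFORM_OP  27

typedef long (*hypercall_fn_t)(unsigned long arg);

static long fake_vcpu_op(unsigned long arg)     { (void)arg; return 0; }
static long fake_platform_op(unsigned long arg) { (void)arg; return 0; }

/* Only whitelisted entries are populated; everything else stays NULL. */
static hypercall_fn_t const pvh_table[NR_CALLS] = {
    [HC_VCPU_OP]     = fake_vcpu_op,
    [HC_PLATFORM_OP] = fake_platform_op,
};

static long pvh_dispatch(unsigned int nr, unsigned long arg)
{
    if ( nr >= NR_CALLS || !pvh_table[nr] )
        return -ENOSYS;                 /* not (yet) allowed for PVH */
    return pvh_table[nr](arg);
}

int main(void)
{
    printf("vcpu_op     -> %ld\n", pvh_dispatch(HC_VCPU_OP, 0));  /* 0 */
    printf("unlisted op -> %ld\n", pvh_dispatch(5, 0));           /* -ENOSYS */
    return 0;
}

Spelling out in the commit message which entries of
pvh_hypercall64_table are populated, and why the rest stay NULL, would
capture exactly that.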


> 
> Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
> ---
>  xen/arch/x86/hvm/hvm.c |   58 +++++++++++++++++++++++++++++++++++++++--------
>  1 files changed, 48 insertions(+), 10 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index a525080..74004bc 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3242,6 +3242,8 @@ static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>          case PHYSDEVOP_get_free_pirq:
>              return do_physdev_op(cmd, arg);
>          default:
> +            if ( is_pvh_vcpu(current) && is_hardware_domain(current->domain) )
> +                return do_physdev_op(cmd, arg);
>              return -ENOSYS;
>      }
>  }
> @@ -3249,7 +3251,7 @@ static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>  static long hvm_vcpu_op(
>      int cmd, int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
>  {
> -    long rc;
> +    long rc = -ENOSYS;
>  
>      switch ( cmd )
>      {
> @@ -3262,6 +3264,14 @@ static long hvm_vcpu_op(
>      case VCPUOP_register_vcpu_info:
>          rc = do_vcpu_op(cmd, vcpuid, arg);
>          break;
> +
> +    case VCPUOP_is_up:
> +    case VCPUOP_up:
> +    case VCPUOP_initialise:
> +        if ( is_pvh_vcpu(current) )
> +            rc = do_vcpu_op(cmd, vcpuid, arg);
> +        break;
> +
>      default:
>          rc = -ENOSYS;
>          break;
> @@ -3381,12 +3391,31 @@ static hvm_hypercall_t *const hvm_hypercall32_table[NR_hypercalls] = {
>      HYPERCALL(tmem_op)
>  };
>  
> +/* PVH 32bitfixme */
> +static hvm_hypercall_t *const pvh_hypercall64_table[NR_hypercalls] = {
> +    HYPERCALL(platform_op),
> +    HYPERCALL(memory_op),
> +    HYPERCALL(xen_version),
> +    HYPERCALL(console_io),
> +    [ __HYPERVISOR_grant_table_op ]  = (hvm_hypercall_t *)hvm_grant_table_op,
> +    [ __HYPERVISOR_vcpu_op ]         = (hvm_hypercall_t *)hvm_vcpu_op,
> +    HYPERCALL(mmuext_op),
> +    HYPERCALL(xsm_op),
> +    HYPERCALL(sched_op),
> +    HYPERCALL(event_channel_op),
> +    [ __HYPERVISOR_physdev_op ]      = (hvm_hypercall_t *)hvm_physdev_op,
> +    HYPERCALL(hvm_op),
> +    HYPERCALL(sysctl),
> +    HYPERCALL(domctl)
> +};
> +
>  int hvm_do_hypercall(struct cpu_user_regs *regs)
>  {
>      struct vcpu *curr = current;
>      struct segment_register sreg;
>      int mode = hvm_guest_x86_mode(curr);
>      uint32_t eax = regs->eax;
> +    hvm_hypercall_t **hcall_table;
>  
>      switch ( mode )
>      {
> @@ -3407,7 +3436,9 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
>      if ( (eax & 0x80000000) && is_viridian_domain(curr->domain) )
>          return viridian_hypercall(regs);
>  
> -    if ( (eax >= NR_hypercalls) || !hvm_hypercall32_table[eax] )
> +    if ( (eax >= NR_hypercalls) ||
> +         (is_pvh_vcpu(curr) && !pvh_hypercall64_table[eax]) ||
> +         (is_hvm_vcpu(curr) && !hvm_hypercall32_table[eax]) )
>      {
>          regs->eax = -ENOSYS;
>          return HVM_HCALL_completed;
> @@ -3421,17 +3452,24 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
>                      eax, regs->rdi, regs->rsi, regs->rdx,
>                      regs->r10, regs->r8, regs->r9);
>  
> +        if ( is_pvh_vcpu(curr) )
> +            hcall_table = (hvm_hypercall_t **)pvh_hypercall64_table;
> +        else
> +            hcall_table = (hvm_hypercall_t **)hvm_hypercall64_table;
> +
>          curr->arch.hvm_vcpu.hcall_64bit = 1;
> -        regs->rax = hvm_hypercall64_table[eax](regs->rdi,
> -                                               regs->rsi,
> -                                               regs->rdx,
> -                                               regs->r10,
> -                                               regs->r8,
> -                                               regs->r9); 
> +        regs->rax = hcall_table[eax](regs->rdi,
> +                                     regs->rsi,
> +                                     regs->rdx,
> +                                     regs->r10,
> +                                     regs->r8,
> +                                     regs->r9);
>          curr->arch.hvm_vcpu.hcall_64bit = 0;
>      }
>      else
>      {
> +        ASSERT(!is_pvh_vcpu(curr));   /* PVH 32bitfixme */
> +
>          HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%x, %x, %x, %x, %x, %x)", eax,
>                      (uint32_t)regs->ebx, (uint32_t)regs->ecx,
>                      (uint32_t)regs->edx, (uint32_t)regs->esi,
> @@ -3855,7 +3893,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
>              return -ESRCH;
>  
>          rc = -EINVAL;
> -        if ( !is_hvm_domain(d) )
> +        if ( is_pv_domain(d) )
>              goto param_fail;
>  
>          rc = xsm_hvm_param(XSM_TARGET, d, op);
> @@ -4027,7 +4065,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
>                  break;
>              }
>  
> -            if ( rc == 0 ) 
> +            if ( rc == 0 && !is_pvh_domain(d) )
>              {
>                  d->arch.hvm_domain.params[a.index] = a.value;
>  
> -- 
> 1.7.2.3
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxx
> http://lists.xen.org/xen-devel
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

