
Re: [Xen-devel] [PATCH] add HVMOP_get_mem_type hvmop



Hi, 

At 14:11 +0100 on 03 May (1304431873), Olaf Hering wrote:
> diff -r 10f27b8b3d63 -r e7356373147a xen/arch/x86/hvm/hvm.c
> --- a/xen/arch/x86/hvm/hvm.c  Mon May 02 12:00:40 2011 +0100
> +++ b/xen/arch/x86/hvm/hvm.c  Tue May 03 15:06:13 2011 +0200
> @@ -3676,6 +3676,37 @@ long do_hvm_op(unsigned long op, XEN_GUE
>          break;
>      }
>  
> +    case HVMOP_get_mem_type:
> +    {
> +        struct xen_hvm_get_mem_type a;
> +        struct domain *d;
> +        p2m_type_t t;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        rc = rcu_lock_remote_target_domain_by_id(a.domid, &d);

I thought this call was intended to be used from inside the guest in
question.  rcu_lock_remote_target_domain_by_id() explicitly refuses to
let a domain operate on itself. 
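If it is meant to work from inside the guest as well (e.g. with
DOMID_SELF), the non-remote variant is probably what you want -- a
minimal sketch, assuming rcu_lock_target_domain_by_id() is the right
self-allowing counterpart here:

        /* Unlike the _remote_ variant, this accepts the caller's own
         * domain (e.g. DOMID_SELF), so a guest can query itself. */
        rc = rcu_lock_target_domain_by_id(a.domid, &d);
        if ( rc != 0 )
            return rc;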

> +        if ( rc != 0 )
> +            return rc;
> +
> +        rc = -EINVAL;
> +        if ( is_hvm_domain(d) )
> +        {
> +            gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t, 0);
> +            if ( p2m_is_mmio(t) )
> +                a.mem_type =  HVMMEM_mmio_dm;
> +            else if ( p2m_is_readonly(t) )
> +                a.mem_type =  HVMMEM_ram_ro;
> +            else if ( p2m_is_ram(t) )
> +                a.mem_type =  HVMMEM_ram_rw;
> +            else
> +                a.mem_type =  HVMMEM_mmio_dm;
> +            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
> +        }
> +        rcu_unlock_domain(d);
> +        break;
> +    }
> +
>      case HVMOP_set_mem_type:
>      {
>          struct xen_hvm_set_mem_type a;
> diff -r 10f27b8b3d63 -r e7356373147a xen/include/public/hvm/hvm_op.h
> --- a/xen/include/public/hvm/hvm_op.h Mon May 02 12:00:40 2011 +0100
> +++ b/xen/include/public/hvm/hvm_op.h Tue May 03 15:06:13 2011 +0200
> @@ -76,6 +76,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_
>  /* Flushes all VCPU TLBs: @arg must be NULL. */
>  #define HVMOP_flush_tlbs          5
>  
> +typedef enum {
> +    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
> +    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
> +    HVMMEM_mmio_dm,            /* Reads and write go to the device model */
> +} hvmmem_type_t;
> +

This typedef is now outside the tools-only #ifdef, while both of its
users (xen_hvm_set_mem_type and the new xen_hvm_get_mem_type) are still
inside it.  If that move wasn't deliberate, please put it back. 
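For example (just a sketch of the two self-consistent options, using
the existing guard): either restore it to its old spot inside the
tools-only block,

    /* Following tools-only interfaces may change in future. */
    #if defined(__XEN__) || defined(__XEN_TOOLS__)
    ...
    #define HVMOP_set_mem_type    8
    typedef enum {
        HVMMEM_ram_rw,             /* Normal read/write guest RAM */
        HVMMEM_ram_ro,             /* Read-only; writes are discarded */
        HVMMEM_mmio_dm,            /* Reads and write go to the device model */
    } hvmmem_type_t;

or, if the new op really is meant to be guest-callable, move
HVMOP_get_mem_type and struct xen_hvm_get_mem_type out of the
tools-only section along with the typedef.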

>  /* Following tools-only interfaces may change in future. */
>  #if defined(__XEN__) || defined(__XEN_TOOLS__)
>  
> @@ -109,11 +115,6 @@ typedef struct xen_hvm_modified_memory x
>  DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
>  
>  #define HVMOP_set_mem_type    8
> -typedef enum {
> -    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
> -    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
> -    HVMMEM_mmio_dm,            /* Reads and write go to the device model */
> -} hvmmem_type_t;
>  /* Notify that a region of memory is to be treated in a specific way. */
>  struct xen_hvm_set_mem_type {
>      /* Domain to be updated. */
> @@ -223,6 +224,20 @@ struct xen_hvm_inject_trap {
>  typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
>  
> +#define HVMOP_get_mem_type    15
> +/* Return hvmmem_type_t for the specified pfn. */
> +struct xen_hvm_get_mem_type {
> +    /* Domain to be queried. */
> +    domid_t domid;
> +    /* OUT variable. */
> +    uint8_t mem_type;
> +    /* IN variable. */
> +    uint64_t pfn;

This structure will be laid out differently on 32-bit and 64-bit
builds (the uint64_t pfn lands at offset 4 in a 32-bit build but
offset 8 in a 64-bit one). :(  Also, since the _set operation uses a
16-bit variable for the type, you might as well do the same here. 
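Something along these lines (a sketch only; the explicit padding keeps
the uint64_t at offset 8 on both 32-bit and 64-bit builds, and mem_type
matches the 16-bit width used by the _set op) would avoid both
problems:

    struct xen_hvm_get_mem_type {
        /* Domain to be queried. */
        domid_t domid;
        /* OUT variable. */
        uint16_t mem_type;
        /* Pad so that pfn is 8-byte aligned in both builds. */
        uint16_t pad[2];
        /* IN variable. */
        uint64_t pfn;
    };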

Cheers,

Tim.

-- 
Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Principal Software Engineer, Xen Platform Team
Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)
