WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 
Xen 
 
Home Products Support Community News
 
   
 

xen-devel

Re: [Xen-devel] query the page type of a hvm page from within a hvm guest

To: "Olaf Hering" <olaf@xxxxxxxxx>
Subject: Re: [Xen-devel] query the page type of a hvm page from within a hvm guest
From: "Jan Beulich" <JBeulich@xxxxxxxxxx>
Date: Mon, 02 May 2011 10:43:59 +0100
Cc: "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>, Tim Deegan <Tim.Deegan@xxxxxxxxxx>
Delivery-date: Mon, 02 May 2011 02:45:00 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <20110502091744.GA15606@xxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20110407101202.GA31553@xxxxxxxxx> <20110407111602.GI30961@xxxxxxxxxxxxxxxxxxxxxxx> <20110502091744.GA15606@xxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
>>> On 02.05.11 at 11:17, Olaf Hering <olaf@xxxxxxxxx> wrote:
> On Thu, Apr 07, Tim Deegan wrote:
> 
>> Hi, 
>> 
>> At 11:12 +0100 on 07 Apr (1302174722), Olaf Hering wrote:
>> > If the crash kernel had a way to ask the hypervisor whether a specific
>> > guest gfn is ballooned and thus backed by ram, the load issue would not
>> > happen.  There seems to be no interface to query the type of a guest gfn
>> > from within the hvm guest.
>> > 
>> > Any ideas how to implement that?
>> > I see HVMOP_set_mem_type, but no HVMOP_get_mem_type.
>> 
>> Feel free to add HVMOP_get_mem_type.  I don't think any great harm can
>> come from allowing the guest to query its own memory status.
> 
> This version works for me, tested with xen 4.0.1 and SLES11 SP1 kernel.
> The actual kernel interface needs to be sent to lkml.
> 
> Is there an u8 for padding required after mem_type in xen_hvm_get_mem_type?
> Should HVMOP_get_mem_type just check for p2m_is_mmio() or return every
> possible hvmmem_type_t value?

I'd say the latter, even if you don't use it at present.

> Olaf
> 
> ---
>  unmodified_drivers/linux-2.6/platform-pci/platform-pci.c |   36 
> +++++++++++++++
>  xen/arch/ia64/vmx/vmx_hypercall.c                        |    1
>  xen/arch/x86/hvm/hvm.c                                   |   27 +++++++++++
>  xen/include/public/hvm/hvm_op.h                          |   25 ++++++++--
>  4 files changed, 84 insertions(+), 5 deletions(-)
> 
> diff -r 2f08c89b767d 
> unmodified_drivers/linux-2.6/platform-pci/platform-pci.c
> --- a/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c        Wed Apr 
> 20 
> 17:13:08 2011 +0100
> +++ b/unmodified_drivers/linux-2.6/platform-pci/platform-pci.c        Mon May 
> 02 
> 11:10:13 2011 +0200
> @@ -349,6 +349,39 @@ static int check_platform_magic(struct d
>       return -ENODEV;
>  }
>  
> +#ifdef HAVE_OLDMEM_PFN_IS_RAM
> +static get_mem_type_supported;
> +static unsigned long prev_pfn;
> +static unsigned long prev_mem_type;
> +
> +static int xen_oldmem_pfn_is_ram(unsigned long pfn)
> +{
> +     struct xen_hvm_get_mem_type a;
> +     int ret;
> +
> +     if (get_mem_type_supported)

The name of the variable seems badly chosen, and the way you
coded this the code wouldn't be able to use the new interface
after migrating from an incapable hypervisor to a capable one.

> +             return -ENXIO;
> +
> +     if (pfn == prev_pfn)
> +             return prev_mem_type == HVMMEM_ram_rw;

Did you in fact observe many immediately subsequent calls with
the same input (i.e. is the caching really worthwhile)?

> +
> +     a.domid = DOMID_SELF;
> +     a.pfn = pfn;
> +     a.mem_type = 0;
> +     ret = HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a);
> +     if (ret) {
> +             get_mem_type_supported = ret;
> +             ret = -ENXIO;
> +     } else {
> +             ret = a.mem_type == HVMMEM_ram_rw;
> +             prev_pfn = pfn;
> +             prev_mem_type = a.mem_type;
> +     }
> +
> +     return ret;
> +}
> +#endif
> +
>  static int __devinit platform_pci_init(struct pci_dev *pdev,
>                                      const struct pci_device_id *ent)
>  {
> @@ -417,6 +450,9 @@ static int __devinit platform_pci_init(s
>       if ((ret = xen_panic_handler_init()))
>               goto out;
>  
> +#ifdef HAVE_OLDMEM_PFN_IS_RAM
> +     register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
> +#endif
>   out:
>       if (ret) {
>               pci_release_region(pdev, 0);
> diff -r 2f08c89b767d xen/arch/ia64/vmx/vmx_hypercall.c
> --- a/xen/arch/ia64/vmx/vmx_hypercall.c       Wed Apr 20 17:13:08 2011 +0100
> +++ b/xen/arch/ia64/vmx/vmx_hypercall.c       Mon May 02 11:10:13 2011 +0200
> @@ -217,6 +217,7 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
>          break;
>      }
>  
> +    case HVMOP_get_mem_type:
>      case HVMOP_set_mem_type:
>      case HVMOP_set_mem_access:
>      case HVMOP_get_mem_access:
> diff -r 2f08c89b767d xen/arch/x86/hvm/hvm.c
> --- a/xen/arch/x86/hvm/hvm.c  Wed Apr 20 17:13:08 2011 +0100
> +++ b/xen/arch/x86/hvm/hvm.c  Mon May 02 11:10:13 2011 +0200
> @@ -3676,6 +3676,33 @@ long do_hvm_op(unsigned long op, XEN_GUE
>          break;
>      }
>  
> +    case HVMOP_get_mem_type:
> +    {
> +        struct xen_hvm_get_mem_type a;
> +        struct domain *d;
> +        p2m_type_t t;
> +
> +        if ( copy_from_guest(&a, arg, 1) )
> +            return -EFAULT;
> +
> +        rc = rcu_lock_remote_target_domain_by_id(a.domid, &d);
> +        if ( rc != 0 )
> +            return rc;
> +
> +        rc = -EINVAL;
> +        if ( is_hvm_domain(d) )
> +        {
> +            gfn_to_mfn_unshare(p2m_get_hostp2m(d), a.pfn, &t, 0);
> +            if ( p2m_is_mmio(t) )
> +                a.mem_type =  HVMMEM_mmio_dm;
> +            else
> +                a.mem_type =  HVMMEM_ram_rw;
> +            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
> +        }
> +        rcu_unlock_domain(d);
> +        break;
> +    }
> +
>      case HVMOP_set_mem_type:
>      {
>          struct xen_hvm_set_mem_type a;
> diff -r 2f08c89b767d xen/include/public/hvm/hvm_op.h
> --- a/xen/include/public/hvm/hvm_op.h Wed Apr 20 17:13:08 2011 +0100
> +++ b/xen/include/public/hvm/hvm_op.h Mon May 02 11:10:13 2011 +0200
> @@ -76,6 +76,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_
>  /* Flushes all VCPU TLBs: @arg must be NULL. */
>  #define HVMOP_flush_tlbs          5
>  
> +typedef enum {
> +    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
> +    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
> +    HVMMEM_mmio_dm,            /* Reads and write go to the device model */
> +} hvmmem_type_t;
> +
>  /* Following tools-only interfaces may change in future. */
>  #if defined(__XEN__) || defined(__XEN_TOOLS__)
>  
> @@ -109,11 +115,6 @@ typedef struct xen_hvm_modified_memory x
>  DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
>  
>  #define HVMOP_set_mem_type    8
> -typedef enum {
> -    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
> -    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
> -    HVMMEM_mmio_dm,            /* Reads and write go to the device model */
> -} hvmmem_type_t;
>  /* Notify that a region of memory is to be treated in a specific way. */
>  struct xen_hvm_set_mem_type {
>      /* Domain to be updated. */
> @@ -223,6 +224,20 @@ struct xen_hvm_inject_trap {
>  typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
>  
> +#define HVMOP_get_mem_type    15
> +/* Return hvmmem_type_t for the specified pfn. */
> +struct xen_hvm_get_mem_type {
> +    /* Domain to be updated. */

... queried ...

Jan

> +    domid_t domid;
> +    /* OUT variable. */
> +    uint8_t mem_type;
> +    /* IN variable. */
> +    uint64_t pfn;
> +};
> +typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
> +
> +
>  #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
>  
>  #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxxxxxxxx 
> http://lists.xensource.com/xen-devel 



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel