
Re: libxc: maintain a small, per-handle, cache of hypercall buffer memory (Was: Re: [Xen-devel] Xen 4.1 rc1 test report)


  • To: Ian Campbell <Ian.Campbell@xxxxxxxxxxxxx>
  • From: Haitao Shan <maillists.shan@xxxxxxxxx>
  • Date: Mon, 31 Jan 2011 11:06:11 +0800
  • Cc: "Zheng, Shaohui" <shaohui.zheng@xxxxxxxxx>, Keir Fraser <keir@xxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxx>
  • Delivery-date: Sun, 30 Jan 2011 19:08:47 -0800
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

Hi, Ian,

I cannot apply your patch. Was it developed against a newer version of
Xen than the one I have at hand?

Shan Haitao

2011/1/26 Ian Campbell <Ian.Campbell@xxxxxxxxxxxxx>:
> On Wed, 2011-01-26 at 00:47 +0000, Haitao Shan wrote:
>> I think it is basically the same idea as Keir introduced in 20841. I
>> guess this bug would happen on platforms which have a large number of
>> physical CPUs, not only on Intel EX systems.
>> If you can cook the patch, that would be great! Thanks!!
>
> Does this help?
>
> Ian.
>
> 8<---------------------------------------
>
> # HG changeset patch
> # User Ian Campbell <ian.campbell@xxxxxxxxxx>
> # Date 1296038761 0
> # Node ID 8b8b7e024f9d6f4c2ce1a4efbf38f07eeb460d91
> # Parent  e4e69622dc95037eab6740f79ecf9c1d05bca529
> libxc: maintain a small, per-handle, cache of hypercall buffer memory
>
> Constantly m(un)locking memory can have significant overhead on
> systems with large numbers of CPUs. This was previously fixed by
> 20841:fbe8f32fa257 but this was dropped during the transition to
> hypercall buffers.
>
> Introduce a small cache of single page hypercall buffer allocations
> which can be reused to avoid this overhead.
>
> Add some statistics tracking to the hypercall buffer allocations.
>
> The cache size of 4 was chosen based on these statistics: they
> indicated that 2 pages were sufficient to satisfy all concurrent
> single-page hypercall buffer allocations seen during "xl create", "xl
> shutdown" and "xl destroy" of both a PV and an HVM guest, so 4 pages
> should cover the majority of important cases.
>
> Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
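>
> [For illustration only, not part of the patch: a minimal sketch of the
> caller-side pattern the cache targets. The helper name and the use of a
> single XC_PAGE_SIZE buffer are hypothetical; the macros are the
> hypercall buffer ones already exported by xenctrl.h.]
>
> /* Hypothetical caller: previously each call mlock()ed and munlock()ed
>  * a fresh page; with the per-handle cache the same page is reused. */
> #include <string.h>
> #include <xenctrl.h>
>
> static int touch_single_page_buffer(xc_interface *xch)
> {
>     DECLARE_HYPERCALL_BUFFER(uint8_t, buf);
>
>     /* A sub-page request, i.e. exactly the "single page" case the
>      * cache is sized for. */
>     buf = xc_hypercall_buffer_alloc(xch, buf, XC_PAGE_SIZE);
>     if ( buf == NULL )
>         return -1;
>
>     memset(buf, 0, XC_PAGE_SIZE);
>     /* ... hand buf to a hypercall via set_xen_guest_handle() ... */
>
>     xc_hypercall_buffer_free(xch, buf);
>     return 0;
> }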
>
> diff -r e4e69622dc95 -r 8b8b7e024f9d tools/libxc/xc_hcall_buf.c
> --- a/tools/libxc/xc_hcall_buf.c        Wed Jan 26 10:22:42 2011 +0000
> +++ b/tools/libxc/xc_hcall_buf.c        Wed Jan 26 10:46:01 2011 +0000
> @@ -18,6 +18,7 @@
>
>  #include <stdlib.h>
>  #include <string.h>
> +#include <pthread.h>
>
>  #include "xc_private.h"
>  #include "xg_private.h"
> @@ -28,11 +29,108 @@ xc_hypercall_buffer_t XC__HYPERCALL_BUFF
>     HYPERCALL_BUFFER_INIT_NO_BOUNCE
>  };
>
> +pthread_mutex_t hypercall_buffer_cache_mutex = PTHREAD_MUTEX_INITIALIZER;
> +
> +static void hypercall_buffer_cache_lock(xc_interface *xch)
> +{
> +    if ( xch->flags & XC_OPENFLAG_NON_REENTRANT )
> +        return;
> +    pthread_mutex_lock(&hypercall_buffer_cache_mutex);
> +}
> +
> +static void hypercall_buffer_cache_unlock(xc_interface *xch)
> +{
> +    if ( xch->flags & XC_OPENFLAG_NON_REENTRANT )
> +        return;
> +    pthread_mutex_unlock(&hypercall_buffer_cache_mutex);
> +}
> +
> +static void *hypercall_buffer_cache_alloc(xc_interface *xch, int nr_pages)
> +{
> +    void *p = NULL;
> +
> +    hypercall_buffer_cache_lock(xch);
> +
> +    xch->hypercall_buffer_total_allocations++;
> +    xch->hypercall_buffer_current_allocations++;
> +    if ( xch->hypercall_buffer_current_allocations > xch->hypercall_buffer_maximum_allocations )
> +        xch->hypercall_buffer_maximum_allocations = xch->hypercall_buffer_current_allocations;
> +
> +    if ( nr_pages > 1 )
> +    {
> +        xch->hypercall_buffer_cache_toobig++;
> +    }
> +    else if ( xch->hypercall_buffer_cache_nr > 0 )
> +    {
> +        p = xch->hypercall_buffer_cache[--xch->hypercall_buffer_cache_nr];
> +        xch->hypercall_buffer_cache_hits++;
> +    }
> +    else
> +    {
> +        xch->hypercall_buffer_cache_misses++;
> +    }
> +
> +    hypercall_buffer_cache_unlock(xch);
> +
> +    return p;
> +}
> +
> +static int hypercall_buffer_cache_free(xc_interface *xch, void *p, int nr_pages)
> +{
> +    int rc = 0;
> +
> +    hypercall_buffer_cache_lock(xch);
> +
> +    xch->hypercall_buffer_total_releases++;
> +    xch->hypercall_buffer_current_allocations--;
> +
> +    if ( nr_pages == 1 && xch->hypercall_buffer_cache_nr < HYPERCALL_BUFFER_CACHE_SIZE )
> +    {
> +        xch->hypercall_buffer_cache[xch->hypercall_buffer_cache_nr++] = p;
> +        rc = 1;
> +    }
> +
> +    hypercall_buffer_cache_unlock(xch);
> +
> +    return rc;
> +}
> +
> +void xc__hypercall_buffer_cache_release(xc_interface *xch)
> +{
> +    void *p;
> +
> +    hypercall_buffer_cache_lock(xch);
> +
> +    DBGPRINTF("hypercall buffer: total allocations:%d total releases:%d",
> +              xch->hypercall_buffer_total_allocations,
> +              xch->hypercall_buffer_total_releases);
> +    DBGPRINTF("hypercall buffer: current allocations:%d maximum 
> allocations:%d",
> +              xch->hypercall_buffer_current_allocations,
> +              xch->hypercall_buffer_maximum_allocations);
> +    DBGPRINTF("hypercall buffer: cache current size:%d",
> +              xch->hypercall_buffer_cache_nr);
> +    DBGPRINTF("hypercall buffer: cache hits:%d misses:%d toobig:%d",
> +              xch->hypercall_buffer_cache_hits,
> +              xch->hypercall_buffer_cache_misses,
> +              xch->hypercall_buffer_cache_toobig);
> +
> +    while ( xch->hypercall_buffer_cache_nr > 0 )
> +    {
> +        p = xch->hypercall_buffer_cache[--xch->hypercall_buffer_cache_nr];
> +        xch->ops->u.privcmd.free_hypercall_buffer(xch, xch->ops_handle, p, 1);
> +    }
> +
> +    hypercall_buffer_cache_unlock(xch);
> +}
> +
>  void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages)
>  {
> -    void *p = xch->ops->u.privcmd.alloc_hypercall_buffer(xch, xch->ops_handle, nr_pages);
> +    void *p = hypercall_buffer_cache_alloc(xch, nr_pages);
>
> -    if (!p)
> +    if ( !p )
> +        p = xch->ops->u.privcmd.alloc_hypercall_buffer(xch, xch->ops_handle, nr_pages);
> +
> +    if ( !p )
>         return NULL;
>
>     b->hbuf = p;
> @@ -47,7 +145,8 @@ void xc__hypercall_buffer_free_pages(xc_
>     if ( b->hbuf == NULL )
>         return;
>
> -    xch->ops->u.privcmd.free_hypercall_buffer(xch, xch->ops_handle, b->hbuf, nr_pages);
> +    if ( !hypercall_buffer_cache_free(xch, b->hbuf, nr_pages) )
> +        xch->ops->u.privcmd.free_hypercall_buffer(xch, xch->ops_handle, b->hbuf, nr_pages);
>  }
>
>  struct allocation_header {
> diff -r e4e69622dc95 -r 8b8b7e024f9d tools/libxc/xc_private.c
> --- a/tools/libxc/xc_private.c  Wed Jan 26 10:22:42 2011 +0000
> +++ b/tools/libxc/xc_private.c  Wed Jan 26 10:46:01 2011 +0000
> @@ -126,6 +126,16 @@ static struct xc_interface_core *xc_inte
>     xch->error_handler   = logger;           xch->error_handler_tofree   = 0;
>     xch->dombuild_logger = dombuild_logger;  xch->dombuild_logger_tofree = 0;
>
> +    xch->hypercall_buffer_cache_nr = 0;
> +
> +    xch->hypercall_buffer_total_allocations = 0;
> +    xch->hypercall_buffer_total_releases = 0;
> +    xch->hypercall_buffer_current_allocations = 0;
> +    xch->hypercall_buffer_maximum_allocations = 0;
> +    xch->hypercall_buffer_cache_hits = 0;
> +    xch->hypercall_buffer_cache_misses = 0;
> +    xch->hypercall_buffer_cache_toobig = 0;
> +
>     xch->ops_handle = XC_OSDEP_OPEN_ERROR;
>     xch->ops = NULL;
>
> @@ -171,6 +181,8 @@ static int xc_interface_close_common(xc_
>  static int xc_interface_close_common(xc_interface *xch)
>  {
>     int rc = 0;
> +
> +    xc__hypercall_buffer_cache_release(xch);
>
>     xtl_logger_destroy(xch->dombuild_logger_tofree);
>     xtl_logger_destroy(xch->error_handler_tofree);
> diff -r e4e69622dc95 -r 8b8b7e024f9d tools/libxc/xc_private.h
> --- a/tools/libxc/xc_private.h  Wed Jan 26 10:22:42 2011 +0000
> +++ b/tools/libxc/xc_private.h  Wed Jan 26 10:46:01 2011 +0000
> @@ -75,6 +75,28 @@ struct xc_interface_core {
>     FILE *dombuild_logger_file;
>     const char *currently_progress_reporting;
>
> +    /*
> +     * A simple cache of unused, single page, hypercall buffers
> +     *
> +     * Protected by a global lock.
> +     */
> +#define HYPERCALL_BUFFER_CACHE_SIZE 4
> +    int hypercall_buffer_cache_nr;
> +    void *hypercall_buffer_cache[HYPERCALL_BUFFER_CACHE_SIZE];
> +
> +    /*
> +     * Hypercall buffer statistics. All protected by the global
> +     * hypercall_buffer_cache lock.
> +     */
> +    int hypercall_buffer_total_allocations;
> +    int hypercall_buffer_total_releases;
> +    int hypercall_buffer_current_allocations;
> +    int hypercall_buffer_maximum_allocations;
> +    int hypercall_buffer_cache_hits;
> +    int hypercall_buffer_cache_misses;
> +    int hypercall_buffer_cache_toobig;
> +
> +    /* Low level OS interface */
>     xc_osdep_info_t  osdep;
>     xc_osdep_ops    *ops; /* backend operations */
>     xc_osdep_handle  ops_handle; /* opaque data for xc_osdep_ops */
> @@ -156,6 +178,11 @@ int xc__hypercall_bounce_pre(xc_interfac
>  #define xc_hypercall_bounce_pre(_xch, _name) xc__hypercall_bounce_pre(_xch, HYPERCALL_BUFFER(_name))
>  void xc__hypercall_bounce_post(xc_interface *xch, xc_hypercall_buffer_t *bounce);
>  #define xc_hypercall_bounce_post(_xch, _name) xc__hypercall_bounce_post(_xch, HYPERCALL_BUFFER(_name))
> +
> +/*
> + * Release hypercall buffer cache
> + */
> +void xc__hypercall_buffer_cache_release(xc_interface *xch);
>
>  /*
>  * Hypercall interfaces.
>
>
>
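
For completeness, a minimal sketch (not from the patch) of the handle
lifecycle the cache hangs off. The cache lives in the xc_interface
handle, is filled as single-page buffers are freed, and is only drained
back to the OS when the handle is closed; opening the handle with
XC_OPENFLAG_NON_REENTRANT skips the global pthread mutex shown above.

#include <xenctrl.h>

int main(void)
{
    /* 0 == default flags: the handle may be used from several threads,
     * so the cache is protected by the global mutex added in the patch. */
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    if ( xch == NULL )
        return 1;

    /* ... "xl create" / "xl shutdown" / "xl destroy" style operations,
     * each of which allocates and frees single-page hypercall buffers ... */

    /* Closing the handle reaches xc__hypercall_buffer_cache_release(),
     * which logs the allocation statistics and frees any cached pages. */
    return xc_interface_close(xch);
}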

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

