
Re: [Xen-devel] [PATCH] mem_event: use wait queue when ring is full


  • To: "Olaf Hering" <olaf@xxxxxxxxx>
  • From: "Andres Lagar-Cavilla" <andres@xxxxxxxxxxxxxxxx>
  • Date: Fri, 16 Dec 2011 09:04:18 -0800
  • Cc: xen-devel@xxxxxxxxxxxxxxxxxxx, tim@xxxxxxx, adin@xxxxxxxxxxxxxx
  • Delivery-date: Fri, 16 Dec 2011 17:04:39 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

> On Thu, Dec 15, Andres Lagar-Cavilla wrote:
>
>> > - How many requests should foreign vcpus place in the ring if the guest
>> >   has more vcpus than available slots in the ring? Just a single one so
>> >   that foreigners can also make some progress?
>> The idea is that foreign vcpus can place as many events as they want as
>> long as each guest vcpu that is not blocked on a mem event has room to
>> send one mem event. When we reach that border condition, no more foreign
>> mem events.
>>
>> The case for which there are way too many guest vcpus needs to be handled,
>> either by capping the max number of vcpus for domains using a mem event,
>> or by growing the ring size.
>
> Right now the ring is one page, so it can not hold more than 64 entries.
> If that ever changes, the accounting can be adjusted.
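
If I remember the ring macros right, that 64 is just (PAGE_SIZE minus the
sring header) divided by sizeof(mem_event_request_t), rounded down to a power
of two. So growing the ring would presumably mean a multi-page ring (or a
smaller request structure), not just bumping a constant.
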
>
>> > - Should access and paging have the same rules for accounting?
>> Absolutely.
>>
>> And both should use wait queues in extreme cases in which a guest vcpu
>> with a single action generates multiple memory events. Given that when we
>> hit a border condition the guest vcpu will place one event and be flagged
>> VPF_mem_event_paused (or whatever that flag is named), if a guest vcpu
>> generates another event when flagged, that's our cue for putting the
>> vcpu on a wait queue.
>
> An extra flag is not needed.
Can you elaborate? Which flag is not needed? And why?
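
To spell out the scheme I was describing above, roughly (this is only a
sketch of the intent, not code against any particular tree; med->pause_flag
stands for whatever per-ring pause flag the series ends up defining, and the
helper name is made up):

    /* Hypothetical helper, sketching the "flag as cue" idea. */
    static void guest_vcpu_put_mem_event(struct vcpu *v, struct domain *d,
                                         struct mem_event_domain *med,
                                         mem_event_request_t *req)
    {
        if ( test_bit(med->pause_flag, &v->pause_flags) )
        {
            /* The vcpu already placed one event for this action and is
             * marked paused; a second event while flagged is the cue to
             * park it on the wait queue instead of racing for a slot. */
            wait_event(med->wq, _mem_event_put_request(d, med, req));
            return;
        }

        /* First event from this vcpu: its reserved slot should be free,
         * so this is expected to succeed; then flag the vcpu as paused. */
        if ( _mem_event_put_request(d, med, req) )
        {
            set_bit(med->pause_flag, &v->pause_flags);
            vcpu_sleep_nosync(v);
        }
    }
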

>
> Below is an incremental patch (on top of v6) which does some accounting.
> Its just compile tested.
>
> Olaf
>
>
> diff -r 5d5d10e1568b xen/arch/x86/mm/mem_event.c
> --- a/xen/arch/x86/mm/mem_event.c
> +++ b/xen/arch/x86/mm/mem_event.c
> @@ -114,6 +114,19 @@ static int mem_event_enable(
>
>      med->pause_flag = pause_flag;
>
> +    /*
> +     * Configure ring accounting:
> +     * Each guest vcpu should be able to place at least one request.
> +     * If there are more vcpus than available slots in the ring, not all vcpus
> +     * can place requests in the ring anyway.  A minimum (arbitrary) number of
> +     * foreign requests will be allowed in this case.
> +     */
> +    if ( d->max_vcpus < RING_SIZE(&med->front_ring) )
> +        med->max_foreign = RING_SIZE(&med->front_ring) - d->max_vcpus;
> +    if ( med->max_foreign < 13 )
> +        med->max_foreign = 13;
Magic number! Why?

More generally, does this patch apply on top of a previous patch? What's
the context here?
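
In case it helps, here is how I read the accounting in that hunk, as a
standalone toy model (only the arithmetic mirrors the patch; RING_SZ, the
floor, and the example vcpu counts are assumptions for illustration):

    #include <stdio.h>

    /* Toy model of the proposed accounting, not actual Xen code: reserve
     * one ring slot per guest vcpu, give whatever is left to foreign
     * (tool-side) requests, with a floor of 13 foreign slots. */
    #define RING_SZ        64   /* assumed: one 4K ring page */
    #define FOREIGN_FLOOR  13   /* the magic number in question */

    static void account(unsigned int max_vcpus)
    {
        unsigned int max_foreign = 0, max_target;

        if ( max_vcpus < RING_SZ )
            max_foreign = RING_SZ - max_vcpus;
        if ( max_foreign < FOREIGN_FLOOR )
            max_foreign = FOREIGN_FLOOR;
        max_target = RING_SZ - max_foreign;

        printf("vcpus=%-3u -> max_foreign=%-2u max_target=%u\n",
               max_vcpus, max_foreign, max_target);
    }

    int main(void)
    {
        account(4);    /* small guest: most of the ring goes foreign  */
        account(60);   /* near the ring size: the floor kicks in      */
        account(128);  /* more vcpus than slots: only 51 target slots */
        return 0;
    }

If that reading is right, then with 128 vcpus we are back to the "more vcpus
than slots" problem, which is part of why I'd like to know where 13 comes from.
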

Thanks
Andres

> +    med->max_target = RING_SIZE(&med->front_ring) - med->max_foreign;
> +
>      init_waitqueue_head(&med->wq);
>
>      /* Wake any VCPUs waiting for the ring to appear */
> @@ -147,23 +160,28 @@ static int mem_event_disable(struct mem_
>
>  static int _mem_event_put_request(struct domain *d,
>                                    struct mem_event_domain *med,
> -                                  mem_event_request_t *req)
> +                                  mem_event_request_t *req,
> +                                  int *done)
>  {
>      mem_event_front_ring_t *front_ring;
> -    int free_req, claimed_req;
> +    int free_req, claimed_req, ret;
>      RING_IDX req_prod;
>
> +    if ( *done )
> +        return 1;
> +
>      mem_event_ring_lock(med);
>
> -    free_req = RING_FREE_REQUESTS(&med->front_ring);
> +    front_ring = &med->front_ring;
> +
>      /* Foreign requests must succeed because their vcpus can not sleep */
>      claimed_req = med->foreign_producers;
> +    free_req = RING_FREE_REQUESTS(front_ring);
>      if ( !free_req || ( current->domain == d && free_req <= claimed_req ) ) {
>          mem_event_ring_unlock(med);
>          return 0;
>      }
>
> -    front_ring = &med->front_ring;
>      req_prod = front_ring->req_prod_pvt;
>
>      if ( current->domain != d )
> @@ -176,9 +194,18 @@ static int _mem_event_put_request(struct
>      memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
>      req_prod++;
>
> +    ret = 1;
> +    *done = 1;
> +    free_req--;
> +
>      /* Update accounting */
>      if ( current->domain == d )
> +    {
>          med->target_producers--;
> +        /* Ring is full, no more requests from this vcpu, go to sleep */
> +        if ( free_req < med->max_target )
> +            ret = 0;
> +    }
>      else
>          med->foreign_producers--;
>
> @@ -190,19 +217,20 @@ static int _mem_event_put_request(struct
>
>      notify_via_xen_event_channel(d, med->xen_port);
>
> -    return 1;
> +    return ret;
>  }
>
>  void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
>                             mem_event_request_t *req)
>  {
> +    int done = 0;
>      /* Go to sleep if request came from guest */
>      if (current->domain == d) {
> -        wait_event(med->wq, _mem_event_put_request(d, med, req));
> +        wait_event(med->wq, _mem_event_put_request(d, med, req, &done));
>          return;
>      }
>      /* Ring was full anyway, unable to sleep in non-guest context */
> -    if (!_mem_event_put_request(d, med, req))
> +    if (!_mem_event_put_request(d, med, req, &done))
>          printk("Failed to put memreq: d %u t %x f %x gfn %lx\n", d->domain_id,
>                  req->type, req->flags, (unsigned long)req->gfn);
>  }
> @@ -341,7 +369,8 @@ int mem_event_claim_slot(struct domain *
>          med->target_producers++;
>          ring_full = 0;
>      }
> -    else if ( med->foreign_producers + med->target_producers + 1 < free_req )
> +    else if ( med->foreign_producers + med->target_producers < free_req &&
> +              med->foreign_producers < med->max_foreign )
>      {
>          med->foreign_producers++;
>          ring_full = 0;
> diff -r 5d5d10e1568b xen/include/xen/sched.h
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -184,8 +184,11 @@ struct mem_event_domain
>  {
>      /* ring lock */
>      spinlock_t ring_lock;
> -    unsigned short foreign_producers;
> -    unsigned short target_producers;
> +    /* The ring has 64 entries */
> +    unsigned char foreign_producers;
> +    unsigned char max_foreign;
> +    unsigned char target_producers;
> +    unsigned char max_target;
>      /* shared page */
>      mem_event_shared_page_t *shared_page;
>      /* shared ring page */
>



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

