
Re: [Xen-devel] Re: how to handle paged hypercall args?


  • To: Olaf Hering <olaf@xxxxxxxxx>
  • From: Keir Fraser <keir@xxxxxxx>
  • Date: Thu, 02 Dec 2010 10:22:57 +0000
  • Cc: xen-devel@xxxxxxxxxxxxxxxxxxx, Jan Beulich <JBeulich@xxxxxxxxxx>
  • Delivery-date: Thu, 02 Dec 2010 02:23:42 -0800
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>
  • Thread-index: AcuSCuM9ywIkU2AjLkKEvwz8H/SQEw==
  • Thread-topic: [Xen-devel] Re: how to handle paged hypercall args?

On 02/12/2010 10:11, "Olaf Hering" <olaf@xxxxxxxxx> wrote:

> On Thu, Nov 18, Keir Fraser wrote:
> 
>> I've done something along these lines now as xen-unstable:22402. It actually
>> seems to work okay! So you can go ahead and use waitqueues in __hvm_copy()
>> now.
> 
> This is my first attempt to do it.
> It crashed Xen on the very first try in a spectacular way. But it
> happened only once for some reason.
> See my other mail.

Firstly, the usage of waitqueues is broken. The waitqueue_head should be
shared with the code that pages in, so that vcpus can be *woken* at some point
after they start waiting. As it is, if a vcpu sleeps on its local
waitqueue_head, it will never wake. You might start with a single global
waitqueue_head and wake everyone on it every time a page (or maybe a batch of
pages) is paged in. A more sophisticated approach would be to hash page
numbers into an array of waitqueue_heads, to reduce false wakeups. This is
all similar to Linux waitqueues, by the way -- your current code would be
just as broken in Linux as it is in Xen.
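
As a sketch only (wake_up() here is assumed to be the wake-all primitive of
the waitqueue API introduced in c/s 22402; the real name may differ), the
shared, hashed scheme could look roughly like this:

#define PAGING_WQ_BUCKETS 64
static struct waitqueue_head paging_wq[PAGING_WQ_BUCKETS];

static struct waitqueue_head *paging_wq_for(unsigned long gfn)
{
    /* Hash the gfn so that a page-in only wakes vcpus plausibly waiting on
     * it; collisions just cause harmless spurious wakeups.  An array of
     * size 1 degenerates to the single global waitqueue_head. */
    return &paging_wq[gfn % PAGING_WQ_BUCKETS];
}

static void paging_wq_init(void)
{
    unsigned int i;
    for ( i = 0; i < PAGING_WQ_BUCKETS; i++ )
        init_waitqueue_head(&paging_wq[i]);
}

/* To be called by the paging-in path once a gfn's page is resident again. */
static void paging_wq_page_in_done(unsigned long gfn)
{
    wake_up(paging_wq_for(gfn));   /* wakes every sleeper on that bucket */
}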

Secondly, you should be able to hide the waiting inside __hvm_copy(). I
doubt you really need to touch the callers.
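
For example, something like the following (again only a sketch;
gfn_is_paged_out() is a placeholder for whatever predicate the p2m code
provides) could be called from __hvm_copy()'s paged-out branch, after it has
asked the pager for the page and in place of the early return of
HVMCOPY_gfn_paged_out, reusing paging_wq_for() from the sketch above:

/* Sleep until the paged-out gfn is resident again; on return __hvm_copy()
 * retries the translation for this gfn, so none of the hvm_copy_*()
 * wrappers needs its own waitqueue_head or retry loop. */
static void hvm_wait_for_paged_gfn(struct domain *d, unsigned long gfn)
{
    wait_event(*paging_wq_for(gfn),         /* shared head, see above     */
               !gfn_is_paged_out(d, gfn));  /* re-checked after each wake */
}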

 -- Keir

> 
> Olaf
> 
> --- xen-unstable.hg-4.1.22447.orig/xen/arch/x86/hvm/hvm.c
> +++ xen-unstable.hg-4.1.22447/xen/arch/x86/hvm/hvm.c
> @@ -1986,69 +1986,117 @@ static enum hvm_copy_result __hvm_copy(
>  enum hvm_copy_result hvm_copy_to_guest_phys(
>      paddr_t paddr, void *buf, int size)
>  {
> -    return __hvm_copy(buf, paddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, paddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_phys,
> -                      0);
> +                      0)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_copy_from_guest_phys(
>      void *buf, paddr_t paddr, int size)
>  {
> -    return __hvm_copy(buf, paddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, paddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_phys,
> -                      0);
> +                      0)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_copy_to_guest_virt(
>      unsigned long vaddr, void *buf, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | PFEC_write_access | pfec);
> +                      PFEC_page_present | PFEC_write_access | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_copy_from_guest_virt(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_fetch_from_guest_virt(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
>      if ( hvm_nx_enabled(current) )
>          pfec |= PFEC_insn_fetch;
> -    return __hvm_copy(buf, vaddr, size,
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_copy_to_guest_virt_nofault(
>      unsigned long vaddr, void *buf, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_to_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | PFEC_write_access | pfec);
> +                      PFEC_page_present | PFEC_write_access | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_copy_from_guest_virt_nofault(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
>      void *buf, unsigned long vaddr, int size, uint32_t pfec)
>  {
> +    enum hvm_copy_result res;
> +    struct waitqueue_head wq;
>      if ( hvm_nx_enabled(current) )
>          pfec |= PFEC_insn_fetch;
> -    return __hvm_copy(buf, vaddr, size,
> +    init_waitqueue_head(&wq);
> +
> +    wait_event(wq, (
> +    res = __hvm_copy(buf, vaddr, size,
>                        HVMCOPY_from_guest | HVMCOPY_no_fault | HVMCOPY_virt,
> -                      PFEC_page_present | pfec);
> +                      PFEC_page_present | pfec)) != HVMCOPY_gfn_paged_out);
> +    return res;
>  }
>  
>  unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)





 

