WARNING - OLD ARCHIVES

This is an archived copy of the Xen.org mailing list, which we have preserved to ensure that existing links to archives are not broken. The live archive, which contains the latest emails, can be found at http://lists.xen.org/
   
 
 

xen-devel

Re: [Xen-devel] [PATCH] privcmd: MMAPBATCH: Fix error handling/reporting

To: Ian Campbell <ian.campbell@xxxxxxxxxx>
Subject: Re: [Xen-devel] [PATCH] privcmd: MMAPBATCH: Fix error handling/reporting
From: Jeremy Fitzhardinge <jeremy@xxxxxxxx>
Date: Wed, 20 May 2009 12:33:38 -0700
Cc: xen-devel@xxxxxxxxxxxxxxxxxxx
Delivery-date: Wed, 20 May 2009 12:34:10 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
In-reply-to: <1242830730-3341-1-git-send-email-ian.campbell@xxxxxxxxxx>
List-help: <mailto:xen-devel-request@lists.xensource.com?subject=help>
List-id: Xen developer discussion <xen-devel.lists.xensource.com>
List-post: <mailto:xen-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/mailman/listinfo/xen-devel>, <mailto:xen-devel-request@lists.xensource.com?subject=unsubscribe>
References: <1242830676.22654.66.camel@xxxxxxxxxxxxxxxxxxxxxx> <1242830730-3341-1-git-send-email-ian.campbell@xxxxxxxxxx>
Sender: xen-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: Thunderbird 2.0.0.21 (X11/20090320)
Ian Campbell wrote:
On error IOCTL_PRIVCMD_MMAPBATCH is expected to set the top nibble of
the affected MFN and return 0. Currently it leaves the MFN unmodified
and returns the number of failures. Therefore:

- reimplement remap_domain_mfn_range() using direct
  HYPERVISOR_mmu_update() calls and small batches. The xen_set_domain_pte()
  interface does not report errors, and since some failures are
  expected/normal, using the multicall infrastructure is too noisy.
The noise is just for debugging; if failure is expected, then maybe we can extend it to be quiet about those cases.
- return 0 as expected
- write back the updated MFN list to mmapbatch->arr rather than over
  mmapbatch itself, which smashed the caller's stack.
Oops.
- remap_domain_mfn_range can be static.

With this change I am able to start an HVM domain.
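
To make the error convention above concrete: after this fix the ioctl returns 0 even when individual frames could not be mapped, and the caller discovers which ones failed by scanning the MFN array it passed in. Below is a minimal userspace-side sketch of that check. The struct mirrors privcmd_mmapbatch and the 0xf0000000 marker matches the value used in mmap_batch_fn in the patch below; the stand-in typedefs, the ioctl define and the helper name map_foreign_frames are reproduced here only to keep the sketch self-contained, so treat them as illustrative rather than authoritative.

#include <stdint.h>
#include <sys/ioctl.h>

/* Stand-ins for the definitions normally provided by xen's privcmd header;
 * reproduced here only so the sketch is self-contained. */
typedef unsigned long xen_pfn_t;

struct privcmd_mmapbatch {
        int num;                /* number of pages to map */
        uint16_t dom;           /* target domain (domid_t) */
        uint64_t addr;          /* virtual address to map at */
        xen_pfn_t *arr;         /* MFNs in; top nibble set per failed frame */
};
#define IOCTL_PRIVCMD_MMAPBATCH \
        _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch))

/* fd is an open handle on the privcmd node (/proc/xen/privcmd here).
 * Returns the number of frames that failed to map, or -1 on ioctl error. */
static int map_foreign_frames(int fd, uint16_t dom, uint64_t va,
                              xen_pfn_t *arr, int num)
{
        struct privcmd_mmapbatch batch = {
                .num = num, .dom = dom, .addr = va, .arr = arr,
        };
        int i, failed = 0;

        /* With the fix the ioctl returns 0 even when some frames could not
         * be mapped; the failures are flagged per entry in arr[] instead. */
        if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &batch) < 0)
                return -1;

        for (i = 0; i < num; i++)
                if (arr[i] & 0xf0000000U)  /* matches mmap_batch_fn below */
                        failed++;

        return failed;
}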

OK, good.  I've pulled this into xen-tip/dom0/xenfs.

Thanks,
   J

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Cc: Jeremy Fitzhardinge <jeremy@xxxxxxxx>
---
 drivers/xen/xenfs/privcmd.c |   56 +++++++++++++++++++++++++++++++-----------
 1 files changed, 41 insertions(+), 15 deletions(-)

diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
index 263f622..110b062 100644
--- a/drivers/xen/xenfs/privcmd.c
+++ b/drivers/xen/xenfs/privcmd.c
@@ -31,14 +31,16 @@
 #include <xen/features.h>
 #include <xen/page.h>
+#define REMAP_BATCH_SIZE 16
+
 #ifndef HAVE_ARCH_PRIVCMD_MMAP
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
 #endif
struct remap_data {
        unsigned long mfn;
-       unsigned domid;
        pgprot_t prot;
+       struct mmu_update *mmu_update;
 };
static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
@@ -47,17 +49,23 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
        struct remap_data *rmd = data;
        pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
-       xen_set_domain_pte(ptep, pte, rmd->domid);
+       rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
+       rmd->mmu_update->val = pte_val_ma(pte);
+       rmd->mmu_update++;
        return 0;
 }
-int remap_domain_mfn_range(struct vm_area_struct *vma, unsigned long addr,
-                          unsigned long mfn, unsigned long size,
-                          pgprot_t prot, unsigned domid)
+static int remap_domain_mfn_range(struct vm_area_struct *vma,
+                                 unsigned long addr,
+                                 unsigned long mfn, int nr,
+                                 pgprot_t prot, unsigned domid)
 {
        struct remap_data rmd;
-       int err;
+       struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+       int batch;
+       unsigned long range;
+       int err = 0;
        prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);

@@ -65,10 +73,29 @@ int remap_domain_mfn_range(struct vm_area_struct *vma, unsigned long addr,
        rmd.mfn = mfn;
        rmd.prot = prot;
-       rmd.domid = domid;
-       err = apply_to_page_range(vma->vm_mm, addr, size,
-                                 remap_area_mfn_pte_fn, &rmd);
+       while (nr) {
+               batch = min(REMAP_BATCH_SIZE, nr);
+               range = (unsigned long)batch << PAGE_SHIFT;
+
+               rmd.mmu_update = mmu_update;
+               err = apply_to_page_range(vma->vm_mm, addr, range,
+                                         remap_area_mfn_pte_fn, &rmd);
+               if (err)
+                       goto out;
+
+               err = -EFAULT;
+               if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
+                       goto out;
+
+               nr -= batch;
+               addr += range;
+       }
+
+       err = 0;
+out:
+
+       flush_tlb_all();
        return err;
 }
@@ -157,7 +184,7 @@ static int traverse_pages(unsigned nelem, size_t size,
 {
        void *pagedata;
        unsigned pageidx;
-       int ret;
+       int ret = 0;
        BUG_ON(size > PAGE_SIZE);

@@ -207,8 +234,7 @@ static int mmap_mfn_range(void *data, void *state)
        rc = remap_domain_mfn_range(vma,
                                    msg->va & PAGE_MASK,
-                                   msg->mfn,
-                                   msg->npages << PAGE_SHIFT,
+                                   msg->mfn, msg->npages,
                                    vma->vm_page_prot,
                                    st->domain);
        if (rc < 0)
@@ -289,7 +315,7 @@ static int mmap_batch_fn(void *data, void *state)
        struct mmap_batch_state *st = state;
        if (remap_domain_mfn_range(st->vma, st->va & PAGE_MASK,
-                                  *mfnp, PAGE_SIZE,
+                                  *mfnp, 1,
                                   st->vma->vm_page_prot, st->domain) < 0) {
                *mfnp |= 0xf0000000U;
                st->err++;
@@ -361,9 +387,9 @@ static long privcmd_ioctl_mmap_batch(void __user *udata)
        up_write(&mm->mmap_sem);
        if (state.err > 0) {
-               ret = state.err;
+               ret = 0;
-               state.user = udata;
+               state.user = m.arr;
                traverse_pages(m.num, sizeof(xen_pfn_t),
                               &pagelist,
                               mmap_return_errors, &state);
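
To illustrate the batching the patch introduces: remap_domain_mfn_range() now walks the region REMAP_BATCH_SIZE pages at a time, apply_to_page_range() fills one mmu_update entry per PTE, and each batch is flushed with a single HYPERVISOR_mmu_update hypercall. The standalone sketch below reproduces only the batch/range arithmetic of that loop; PAGE_SHIFT is assumed to be 12 (4 KiB pages, as on x86) and show_batches is an illustrative name, not code from the patch.

#include <stdio.h>

#define REMAP_BATCH_SIZE 16
#define PAGE_SHIFT       12     /* assumption: 4 KiB pages */

/* Mirror of the batch/range arithmetic in the new remap_domain_mfn_range():
 * process 'nr' pages in chunks of at most REMAP_BATCH_SIZE, advancing the
 * virtual address by the byte size of each chunk. */
static void show_batches(unsigned long addr, int nr)
{
        while (nr) {
                int batch = nr < REMAP_BATCH_SIZE ? nr : REMAP_BATCH_SIZE;
                unsigned long range = (unsigned long)batch << PAGE_SHIFT;

                /* In the patch this is where the 'batch' mmu_update entries
                 * are built and HYPERVISOR_mmu_update() is issued once. */
                printf("map %2d pages at 0x%lx (%lu bytes)\n",
                       batch, addr, range);

                nr   -= batch;
                addr += range;
        }
}

int main(void)
{
        /* 35 pages starting at an arbitrary address: batches of 16, 16, 3. */
        show_batches(0x100000UL, 35);
        return 0;
}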


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel