
[Xen-devel] [PATCH RFC] p2m: use GNTTABOP_unmap_and_duplicate if available



The new GNTTABOP_unmap_and_duplicate operation, unlike
GNTTABOP_unmap_and_replace, doesn't zero out the mapping passed in
new_addr. This allows us to perform batch unmaps in the p2m code
without requiring the use of a multicall.

Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
I don't currently have an NFS server (the one I had is out of action
due to SD card corruption) and I cannot set one up right now, so I've
only tested this with a raw image stored on a local disk.
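
For reference, here is a minimal sketch of how a batch is issued with
the new operation. It mirrors what m2p_remove_override_batch() below
does; the function name, the scratch_addr parameter and the trimmed
error handling are illustrative assumptions, not part of the patch:

#include <linux/slab.h>
#include <xen/interface/grant_table.h>
#include <asm/xen/hypercall.h>

/*
 * Unmap 'count' foreign mappings in one hypercall, making the freed
 * PTEs point at a scratch page instead of zeroing them.
 */
static int unmap_batch_example(struct gnttab_map_grant_ref *kmap_ops,
			       unsigned long scratch_addr, int count)
{
	struct gnttab_unmap_and_duplicate *ops;
	int i, ret;

	ops = kcalloc(count, sizeof(ops[0]), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ops[i].host_addr = kmap_ops[i].host_addr;
		ops[i].new_addr  = scratch_addr;  /* duplicate, don't zero */
		ops[i].handle    = kmap_ops[i].handle;
	}

	/* One hypercall for the whole batch, no multicall needed. */
	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_duplicate,
					ops, count);
	kfree(ops);
	return ret;
}

The availability check added to gnttab_init() simply issues the new op
with a zero count and falls back to the existing single-page unmap path
when the hypervisor reports an error.
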
---
 arch/x86/include/asm/xen/page.h     |    4 +-
 arch/x86/xen/p2m.c                  |  131 ++++++++++++++++++++++++++++++++++-
 drivers/xen/grant-table.c           |   24 ++----
 include/xen/interface/grant_table.h |   22 ++++++
 4 files changed, 162 insertions(+), 19 deletions(-)

diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 6aef9fb..ea1dce6 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -51,8 +51,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
 
 extern int m2p_add_override(unsigned long mfn, struct page *page,
                            struct gnttab_map_grant_ref *kmap_op);
-extern int m2p_remove_override(struct page *page,
-                               struct gnttab_map_grant_ref *kmap_op);
+extern int m2p_remove_override(struct page **pages,
+                               struct gnttab_map_grant_ref *kmap_ops, int count);
 extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
 
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 0d4ec35..e3d2668 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -154,6 +154,8 @@
 #include <linux/hash.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/hardirq.h>
 
 #include <asm/cache.h>
 #include <asm/setup.h>
@@ -853,6 +855,7 @@ bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 
 static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH);
 static DEFINE_SPINLOCK(m2p_override_lock);
+extern bool gnttab_supports_duplicate;
 
 static void __init m2p_override_init(void)
 {
@@ -933,7 +936,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
        return 0;
 }
 EXPORT_SYMBOL_GPL(m2p_add_override);
-int m2p_remove_override(struct page *page,
+int m2p_remove_override_single(struct page *page,
                struct gnttab_map_grant_ref *kmap_op)
 {
        unsigned long flags;
@@ -1034,6 +1037,132 @@ int m2p_remove_override(struct page *page,
 
        return 0;
 }
+
+int m2p_remove_override_batch(struct page **pages,
+               struct gnttab_map_grant_ref *kmap_ops, int count)
+{
+       struct gnttab_unmap_and_duplicate *unmap_ops = NULL;
+       unsigned long *mfn = kcalloc(count, sizeof(mfn[0]), GFP_KERNEL);
+       unsigned long flags;
+       unsigned long pfn;
+       unsigned long uninitialized_var(address);
+       unsigned level;
+       struct page *page;
+       pte_t *ptep = NULL;
+       int ret = 0, i;
+
+
+       for (i = 0; i < count; i++) {
+               page = pages[i];
+               pfn = page_to_pfn(page);
+               mfn[i] = get_phys_to_machine(pfn);
+               if (mfn[i] == INVALID_P2M_ENTRY || !(mfn[i] & FOREIGN_FRAME_BIT)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               if (!PageHighMem(page)) {
+                       address = (unsigned long)__va(pfn << PAGE_SHIFT);
+                       ptep = lookup_address(address, &level);
+
+                       if (WARN(ptep == NULL || level != PG_LEVEL_4K,
+                                               "m2p_remove_override: pfn %lx not mapped", pfn)) {
+                               ret = -EINVAL;
+                               goto out;
+                       }
+               }
+
+               spin_lock_irqsave(&m2p_override_lock, flags);
+               list_del(&page->lru);
+               spin_unlock_irqrestore(&m2p_override_lock, flags);
+               WARN_ON(!PagePrivate(page));
+               ClearPagePrivate(page);
+
+               set_phys_to_machine(pfn, page->index);
+       }
+
+       if (kmap_ops != NULL) {
+               struct page *scratch_page = get_balloon_scratch_page();
+               unsigned long scratch_page_address = (unsigned long)
+                                       __va(page_to_pfn(scratch_page) << PAGE_SHIFT);
+               int invcount = 0;
+
+               unmap_ops = kcalloc(count, sizeof(unmap_ops[0]), GFP_KERNEL);
+               if (!unmap_ops) {
+                       put_balloon_scratch_page();
+                       ret = -1;
+                       goto out;
+               }
+               for (i = 0; i < count; i++) {
+                       page = pages[i];
+                       if (!PageHighMem(page)) {
+                               unmap_ops[invcount].host_addr = kmap_ops[i].host_addr;
+                               unmap_ops[invcount].new_addr = scratch_page_address;
+                               unmap_ops[invcount].handle = kmap_ops[i].handle;
+                               invcount++;
+                       }
+               }
+               ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_duplicate, unmap_ops, invcount);
+               if (ret) {
+                       printk(KERN_WARNING "m2p_remove_override_batch: GNTTABOP_unmap_and_duplicate failed");
+                       put_balloon_scratch_page();
+                       ret = -1;
+                       goto out;
+               }
+               put_balloon_scratch_page();
+       }
+
+       for (i = 0; i < count; i++) {
+               /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
+                * somewhere in this domain, even before being added to the
+                * m2p_override (see comment above in m2p_add_override).
+                * If there are no other entries in the m2p_override corresponding
+                * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
+                * the original pfn (the one shared by the frontend): the backend
+                * cannot do any IO on this page anymore because it has been
+                * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
+                * the original pfn causes mfn_to_pfn(mfn) to return the frontend
+                * pfn again. */
+               pfn = page_to_pfn(pages[i]);
+               mfn[i] &= ~FOREIGN_FRAME_BIT;
+               ret = __get_user(pfn, &machine_to_phys_mapping[mfn[i]]);
+               if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn[i]) &&
+                               m2p_find_override(mfn[i]) == NULL)
+                       set_phys_to_machine(pfn, mfn[i]);
+       }
+
+out:
+       kfree(unmap_ops);
+       kfree(mfn);
+       return ret;
+}
+
+int m2p_remove_override(struct page **pages,
+               struct gnttab_map_grant_ref *kmap_ops, int count)
+{
+       int i, ret;
+       bool lazy = false;
+
+       if (gnttab_supports_duplicate) {
+               ret = m2p_remove_override_batch(pages, kmap_ops, count);
+               if (ret)
+                       return ret;
+       } else {
+               if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+                       arch_enter_lazy_mmu_mode();
+                       lazy = true;
+               }
+               for (i = 0; i < count; i++) {
+                       ret = m2p_remove_override_single(pages[i], kmap_ops ?
+                                              &kmap_ops[i] : NULL);
+                       if (ret)
+                               return ret;
+               }
+               if (lazy)
+                       arch_leave_lazy_mmu_mode();
+       }
+       return ret;
+}
 EXPORT_SYMBOL_GPL(m2p_remove_override);
 
 struct page *m2p_find_override(unsigned long mfn)
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index d5418c1..835f600 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -64,6 +64,7 @@ static int gnttab_free_count;
 static grant_ref_t gnttab_free_head;
 static DEFINE_SPINLOCK(gnttab_list_lock);
 unsigned long xen_hvm_resume_frames;
+bool gnttab_supports_duplicate;
 EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);
 
 static union {
@@ -934,8 +935,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                      struct gnttab_map_grant_ref *kmap_ops,
                      struct page **pages, unsigned int count)
 {
-       int i, ret;
-       bool lazy = false;
+       int ret;
 
        ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
        if (ret)
@@ -944,20 +944,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return ret;
 
-       if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
-               arch_enter_lazy_mmu_mode();
-               lazy = true;
-       }
-
-       for (i = 0; i < count; i++) {
-               ret = m2p_remove_override(pages[i], kmap_ops ?
-                                      &kmap_ops[i] : NULL);
-               if (ret)
-                       return ret;
-       }
-
-       if (lazy)
-               arch_leave_lazy_mmu_mode();
+       ret = m2p_remove_override(pages, kmap_ops, count);
 
        return ret;
 }
@@ -1247,6 +1234,11 @@ int gnttab_init(void)
        gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
        gnttab_free_head  = NR_RESERVED_ENTRIES;
 
+       if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_duplicate, NULL, 0))
+               gnttab_supports_duplicate = false;
+       else
+               gnttab_supports_duplicate = true;
+
        printk("Grant table initialized\n");
        return 0;
 
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index e40fae9..1f49dc1 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -428,6 +428,27 @@ struct gnttab_unmap_and_replace {
 DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_and_replace);
 
 /*
+ * GNTTABOP_unmap_and_duplicate: Destroy one or more grant-reference mappings
+ * tracked by <handle> but atomically replace the page table entry with one
+ * pointing to the machine address under <new_addr>.
+ * NOTES:
+ *  1. The call may fail in an undefined manner if either mapping is not
+ *     tracked by <handle>.
+ *  2. After executing a batch of unmaps, it is guaranteed that no stale
+ *     mappings will remain in the device or host TLBs.
+ */
+#define GNTTABOP_unmap_and_duplicate  12
+struct gnttab_unmap_and_duplicate {
+    /* IN parameters. */
+    uint64_t host_addr;
+    uint64_t new_addr;
+    grant_handle_t handle;
+    /* OUT parameters. */
+    int16_t  status;              /* => enum grant_status */
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_and_duplicate);
+
+/*
  * GNTTABOP_set_version: Request a particular version of the grant
  * table shared table structure.  This operation can only be performed
  * once in any given domain.  It must be performed before any grants
@@ -522,6 +543,7 @@ DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
 #define GNTST_bad_copy_arg    (-10) /* copy arguments cross page boundary.   */
 #define GNTST_address_too_big (-11) /* transfer page address too large.      */
 #define GNTST_eagain          (-12) /* Operation not done; try again.        */
+#define GNTST_enosys          (-13) /* Operation not implemented.            */
 
 #define GNTTABOP_error_msgs {                   \
     "okay",                                     \
-- 
1.7.7.5 (Apple Git-26)

