
[Xen-devel] [PATCH v2 2/3] x86: rework hypercall argument translation area setup


  • To: "xen-devel" <xen-devel@xxxxxxxxxxxxx>
  • From: "Jan Beulich" <JBeulich@xxxxxxxx>
  • Date: Tue, 26 Feb 2013 16:06:43 +0000
  • Delivery-date: Tue, 26 Feb 2013 16:06:58 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>

... using the new per-domain mapping management functions, adding
destroy_perdomain_mapping() to the previously introduced pair.

Rather than using an order-1 Xen heap allocation, use (currently 2)
individual domain heap pages to populate space in the per-domain
mapping area.

Also fix a benign off-by-one mistake in is_compat_arg_xlat_range().
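
To illustrate the off-by-one, here is a standalone model (plain C; the
constants merely stand in for COMPAT_ARG_XLAT_VIRT_BASE and
COMPAT_ARG_XLAT_SIZE, and are not the real values): with a zero size, an
address one past the end of the area passed the old check.

#include <stdio.h>

#define XLAT_BASE 0x1000UL        /* illustrative stand-in for the real base */
#define XLAT_SIZE (2 * 0x1000UL)  /* two 4k pages, as COMPAT_ARG_XLAT_SIZE */

/* The old check: the first comparison used <=. */
static int in_range_old(unsigned long addr, unsigned long size)
{
    unsigned long off = addr - XLAT_BASE;
    return off <= XLAT_SIZE && off + size <= XLAT_SIZE;
}

/* The new check: the first comparison uses <. */
static int in_range_new(unsigned long addr, unsigned long size)
{
    unsigned long off = addr - XLAT_BASE;
    return off < XLAT_SIZE && off + size <= XLAT_SIZE;
}

int main(void)
{
    unsigned long one_past = XLAT_BASE + XLAT_SIZE;

    /* The old check accepts the one-past-the-end address for size 0;
     * benign, since callers pass non-zero sizes, but wrong nonetheless. */
    printf("old: %d, new: %d\n",
           in_range_old(one_past, 0), in_range_new(one_past, 0));
    return 0;
}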

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
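
A remark on the new destroy_perdomain_mapping() below: the second ASSERT()
checks that the whole range shares a single L3 entry by XOR-ing the first
and last byte addresses of the range - the result has a zero L3-index field
exactly when that holds. A standalone model of the trick (x86-64 shift
values; the virtual address is illustrative):

#include <assert.h>

#define PAGE_SHIFT 12
#define L3_SHIFT   30       /* one L3 entry maps 1GiB */
#define L3_MASK    0x1ffUL  /* 512 entries per table */

static unsigned long l3_table_offset(unsigned long va)
{
    return (va >> L3_SHIFT) & L3_MASK;
}

int main(void)
{
    unsigned long va = 0x8000000000UL;  /* illustrative, 1GiB-aligned */
    unsigned long nr = 512;             /* 512 pages = 2MiB, within 1GiB */

    /* Zero iff the first and last byte of the range share an L3 entry. */
    assert(!l3_table_offset(va ^ (va + nr * (1UL << PAGE_SHIFT) - 1)));
    return 0;
}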

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5657,6 +5657,59 @@ int create_perdomain_mapping(struct doma
     return rc;
 }
 
+void destroy_perdomain_mapping(struct domain *d, unsigned long va,
+                               unsigned int nr)
+{
+    const l3_pgentry_t *l3tab, *pl3e;
+
+    ASSERT(va >= PERDOMAIN_VIRT_START &&
+           va < PERDOMAIN_VIRT_SLOT(PERDOMAIN_SLOTS));
+    ASSERT(!l3_table_offset(va ^ (va + nr * PAGE_SIZE - 1)));
+
+    if ( !d->arch.perdomain_l3_pg )
+        return;
+
+    l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
+    pl3e = l3tab + l3_table_offset(va);
+
+    if ( l3e_get_flags(*pl3e) & _PAGE_PRESENT )
+    {
+        const l2_pgentry_t *l2tab = map_domain_page(l3e_get_pfn(*pl3e));
+        const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
+        unsigned int i = l1_table_offset(va);
+
+        while ( nr )
+        {
+            if ( l2e_get_flags(*pl2e) & _PAGE_PRESENT )
+            {
+                l1_pgentry_t *l1tab = map_domain_page(l2e_get_pfn(*pl2e));
+
+                for ( ; nr && i < L1_PAGETABLE_ENTRIES; --nr, ++i )
+                {
+                    if ( (l1e_get_flags(l1tab[i]) &
+                          (_PAGE_PRESENT | _PAGE_AVAIL0)) ==
+                         (_PAGE_PRESENT | _PAGE_AVAIL0) )
+                        free_domheap_page(l1e_get_page(l1tab[i]));
+                    l1tab[i] = l1e_empty();
+                }
+
+                unmap_domain_page(l1tab);
+            }
+            else if ( nr + i < L1_PAGETABLE_ENTRIES )
+                break;
+            else
+                nr -= L1_PAGETABLE_ENTRIES - i;
+
+            ++pl2e;
+            i = 0;
+        }
+
+        unmap_domain_page(l2tab);
+    }
+
+    unmap_domain_page(l3tab);
+}
+
 void free_perdomain_mappings(struct domain *d)
 {
     l3_pgentry_t *l3tab = __map_domain_page(d->arch.perdomain_l3_pg);
--- a/xen/arch/x86/usercopy.c
+++ b/xen/arch/x86/usercopy.c
@@ -6,8 +6,8 @@
  * Copyright 2002 Andi Kleen <ak@xxxxxxx>
  */
 
-#include <xen/config.h>
 #include <xen/lib.h>
+#include <xen/sched.h>
 #include <asm/uaccess.h>
 
 unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -832,27 +832,17 @@ void __init zap_low_mappings(void)
                      __PAGE_HYPERVISOR);
 }
 
-void *compat_arg_xlat_virt_base(void)
-{
-    return current->arch.compat_arg_xlat;
-}
-
 int setup_compat_arg_xlat(struct vcpu *v)
 {
-    unsigned int order = get_order_from_bytes(COMPAT_ARG_XLAT_SIZE);
-
-    v->arch.compat_arg_xlat = alloc_xenheap_pages(order,
-                                                  MEMF_node(vcpu_to_node(v)));
-
-    return v->arch.compat_arg_xlat ? 0 : -ENOMEM;
+    return create_perdomain_mapping(v->domain, ARG_XLAT_START(v),
+                                    PFN_UP(COMPAT_ARG_XLAT_SIZE),
+                                    NULL, NIL(struct page_info *));
 }
 
 void free_compat_arg_xlat(struct vcpu *v)
 {
-    unsigned int order = get_order_from_bytes(COMPAT_ARG_XLAT_SIZE);
-
-    free_xenheap_pages(v->arch.compat_arg_xlat, order);
-    v->arch.compat_arg_xlat = NULL;
+    destroy_perdomain_mapping(v->domain, ARG_XLAT_START(v),
+                              PFN_UP(COMPAT_ARG_XLAT_SIZE));
 }
 
 void cleanup_frame_table(struct mem_hotadd_info *info)
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -212,7 +212,7 @@ extern unsigned char boot_edid_info[128]
 /* Slot 260: per-domain mappings (including map cache). */
 #define PERDOMAIN_VIRT_START    (PML4_ADDR(260))
 #define PERDOMAIN_SLOT_MBYTES   (PML4_ENTRY_BYTES >> (20 + PAGETABLE_ORDER))
-#define PERDOMAIN_SLOTS         2
+#define PERDOMAIN_SLOTS         3
 #define PERDOMAIN_VIRT_SLOT(s)  (PERDOMAIN_VIRT_START + (s) * \
                                  (PERDOMAIN_SLOT_MBYTES << 20))
 /* Slot 261: machine-to-phys conversion table (256GB). */
@@ -311,6 +311,13 @@ extern unsigned long xen_phys_start;
 #define MAPCACHE_VIRT_END        (MAPCACHE_VIRT_START + \
                                   MAPCACHE_ENTRIES * PAGE_SIZE)
 
+/* Argument translation area. The third per-domain-mapping sub-area. */
+#define ARG_XLAT_VIRT_START      PERDOMAIN_VIRT_SLOT(2)
+/* Allow for at least one guard page (COMPAT_ARG_XLAT_SIZE being 2 pages): */
+#define ARG_XLAT_VA_SHIFT        (2 + PAGE_SHIFT)
+#define ARG_XLAT_START(v)        \
+    (ARG_XLAT_VIRT_START + ((v)->vcpu_id << ARG_XLAT_VA_SHIFT))
+
 #define ELFSIZE 64
 
 #define ARCH_CRASH_SAVE_VMCOREINFO
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -442,9 +442,6 @@ struct arch_vcpu
 
     /* A secondary copy of the vcpu time info. */
     XEN_GUEST_HANDLE(vcpu_time_info_t) time_info_guest;
-
-    void *compat_arg_xlat;
-
 } __cacheline_aligned;
 
 /* Shorthands to improve code legibility. */
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -579,6 +579,8 @@ int map_ldt_shadow_page(unsigned int);
 int create_perdomain_mapping(struct domain *, unsigned long va,
                              unsigned int nr, l1_pgentry_t **,
                              struct page_info **);
+void destroy_perdomain_mapping(struct domain *, unsigned long va,
+                               unsigned int nr);
 void free_perdomain_mappings(struct domain *);
 
 extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);
--- a/xen/include/asm-x86/x86_64/uaccess.h
+++ b/xen/include/asm-x86/x86_64/uaccess.h
@@ -1,16 +1,15 @@
 #ifndef __X86_64_UACCESS_H
 #define __X86_64_UACCESS_H
 
-#define COMPAT_ARG_XLAT_VIRT_BASE compat_arg_xlat_virt_base()
+#define COMPAT_ARG_XLAT_VIRT_BASE ((void *)ARG_XLAT_START(current))
 #define COMPAT_ARG_XLAT_SIZE      (2*PAGE_SIZE)
 struct vcpu;
-void *compat_arg_xlat_virt_base(void);
 int setup_compat_arg_xlat(struct vcpu *v);
 void free_compat_arg_xlat(struct vcpu *v);
 #define is_compat_arg_xlat_range(addr, size) ({                               \
     unsigned long __off;                                                      \
     __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \
-    (__off <= COMPAT_ARG_XLAT_SIZE) &&                                        \
+    (__off < COMPAT_ARG_XLAT_SIZE) &&                                         \
     ((__off + (unsigned long)(size)) <= COMPAT_ARG_XLAT_SIZE);                \
 })
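
For reference (not part of the patch): with the config.h constants above,
each vCPU gets a four-page-aligned slot in the new third per-domain
sub-area, of which only the first two pages (COMPAT_ARG_XLAT_SIZE) get
populated, leaving two unmapped guard pages before the next vCPU's slot.
A minimal sketch of the address arithmetic (the base address below is
illustrative, not the real PERDOMAIN_VIRT_SLOT(2) value):

#include <stdio.h>

#define PAGE_SHIFT        12
#define ARG_XLAT_VA_SHIFT (2 + PAGE_SHIFT)      /* four-page stride per vCPU */
#define XLAT_VIRT_START   0xffff820000000000UL  /* illustrative base */

static unsigned long arg_xlat_start(unsigned int vcpu_id)
{
    return XLAT_VIRT_START + ((unsigned long)vcpu_id << ARG_XLAT_VA_SHIFT);
}

int main(void)
{
    unsigned int id;

    for ( id = 0; id < 3; ++id )
        printf("vcpu %u: pages %#lx-%#lx, guard up to %#lx\n", id,
               arg_xlat_start(id),
               arg_xlat_start(id) + 2 * (1UL << PAGE_SHIFT) - 1,
               arg_xlat_start(id + 1) - 1);
    return 0;
}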
 


