[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 4/4] linux/i386: utilize hypervisor highmem handling helpers



Assumes hypervisor interface headers have been sync-ed after the
hypervisor side patch was applied.

As usual, this was written and tested on 2.6.27.3 and made to apply to the
2.6.18 tree without further testing.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

Index: head-2008-10-24/arch/i386/mm/highmem-xen.c
===================================================================
--- head-2008-10-24.orig/arch/i386/mm/highmem-xen.c     2008-10-27 
11:56:39.000000000 +0100
+++ head-2008-10-24/arch/i386/mm/highmem-xen.c  2008-10-27 11:56:59.000000000 
+0100
@@ -151,9 +151,56 @@ struct page *kmap_atomic_to_page(void *p
        return pte_page(*pte);
 }
 
+void clear_highpage(struct page *page)
+{
+       void *kaddr;
+
+       if (likely(xen_feature(XENFEAT_highmem_assist))
+           && PageHighMem(page)) {
+               struct mmuext_op meo;
+
+               meo.cmd = MMUEXT_CLEAR_PAGE;
+               meo.arg1.mfn = pfn_to_mfn(page_to_pfn(page));
+               if (HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
+                       return;
+       }
+
+       kaddr = kmap_atomic(page, KM_USER0);
+       clear_page(kaddr);
+       kunmap_atomic(kaddr, KM_USER0);
+}
+
+void copy_highpage(struct page *to, struct page *from)
+{
+       void *vfrom, *vto;
+
+       if (likely(xen_feature(XENFEAT_highmem_assist))
+           && (PageHighMem(from) || PageHighMem(to))) {
+               unsigned long from_pfn = page_to_pfn(from);
+               unsigned long to_pfn = page_to_pfn(to);
+               struct mmuext_op meo;
+
+               meo.cmd = MMUEXT_COPY_PAGE;
+               meo.arg1.mfn = pfn_to_mfn(to_pfn);
+               meo.arg2.src_mfn = pfn_to_mfn(from_pfn);
+               if (mfn_to_pfn(meo.arg2.src_mfn) == from_pfn
+                   && mfn_to_pfn(meo.arg1.mfn) == to_pfn
+                   && HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
+                       return;
+       }
+
+       vfrom = kmap_atomic(from, KM_USER0);
+       vto = kmap_atomic(to, KM_USER1);
+       copy_page(vto, vfrom);
+       kunmap_atomic(vfrom, KM_USER0);
+       kunmap_atomic(vto, KM_USER1);
+}
+
 EXPORT_SYMBOL(kmap);
 EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kmap_atomic_pte);
 EXPORT_SYMBOL(kunmap_atomic);
 EXPORT_SYMBOL(kmap_atomic_to_page);
+EXPORT_SYMBOL(clear_highpage);
+EXPORT_SYMBOL(copy_highpage);
Index: head-2008-10-24/include/asm-i386/mach-xen/asm/highmem.h
===================================================================
--- head-2008-10-24.orig/include/asm-i386/mach-xen/asm/highmem.h        
2008-10-27 11:56:39.000000000 +0100
+++ head-2008-10-24/include/asm-i386/mach-xen/asm/highmem.h     2008-08-19 
09:59:39.000000000 +0200
@@ -77,6 +77,23 @@ struct page *kmap_atomic_to_page(void *p
 
 #define flush_cache_kmaps()    do { } while (0)
 
+void clear_highpage(struct page *);
+static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+       clear_highpage(page);
+}
+#define __HAVE_ARCH_CLEAR_HIGHPAGE
+#define __HAVE_ARCH_CLEAR_USER_HIGHPAGE
+
+void copy_highpage(struct page *to, struct page *from);
+static inline void copy_user_highpage(struct page *to, struct page *from,
+       unsigned long vaddr, struct vm_area_struct *vma)
+{
+       copy_highpage(to, from);
+}
+#define __HAVE_ARCH_COPY_HIGHPAGE
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_HIGHMEM_H */
Index: head-2008-10-24/include/linux/highmem.h
===================================================================
--- head-2008-10-24.orig/include/linux/highmem.h        2008-04-17 
04:49:44.000000000 +0200
+++ head-2008-10-24/include/linux/highmem.h     2008-08-19 09:59:39.000000000 
+0200
@@ -62,6 +62,7 @@ static inline void *kmap_atomic(struct p
 
 #endif /* CONFIG_HIGHMEM */
 
+#ifndef __HAVE_ARCH_CLEAR_USER_HIGHPAGE
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
@@ -69,6 +70,7 @@ static inline void clear_user_highpage(s
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
 }
+#endif
 
 #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 static inline struct page *
@@ -85,12 +87,14 @@ alloc_zeroed_user_highpage(struct vm_are
 }
 #endif
 
+#ifndef __HAVE_ARCH_CLEAR_HIGHPAGE
 static inline void clear_highpage(struct page *page)
 {
        void *kaddr = kmap_atomic(page, KM_USER0);
        clear_page(kaddr);
        kunmap_atomic(kaddr, KM_USER0);
 }
+#endif
 
 /*
  * Same but also flushes aliased cache contents to RAM.
@@ -111,6 +115,7 @@ static inline void memclear_highpage_flus
        kunmap_atomic(kaddr, KM_USER0);
 }
 
+#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
 static inline void copy_user_highpage(struct page *to, struct page *from, 
unsigned long vaddr)
 {
        char *vfrom, *vto;
@@ -123,7 +127,9 @@ static inline void copy_user_highpage(st
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
 }
+#endif
 
+#ifndef __HAVE_ARCH_COPY_HIGHPAGE
 static inline void copy_highpage(struct page *to, struct page *from)
 {
        char *vfrom, *vto;
@@ -135,4 +141,5 @@ static inline void copy_highpage(struct 
        kunmap_atomic(vto, KM_USER1);
 }
+#endif
 
 #endif /* _LINUX_HIGHMEM_H */



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.