
[Xen-devel] [PATCH] linux/blktap2: reduce TLB flush scope



c/s 885 added very coarse TLB flushing. Since these flushes always
follow single-page updates, single-page flushes (when available) are
sufficient.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>
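
For illustration, the scope change in blktap_map() boils down to the
following (fragments of the patched code below, not a standalone
example; pte, uvaddr, kvaddr and ring are as in device.c):

	/* before (c/s 885): one PTE written, the whole mm flushed */
	blktap_map_uaddr(ring->vma->vm_mm, uvaddr, pte_mkwrite(pte));
	flush_tlb_mm(ring->vma->vm_mm);

	/* after: flush only the user page whose PTE changed */
	blktap_map_uaddr(ring->vma->vm_mm, uvaddr, pte_mkwrite(pte));
	flush_tlb_page(ring->vma, uvaddr);

	/* same idea for the kernel mapping: a single-page flush,
	 * which on x86 becomes xen_invlpg_all() */
	blktap_map_uaddr(&init_mm, kvaddr, mk_pte(page, PAGE_KERNEL));
	flush_tlb_kernel_page(kvaddr);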

--- head-2009-05-29.orig/arch/i386/mm/hypervisor.c      2009-06-03 09:30:17.000000000 +0200
+++ head-2009-05-29/arch/i386/mm/hypervisor.c   2009-06-03 09:31:39.000000000 +0200
@@ -140,6 +140,7 @@ void xen_invlpg_all(unsigned long ptr)
        op.arg1.linear_addr = ptr & PAGE_MASK;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
+EXPORT_SYMBOL_GPL(xen_invlpg_all);
 
 void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
 {
@@ -151,6 +152,7 @@ void xen_invlpg_mask(const cpumask_t *ma
        set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
+EXPORT_SYMBOL_GPL(xen_invlpg_mask);
 
 #endif /* CONFIG_SMP */
 
--- head-2009-05-29.orig/drivers/xen/blktap2/device.c   2009-06-03 09:01:12.000000000 +0200
+++ head-2009-05-29/drivers/xen/blktap2/device.c        2009-06-03 09:40:38.000000000 +0200
@@ -194,6 +194,16 @@ blktap_umap_uaddr(struct vm_area_struct 
                                   PAGE_SIZE, blktap_umap_uaddr_fn, mm);
 }
 
+static inline void
+flush_tlb_kernel_page(unsigned long kvaddr)
+{
+#ifdef CONFIG_X86
+       xen_invlpg_all(kvaddr);
+#else
+       flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+#endif
+}
+
 static void
 blktap_device_end_dequeued_request(struct blktap_device *dev,
                                   struct request *req, int uptodate)
@@ -319,7 +329,7 @@ blktap_unmap(struct blktap *tap, struct 
                if (request->handles[i].kernel == INVALID_GRANT_HANDLE) {
                        kvaddr = request_to_kaddr(request, i);
                        blktap_umap_uaddr(&init_mm, kvaddr);
-                       flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+                       flush_tlb_kernel_page(kvaddr);
                        set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
                                            INVALID_P2M_ENTRY);
                }
@@ -550,9 +560,9 @@ blktap_map(struct blktap *tap,
 
        pte = mk_pte(page, ring->vma->vm_page_prot);
        blktap_map_uaddr(ring->vma->vm_mm, uvaddr, pte_mkwrite(pte));
-       flush_tlb_mm(ring->vma->vm_mm);
+       flush_tlb_page(ring->vma, uvaddr);
        blktap_map_uaddr(&init_mm, kvaddr, mk_pte(page, PAGE_KERNEL));
-       flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+       flush_tlb_kernel_page(kvaddr);
 
        set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT, pte_mfn(pte));
        request->handles[seg].kernel = INVALID_GRANT_HANDLE;
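
(Note: the EXPORT_SYMBOL_GPL additions in hypervisor.c are presumably
needed so the new flush_tlb_kernel_page() helper can reach
xen_invlpg_all() when blktap2 is built as a module; non-x86
architectures keep using flush_tlb_kernel_range() over a single page,
so behaviour there is unchanged.)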




_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

