To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] [xen-unstable] [BLK] tap: Allocate pages for foreign mappings individually rather than contiguously.
From: Xen patchbot-unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Fri, 06 Oct 2006 12:30:27 +0000
Delivery-date: Fri, 06 Oct 2006 05:33:00 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 3971f49ce5924b9f08f9379093287b83e47c82f9
# Parent  34b2348dfe4b6245123e70b1a7401a3c508555b5
[BLK] tap: Allocate pages for foreign mappings individually rather than contiguously.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c |  149 ++++++++---------------
 1 files changed, 57 insertions(+), 92 deletions(-)

diff -r 34b2348dfe4b -r 3971f49ce592 linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c  Fri Oct 06 08:09:52 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c  Fri Oct 06 10:30:43 2006 +0100
@@ -186,16 +186,18 @@ static inline unsigned int RTN_PEND_IDX(
 
 #define BLKBACK_INVALID_HANDLE (~0)
 
-typedef struct mmap_page {
-       unsigned long start;
-       struct page *mpage;
-} mmap_page_t;
-
-static mmap_page_t mmap_start[MAX_DYNAMIC_MEM];
+static struct page **foreign_pages[MAX_DYNAMIC_MEM];
+static inline unsigned long idx_to_kaddr(
+       unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
+{
+       unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
+       unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]);
+       return (unsigned long)pfn_to_kaddr(pfn);
+}
+
 static unsigned short mmap_alloc = 0;
 static unsigned short mmap_lock = 0;
 static unsigned short mmap_inuse = 0;
-static unsigned long *pending_addrs[MAX_DYNAMIC_MEM];
 
 /******************************************************************
  * GRANT HANDLES
@@ -726,63 +728,33 @@ static int req_increase(void)
 static int req_increase(void)
 {
        int i, j;
-       struct page *page;
-       int ret;
-
-       ret = -EINVAL;
+       struct page **pages = NULL;
+
        if (mmap_alloc >= MAX_PENDING_REQS || mmap_lock) 
-               goto done;
-
-#ifdef __ia64__
-       extern unsigned long alloc_empty_foreign_map_page_range(
-               unsigned long pages);
-       mmap_start[mmap_alloc].start = (unsigned long)
-               alloc_empty_foreign_map_page_range(mmap_pages);
-#else /* ! ia64 */
-       page = balloon_alloc_empty_page_range(mmap_pages);
-       ret = -ENOMEM;
-       if (page == NULL) {
-               printk("%s balloon_alloc_empty_page_range gave NULL\n", __FUNCTION__);
-               goto done;
-       }
-
-       /* Pin all of the pages. */
-       for (i=0; i<mmap_pages; i++)
-               get_page(&page[i]);
-
-       mmap_start[mmap_alloc].start = 
-               (unsigned long)pfn_to_kaddr(page_to_pfn(page));
-       mmap_start[mmap_alloc].mpage = page;
-
-#endif
-
-       pending_reqs[mmap_alloc]  = kzalloc(sizeof(pending_req_t) *
-                                       blkif_reqs, GFP_KERNEL);
-       pending_addrs[mmap_alloc] = kzalloc(sizeof(unsigned long) *
-                                       mmap_pages, GFP_KERNEL);
-
-       ret = -ENOMEM;
-       if (!pending_reqs[mmap_alloc] || !pending_addrs[mmap_alloc]) {
-               kfree(pending_reqs[mmap_alloc]);
-               kfree(pending_addrs[mmap_alloc]);
-               WPRINTK("%s: out of memory\n", __FUNCTION__);
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       ret = 0;
-
-       DPRINTK("%s: reqs=%d, pages=%d, mmap_vstart=0x%lx\n",
-               __FUNCTION__, blkif_reqs, mmap_pages, 
-              mmap_start[mmap_alloc].start);
-
-       BUG_ON(mmap_start[mmap_alloc].start == 0);
-
-       for (i = 0; i < mmap_pages; i++) 
-               pending_addrs[mmap_alloc][i] = 
-                       mmap_start[mmap_alloc].start + (i << PAGE_SHIFT);
-
-       for (i = 0; i < MAX_PENDING_REQS ; i++) {
+               return -EINVAL;
+
+       pending_reqs[mmap_alloc] = kzalloc(sizeof(pending_req_t) *
+                                          blkif_reqs, GFP_KERNEL);
+       pages = kmalloc(sizeof(pages[0]) * mmap_pages, GFP_KERNEL);
+
+       if (!pending_reqs[mmap_alloc] || !pages)
+               goto out_of_memory;
+
+       for (i = 0; i < mmap_pages; i++) {
+               pages[i] = balloon_alloc_empty_page();
+               if (!pages[i]) {
+                       while (--i >= 0)
+                               balloon_free_empty_page(pages[i]);
+                       goto out_of_memory;
+               }
+       }
+
+       foreign_pages[mmap_alloc] = pages;
+
+       DPRINTK("%s: reqs=%d, pages=%d\n",
+               __FUNCTION__, blkif_reqs, mmap_pages);
+
+       for (i = 0; i < MAX_PENDING_REQS; i++) {
                list_add_tail(&pending_reqs[mmap_alloc][i].free_list, 
                              &pending_free);
                pending_reqs[mmap_alloc][i].mem_idx = mmap_alloc;
@@ -793,30 +765,27 @@ static int req_increase(void)
 
        mmap_alloc++;
        DPRINTK("# MMAPs increased to %d\n",mmap_alloc);
-done:
-       return ret;
+       return 0;
+
+ out_of_memory:
+       kfree(pages);
+       kfree(pending_reqs[mmap_alloc]);
+       WPRINTK("%s: out of memory\n", __FUNCTION__);
+       return -ENOMEM;
 }
 
 static void mmap_req_del(int mmap)
 {
        int i;
-       struct page *page;
-
-       /*Spinlock already acquired*/
+
+       BUG_ON(!spin_is_locked(&pending_free_lock));
+
        kfree(pending_reqs[mmap]);
-       kfree(pending_addrs[mmap]);
-
-#ifdef __ia64__
-       /*Not sure what goes here yet!*/
-#else
-
-       /* Unpin all of the pages. */
-       page = mmap_start[mmap].mpage;
-       for (i=0; i<mmap_pages; i++)
-               put_page(&page[i]);
-
-       balloon_dealloc_empty_page_range(mmap_start[mmap].mpage, mmap_pages);
-#endif
+
+       for (i = 0; i < mmap_pages; i++)
+               balloon_free_empty_page(foreign_pages[mmap][i]);
+       kfree(foreign_pages[mmap]);
+       foreign_pages[mmap] = NULL;
 
        mmap_lock = 0;
        DPRINTK("# MMAPs decreased to %d\n",mmap_alloc);
@@ -887,7 +856,7 @@ static void fast_flush_area(pending_req_
        mmap_idx = req->mem_idx;
 
        for (i = 0; i < req->nr_pages; i++) {
-               kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start, k_idx, i);
+               kvaddr = idx_to_kaddr(mmap_idx, k_idx, i);
                uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);
 
                khandle = &pending_handle(mmap_idx, k_idx, i);
@@ -896,7 +865,7 @@ static void fast_flush_area(pending_req_
                        continue;
                }
                gnttab_set_unmap_op(&unmap[invcount], 
-                       MMAP_VADDR(mmap_start[mmap_idx].start, k_idx, i), 
+                                   idx_to_kaddr(mmap_idx, k_idx, i), 
                                    GNTMAP_host_map, khandle->kernel);
                invcount++;
 
@@ -1030,9 +999,8 @@ static int blktap_read_ufe_ring(tap_blki
                        struct page *pg;
                        int offset;
 
-                       uvaddr  = MMAP_VADDR(info->user_vstart, usr_idx, j);
-                       kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start, 
-                                           pending_idx, j);
+                       uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
+                       kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j);
 
                        pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
                        ClearPageReserved(pg);
@@ -1214,8 +1182,7 @@ static void dispatch_rw_block_io(blkif_t
                uint32_t flags;
 
                uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
-               kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start, 
-                                   pending_idx, i);
+               kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
                page = virt_to_page(kvaddr);
 
                sector = req->sector_number + (8*i);
@@ -1267,8 +1234,7 @@ static void dispatch_rw_block_io(blkif_t
                struct page *pg;
 
                uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
-               kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start, 
-                                   pending_idx, i/2);
+               kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2);
 
                if (unlikely(map[i].status != 0)) {
                        WPRINTK("invalid kernel buffer -- "
@@ -1298,8 +1264,7 @@ static void dispatch_rw_block_io(blkif_t
                unsigned long kvaddr;
                struct page *pg;
 
-               kvaddr = MMAP_VADDR(mmap_start[mmap_idx].start, 
-                                   pending_idx, i);
+               kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
                pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
                SetPageReserved(pg);
        }
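
For context when reading the hunks above: the old code reserved one contiguous empty page range per pool with balloon_alloc_empty_page_range() and derived kernel addresses by offsetting from mmap_start[].start, whereas the new code allocates each foreign-mapping page individually with balloon_alloc_empty_page(), keeps the struct page pointers in foreign_pages[mmap_idx], and resolves addresses through the new idx_to_kaddr() helper. Below is a minimal userspace sketch of that layout, not the kernel code itself: malloc()/free() stand in for the balloon allocator, a plain char * stands in for struct page *, and the pool-size constants are illustrative.

/*
 * Sketch of the per-page pool introduced by this patch (userspace
 * approximation): one pointer per (request, segment) slot instead of
 * one contiguous page range.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE                      4096
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11   /* value illustrative here */
#define MAX_PENDING_REQS               64   /* illustrative pool size */
#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)

static char **foreign_pages;  /* one entry per page, allocated individually */

/* Mirror of idx_to_kaddr(): (request, segment) -> that slot's page. */
static char *idx_to_addr(unsigned int req_idx, unsigned int sg_idx)
{
        unsigned int arr_idx = req_idx * BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
        return foreign_pages[arr_idx];
}

static int pool_alloc(void)
{
        int i;

        foreign_pages = calloc(MMAP_PAGES, sizeof(foreign_pages[0]));
        if (!foreign_pages)
                return -1;

        for (i = 0; i < MMAP_PAGES; i++) {
                foreign_pages[i] = malloc(PAGE_SIZE);
                if (!foreign_pages[i]) {
                        /* Roll back only the pages obtained so far. */
                        while (--i >= 0)
                                free(foreign_pages[i]);
                        free(foreign_pages);
                        foreign_pages = NULL;
                        return -1;
                }
        }
        return 0;
}

static void pool_free(void)
{
        int i;

        for (i = 0; i < MMAP_PAGES; i++)
                free(foreign_pages[i]);
        free(foreign_pages);
        foreign_pages = NULL;
}

int main(void)
{
        if (pool_alloc() != 0)
                return 1;
        /* Request 3, segment 5 lands in slot 3*11 + 5 = 38. */
        printf("req 3, seg 5 -> %p\n", (void *)idx_to_addr(3, 5));
        pool_free();
        return 0;
}

The per-page scheme lets the error path in req_increase() free exactly the pages allocated so far rather than tearing down a whole contiguous range, and it removes both the ia64-specific allocation branch and the pending_addrs[] bookkeeping array.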

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
