[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 3/5] xen/blkback: Use xenballooned pages for mapped areas



For grant mappings to work correctly on HVM guests, the mapped pages must be
allocated with alloc_xenballooned_pages rather than alloc_page or alloc_vm_area.

Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
 drivers/block/xen-blkback/blkback.c |   20 +++++++++-----------
 drivers/block/xen-blkback/common.h  |    2 +-
 drivers/block/xen-blkback/xenbus.c  |   22 ++++++++++++----------
 3 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/drivers/block/xen-blkback/blkback.c 
b/drivers/block/xen-blkback/blkback.c
index 2330a9a..a0d3cbd 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -42,6 +42,7 @@
 
 #include <xen/events.h>
 #include <xen/page.h>
+#include <xen/balloon.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 #include "common.h"
@@ -778,14 +779,14 @@ static int __init xen_blkif_init(void)
                goto out_of_memory;
        }
 
-       for (i = 0; i < mmap_pages; i++) {
-               blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
-               blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
-               if (blkbk->pending_pages[i] == NULL) {
-                       rc = -ENOMEM;
-                       goto out_of_memory;
-               }
+       if (alloc_xenballooned_pages(mmap_pages, blkbk->pending_pages)) {
+               rc = -ENOMEM;
+               goto out_of_memory;
        }
+
+       for (i = 0; i < mmap_pages; i++)
+               blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
+
        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;
@@ -812,10 +813,7 @@ static int __init xen_blkif_init(void)
        kfree(blkbk->pending_reqs);
        kfree(blkbk->pending_grant_handles);
        if (blkbk->pending_pages) {
-               for (i = 0; i < mmap_pages; i++) {
-                       if (blkbk->pending_pages[i])
-                               __free_page(blkbk->pending_pages[i]);
-               }
+               free_xenballooned_pages(mmap_pages, blkbk->pending_pages);
                kfree(blkbk->pending_pages);
        }
        kfree(blkbk);
diff --git a/drivers/block/xen-blkback/common.h 
b/drivers/block/xen-blkback/common.h
index 00c57c9..944857e 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -139,7 +139,7 @@ struct xen_blkif {
        /* Comms information. */
        enum blkif_protocol     blk_protocol;
        union blkif_back_rings  blk_rings;
-       struct vm_struct        *blk_ring_area;
+       struct page     *blk_ring_page;
        /* The VBD attached to this interface. */
        struct xen_vbd          vbd;
        /* Back pointer to the backend_info. */
diff --git a/drivers/block/xen-blkback/xenbus.c 
b/drivers/block/xen-blkback/xenbus.c
index 5fd2010..49acc17 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -17,6 +17,7 @@
 #include <stdarg.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
+#include <xen/balloon.h>
 #include <xen/events.h>
 #include <xen/grant_table.h>
 #include "common.h"
@@ -123,8 +124,9 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 static int map_frontend_page(struct xen_blkif *blkif, unsigned long 
shared_page)
 {
        struct gnttab_map_grant_ref op;
+       void *addr = pfn_to_kaddr(page_to_pfn(blkif->blk_ring_page));
 
-       gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
+       gnttab_set_map_op(&op, (unsigned long)addr,
                          GNTMAP_host_map, shared_page, blkif->domid);
 
        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
@@ -144,8 +146,9 @@ static int map_frontend_page(struct xen_blkif *blkif, 
unsigned long shared_page)
 static void unmap_frontend_page(struct xen_blkif *blkif)
 {
        struct gnttab_unmap_grant_ref op;
+       void *addr = pfn_to_kaddr(page_to_pfn(blkif->blk_ring_page));
 
-       gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
+       gnttab_set_unmap_op(&op, (unsigned long)addr,
                            GNTMAP_host_map, blkif->shmem_handle);
 
        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
@@ -161,13 +164,12 @@ static int xen_blkif_map(struct xen_blkif *blkif, 
unsigned long shared_page,
        if (blkif->irq)
                return 0;
 
-       blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
-       if (!blkif->blk_ring_area)
+       if (alloc_xenballooned_pages(1, &blkif->blk_ring_page))
                return -ENOMEM;
 
        err = map_frontend_page(blkif, shared_page);
        if (err) {
-               free_vm_area(blkif->blk_ring_area);
+               free_xenballooned_pages(1, &blkif->blk_ring_page);
                return err;
        }
 
@@ -175,21 +177,21 @@ static int xen_blkif_map(struct xen_blkif *blkif, 
unsigned long shared_page,
        case BLKIF_PROTOCOL_NATIVE:
        {
                struct blkif_sring *sring;
-               sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
+               sring = pfn_to_kaddr(page_to_pfn(blkif->blk_ring_page));
                BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
                break;
        }
        case BLKIF_PROTOCOL_X86_32:
        {
                struct blkif_x86_32_sring *sring_x86_32;
-               sring_x86_32 = (struct blkif_x86_32_sring 
*)blkif->blk_ring_area->addr;
+               sring_x86_32 = pfn_to_kaddr(page_to_pfn(blkif->blk_ring_page));
                BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, 
PAGE_SIZE);
                break;
        }
        case BLKIF_PROTOCOL_X86_64:
        {
                struct blkif_x86_64_sring *sring_x86_64;
-               sring_x86_64 = (struct blkif_x86_64_sring 
*)blkif->blk_ring_area->addr;
+               sring_x86_64 = pfn_to_kaddr(page_to_pfn(blkif->blk_ring_page));
                BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, 
PAGE_SIZE);
                break;
        }
@@ -202,7 +204,7 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned 
long shared_page,
                                                    "blkif-backend", blkif);
        if (err < 0) {
                unmap_frontend_page(blkif);
-               free_vm_area(blkif->blk_ring_area);
+               free_xenballooned_pages(1, &blkif->blk_ring_page);
                blkif->blk_rings.common.sring = NULL;
                return err;
        }
@@ -229,7 +231,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
 
        if (blkif->blk_rings.common.sring) {
                unmap_frontend_page(blkif);
-               free_vm_area(blkif->blk_ring_area);
+               free_xenballooned_pages(1, &blkif->blk_ring_page);
                blkif->blk_rings.common.sring = NULL;
        }
 }
-- 
1.7.6.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.