For grant mappings to work when netback runs in an HVM guest, the
shared rings must be backed by real pages allocated with
alloc_xenballooned_pages rather than a bare kernel virtual area from
alloc_vm_area.
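
For reference, a minimal sketch of the allocate-and-map pattern this
patch switches to (the helper name, the single-page assumption and the
simplified error path are illustrative only, not part of this patch):

#include <xen/balloon.h>
#include <xen/grant_table.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/* Illustrative helper: grant-map one frontend page into a ballooned
 * backend page and return its kernel virtual address. */
static void *map_frontend_page(struct page **pg, grant_ref_t ref,
			       domid_t domid, grant_handle_t *handle)
{
	struct gnttab_map_grant_ref op;
	void *addr;

	/* A ballooned page has a valid frame for the hypervisor to
	 * replace; a bare vm_area does not provide one under HVM. */
	if (alloc_xenballooned_pages(1, pg))
		return NULL;

	addr = pfn_to_kaddr(page_to_pfn(*pg));
	gnttab_set_map_op(&op, (unsigned long)addr, GNTMAP_host_map,
			  ref, domid);

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();
	if (op.status != GNTST_okay) {
		free_xenballooned_pages(1, pg);
		return NULL;
	}

	*handle = op.handle;
	return addr;
}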
Signed-off-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
---
drivers/net/xen-netback/common.h | 4 ++--
drivers/net/xen-netback/netback.c | 34 ++++++++++++++++++++--------------
2 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 161f207..d5ee9d1 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -70,8 +70,8 @@ struct xenvif {
/* The shared rings and indexes. */
struct xen_netif_tx_back_ring tx;
struct xen_netif_rx_back_ring rx;
- struct vm_struct *tx_comms_area;
- struct vm_struct *rx_comms_area;
+ struct page *tx_comms_page;
+ struct page *rx_comms_page;
/* Frontend feature information. */
u8 can_sg:1;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index fd00f25..f35e07c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -42,6 +42,7 @@
#include <xen/events.h>
#include <xen/interface/memory.h>
+#include <xen/balloon.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
@@ -1578,9 +1579,11 @@ static int xen_netbk_kthread(void *data)
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
struct gnttab_unmap_grant_ref op;
+ void *addr;
if (vif->tx.sring) {
- gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr,
+ addr = pfn_to_kaddr(page_to_pfn(vif->tx_comms_page));
+ gnttab_set_unmap_op(&op, (unsigned long)addr,
GNTMAP_host_map, vif->tx_shmem_handle);
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
@@ -1588,16 +1591,17 @@ void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
}
if (vif->rx.sring) {
- gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr,
+ addr = pfn_to_kaddr(page_to_pfn(vif->rx_comms_page));
+ gnttab_set_unmap_op(&op, (unsigned long)addr,
GNTMAP_host_map, vif->rx_shmem_handle);
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
BUG();
}
- if (vif->rx_comms_area)
- free_vm_area(vif->rx_comms_area);
- if (vif->tx_comms_area)
- free_vm_area(vif->tx_comms_area);
+ if (vif->rx_comms_page)
+ free_xenballooned_pages(1, &vif->rx_comms_page);
+ if (vif->tx_comms_page)
+ free_xenballooned_pages(1, &vif->tx_comms_page);
}
int xen_netbk_map_frontend_rings(struct xenvif *vif,
@@ -1610,15 +1614,19 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif,
int err = -ENOMEM;
- vif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
- if (vif->tx_comms_area == NULL)
+ if (alloc_xenballooned_pages(1, &vif->tx_comms_page))
goto err;
- vif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
- if (vif->rx_comms_area == NULL)
+ txs = (struct xen_netif_tx_sring *)pfn_to_kaddr(page_to_pfn(
+ vif->tx_comms_page));
+
+ if (alloc_xenballooned_pages(1, &vif->rx_comms_page))
goto err;
- gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr,
+ rxs = (struct xen_netif_rx_sring *)pfn_to_kaddr(page_to_pfn(
+ vif->rx_comms_page));
+
+ gnttab_set_map_op(&op, (unsigned long)txs,
GNTMAP_host_map, tx_ring_ref, vif->domid);
if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
@@ -1635,10 +1643,9 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif,
vif->tx_shmem_ref = tx_ring_ref;
vif->tx_shmem_handle = op.handle;
- txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr;
BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
- gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr,
+ gnttab_set_map_op(&op, (unsigned long)rxs,
GNTMAP_host_map, rx_ring_ref, vif->domid);
if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
@@ -1656,7 +1663,6 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif,
vif->rx_shmem_handle = op.handle;
vif->rx_req_cons_peek = 0;
- rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr;
BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
return 0;
--
1.7.6.4