# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 932aa2b425e842f9925d006f710771362425f009
# Parent ecc77b1c861209455e99f442a363e7a6333c05c9
Netfront/back always use grant tables now. This cleans up a
whole bunch of code.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
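
[Not part of the changeset: a condensed sketch of the transmit path this patch
switches to, using only the calls and fields that appear in the patched
netfront.c below; error handling and locking are omitted.]

    /* Frontend grants the backend read-only access to the packet page and
     * passes a (grant ref, offset) pair instead of a machine address. */
    ref = gnttab_claim_grant_reference(&np->gref_tx_head);
    BUG_ON((signed short)ref < 0);
    mfn = virt_to_mfn(skb->data);
    gnttab_grant_foreign_access_ref(ref, np->backend_id, mfn, GNTMAP_readonly);
    tx->gref   = np->grant_tx_ref[id] = ref;
    tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
    tx->size   = skb->len;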
diff -r ecc77b1c8612 -r 932aa2b425e8 linux-2.6-xen-sparse/arch/xen/Kconfig
--- a/linux-2.6-xen-sparse/arch/xen/Kconfig Thu Sep 22 13:05:36 2005
+++ b/linux-2.6-xen-sparse/arch/xen/Kconfig Thu Sep 22 13:36:20 2005
@@ -110,13 +110,6 @@
network interfaces within another guest OS. Unless you are building a
dedicated device-driver domain, or your master control domain
(domain 0), then you almost certainly want to say Y here.
-
-config XEN_NETDEV_GRANT
- bool "Grant table substrate for network drivers (DANGEROUS)"
- default n
- help
- This introduces the use of grant tables as a data exhange mechanism
- between the frontend and backend network drivers.
config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
bool "Pipelined transmitter (DANGEROUS)"
diff -r ecc77b1c8612 -r 932aa2b425e8 linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 Thu Sep 22 13:05:36 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32 Thu Sep 22 13:36:20 2005
@@ -19,7 +19,6 @@
# CONFIG_XEN_TPMDEV_BACKEND is not set
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
# CONFIG_XEN_BLKDEV_TAP is not set
# CONFIG_XEN_SHADOW_MODE is not set
diff -r ecc77b1c8612 -r 932aa2b425e8 linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 Thu Sep 22 13:05:36 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64 Thu Sep 22 13:36:20 2005
@@ -19,7 +19,6 @@
# CONFIG_XEN_TPMDEV_BACKEND is not set
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
# CONFIG_XEN_BLKDEV_TAP is not set
# CONFIG_XEN_SHADOW_MODE is not set
diff -r ecc77b1c8612 -r 932aa2b425e8 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32
--- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 Thu Sep 22 13:05:36 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32 Thu Sep 22 13:36:20 2005
@@ -16,7 +16,6 @@
# CONFIG_XEN_TPMDEV_BACKEND is not set
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
# CONFIG_XEN_BLKDEV_TAP is not set
# CONFIG_XEN_SHADOW_MODE is not set
diff -r ecc77b1c8612 -r 932aa2b425e8 linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 Thu Sep 22 13:05:36 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64 Thu Sep 22 13:36:20 2005
@@ -16,7 +16,6 @@
# CONFIG_XEN_TPMDEV_BACKEND is not set
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
# CONFIG_XEN_BLKDEV_TAP is not set
# CONFIG_XEN_SHADOW_MODE is not set
diff -r ecc77b1c8612 -r 932aa2b425e8 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32 Thu Sep 22 13:05:36 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32 Thu Sep 22 13:36:20 2005
@@ -19,7 +19,6 @@
# CONFIG_XEN_TPMDEV_BACKEND is not set
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
# CONFIG_XEN_BLKDEV_TAP is not set
# CONFIG_XEN_SHADOW_MODE is not set
diff -r ecc77b1c8612 -r 932aa2b425e8 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64 Thu Sep 22 13:05:36 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64 Thu Sep 22 13:36:20 2005
@@ -19,7 +19,6 @@
# CONFIG_XEN_TPMDEV_BACKEND is not set
CONFIG_XEN_BLKDEV_FRONTEND=y
CONFIG_XEN_NETDEV_FRONTEND=y
-CONFIG_XEN_NETDEV_GRANT=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
# CONFIG_XEN_BLKDEV_TAP is not set
# CONFIG_XEN_SHADOW_MODE is not set
diff -r ecc77b1c8612 -r 932aa2b425e8 linux-2.6-xen-sparse/drivers/xen/netback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h Thu Sep 22 13:05:36 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h Thu Sep 22 13:36:20 2005
@@ -62,9 +62,7 @@
/* Private indexes into shared ring. */
NETIF_RING_IDX rx_req_cons;
NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
-#ifdef CONFIG_XEN_NETDEV_GRANT
NETIF_RING_IDX rx_resp_prod_copy;
-#endif
NETIF_RING_IDX tx_req_cons;
NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
diff -r ecc77b1c8612 -r 932aa2b425e8 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Thu Sep 22 13:05:36 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Thu Sep 22 13:36:20 2005
@@ -23,7 +23,7 @@
static int make_rx_response(netif_t *netif,
u16 id,
s8 st,
- unsigned long addr,
+ u16 offset,
u16 size,
u16 csum_valid);
@@ -41,11 +41,7 @@
static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2+1];
static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
-#ifdef CONFIG_XEN_NETDEV_GRANT
static gnttab_transfer_t grant_rx_op[MAX_PENDING_REQS];
-#else
-static struct mmuext_op rx_mmuext[NETIF_RX_RING_SIZE];
-#endif
static unsigned char rx_notify[NR_EVENT_CHANNELS];
/* Don't currently gate addition of an interface to the tx scheduling list. */
@@ -72,14 +68,9 @@
static struct sk_buff_head tx_queue;
-#ifdef CONFIG_XEN_NETDEV_GRANT
static u16 grant_tx_ref[MAX_PENDING_REQS];
static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
-
-#else
-static multicall_entry_t tx_mcl[MAX_PENDING_REQS];
-#endif
static struct list_head net_schedule_list;
static spinlock_t net_schedule_list_lock;
@@ -108,7 +99,7 @@
return mfn;
}
-#ifndef CONFIG_XEN_NETDEV_GRANT
+#if 0
static void free_mfn(unsigned long mfn)
{
unsigned long flags;
@@ -180,18 +171,7 @@
dev_kfree_skb(skb);
skb = nskb;
}
-#ifdef CONFIG_XEN_NETDEV_GRANT
-#ifdef DEBUG_GRANT
- printk(KERN_ALERT "#### be_xmit: req_prod=%d req_cons=%d "
- "id=%04x gr=%04x\n",
- netif->rx->req_prod,
- netif->rx_req_cons,
- netif->rx->ring[
- MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.id,
- netif->rx->ring[
- MASK_NETIF_RX_IDX(netif->rx_req_cons)].req.gref);
-#endif
-#endif
+
netif->rx_req_cons++;
netif_get(netif);
@@ -232,11 +212,7 @@
u16 size, id, evtchn;
multicall_entry_t *mcl;
mmu_update_t *mmu;
-#ifdef CONFIG_XEN_NETDEV_GRANT
gnttab_transfer_t *gop;
-#else
- struct mmuext_op *mmuext;
-#endif
unsigned long vdata, old_mfn, new_mfn;
struct sk_buff_head rxq;
struct sk_buff *skb;
@@ -247,11 +223,7 @@
mcl = rx_mcl;
mmu = rx_mmu;
-#ifdef CONFIG_XEN_NETDEV_GRANT
gop = grant_rx_op;
-#else
- mmuext = rx_mmuext;
-#endif
while ((skb = skb_dequeue(&rx_queue)) != NULL) {
netif = netdev_priv(skb->dev);
@@ -277,25 +249,13 @@
pfn_pte_ma(new_mfn, PAGE_KERNEL), 0);
mcl++;
-#ifdef CONFIG_XEN_NETDEV_GRANT
gop->mfn = old_mfn;
gop->domid = netif->domid;
gop->ref = netif->rx->ring[
MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
netif->rx_resp_prod_copy++;
gop++;
-#else
- mcl->op = __HYPERVISOR_mmuext_op;
- mcl->args[0] = (unsigned long)mmuext;
- mcl->args[1] = 1;
- mcl->args[2] = 0;
- mcl->args[3] = netif->domid;
- mcl++;
-
- mmuext->cmd = MMUEXT_REASSIGN_PAGE;
- mmuext->arg1.mfn = old_mfn;
- mmuext++;
-#endif
+
mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
MMU_MACHPHYS_UPDATE;
mmu->val = __pa(vdata) >> PAGE_SHIFT;
@@ -303,9 +263,6 @@
__skb_queue_tail(&rxq, skb);
-#ifdef DEBUG_GRANT
- dump_packet('a', old_mfn, vdata);
-#endif
/* Filled the batch queue? */
if ((mcl - rx_mcl) == ARRAY_SIZE(rx_mcl))
break;
@@ -321,17 +278,12 @@
mcl->args[3] = DOMID_SELF;
mcl++;
-#ifdef CONFIG_XEN_NETDEV_GRANT
mcl[-2].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-#else
- mcl[-3].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-#endif
BUG_ON(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0);
mcl = rx_mcl;
-#ifdef CONFIG_XEN_NETDEV_GRANT
- if(HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op,
- gop - grant_rx_op)) {
+ if( HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op,
+ gop - grant_rx_op)) {
/*
* The other side has given us a bad grant ref, or has no
* headroom, or has gone away. Unfortunately the current grant
@@ -343,20 +295,14 @@
grant_rx_op[0].domid, gop - grant_rx_op);
}
gop = grant_rx_op;
-#else
- mmuext = rx_mmuext;
-#endif
+
while ((skb = __skb_dequeue(&rxq)) != NULL) {
netif = netdev_priv(skb->dev);
size = skb->tail - skb->data;
/* Rederive the machine addresses. */
new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
-#ifdef CONFIG_XEN_NETDEV_GRANT
old_mfn = 0; /* XXX Fix this so we can free_mfn() on error! */
-#else
- old_mfn = mmuext[0].arg1.mfn;
-#endif
atomic_set(&(skb_shinfo(skb)->dataref), 1);
skb_shinfo(skb)->nr_frags = 0;
skb_shinfo(skb)->frag_list = NULL;
@@ -369,27 +315,17 @@
/* Check the reassignment error code. */
status = NETIF_RSP_OKAY;
-#ifdef CONFIG_XEN_NETDEV_GRANT
if(gop->status != 0) {
DPRINTK("Bad status %d from grant transfer to DOM%u\n",
gop->status, netif->domid);
/* XXX SMH: should free 'old_mfn' here */
status = NETIF_RSP_ERROR;
}
-#else
- if (unlikely(mcl[1].result != 0)) {
- DPRINTK("Failed MMU update transferring to DOM%u\n",
- netif->domid);
- free_mfn(old_mfn);
- status = NETIF_RSP_ERROR;
- }
-#endif
evtchn = netif->evtchn;
id = netif->rx->ring[
MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
if (make_rx_response(netif, id, status,
- (old_mfn << PAGE_SHIFT) | /* XXX */
- ((unsigned long)skb->data & ~PAGE_MASK),
+ (unsigned long)skb->data & ~PAGE_MASK,
size, skb->proto_csum_valid) &&
(rx_notify[evtchn] == 0)) {
rx_notify[evtchn] = 1;
@@ -398,13 +334,8 @@
netif_put(netif);
dev_kfree_skb(skb);
-#ifdef CONFIG_XEN_NETDEV_GRANT
mcl++;
gop++;
-#else
- mcl += 2;
- mmuext += 1;
-#endif
}
while (notify_nr != 0) {
@@ -486,11 +417,7 @@
inline static void net_tx_action_dealloc(void)
{
-#ifdef CONFIG_XEN_NETDEV_GRANT
gnttab_unmap_grant_ref_t *gop;
-#else
- multicall_entry_t *mcl;
-#endif
u16 pending_idx;
PEND_RING_IDX dc, dp;
netif_t *netif;
@@ -498,7 +425,6 @@
dc = dealloc_cons;
dp = dealloc_prod;
-#ifdef CONFIG_XEN_NETDEV_GRANT
/*
* Free up any grants we have finished using
*/
@@ -513,26 +439,8 @@
}
BUG_ON(HYPERVISOR_grant_table_op(
GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops));
-#else
- mcl = tx_mcl;
- while (dc != dp) {
- pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
- MULTI_update_va_mapping(mcl, MMAP_VADDR(pending_idx),
- __pte(0), 0);
- mcl++;
- }
-
- mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
- BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
-
- mcl = tx_mcl;
-#endif
+
while (dealloc_cons != dp) {
-#ifndef CONFIG_XEN_NETDEV_GRANT
- /* The update_va_mapping() must not fail. */
- BUG_ON(mcl[0].result != 0);
-#endif
-
pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
netif = pending_tx_info[pending_idx].netif;
@@ -556,10 +464,6 @@
add_to_net_schedule_list_tail(netif);
netif_put(netif);
-
-#ifndef CONFIG_XEN_NETDEV_GRANT
- mcl++;
-#endif
}
}
@@ -572,21 +476,13 @@
netif_tx_request_t txreq;
u16 pending_idx;
NETIF_RING_IDX i;
-#ifdef CONFIG_XEN_NETDEV_GRANT
gnttab_map_grant_ref_t *mop;
-#else
- multicall_entry_t *mcl;
-#endif
unsigned int data_len;
if (dealloc_cons != dealloc_prod)
net_tx_action_dealloc();
-#ifdef CONFIG_XEN_NETDEV_GRANT
mop = tx_map_ops;
-#else
- mcl = tx_mcl;
-#endif
while ((NR_PENDING_REQS < MAX_PENDING_REQS) &&
!list_empty(&net_schedule_list)) {
/* Get a netif from the list with work to do. */
@@ -657,8 +553,7 @@
}
/* No crossing a page as the payload mustn't fragment. */
- if (unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >=
- PAGE_SIZE)) {
+ if (unlikely((txreq.offset + txreq.size) >= PAGE_SIZE)) {
DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n",
txreq.addr, txreq.size,
(txreq.addr &~PAGE_MASK) + txreq.size);
@@ -682,20 +577,12 @@
/* Packets passed to netif_rx() must have some headroom. */
skb_reserve(skb, 16);
-#ifdef CONFIG_XEN_NETDEV_GRANT
+
mop->host_addr = MMAP_VADDR(pending_idx);
mop->dom = netif->domid;
- mop->ref = txreq.addr >> PAGE_SHIFT;
+ mop->ref = txreq.gref;
mop->flags = GNTMAP_host_map | GNTMAP_readonly;
mop++;
-#else
- MULTI_update_va_mapping_otherdomain(
- mcl, MMAP_VADDR(pending_idx),
- pfn_pte_ma(txreq.addr >> PAGE_SHIFT, PAGE_KERNEL),
- 0, netif->domid);
-
- mcl++;
-#endif
memcpy(&pending_tx_info[pending_idx].req,
&txreq, sizeof(txreq));
@@ -706,17 +593,10 @@
pending_cons++;
-#ifdef CONFIG_XEN_NETDEV_GRANT
if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
break;
-#else
- /* Filled the batch queue? */
- if ((mcl - tx_mcl) == ARRAY_SIZE(tx_mcl))
- break;
-#endif
- }
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
+ }
+
if (mop == tx_map_ops)
return;
@@ -724,14 +604,6 @@
GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops));
mop = tx_map_ops;
-#else
- if (mcl == tx_mcl)
- return;
-
- BUG_ON(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0);
-
- mcl = tx_mcl;
-#endif
while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
pending_idx = *((u16 *)skb->data);
netif = pending_tx_info[pending_idx].netif;
@@ -739,7 +611,6 @@
sizeof(txreq));
/* Check the remap error code. */
-#ifdef CONFIG_XEN_NETDEV_GRANT
if (unlikely(mop->handle < 0)) {
printk(KERN_ALERT "#### netback grant fails\n");
make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
@@ -754,30 +625,13 @@
__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT);
grant_tx_ref[pending_idx] = mop->handle;
-#else
- if (unlikely(mcl[0].result != 0)) {
- DPRINTK("Bad page frame\n");
- make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
- netif_put(netif);
- kfree_skb(skb);
- mcl++;
- pending_ring[MASK_PEND_IDX(pending_prod++)] =
- pending_idx;
- continue;
- }
-
- phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >>
- PAGE_SHIFT] =
- FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
-#endif
data_len = (txreq.size > PKT_PROT_LEN) ?
PKT_PROT_LEN : txreq.size;
__skb_put(skb, data_len);
memcpy(skb->data,
- (void *)(MMAP_VADDR(pending_idx)|
- (txreq.addr&~PAGE_MASK)),
+ (void *)(MMAP_VADDR(pending_idx)|txreq.offset),
data_len);
if (data_len < txreq.size) {
/* Append the packet payload as a fragment. */
@@ -786,7 +640,7 @@
skb_shinfo(skb)->frags[0].size =
txreq.size - data_len;
skb_shinfo(skb)->frags[0].page_offset =
- (txreq.addr + data_len) & ~PAGE_MASK;
+ txreq.offset + data_len;
skb_shinfo(skb)->nr_frags = 1;
} else {
/* Schedule a response immediately. */
@@ -813,11 +667,7 @@
netif_rx(skb);
netif->dev->last_rx = jiffies;
-#ifdef CONFIG_XEN_NETDEV_GRANT
mop++;
-#else
- mcl++;
-#endif
}
}
@@ -874,7 +724,7 @@
static int make_rx_response(netif_t *netif,
u16 id,
s8 st,
- unsigned long addr,
+ u16 offset,
u16 size,
u16 csum_valid)
{
@@ -882,7 +732,7 @@
netif_rx_response_t *resp;
resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
- resp->addr = addr;
+ resp->offset = offset;
resp->csum_valid = csum_valid;
resp->id = id;
resp->status = (s16)size;
@@ -937,9 +787,6 @@
return 0;
IPRINTK("Initialising Xen netif backend.\n");
-#ifdef CONFIG_XEN_NETDEV_GRANT
- IPRINTK("Using grant tables.\n");
-#endif
/* We can increase reservation by this much in net_rx_action(). */
balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
diff -r ecc77b1c8612 -r 932aa2b425e8 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Thu Sep 22 13:05:36 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Thu Sep 22 13:36:20 2005
@@ -256,8 +256,8 @@
for (i = np->tx_resp_cons; i != prod; i++) {
id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
skb = np->tx_skbs[id];
-#ifdef CONFIG_XEN_NETDEV_GRANT
- if (unlikely(gnttab_query_foreign_access(np->grant_tx_ref[id]) != 0)) {
+ if (unlikely(gnttab_query_foreign_access(
+ np->grant_tx_ref[id]) != 0)) {
printk(KERN_ALERT "network_tx_buf_gc: warning "
"-- grant still in use by backend "
"domain.\n");
@@ -268,7 +268,6 @@
gnttab_release_grant_reference(
&np->gref_tx_head, np->grant_tx_ref[id]);
np->grant_tx_ref[id] = GRANT_INVALID_REF;
-#endif
ADD_ID_TO_FREELIST(np->tx_skbs, id);
dev_kfree_skb_irq(skb);
}
@@ -287,10 +286,7 @@
mb();
} while (prod != np->tx->resp_prod);
-#ifdef CONFIG_XEN_NETDEV_GRANT
out:
-#endif
-
if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
np->tx_full = 0;
if (np->user_state == UST_OPEN)
@@ -307,9 +303,7 @@
int i, batch_target;
NETIF_RING_IDX req_prod = np->rx->req_prod;
struct xen_memory_reservation reservation;
-#ifdef CONFIG_XEN_NETDEV_GRANT
grant_ref_t ref;
-#endif
if (unlikely(np->backend_state != BEST_CONNECTED))
return;
@@ -343,13 +337,11 @@
np->rx_skbs[id] = skb;
np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
-#ifdef CONFIG_XEN_NETDEV_GRANT
ref = gnttab_claim_grant_reference(&np->gref_rx_head);
BUG_ON((signed short)ref < 0);
np->grant_rx_ref[id] = ref;
gnttab_grant_foreign_transfer_ref(ref, np->backend_id);
np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
-#endif
rx_pfn_array[i] = virt_to_mfn(skb->head);
/* Remove this page from map before passing back to Xen. */
@@ -400,10 +392,8 @@
struct net_private *np = netdev_priv(dev);
netif_tx_request_t *tx;
NETIF_RING_IDX i;
-#ifdef CONFIG_XEN_NETDEV_GRANT
grant_ref_t ref;
unsigned long mfn;
-#endif
if (unlikely(np->tx_full)) {
printk(KERN_ALERT "%s: full queue wasn't stopped!\n",
@@ -439,18 +429,13 @@
tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
tx->id = id;
-#ifdef CONFIG_XEN_NETDEV_GRANT
ref = gnttab_claim_grant_reference(&np->gref_tx_head);
BUG_ON((signed short)ref < 0);
mfn = virt_to_mfn(skb->data);
gnttab_grant_foreign_access_ref(
ref, np->backend_id, mfn, GNTMAP_readonly);
- tx->addr = ref << PAGE_SHIFT;
- np->grant_tx_ref[id] = ref;
-#else
- tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
-#endif
- tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
+ tx->gref = np->grant_tx_ref[id] = ref;
+ tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
tx->size = skb->len;
tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
@@ -511,10 +496,8 @@
int work_done, budget, more_to_do = 1;
struct sk_buff_head rxq;
unsigned long flags;
-#ifdef CONFIG_XEN_NETDEV_GRANT
unsigned long mfn;
grant_ref_t ref;
-#endif
spin_lock(&np->rx_lock);
@@ -550,7 +533,6 @@
continue;
}
-#ifdef CONFIG_XEN_NETDEV_GRANT
ref = np->grant_rx_ref[rx->id];
if(ref == GRANT_INVALID_REF) {
@@ -568,17 +550,12 @@
np->grant_rx_ref[rx->id] = GRANT_INVALID_REF;
mfn = gnttab_end_foreign_transfer_ref(ref);
gnttab_release_grant_reference(&np->gref_rx_head, ref);
-#endif
skb = np->rx_skbs[rx->id];
ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
/* NB. We handle skb overflow later. */
-#ifdef CONFIG_XEN_NETDEV_GRANT
- skb->data = skb->head + rx->addr;
-#else
- skb->data = skb->head + (rx->addr & ~PAGE_MASK);
-#endif
+ skb->data = skb->head + rx->offset;
skb->len = rx->status;
skb->tail = skb->data + skb->len;
@@ -589,30 +566,14 @@
np->stats.rx_bytes += rx->status;
/* Remap the page. */
-#ifdef CONFIG_XEN_NETDEV_GRANT
mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-#else
- mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
-#endif
mmu->val = __pa(skb->head) >> PAGE_SHIFT;
mmu++;
-#ifdef CONFIG_XEN_NETDEV_GRANT
MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
pfn_pte_ma(mfn, PAGE_KERNEL), 0);
-#else
- MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
- pfn_pte_ma(rx->addr >> PAGE_SHIFT,
- PAGE_KERNEL), 0);
-#endif
mcl++;
-#ifdef CONFIG_XEN_NETDEV_GRANT
phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] = mfn;
-#else
- phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
- rx->addr >> PAGE_SHIFT;
-#endif
-
__skb_queue_tail(&rxq, skb);
}
@@ -773,16 +734,12 @@
tx = &np->tx->ring[requeue_idx++].req;
tx->id = i;
-#ifdef CONFIG_XEN_NETDEV_GRANT
gnttab_grant_foreign_access_ref(
np->grant_tx_ref[i], np->backend_id,
virt_to_mfn(np->tx_skbs[i]->data),
GNTMAP_readonly);
- tx->addr = np->grant_tx_ref[i] << PAGE_SHIFT;
-#else
- tx->addr = virt_to_mfn(skb->data) << PAGE_SHIFT;
-#endif
- tx->addr |= (unsigned long)skb->data & ~PAGE_MASK;
+ tx->gref = np->grant_tx_ref[i];
+ tx->offset = (unsigned long)skb->data & ~PAGE_MASK;
tx->size = skb->len;
np->stats.tx_bytes += skb->len;
@@ -795,12 +752,10 @@
/* Rebuild the RX buffer freelist and the RX ring itself. */
for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) {
if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET) {
-#ifdef CONFIG_XEN_NETDEV_GRANT
gnttab_grant_foreign_transfer_ref(
np->grant_rx_ref[i], np->backend_id);
np->rx->ring[requeue_idx].req.gref =
np->grant_rx_ref[i];
-#endif
np->rx->ring[requeue_idx].req.id = i;
requeue_idx++;
}
@@ -862,11 +817,9 @@
static void netif_uninit(struct net_device *dev)
{
-#ifdef CONFIG_XEN_NETDEV_GRANT
struct net_private *np = netdev_priv(dev);
gnttab_free_grant_references(np->gref_tx_head);
gnttab_free_grant_references(np->gref_rx_head);
-#endif
}
static struct ethtool_ops network_ethtool_ops =
@@ -911,19 +864,14 @@
/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
np->tx_skbs[i] = (void *)((unsigned long) i+1);
-#ifdef CONFIG_XEN_NETDEV_GRANT
np->grant_tx_ref[i] = GRANT_INVALID_REF;
-#endif
}
for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
np->rx_skbs[i] = (void *)((unsigned long) i+1);
-#ifdef CONFIG_XEN_NETDEV_GRANT
np->grant_rx_ref[i] = GRANT_INVALID_REF;
-#endif
- }
-
-#ifdef CONFIG_XEN_NETDEV_GRANT
+ }
+
/* A grant for every tx ring slot */
if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
&np->gref_tx_head) < 0) {
@@ -937,7 +885,6 @@
gnttab_free_grant_references(np->gref_tx_head);
goto exit;
}
-#endif
netdev->open = network_open;
netdev->hard_start_xmit = network_start_xmit;
@@ -971,10 +918,8 @@
return err;
exit_free_grefs:
-#ifdef CONFIG_XEN_NETDEV_GRANT
gnttab_free_grant_references(np->gref_tx_head);
gnttab_free_grant_references(np->gref_rx_head);
-#endif
goto exit;
}
@@ -1024,10 +969,8 @@
evtchn_op_t op = { .cmd = EVTCHNOP_alloc_unbound };
int err;
-#ifdef CONFIG_XEN_NETDEV_GRANT
info->tx_ring_ref = GRANT_INVALID_REF;
info->rx_ring_ref = GRANT_INVALID_REF;
-#endif
info->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
if (info->tx == 0) {
@@ -1045,7 +988,6 @@
memset(info->rx, 0, PAGE_SIZE);
info->backend_state = BEST_DISCONNECTED;
-#ifdef CONFIG_XEN_NETDEV_GRANT
err = gnttab_grant_foreign_access(info->backend_id,
virt_to_mfn(info->tx), 0);
if (err < 0) {
@@ -1061,11 +1003,6 @@
goto out;
}
info->rx_ring_ref = err;
-
-#else
- info->tx_ring_ref = virt_to_mfn(info->tx);
- info->rx_ring_ref = virt_to_mfn(info->rx);
-#endif
op.u.alloc_unbound.dom = info->backend_id;
err = HYPERVISOR_event_channel_op(&op);
@@ -1084,7 +1021,6 @@
free_page((unsigned long)info->rx);
info->rx = 0;
-#ifdef CONFIG_XEN_NETDEV_GRANT
if (info->tx_ring_ref != GRANT_INVALID_REF)
gnttab_end_foreign_access(info->tx_ring_ref, 0);
info->tx_ring_ref = GRANT_INVALID_REF;
@@ -1092,7 +1028,6 @@
if (info->rx_ring_ref != GRANT_INVALID_REF)
gnttab_end_foreign_access(info->rx_ring_ref, 0);
info->rx_ring_ref = GRANT_INVALID_REF;
-#endif
return err;
}
@@ -1106,7 +1041,6 @@
free_page((unsigned long)info->rx);
info->rx = 0;
-#ifdef CONFIG_XEN_NETDEV_GRANT
if (info->tx_ring_ref != GRANT_INVALID_REF)
gnttab_end_foreign_access(info->tx_ring_ref, 0);
info->tx_ring_ref = GRANT_INVALID_REF;
@@ -1114,7 +1048,6 @@
if (info->rx_ring_ref != GRANT_INVALID_REF)
gnttab_end_foreign_access(info->rx_ring_ref, 0);
info->rx_ring_ref = GRANT_INVALID_REF;
-#endif
unbind_evtchn_from_irqhandler(info->evtchn, info->netdev);
info->evtchn = 0;
diff -r ecc77b1c8612 -r 932aa2b425e8 xen/include/public/io/netif.h
--- a/xen/include/public/io/netif.h Thu Sep 22 13:05:36 2005
+++ b/xen/include/public/io/netif.h Thu Sep 22 13:36:20 2005
@@ -10,10 +10,11 @@
#define __XEN_PUBLIC_IO_NETIF_H__
typedef struct netif_tx_request {
- unsigned long addr; /* Machine address of packet. */
+ grant_ref_t gref; /* Reference to buffer page */
+ u16 offset:15; /* Offset within buffer page */
u16 csum_blank:1; /* Proto csum field blank? */
- u16 id:15; /* Echoed in response message. */
- u16 size; /* Packet size in bytes. */
+ u16 id; /* Echoed in response message. */
+ u16 size; /* Packet size in bytes. */
} netif_tx_request_t;
typedef struct netif_tx_response {
@@ -22,21 +23,15 @@
} netif_tx_response_t;
typedef struct {
- u16 id; /* Echoed in response message. */
-#ifdef CONFIG_XEN_NETDEV_GRANT
- grant_ref_t gref; /* 2: Reference to incoming granted frame */
-#endif
+ u16 id; /* Echoed in response message. */
+ grant_ref_t gref; /* Reference to incoming granted frame */
} netif_rx_request_t;
typedef struct {
-#ifdef CONFIG_XEN_NETDEV_GRANT
- u32 addr; /* 0: Offset in page of start of received packet */
-#else
- unsigned long addr; /* Machine address of packet. */
-#endif
- u16 csum_valid:1; /* Protocol checksum is validated? */
- u16 id:15;
- s16 status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
+ u16 offset; /* Offset in page of start of received packet */
+ u16 csum_valid; /* Protocol checksum is validated? */
+ u16 id;
+ s16 status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
} netif_rx_response_t;
/*
@@ -53,18 +48,8 @@
#define MASK_NETIF_RX_IDX(_i) ((_i)&(NETIF_RX_RING_SIZE-1))
#define MASK_NETIF_TX_IDX(_i) ((_i)&(NETIF_TX_RING_SIZE-1))
-#ifdef __x86_64__
-/*
- * This restriction can be lifted when we move netfront/netback to use
- * grant tables. This will remove memory_t fields from the above structures
- * and thus relax natural alignment restrictions.
- */
-#define NETIF_TX_RING_SIZE 128
-#define NETIF_RX_RING_SIZE 128
-#else
#define NETIF_TX_RING_SIZE 256
#define NETIF_RX_RING_SIZE 256
-#endif
/* This structure must fit in a memory page. */
typedef struct netif_tx_interface {
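
[Again not part of the changeset: on the receive side, the patched netback.c
now hands pages back to the guest with GNTTABOP_transfer instead of
MMUEXT_REASSIGN_PAGE; a condensed sketch using the fields from the hunks
above, with batching setup and error handling elided.]

    /* Queue one transfer per received packet, using the gref the frontend
     * placed in its rx request slot. */
    gop->mfn   = old_mfn;
    gop->domid = netif->domid;
    gop->ref   = netif->rx->ring[
        MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
    netif->rx_resp_prod_copy++;
    gop++;
    /* ... later, issue the whole batch in one hypercall ... */
    HYPERVISOR_grant_table_op(GNTTABOP_transfer, grant_rx_op,
                              gop - grant_rx_op);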