[Xen-changelog] Move net split driver onto ring.h generic rings.

To: xen-changelog@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-changelog] Move net split driver onto ring.h generic rings.
From: Xen patchbot -unstable <patchbot-unstable@xxxxxxxxxxxxxxxxxxx>
Date: Thu, 01 Dec 2005 12:32:07 +0000
Delivery-date: Thu, 01 Dec 2005 12:32:21 +0000
Envelope-to: www-data@xxxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-changelog-request@lists.xensource.com?subject=help>
List-id: BK change log <xen-changelog.lists.xensource.com>
List-post: <mailto:xen-changelog@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-changelog>, <mailto:xen-changelog-request@lists.xensource.com?subject=unsubscribe>
Reply-to: xen-devel@xxxxxxxxxxxxxxxxxxx
Sender: xen-changelog-bounces@xxxxxxxxxxxxxxxxxxx
# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID c55ac1858bbce73be3be7ee436765cf25d33bdbf
# Parent  310746cf9f27512be6520cf60c083e1726a26106
Move net split driver onto ring.h generic rings.

No backend should call SHARED_RING_INIT(): that's the frontend's job.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
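
To illustrate the division of labour this change enforces, here is a minimal
sketch using the generic macros from xen/include/public/io/ring.h (the
mydev_* names are hypothetical placeholders, not part of this patch):

    #include "ring.h"   /* assumed include of the generic ring macros */

    typedef struct { uint32_t id; } mydev_request_t;   /* hypothetical */
    typedef struct { uint32_t id; } mydev_response_t;  /* hypothetical */
    DEFINE_RING_TYPES(mydev, mydev_request_t, mydev_response_t);

    /*
     * Frontend: allocates the shared page, so it alone resets the shared
     * producer/consumer indexes before granting the page to the backend.
     */
    static void frontend_setup(mydev_sring_t *sring, mydev_front_ring_t *front)
    {
        SHARED_RING_INIT(sring);                  /* the frontend's job */
        FRONT_RING_INIT(front, sring, PAGE_SIZE);
    }

    /*
     * Backend: maps a page the frontend has already initialised. Re-running
     * SHARED_RING_INIT() here could clobber requests the frontend has
     * already queued, hence its removal from blkback/blktap below.
     */
    static void backend_setup(mydev_sring_t *sring, mydev_back_ring_t *back)
    {
        BACK_RING_INIT(back, sring, PAGE_SIZE);   /* no SHARED_RING_INIT() */
    }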

diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/arch/xen/Kconfig
--- a/linux-2.6-xen-sparse/arch/xen/Kconfig     Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/arch/xen/Kconfig     Thu Dec  1 10:27:27 2005
@@ -70,57 +70,12 @@
          network devices to other guests via a high-performance shared-memory
          interface.
 
-config XEN_TPMDEV_FRONTEND
-        bool "TPM-device frontend driver"
-        default n
-       select TCG_TPM
-       select TCG_XEN
-        help
-          The TPM-device frontend driver.
-
-config XEN_TPMDEV_BACKEND
-        bool "TPM-device backend driver"
-        default n
-        help
-          The TPM-device backend driver
-
-config XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
-        bool "TPM backend closes upon vTPM failure"
-        depends on XEN_TPMDEV_BACKEND
-        default n
-        help
-          The TPM backend closes the channel if the vTPM in userspace indicates
-          a failure. The corresponding domain's channel will be closed.
-          Say Y if you want this feature.
-
-config XEN_BLKDEV_FRONTEND
-       tristate "Block-device frontend driver"
-       default y
-       help
-         The block-device frontend driver allows the kernel to access block
-         devices mounted within another guest OS. Unless you are building a
-         dedicated device-driver domain, or your master control domain
-         (domain 0), then you almost certainly want to say Y here.
-
-config XEN_NETDEV_FRONTEND
-       tristate "Network-device frontend driver"
-       default y
-       help
-         The network-device frontend driver allows the kernel to access
-         network interfaces within another guest OS. Unless you are building a
-         dedicated device-driver domain, or your master control domain
-         (domain 0), then you almost certainly want to say Y here.
-
-config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
+config XEN_NETDEV_PIPELINED_TRANSMITTER
        bool "Pipelined transmitter (DANGEROUS)"
-       depends on XEN_NETDEV_FRONTEND
-       default n
-       help
-         The driver will assume that the backend is pipelining packets for
-         transmission: whenever packets are pending in the remote backend,
-         the driver will not send asynchronous notifications when it queues
-         additional packets for transmission.
-         If the backend is a dumb domain, such as a transparent Ethernet
+       depends on XEN_NETDEV_BACKEND
+       default n
+       help
+         If the net backend is a dumb domain, such as a transparent Ethernet
          bridge with no local IP interface, it is safe to say Y here to get
          slightly lower network overhead.
          If the backend has a local IP interface; or may be doing smart things
@@ -128,6 +83,47 @@
          are unsure; or if you experience network hangs when this option is
          enabled; then you must say N here.
 
+config XEN_TPMDEV_FRONTEND
+        bool "TPM-device frontend driver"
+        default n
+       select TCG_TPM
+       select TCG_XEN
+        help
+          The TPM-device frontend driver.
+
+config XEN_TPMDEV_BACKEND
+        bool "TPM-device backend driver"
+        default n
+        help
+          The TPM-device backend driver
+
+config XEN_TPMDEV_CLOSE_IF_VTPM_FAILS
+        bool "TPM backend closes upon vTPM failure"
+        depends on XEN_TPMDEV_BACKEND
+        default n
+        help
+          The TPM backend closes the channel if the vTPM in userspace indicates
+          a failure. The corresponding domain's channel will be closed.
+          Say Y if you want this feature.
+
+config XEN_BLKDEV_FRONTEND
+       tristate "Block-device frontend driver"
+       default y
+       help
+         The block-device frontend driver allows the kernel to access block
+         devices mounted within another guest OS. Unless you are building a
+         dedicated device-driver domain, or your master control domain
+         (domain 0), then you almost certainly want to say Y here.
+
+config XEN_NETDEV_FRONTEND
+       tristate "Network-device frontend driver"
+       default y
+       help
+         The network-device frontend driver allows the kernel to access
+         network interfaces within another guest OS. Unless you are building a
+         dedicated device-driver domain, or your master control domain
+         (domain 0), then you almost certainly want to say Y here.
+
 config XEN_BLKDEV_TAP
        bool "Block device tap driver"
        default n
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32       Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32       Thu Dec  1 10:27:27 2005
@@ -15,11 +15,11 @@
 CONFIG_XEN_BLKDEV_BACKEND=y
 # CONFIG_XEN_BLKDEV_TAP_BE is not set
 CONFIG_XEN_NETDEV_BACKEND=y
+# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_TPMDEV_FRONTEND is not set
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
 CONFIG_XEN_SCRUB_PAGES=y
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64       Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64       Thu Dec  1 10:27:27 2005
@@ -15,11 +15,11 @@
 CONFIG_XEN_BLKDEV_BACKEND=y
 # CONFIG_XEN_BLKDEV_TAP_BE is not set
 CONFIG_XEN_NETDEV_BACKEND=y
+# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_TPMDEV_FRONTEND is not set
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
 CONFIG_XEN_SCRUB_PAGES=y
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32
--- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32       Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32       Thu Dec  1 10:27:27 2005
@@ -16,7 +16,6 @@
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
 CONFIG_XEN_SCRUB_PAGES=y
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64       Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64       Thu Dec  1 10:27:27 2005
@@ -16,7 +16,6 @@
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
 CONFIG_XEN_SCRUB_PAGES=y
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32        Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32        Thu Dec  1 10:27:27 2005
@@ -15,11 +15,11 @@
 CONFIG_XEN_BLKDEV_BACKEND=y
 # CONFIG_XEN_BLKDEV_TAP_BE is not set
 CONFIG_XEN_NETDEV_BACKEND=y
+# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_TPMDEV_FRONTEND is not set
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
 CONFIG_XEN_SCRUB_PAGES=y
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64        Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64        Thu Dec  1 10:27:27 2005
@@ -15,11 +15,11 @@
 CONFIG_XEN_BLKDEV_BACKEND=y
 # CONFIG_XEN_BLKDEV_TAP_BE is not set
 CONFIG_XEN_NETDEV_BACKEND=y
+# CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_TPMDEV_FRONTEND is not set
 # CONFIG_XEN_TPMDEV_BACKEND is not set
 CONFIG_XEN_BLKDEV_FRONTEND=y
 CONFIG_XEN_NETDEV_FRONTEND=y
-# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
 # CONFIG_XEN_BLKDEV_TAP is not set
 # CONFIG_XEN_SHADOW_MODE is not set
 CONFIG_XEN_SCRUB_PAGES=y
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c      Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c      Thu Dec  1 10:27:27 2005
@@ -107,7 +107,6 @@
        blkif->evtchn = op.u.bind_interdomain.local_port;
 
        sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-       SHARED_RING_INIT(sring);
        BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
 
        blkif->irq = bind_evtchn_to_irqhandler(
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/drivers/xen/blktap/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c       Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c       Thu Dec  1 10:27:27 2005
@@ -97,7 +97,6 @@
        blkif->evtchn = op.u.bind_interdomain.local_port;
 
        sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-       SHARED_RING_INIT(sring);
        BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
 
        blkif->irq = bind_evtchn_to_irqhandler(
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/drivers/xen/netback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/netback/common.h Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/common.h Thu Dec  1 10:27:27 2005
@@ -53,16 +53,12 @@
        unsigned int     irq;
 
        /* The shared rings and indexes. */
-       netif_tx_interface_t *tx;
-       netif_rx_interface_t *rx;
+       netif_tx_back_ring_t tx;
+       netif_rx_back_ring_t rx;
        struct vm_struct *comms_area;
 
-       /* Private indexes into shared ring. */
-       NETIF_RING_IDX rx_req_cons;
-       NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
-       NETIF_RING_IDX rx_resp_prod_copy;
-       NETIF_RING_IDX tx_req_cons;
-       NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
+       /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
+       RING_IDX rx_req_cons_peek;
 
        /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
        unsigned long   credit_bytes;
@@ -80,6 +76,9 @@
 
        struct work_struct free_work;
 } netif_t;
+
+#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
+#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
 
 void netif_creditlimit(netif_t *netif);
 int  netif_disconnect(netif_t *netif);
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Thu Dec  1 10:27:27 2005
@@ -184,6 +184,8 @@
              unsigned long rx_ring_ref, unsigned int evtchn)
 {
        int err;
+       netif_tx_sring_t *txs;
+       netif_rx_sring_t *rxs;
        evtchn_op_t op = {
                .cmd = EVTCHNOP_bind_interdomain,
                .u.bind_interdomain.remote_dom = netif->domid,
@@ -216,10 +218,15 @@
                netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
        disable_irq(netif->irq);
 
-       netif->tx = (netif_tx_interface_t *)netif->comms_area->addr;
-       netif->rx = (netif_rx_interface_t *)
+       txs = (netif_tx_sring_t *)netif->comms_area->addr;
+       BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
+
+       rxs = (netif_rx_sring_t *)
                ((char *)netif->comms_area->addr + PAGE_SIZE);
-       netif->tx->resp_prod = netif->rx->resp_prod = 0;
+       BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
+
+       netif->rx_req_cons_peek = 0;
+
        netif_get(netif);
        wmb(); /* Other CPUs see new state before interface is started. */
 
@@ -246,7 +253,7 @@
 
        unregister_netdev(netif->dev);
 
-       if (netif->tx) {
+       if (netif->tx.sring) {
                unmap_frontend_pages(netif);
                free_vm_area(netif->comms_area);
        }
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Thu Dec  1 10:27:27 2005
@@ -38,8 +38,8 @@
 #define MAX_PENDING_REQS 256
 
 static struct sk_buff_head rx_queue;
-static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2+1];
-static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
+static multicall_entry_t rx_mcl[NET_RX_RING_SIZE*2+1];
+static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
 
 static gnttab_transfer_t grant_rx_op[MAX_PENDING_REQS];
 static unsigned char rx_notify[NR_IRQS];
@@ -126,8 +126,9 @@
 
        /* Drop the packet if the target domain has no receive buffers. */
        if (!netif->active || 
-           (netif->rx_req_cons == netif->rx->req_prod) ||
-           ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE))
+           (netif->rx_req_cons_peek == netif->rx.sring->req_prod) ||
+           ((netif->rx_req_cons_peek - netif->rx.rsp_prod_pvt) ==
+            NET_RX_RING_SIZE))
                goto drop;
 
        /*
@@ -154,7 +155,7 @@
                skb = nskb;
        }
 
-       netif->rx_req_cons++;
+       netif->rx_req_cons_peek++;
        netif_get(netif);
 
        skb_queue_tail(&rx_queue, skb);
@@ -198,7 +199,7 @@
        unsigned long vdata, old_mfn, new_mfn;
        struct sk_buff_head rxq;
        struct sk_buff *skb;
-       u16 notify_list[NETIF_RX_RING_SIZE];
+       u16 notify_list[NET_RX_RING_SIZE];
        int notify_nr = 0;
        int ret;
 
@@ -233,9 +234,9 @@
 
                gop->mfn = old_mfn;
                gop->domid = netif->domid;
-               gop->ref = netif->rx->ring[
-                       MASK_NETIF_RX_IDX(netif->rx_resp_prod_copy)].req.gref;
-               netif->rx_resp_prod_copy++;
+               gop->ref = RING_GET_REQUEST(
+                       &netif->rx, netif->rx.req_cons)->gref;
+               netif->rx.req_cons++;
                gop++;
 
                mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
@@ -300,8 +301,7 @@
                        status = NETIF_RSP_ERROR; 
                }
                irq = netif->irq;
-               id = netif->rx->ring[
-                       MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
+               id = RING_GET_REQUEST(&netif->rx, netif->rx.rsp_prod_pvt)->id;
                if (make_rx_response(netif, id, status,
                                     (unsigned long)skb->data & ~PAGE_MASK,
                                     size, skb->proto_csum_valid) &&
@@ -371,13 +371,31 @@
        spin_unlock_irq(&net_schedule_list_lock);
 }
 
+/*
+ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
+ * If this driver is pipelining transmit requests then we can be very
+ * aggressive in avoiding new-packet notifications -- frontend only needs to
+ * send a notification if there are no outstanding unreceived responses.
+ * If we may be buffering transmit buffers for any reason then we must be rather
+ * more conservative and advertise that we are 'sleeping' this connection here.
+ */
 void netif_schedule_work(netif_t *netif)
 {
-       if ((netif->tx_req_cons != netif->tx->req_prod) &&
-           ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE)) {
+       if (RING_HAS_UNCONSUMED_REQUESTS(&netif->tx)) {
                add_to_net_schedule_list_tail(netif);
                maybe_schedule_tx_action();
        }
+#ifndef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
+       else {
+               netif->tx.sring->server_is_sleeping = 1;
+               mb();
+               if (RING_HAS_UNCONSUMED_REQUESTS(&netif->tx)) {
+                       netif->tx.sring->server_is_sleeping = 0;
+                       add_to_net_schedule_list_tail(netif);
+                       maybe_schedule_tx_action();
+               }
+       }
+#endif
 }
 
 void netif_deschedule_work(netif_t *netif)
@@ -437,11 +455,18 @@
                  * packets.
                 */
                mb();
-               if ((netif->tx_req_cons != netif->tx->req_prod) &&
-                   ((netif->tx_req_cons-netif->tx_resp_prod) !=
-                    NETIF_TX_RING_SIZE))
+
+               if (RING_HAS_UNCONSUMED_REQUESTS(&netif->tx)) {
                        add_to_net_schedule_list_tail(netif);
-        
+               } else {
+                       netif->tx.sring->server_is_sleeping = 1;
+                       mb();
+                       if (RING_HAS_UNCONSUMED_REQUESTS(&netif->tx)) {
+                               netif->tx.sring->server_is_sleeping = 0;
+                               add_to_net_schedule_list_tail(netif);
+                       }
+               }
+
                netif_put(netif);
        }
 }
@@ -454,7 +479,7 @@
        netif_t *netif;
        netif_tx_request_t txreq;
        u16 pending_idx;
-       NETIF_RING_IDX i;
+       RING_IDX i;
        gnttab_map_grant_ref_t *mop;
        unsigned int data_len;
        int ret;
@@ -472,16 +497,14 @@
                remove_from_net_schedule_list(netif);
 
                /* Work to do? */
-               i = netif->tx_req_cons;
-               if ((i == netif->tx->req_prod) ||
-                   ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE)) {
+               if (!RING_HAS_UNCONSUMED_REQUESTS(&netif->tx)) {
                        netif_put(netif);
                        continue;
                }
 
+               i = netif->tx.req_cons;
                rmb(); /* Ensure that we see the request before we copy it. */
-               memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req, 
-                      sizeof(txreq));
+               memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
                /* Credit-based scheduling. */
                if (txreq.size > netif->remaining_credit) {
                        unsigned long now = jiffies;
@@ -515,12 +538,7 @@
                }
                netif->remaining_credit -= txreq.size;
 
-               /*
-                * Why the barrier? It ensures that the frontend sees updated
-                * req_cons before we check for more work to schedule.
-                */
-               netif->tx->req_cons = ++netif->tx_req_cons;
-               mb();
+               netif->tx.req_cons++;
 
                netif_schedule_work(netif);
 
@@ -688,17 +706,18 @@
                              u16      id,
                              s8       st)
 {
-       NETIF_RING_IDX i = netif->tx_resp_prod;
+       RING_IDX i = netif->tx.rsp_prod_pvt;
        netif_tx_response_t *resp;
 
-       resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
+       resp = RING_GET_RESPONSE(&netif->tx, i);
        resp->id     = id;
        resp->status = st;
        wmb();
-       netif->tx->resp_prod = netif->tx_resp_prod = ++i;
+       netif->tx.rsp_prod_pvt = ++i;
+       RING_PUSH_RESPONSES(&netif->tx);
 
        mb(); /* Update producer before checking event threshold. */
-       if (i == netif->tx->event)
+       if (i == netif->tx.sring->rsp_event)
                notify_remote_via_irq(netif->irq);
 }
 
@@ -709,10 +728,10 @@
                             u16      size,
                             u16      csum_valid)
 {
-       NETIF_RING_IDX i = netif->rx_resp_prod;
+       RING_IDX i = netif->rx.rsp_prod_pvt;
        netif_rx_response_t *resp;
 
-       resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+       resp = RING_GET_RESPONSE(&netif->rx, i);
        resp->offset     = offset;
        resp->csum_valid = csum_valid;
        resp->id         = id;
@@ -720,10 +739,11 @@
        if (st < 0)
                resp->status = (s16)st;
        wmb();
-       netif->rx->resp_prod = netif->rx_resp_prod = ++i;
+       netif->rx.rsp_prod_pvt = ++i;
+       RING_PUSH_RESPONSES(&netif->rx);
 
        mb(); /* Update producer before checking event threshold. */
-       return (i == netif->rx->event);
+       return (i == netif->rx.sring->rsp_event);
 }
 
 static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
@@ -739,16 +759,16 @@
                netif = list_entry(ent, netif_t, list);
                printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
                       "rx_resp_prod=%08x\n",
-                      i, netif->rx_req_cons, netif->rx_resp_prod);
+                      i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
                printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
-                      netif->tx_req_cons, netif->tx_resp_prod);
+                      netif->tx.req_cons, netif->tx.rsp_prod_pvt);
                printk(KERN_ALERT "   shared(rx_req_prod=%08x "
                       "rx_resp_prod=%08x\n",
-                      netif->rx->req_prod, netif->rx->resp_prod);
+                      netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
                printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
-                      netif->rx->event, netif->tx->req_prod);
+                      netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
                printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
-                      netif->tx->resp_prod, netif->tx->event);
+                      netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
                i++;
        }
 
@@ -764,7 +784,7 @@
        struct page *page;
 
        /* We can increase reservation by this much in net_rx_action(). */
-       balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
+       balloon_update_driver_allowance(NET_RX_RING_SIZE);
 
        skb_queue_head_init(&rx_queue);
        skb_queue_head_init(&tx_queue);
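
The server_is_sleeping handshake used above can be distilled into the
following stand-alone sketch (demo types only, with C11 atomics standing in
for the kernel's mb(); the real code is netif_schedule_work() and the
frontend transmit path). Setting the flag, issuing a full barrier, and then
rechecking the ring closes the window where the frontend pushes a request
just as the backend decides to idle:

    #include <stdatomic.h>
    #include <stdio.h>

    struct demo_sring {
        unsigned req_prod;                       /* frontend's producer index */
        _Atomic unsigned char server_is_sleeping;
    };

    /* Backend, with private consumer index 'req_cons', about to go idle. */
    static void backend_maybe_sleep(struct demo_sring *s, unsigned req_cons)
    {
        atomic_store(&s->server_is_sleeping, 1);
        atomic_thread_fence(memory_order_seq_cst);   /* mb() in the patch */
        if (s->req_prod != req_cons) {
            /* Raced with a frontend push: cancel the sleep, keep working. */
            atomic_store(&s->server_is_sleeping, 0);
            printf("backend: raced with a push, staying awake\n");
        } else {
            printf("backend: idle until notified\n");
        }
    }

    /* Frontend, having just queued one request. */
    static void frontend_push(struct demo_sring *s)
    {
        s->req_prod++;
        atomic_thread_fence(memory_order_seq_cst);   /* mb() in the patch */
        if (atomic_exchange(&s->server_is_sleeping, 0))
            printf("frontend: backend asleep, sending notification\n");
        else
            printf("frontend: backend awake, no notification needed\n");
    }

    int main(void)
    {
        struct demo_sring s = { 0, 0 };
        backend_maybe_sleep(&s, 0);   /* ring empty: backend goes idle */
        frontend_push(&s);            /* so this push must notify it   */
        return 0;
    }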
diff -r 310746cf9f27 -r c55ac1858bbc linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Thu Dec  1 10:10:40 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Thu Dec  1 10:27:27 2005
@@ -61,6 +61,9 @@
 
 #define GRANT_INVALID_REF      0
 
+#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
+#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
+
 #ifndef __GFP_NOWARN
 #define __GFP_NOWARN 0
 #endif
@@ -76,22 +79,9 @@
 /* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
 #define RX_HEADROOM 200
 
-/*
- * If the backend driver is pipelining transmit requests then we can be very
- * aggressive in avoiding new-packet notifications -- only need to send a
- * notification if there are no outstanding unreceived responses.
- * If the backend may be buffering our transmit buffers for any reason then we
- * are rather more conservative.
- */
-#ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
-#define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
-#else
-#define TX_TEST_IDX req_cons  /* conservative: not seen all our requests? */
-#endif
-
-static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
-static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
-static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
+static unsigned long rx_pfn_array[NET_RX_RING_SIZE];
+static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
+static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
 
 struct netfront_info
 {
@@ -99,11 +89,10 @@
        struct net_device *netdev;
 
        struct net_device_stats stats;
-       NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
        unsigned int tx_full;
     
-       netif_tx_interface_t *tx;
-       netif_rx_interface_t *rx;
+       netif_tx_front_ring_t tx;
+       netif_rx_front_ring_t rx;
 
        spinlock_t   tx_lock;
        spinlock_t   rx_lock;
@@ -124,7 +113,7 @@
 
        /* Receive-ring batched refills. */
 #define RX_MIN_TARGET 8
-#define RX_MAX_TARGET NETIF_RX_RING_SIZE
+#define RX_MAX_TARGET NET_RX_RING_SIZE
        int rx_min_target, rx_max_target, rx_target;
        struct sk_buff_head rx_batch;
 
@@ -132,13 +121,13 @@
         * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
         * array is an index into a chain of free entries.
         */
-       struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
-       struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
+       struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
+       struct sk_buff *rx_skbs[NET_RX_RING_SIZE+1];
 
        grant_ref_t gref_tx_head;
-       grant_ref_t grant_tx_ref[NETIF_TX_RING_SIZE + 1]; 
+       grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1]; 
        grant_ref_t gref_rx_head;
-       grant_ref_t grant_rx_ref[NETIF_TX_RING_SIZE + 1]; 
+       grant_ref_t grant_rx_ref[NET_TX_RING_SIZE + 1]; 
 
        struct xenbus_device *xbdev;
        int tx_ring_ref;
@@ -337,37 +326,45 @@
 
 static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
 {
+       netif_tx_sring_t *txs;
+       netif_rx_sring_t *rxs;
        int err;
        struct net_device *netdev = info->netdev;
 
        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
-       info->rx = NULL;
-       info->tx = NULL;
+       info->rx.sring = NULL;
+       info->tx.sring = NULL;
        info->irq = 0;
 
-       info->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
-       if (!info->tx) {
+       txs = (netif_tx_sring_t *)__get_free_page(GFP_KERNEL);
+       if (!txs) {
                err = -ENOMEM;
                xenbus_dev_fatal(dev, err, "allocating tx ring page");
                goto fail;
        }
-       info->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
-       if (!info->rx) {
+       rxs = (netif_rx_sring_t *)__get_free_page(GFP_KERNEL);
+       if (!rxs) {
                err = -ENOMEM;
                xenbus_dev_fatal(dev, err, "allocating rx ring page");
                goto fail;
        }
-       memset(info->tx, 0, PAGE_SIZE);
-       memset(info->rx, 0, PAGE_SIZE);
+       memset(txs, 0, PAGE_SIZE);
+       memset(rxs, 0, PAGE_SIZE);
        info->backend_state = BEST_DISCONNECTED;
 
-       err = xenbus_grant_ring(dev, virt_to_mfn(info->tx));
+       SHARED_RING_INIT(txs);
+       FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
+
+       SHARED_RING_INIT(rxs);
+       FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
+
+       err = xenbus_grant_ring(dev, virt_to_mfn(txs));
        if (err < 0)
                goto fail;
        info->tx_ring_ref = err;
 
-       err = xenbus_grant_ring(dev, virt_to_mfn(info->rx));
+       err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
        if (err < 0)
                goto fail;
        info->rx_ring_ref = err;
@@ -454,7 +451,7 @@
        np->user_state = UST_OPEN;
 
        network_alloc_rx_buffers(dev);
-       np->rx->event = np->rx_resp_cons + 1;
+       np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
 
        netif_start_queue(dev);
 
@@ -463,7 +460,7 @@
 
 static void network_tx_buf_gc(struct net_device *dev)
 {
-       NETIF_RING_IDX i, prod;
+       RING_IDX i, prod;
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
        struct sk_buff *skb;
@@ -472,11 +469,11 @@
                return;
 
        do {
-               prod = np->tx->resp_prod;
+               prod = np->tx.sring->rsp_prod;
                rmb(); /* Ensure we see responses up to 'rp'. */
 
-               for (i = np->tx_resp_cons; i != prod; i++) {
-                       id  = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
+               for (i = np->tx.rsp_cons; i != prod; i++) {
+                       id  = RING_GET_RESPONSE(&np->tx, i)->id;
                        skb = np->tx_skbs[id];
                        if (unlikely(gnttab_query_foreign_access(
                                np->grant_tx_ref[id]) != 0)) {
@@ -494,7 +491,7 @@
                        dev_kfree_skb_irq(skb);
                }
         
-               np->tx_resp_cons = prod;
+               np->tx.rsp_cons = prod;
         
                /*
                 * Set a new event, then check for race with update of tx_cons.
@@ -504,12 +501,14 @@
                 * data is outstanding: in such cases notification from Xen is
                 * likely to be the only kick that we'll get.
                 */
-               np->tx->event = prod + ((np->tx->req_prod - prod) >> 1) + 1;
+               np->tx.sring->rsp_event =
+                       prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
                mb();
-       } while (prod != np->tx->resp_prod);
+       } while (prod != np->tx.sring->rsp_prod);
 
  out: 
-       if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
+       if (np->tx_full &&
+           ((np->tx.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
                np->tx_full = 0;
                if (np->user_state == UST_OPEN)
                        netif_wake_queue(dev);
@@ -523,7 +522,7 @@
        struct netfront_info *np = netdev_priv(dev);
        struct sk_buff *skb;
        int i, batch_target;
-       NETIF_RING_IDX req_prod = np->rx->req_prod;
+       RING_IDX req_prod = np->rx.req_prod_pvt;
        struct xen_memory_reservation reservation;
        grant_ref_t ref;
 
@@ -536,7 +535,7 @@
         * allocator, so should reduce the chance of failed allocation requests
         *  both for ourself and for other kernel subsystems.
         */
-       batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
+       batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
        for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
                skb = alloc_xen_skb(dev->mtu + RX_HEADROOM);
                if (skb == NULL)
@@ -558,13 +557,13 @@
 
                np->rx_skbs[id] = skb;
         
-               np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
+               RING_GET_REQUEST(&np->rx, req_prod + i)->id = id;
                ref = gnttab_claim_grant_reference(&np->gref_rx_head);
                BUG_ON((signed short)ref < 0);
                np->grant_rx_ref[id] = ref;
                gnttab_grant_foreign_transfer_ref(ref,
                                                  np->xbdev->otherend_id);
-               np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.gref = ref;
+               RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
                rx_pfn_array[i] = virt_to_mfn(skb->head);
 
                /* Remove this page from map before passing back to Xen. */
@@ -599,10 +598,11 @@
                panic("Unable to reduce memory reservation\n");
 
        /* Above is a suitable barrier to ensure backend will see requests. */
-       np->rx->req_prod = req_prod + i;
+       np->rx.req_prod_pvt = req_prod + i;
+       RING_PUSH_REQUESTS(&np->rx);
 
        /* Adjust our fill target if we risked running out of buffers. */
-       if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
+       if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
            ((np->rx_target *= 2) > np->rx_max_target))
                np->rx_target = np->rx_max_target;
 }
@@ -613,7 +613,7 @@
        unsigned short id;
        struct netfront_info *np = netdev_priv(dev);
        netif_tx_request_t *tx;
-       NETIF_RING_IDX i;
+       RING_IDX i;
        grant_ref_t ref;
        unsigned long mfn;
 
@@ -643,12 +643,12 @@
                goto drop;
        }
 
-       i = np->tx->req_prod;
+       i = np->tx.req_prod_pvt;
 
        id = GET_ID_FROM_FREELIST(np->tx_skbs);
        np->tx_skbs[id] = skb;
 
-       tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
+       tx = RING_GET_REQUEST(&np->tx, i);
 
        tx->id   = id;
        ref = gnttab_claim_grant_reference(&np->gref_tx_head);
@@ -662,11 +662,12 @@
        tx->csum_blank = (skb->ip_summed == CHECKSUM_HW);
 
        wmb(); /* Ensure that backend will see the request. */
-       np->tx->req_prod = i + 1;
+       np->tx.req_prod_pvt = i + 1;
+       RING_PUSH_REQUESTS(&np->tx);
 
        network_tx_buf_gc(dev);
 
-       if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
+       if (RING_FULL(&np->tx)) {
                np->tx_full = 1;
                netif_stop_queue(dev);
        }
@@ -678,8 +679,10 @@
 
        /* Only notify Xen if we really have to. */
        mb();
-       if (np->tx->TX_TEST_IDX == i)
+       if (np->tx.sring->server_is_sleeping) {
+               np->tx.sring->server_is_sleeping = 0;
                notify_remote_via_irq(np->irq);
+       }
 
        return 0;
 
@@ -699,7 +702,7 @@
        network_tx_buf_gc(dev);
        spin_unlock_irqrestore(&np->tx_lock, flags);
 
-       if ((np->rx_resp_cons != np->rx->resp_prod) &&
+       if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx) &&
            (np->user_state == UST_OPEN))
                netif_rx_schedule(dev);
 
@@ -712,7 +715,7 @@
        struct netfront_info *np = netdev_priv(dev);
        struct sk_buff *skb, *nskb;
        netif_rx_response_t *rx;
-       NETIF_RING_IDX i, rp;
+       RING_IDX i, rp;
        mmu_update_t *mmu = rx_mmu;
        multicall_entry_t *mcl = rx_mcl;
        int work_done, budget, more_to_do = 1;
@@ -732,13 +735,13 @@
 
        if ((budget = *pbudget) > dev->quota)
                budget = dev->quota;
-       rp = np->rx->resp_prod;
+       rp = np->rx.sring->rsp_prod;
        rmb(); /* Ensure we see queued responses up to 'rp'. */
 
-       for (i = np->rx_resp_cons, work_done = 0; 
+       for (i = np->rx.rsp_cons, work_done = 0; 
             (i != rp) && (work_done < budget);
             i++, work_done++) {
-               rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+               rx = RING_GET_RESPONSE(&np->rx, i);
 
                /*
                  * This definitely indicates a bug, either in this driver or
@@ -756,10 +759,11 @@
                        if (net_ratelimit())
                                WPRINTK("Unfulfilled rx req (id=%d, st=%d).\n",
                                        rx->id, rx->status);
-                       np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].
-                               req.id = rx->id;
+                       RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id =
+                               rx->id;
                        wmb();
-                       np->rx->req_prod++;
+                       np->rx.req_prod_pvt++;
+                       RING_PUSH_REQUESTS(&np->rx);
                        work_done--;
                        continue;
                }
@@ -861,11 +865,11 @@
                dev->last_rx = jiffies;
        }
 
-       np->rx_resp_cons = i;
+       np->rx.rsp_cons = i;
 
        /* If we get a callback with very few responses, reduce fill target. */
        /* NB. Note exponential increase, linear decrease. */
-       if (((np->rx->req_prod - np->rx->resp_prod) >
+       if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
             ((3*np->rx_target) / 4)) &&
            (--np->rx_target < np->rx_min_target))
                np->rx_target = np->rx_min_target;
@@ -878,11 +882,11 @@
        if (work_done < budget) {
                local_irq_save(flags);
 
-               np->rx->event = i + 1;
+               np->rx.sring->rsp_event = i + 1;
     
                /* Deal with hypervisor racing our resetting of rx_event. */
                mb();
-               if (np->rx->resp_prod == i) {
+               if (np->rx.sring->rsp_prod == i) {
                        __netif_rx_complete(dev);
                        more_to_do = 0;
                }
@@ -925,8 +929,8 @@
        /* Recovery procedure: */
 
        /* Step 1: Reinitialise variables. */
-       np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
-       np->rx->event = np->tx->event = 1;
+       np->tx_full = 0;
+       np->rx.sring->rsp_event = np->tx.sring->rsp_event = 1;
 
        /*
         * Step 2: Rebuild the RX and TX ring contents.
@@ -946,13 +950,14 @@
         * to avoid this but maybe it doesn't matter so much given the
         * interface has been down.
         */
-       for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
+       for (requeue_idx = 0, i = 1; i <= NET_TX_RING_SIZE; i++) {
                if ((unsigned long)np->tx_skbs[i] < __PAGE_OFFSET)
                        continue;
 
                skb = np->tx_skbs[i];
 
-               tx = &np->tx->ring[requeue_idx++].req;
+               tx = RING_GET_REQUEST(&np->tx, requeue_idx);
+               requeue_idx++;
 
                tx->id = i;
                gnttab_grant_foreign_access_ref(
@@ -968,21 +973,23 @@
                np->stats.tx_packets++;
        }
        wmb();
-       np->tx->req_prod = requeue_idx;
+       np->tx.req_prod_pvt = requeue_idx;
+       RING_PUSH_REQUESTS(&np->tx);
 
        /* Rebuild the RX buffer freelist and the RX ring itself. */
-       for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++) { 
+       for (requeue_idx = 0, i = 1; i <= NET_RX_RING_SIZE; i++) { 
                if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
                        continue;
                gnttab_grant_foreign_transfer_ref(
                        np->grant_rx_ref[i], np->xbdev->otherend_id);
-               np->rx->ring[requeue_idx].req.gref =
+               RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
                        np->grant_rx_ref[i];
-               np->rx->ring[requeue_idx].req.id = i;
+               RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
                requeue_idx++; 
        }
        wmb();                
-       np->rx->req_prod = requeue_idx;
+       np->rx.req_prod_pvt = requeue_idx;
+       RING_PUSH_REQUESTS(&np->rx);
 
        /*
         * Step 3: All public and private state should now be sane.  Get
@@ -1066,25 +1073,25 @@
        np->rx_max_target = RX_MAX_TARGET;
 
        /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
-       for (i = 0; i <= NETIF_TX_RING_SIZE; i++) {
+       for (i = 0; i <= NET_TX_RING_SIZE; i++) {
                np->tx_skbs[i] = (void *)((unsigned long) i+1);
                np->grant_tx_ref[i] = GRANT_INVALID_REF;
        }
 
-       for (i = 0; i <= NETIF_RX_RING_SIZE; i++) {
+       for (i = 0; i <= NET_RX_RING_SIZE; i++) {
                np->rx_skbs[i] = (void *)((unsigned long) i+1);
                np->grant_rx_ref[i] = GRANT_INVALID_REF;
        }
 
        /* A grant for every tx ring slot */
-       if (gnttab_alloc_grant_references(NETIF_TX_RING_SIZE,
+       if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
                                          &np->gref_tx_head) < 0) {
                printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
                err = -ENOMEM;
                goto exit;
        }
        /* A grant for every rx ring slot */
-       if (gnttab_alloc_grant_references(NETIF_RX_RING_SIZE,
+       if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
                                          &np->gref_rx_head) < 0) {
                printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
                gnttab_free_grant_references(np->gref_tx_head);
@@ -1212,12 +1219,12 @@
        spin_unlock(&info->rx_lock);
        spin_unlock_irq(&info->tx_lock);
     
-       end_access(info->tx_ring_ref, info->tx);
-       end_access(info->rx_ring_ref, info->rx);
+       end_access(info->tx_ring_ref, info->tx.sring);
+       end_access(info->rx_ring_ref, info->rx.sring);
        info->tx_ring_ref = GRANT_INVALID_REF;
        info->rx_ring_ref = GRANT_INVALID_REF;
-       info->tx = NULL;
-       info->rx = NULL;
+       info->tx.sring = NULL;
+       info->rx.sring = NULL;
 
        if (info->irq)
                unbind_from_irqhandler(info->irq, info->netdev);
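
Every producer path in the rewritten netfront follows the same idiom: build
entries at the private index, then publish. A hedged sketch, reusing the
hypothetical mydev ring types from the first sketch (wmb() and the macros
are assumed from the kernel and ring.h respectively; this mirrors the
patch's own explicit-barrier usage):

    /* Queue one request: fill the entry at the private producer index,
     * then publish it to the shared ring. */
    static void mydev_queue_request(mydev_front_ring_t *front, uint32_t id)
    {
        RING_IDX i = front->req_prod_pvt;     /* private, not yet published */
        mydev_request_t *req = RING_GET_REQUEST(front, i);

        req->id = id;                         /* build the entry */

        wmb();                     /* entry contents before the index update */
        front->req_prod_pvt = i + 1;
        RING_PUSH_REQUESTS(front); /* copy req_prod_pvt to sring->req_prod */
    }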
diff -r 310746cf9f27 -r c55ac1858bbc xen/include/public/io/netif.h
--- a/xen/include/public/io/netif.h     Thu Dec  1 10:10:40 2005
+++ b/xen/include/public/io/netif.h     Thu Dec  1 10:27:27 2005
@@ -8,6 +8,8 @@
 
 #ifndef __XEN_PUBLIC_IO_NETIF_H__
 #define __XEN_PUBLIC_IO_NETIF_H__
+
+#include "ring.h"
 
 typedef struct netif_tx_request {
     grant_ref_t gref;      /* Reference to buffer page */
@@ -35,57 +37,12 @@
 } netif_rx_response_t;
 
 /*
- * We use a special capitalised type name because it is _essential_ that all 
- * arithmetic on indexes is done on an integer type of the correct size.
+ * Generate netif ring structures and types.
  */
-typedef uint32_t NETIF_RING_IDX;
 
-/*
- * Ring indexes are 'free running'. That is, they are not stored modulo the
- * size of the ring buffer. The following macros convert a free-running counter
- * into a value that can directly index a ring-buffer array.
- */
-#define MASK_NETIF_RX_IDX(_i) ((_i)&(NETIF_RX_RING_SIZE-1))
-#define MASK_NETIF_TX_IDX(_i) ((_i)&(NETIF_TX_RING_SIZE-1))
+DEFINE_RING_TYPES(netif_tx, netif_tx_request_t, netif_tx_response_t);
+DEFINE_RING_TYPES(netif_rx, netif_rx_request_t, netif_rx_response_t);
 
-#define NETIF_TX_RING_SIZE 256
-#define NETIF_RX_RING_SIZE 256
-
-/* This structure must fit in a memory page. */
-typedef struct netif_tx_interface {
-    /*
-     * Frontend places packets into ring at tx_req_prod.
-     * Frontend receives event when tx_resp_prod passes tx_event.
-     * 'req_cons' is a shadow of the backend's request consumer -- the frontend
-     * may use it to determine if all queued packets have been seen by the
-     * backend.
-     */
-    NETIF_RING_IDX req_prod;
-    NETIF_RING_IDX req_cons;
-    NETIF_RING_IDX resp_prod;
-    NETIF_RING_IDX event;
-    union {
-        netif_tx_request_t  req;
-        netif_tx_response_t resp;
-    } ring[NETIF_TX_RING_SIZE];
-} netif_tx_interface_t;
-
-/* This structure must fit in a memory page. */
-typedef struct netif_rx_interface {
-    /*
-     * Frontend places empty buffers into ring at rx_req_prod.
-     * Frontend receives event when rx_resp_prod passes rx_event.
-     */
-    NETIF_RING_IDX req_prod;
-    NETIF_RING_IDX resp_prod;
-    NETIF_RING_IDX event;
-    union {
-        netif_rx_request_t  req;
-        netif_rx_response_t resp;
-    } ring[NETIF_RX_RING_SIZE];
-} netif_rx_interface_t;
-
-/* Descriptor status values */
 #define NETIF_RSP_DROPPED         -2
 #define NETIF_RSP_ERROR           -1
 #define NETIF_RSP_OKAY             0
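
For reference, a hand-expanded approximation of what DEFINE_RING_TYPES(netif_tx,
netif_tx_request_t, netif_tx_response_t) generates after this patch (ring.h
below is the authoritative text; the layout here is an approximation for
illustration):

    union netif_tx_sring_entry {
        netif_tx_request_t  req;
        netif_tx_response_t rsp;
    };

    struct netif_tx_sring {                  /* lives in the shared page */
        RING_IDX req_prod;
        RING_IDX rsp_prod;
        RING_IDX rsp_event;                  /* notify client when rsp_prod == rsp_event */
        uint8_t  server_is_sleeping;         /* notify server to kick off work */
        union netif_tx_sring_entry ring[1];  /* variable-length */
    };

    struct netif_tx_front_ring {             /* frontend-private bookkeeping */
        RING_IDX req_prod_pvt;               /* requests built, not yet pushed */
        RING_IDX rsp_cons;                   /* responses consumed so far */
        unsigned int nr_ents;
        struct netif_tx_sring *sring;
    };

    struct netif_tx_back_ring {              /* backend-private bookkeeping */
        RING_IDX rsp_prod_pvt;               /* responses built, not yet pushed */
        RING_IDX req_cons;                   /* requests consumed so far */
        unsigned int nr_ents;
        struct netif_tx_sring *sring;
    };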
diff -r 310746cf9f27 -r c55ac1858bbc xen/include/public/io/ring.h
--- a/xen/include/public/io/ring.h      Thu Dec  1 10:10:40 2005
+++ b/xen/include/public/io/ring.h      Thu Dec  1 10:27:27 2005
@@ -1,3 +1,6 @@
+
+
+
 /*
  * Shared producer-consumer ring macros.
  * Tim Deegan and Andrew Warfield November 2004.
@@ -22,7 +25,7 @@
  * power of two (so we can mask with (size-1) to loop around).
  */
 #define __RING_SIZE(_s, _sz) \
-    (__RD32(((_sz) - 2*sizeof(RING_IDX)) / sizeof((_s)->ring[0])))
+    (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
 
 /*
  *  Macros to make the correct C datatypes for a new kind of ring.
@@ -65,6 +68,8 @@
 struct __name##_sring {                                                 \
     RING_IDX req_prod;                                                  \
     RING_IDX rsp_prod;                                                  \
+    RING_IDX rsp_event; /* notify client when rsp_prod == rsp_event */  \
+    uint8_t  server_is_sleeping; /* notify server to kick off work  */  \
     union __name##_sring_entry ring[1]; /* variable-length */           \
 };                                                                      \
                                                                         \
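
The reworked __RING_SIZE() derives the entry count from the real offset of
ring[] rather than the old hard-coded 2*sizeof(RING_IDX), which the new
rsp_event and server_is_sleeping header fields would otherwise have broken;
the NET_TX_RING_SIZE definitions earlier apply it to a null pointer purely
for compile-time offset arithmetic. A rough stand-alone illustration of the
arithmetic (demo struct and helper, not the real types):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SIZE 4096u

    struct demo_sring {            /* same header shape as a post-patch sring */
        uint32_t req_prod, rsp_prod, rsp_event;
        uint8_t  server_is_sleeping;
        uint64_t ring[1];          /* stand-in for the request/response union */
    };

    /* Round down to a power of two, as ring.h's __RD32()-based sizing does. */
    static unsigned round_down_pow2(unsigned x)
    {
        unsigned p = 1;
        while (p * 2 <= x)
            p *= 2;
        return p;
    }

    int main(void)
    {
        size_t   hdr = offsetof(struct demo_sring, ring);
        unsigned raw = (DEMO_PAGE_SIZE - hdr) / sizeof(uint64_t);
        printf("%zu header bytes -> %u raw entries -> ring size %u\n",
               hdr, raw, round_down_pow2(raw));
        return 0;
    }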
