
Re: [Xen-devel] [PATCH RFC 1/4] xen-netback: Factor queue-specific data into queue struct.



On Wed, Jan 15, 2014 at 04:23:21PM +0000, Andrew J. Bennieston wrote:
[...]
> +
> +struct xenvif_queue { /* Per-queue data for xenvif */
> +     unsigned int number; /* Queue number, 0-based */

Use "id" instead?

> +     char name[IFNAMSIZ+4]; /* DEVNAME-qN */
> +     struct xenvif *vif; /* Parent VIF */
>  
>       /* Use NAPI for guest TX */
>       struct napi_struct napi;
>       /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
>       unsigned int tx_irq;
>       /* Only used when feature-split-event-channels = 1 */
> -     char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
> +     char tx_irq_name[IFNAMSIZ+7]; /* DEVNAME-qN-tx */
>       struct xen_netif_tx_back_ring tx;
>       struct sk_buff_head tx_queue;
>       struct page *mmap_pages[MAX_PENDING_REQS];
> @@ -140,7 +142,7 @@ struct xenvif {
>       /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
>       unsigned int rx_irq;
>       /* Only used when feature-split-event-channels = 1 */
> -     char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
> +     char rx_irq_name[IFNAMSIZ+7]; /* DEVNAME-qN-rx */
>       struct xen_netif_rx_back_ring rx;
>       struct sk_buff_head rx_queue;
>  
> @@ -150,14 +152,27 @@ struct xenvif {
>        */
>       RING_IDX rx_req_cons_peek;
>  
> -     /* This array is allocated seperately as it is large */
> -     struct gnttab_copy *grant_copy_op;
> +     struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];

Any reason to switch back to an array inside the structure? This array is
really large, which is why it was allocated separately in the first place
(see the comment being deleted above).
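
For reference, keeping it out of the struct costs one pointer plus an
allocation at init time. A minimal sketch (the err_unwind label and the
per-queue context are illustrative, not the exact pre-patch code):

	struct gnttab_copy *grant_copy_op; /* in struct xenvif_queue */

	/* At queue init time: the array is too large for an embedded
	 * or kmalloc'd allocation to be attractive, so use vzalloc().
	 */
	queue->grant_copy_op = vzalloc(sizeof(struct gnttab_copy) *
				       MAX_GRANT_COPY_OPS);
	if (!queue->grant_copy_op)
		goto err_unwind;

	/* ... with a matching vfree(queue->grant_copy_op) at teardown. */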

>  
>       /* We create one meta structure per ring request we consume, so
>        * the maximum number is the same as the ring size.
>        */
>       struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
>  
> +     /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
> +     unsigned long   credit_bytes;
> +     unsigned long   credit_usec;
> +     unsigned long   remaining_credit;
> +     struct timer_list credit_timeout;
> +     u64 credit_window_start;
> +
> +};
> +
[...]
>  
> +static u16 select_queue(struct net_device *dev, struct sk_buff *skb)

Suggest adding a xenvif_ prefix; see the sketch below.

> +{
> +     struct xenvif *vif = netdev_priv(dev);
> +     u32 hash;
> +     u16 queue_index;
> +
> +     /* First, check if there is only one queue */
> +     if (vif->num_queues == 1) {
> +             queue_index = 0;
> +     }
> +     else {

Coding style: the closing brace and "else" belong on the same line, i.e.
"} else {".

> +             /* Use skb_get_rxhash to obtain an L4 hash if available */
> +             hash = skb_get_rxhash(skb);
> +             queue_index = (u16) (((u64)hash * vif->num_queues) >> 32);
> +     }

Actually, why do you special-case num_queues == 1? The multiplicative
mapping already returns 0 in that case, since ((u64)hash * 1) >> 32 is 0
for any 32-bit hash. If it is an optimization for legacy single-queue
frontends then please add a comment saying so.
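
Putting the last three comments together, I would expect something like
the sketch below. The early return drops the else branch entirely, and
the comment about legacy frontends is my guess at the intent:

static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct xenvif *vif = netdev_priv(dev);
	u32 hash;

	/* Fast path for single-queue (e.g. legacy) frontends. The
	 * multiplication below already yields 0 when num_queues == 1,
	 * so this is at most an optimization.
	 */
	if (vif->num_queues == 1)
		return 0;

	/* Use skb_get_rxhash() to obtain an L4 hash if available, then
	 * scale the 32-bit hash onto [0, num_queues): (h * n) >> 32.
	 */
	hash = skb_get_rxhash(skb);
	return (u16)(((u64)hash * vif->num_queues) >> 32);
}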

> +
> +     return queue_index;
> +}
> +
>  static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
>  {
>       struct xenvif *vif = netdev_priv(dev);
> +     u16 queue_index = 0;
> +     struct xenvif_queue *queue = NULL;
>  
>       BUG_ON(skb->dev != dev);
>  
> -     /* Drop the packet if vif is not ready */
> -     if (vif->task == NULL)
> +     /* Drop the packet if the queues are not set up */
> +     if (vif->num_queues < 1 || vif->queues == NULL)

You don't need both, do you? They should be strictly synchronized.
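
That is, if the two are kept in sync, one test is enough (a sketch,
assuming num_queues is 0 whenever vif->queues is NULL):

	/* Drop the packet if the queues are not set up yet. num_queues
	 * and vif->queues are updated together, so checking one suffices.
	 */
	if (vif->num_queues == 0)
		goto drop;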

> +             goto drop;
> +
> +     /* Obtain the queue to be used to transmit this packet */
> +     queue_index = skb_get_queue_mapping(skb);
> +     queue = &vif->queues[queue_index];
> +
> +     /* Drop the packet if queue is not ready */
> +     if (queue->task == NULL)
>               goto drop;
>  
[...]
>  static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
> @@ -163,20 +209,30 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
>  
>  static void xenvif_up(struct xenvif *vif)
>  {
> -     napi_enable(&vif->napi);
> -     enable_irq(vif->tx_irq);
> -     if (vif->tx_irq != vif->rx_irq)
> -             enable_irq(vif->rx_irq);
> -     xenvif_check_rx_xenvif(vif);
> +     struct xenvif_queue *queue = NULL;
> +     unsigned int queue_index;

Better to insert an empty line here, between the declarations and the loop.

> +     for (queue_index = 0; queue_index < vif->num_queues; ++queue_index) {
> +             queue = &vif->queues[queue_index];
> +             napi_enable(&queue->napi);
> +             enable_irq(queue->tx_irq);
> +             if (queue->tx_irq != queue->rx_irq)
> +                     enable_irq(queue->rx_irq);
> +             xenvif_check_rx_xenvif(queue);
> +     }
>  }
>  
>  static void xenvif_down(struct xenvif *vif)
>  {
> -     napi_disable(&vif->napi);
> -     disable_irq(vif->tx_irq);
> -     if (vif->tx_irq != vif->rx_irq)
> -             disable_irq(vif->rx_irq);
> -     del_timer_sync(&vif->credit_timeout);
> +     struct xenvif_queue *queue = NULL;
> +     unsigned int queue_index;

Ditto.

> +     for (queue_index = 0; queue_index < vif->num_queues; ++queue_index) {
> +             queue = &vif->queues[queue_index];
> +             napi_disable(&queue->napi);
> +             disable_irq(queue->tx_irq);
> +             if (queue->tx_irq != queue->rx_irq)
> +                     disable_irq(queue->rx_irq);
> +             del_timer_sync(&queue->credit_timeout);
> +     }
>  }
>  
[...]
> @@ -622,20 +672,9 @@ static int connect_rings(struct backend_info *be)
>               val = 0;
>       vif->ipv6_csum = !!val;
>  
> -     /* Map the shared frame, irq etc. */
> -     err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
> -                          tx_evtchn, rx_evtchn);
> -     if (err) {
> -             xenbus_dev_fatal(dev, err,
> -                              "mapping shared-frames %lu/%lu port tx %u rx %u",
> -                              tx_ring_ref, rx_ring_ref,
> -                              tx_evtchn, rx_evtchn);
> -             return err;
> -     }
>       return 0;
>  }
>  
> -

Blank line change, not necessary.

Wei.

>  /* ** Driver Registration ** */
>  
>  
> -- 
> 1.7.10.4
