[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 2/2] Use correct config option for ixgbe VMDq


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Mitch Williams <mitch.a.williams@xxxxxxxxx>
  • Date: Tue, 10 Feb 2009 15:10:30 -0800
  • Cc: steven.smith@xxxxxxxxxxxxx, joserenato.santos@xxxxxx
  • Delivery-date: Tue, 10 Feb 2009 15:11:10 -0800
  • Domainkey-signature: a=rsa-sha1; c=nofws; d=gmail.com; s=gamma; h=mime-version:sender:date:x-google-sender-auth:message-id:subject :from:to:cc:content-type:content-transfer-encoding; b=O/yuddOEDkyEvsfCldJ+yljoerSYSR6zVTfbRLP1VeoLPG9dKY0JCVA/5ui6AgjW/g eNFAINZXiHt7S3zM1YErr3Es+hN99eTbCWoLri7gwAVl6VmmgqKxTYbuXy4cc4a3zsq6 k12gryRH659voqIN6XQIIFcZc5viO7X9tqqYc=
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

The correct kernel configuration option for VMDq support is
CONFIG_XEN_NETDEV2_VMQ, not CONFIG_XEN_NETDEV2_BACKEND.

Signed-off-by: Mitch Williams <mitch.a.williams@xxxxxxxxx>

diff -urpN a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
--- a/drivers/net/ixgbe/ixgbe.h 2009-02-06 09:03:44.000000000 -0800
+++ b/drivers/net/ixgbe/ixgbe.h 2009-02-10 14:32:57.000000000 -0800
@@ -35,7 +35,7 @@
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/vmalloc.h>
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
 #include <linux/netvmq.h>
 #endif

@@ -423,7 +423,7 @@ struct ixgbe_adapter {
        unsigned int lro_flushed;
        unsigned int lro_no_desc;
 #endif
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
        struct net_vmq *vmq;
        u32 rx_queues_allocated;
 #endif
diff -urpN a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
--- a/drivers/net/ixgbe/ixgbe_main.c    2009-02-10 14:15:53.000000000 -0800
+++ b/drivers/net/ixgbe/ixgbe_main.c    2009-02-10 14:32:47.000000000 -0800
@@ -431,7 +431,7 @@ static void ixgbe_receive_skb(struct ixg
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
        if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) && ring->queue_index) {
                /* This is a VMDq packet destined for a VM. */
                vmq_netif_rx(skb, ring->queue_index);
@@ -519,7 +519,7 @@ static inline void ixgbe_rx_checksum(str
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;

-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
        if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
                skb->proto_data_valid = 1;
 #endif
@@ -567,7 +567,7 @@ static void ixgbe_alloc_rx_buffers(struc

                if (!bi->skb) {
                        struct sk_buff *skb;
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
                        if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
                            rx_ring->queue_index) {
                                skb = vmq_alloc_skb(adapter->netdev,
@@ -604,7 +604,7 @@ static void ixgbe_alloc_rx_buffers(struc
                        bi->skb = skb;
                        bi->dma = pci_map_single(pdev, skb->data, bufsz,
                                                 PCI_DMA_FROMDEVICE);
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
                        }
 #endif
                }
@@ -1056,7 +1056,7 @@ static bool ixgbe_clean_rx_irq(struct ix
                cleaned = true;
                skb = rx_buffer_info->skb;
                rx_buffer_info->skb = NULL;
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
                if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
                     rx_ring->queue_index) {
                        /* for Xen VMDq, packet data goes in first page of
@@ -1132,7 +1132,7 @@ static bool ixgbe_clean_rx_irq(struct ix
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
                if (skb->data)
 #endif
                        skb->protocol = eth_type_trans(skb, adapter->netdev);
@@ -2276,7 +2276,7 @@ static void ixgbe_configure_rx(struct ix
                adapter->rx_ring[i].head = IXGBE_RDH(j);
                adapter->rx_ring[i].tail = IXGBE_RDT(j);

-#ifndef CONFIG_XEN_NETDEV2_BACKEND
+#ifndef CONFIG_XEN_NETDEV2_VMQ
                if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
                        /* Reserve VMDq set 1 for FCoE, using 3k buffers */
                        if ((i & adapter->ring_feature[RING_F_VMDQ].mask) == 1)
@@ -2288,7 +2288,7 @@ static void ixgbe_configure_rx(struct ix
                }
 #else
                        adapter->rx_ring[i].rx_buf_len = rx_buf_len;
-#endif /* CONFIG_XEN_NETDEV2_BACKEND */
+#endif /* CONFIG_XEN_NETDEV2_VMQ */

 #ifndef IXGBE_NO_INET_LRO
                /* Intitial LRO Settings */
@@ -2462,7 +2462,7 @@ static void ixgbe_restore_vlan(struct ix
 }

 #endif
-#ifndef CONFIG_XEN_NETDEV2_BACKEND
+#ifndef CONFIG_XEN_NETDEV2_VMQ
 /**
  * compare_ether_oui - Compare two OUIs
  * @addr1: pointer to a 6 byte array containing an Ethernet address
@@ -2491,11 +2491,11 @@ static inline int is_fcoe_ether_addr(con
        static const u8 fcoe_oui[] = { 0x0e, 0xfc, 0x00 };
        return compare_ether_oui(addr, fcoe_oui) == 0;
 }
-#endif /* CONFIG_XEN_NETDEV2_BACKEND */
+#endif /* CONFIG_XEN_NETDEV2_VMQ */

 static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
u32 *vmdq)
 {
-#ifndef CONFIG_XEN_NETDEV2_BACKEND
+#ifndef CONFIG_XEN_NETDEV2_VMQ
        struct ixgbe_adapter *adapter = hw->back;
 #endif
        struct dev_mc_list *mc_ptr;
@@ -2507,7 +2507,7 @@ static u8 *ixgbe_addr_list_itr(struct ix
                *mc_addr_ptr = mc_ptr->next->dmi_addr;
        else
                *mc_addr_ptr = NULL;
-#ifndef CONFIG_XEN_NETDEV2_BACKEND
+#ifndef CONFIG_XEN_NETDEV2_VMQ
        if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
                /* VMDQ set 1 is used for FCoE */
                if (adapter->ring_feature[RING_F_VMDQ].indices)
@@ -2904,7 +2904,7 @@ static void ixgbe_clean_rx_ring(struct i

                rx_buffer_info = &rx_ring->rx_buffer_info[i];
                if (rx_buffer_info->skb) {
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
                        if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
                             rx_ring->queue_index) {
                                pci_unmap_page(pdev, rx_buffer_info->dma,
@@ -3870,7 +3870,7 @@ int ixgbe_setup_rx_resources(struct ixgb
        rx_ring->work_limit = rx_ring->count / 2;
 #endif

-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
        if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
             rx_ring->queue_index) {
                        rx_ring->active = 0;
@@ -3879,7 +3879,7 @@ int ixgbe_setup_rx_resources(struct ixgb
 #endif
                rx_ring->active = 1;
                rx_ring->allocated = 1;
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
        }
 #endif

@@ -4002,7 +4002,7 @@ static int ixgbe_setup_all_rx_resources(
                DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
                break;
        }
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
        adapter->rx_queues_allocated = 0;
 #endif
        return err;
@@ -4048,7 +4048,7 @@ static int ixgbe_change_mtu(struct net_d
        if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
                return -EINVAL;

-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
        /* Jumbo frames not currently supported in VMDq mode under Xen */
        if ((adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) &&
            (max_frame > ETH_FRAME_LEN))
@@ -4960,7 +4960,7 @@ static int ixgbe_ioctl(struct net_device

 #endif

-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
 int ixgbe_get_avail_queues(struct net_device *netdev, unsigned int queue_type)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -5142,7 +5142,7 @@ static void ixgbe_setup_vmq(struct ixgbe
                adapter->netdev->vmq = vmq;
        }
 }
-#endif /* CONFIG_XEN_NETDEV2_BACKEND */
+#endif /* CONFIG_XEN_NETDEV2_VMQ */

 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
@@ -5442,7 +5442,7 @@ static int __devinit ixgbe_probe(struct

 #endif
        strcpy(netdev->name, "eth%d");
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
        if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
                ixgbe_setup_vmq(adapter);
 #endif
@@ -5450,7 +5450,7 @@ static int __devinit ixgbe_probe(struct
        if (err)
                goto err_register;

-#ifndef CONFIG_XEN_NETDEV2_BACKEND
+#ifndef CONFIG_XEN_NETDEV2_VMQ
        if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
                ixgbe_sysfs_create(adapter);
 #endif
@@ -5563,14 +5563,14 @@ static void __devexit ixgbe_remove(struc
        }

 #endif
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
        if (netdev->vmq) {
                free_vmq(netdev->vmq);
                netdev->vmq = 0;
        }
 #endif

-#ifndef CONFIG_XEN_NETDEV2_BACKEND
+#ifndef CONFIG_XEN_NETDEV2_VMQ
        if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
                ixgbe_sysfs_remove(adapter);
 #endif
diff -urpN a/drivers/net/ixgbe/ixgbe_param.c b/drivers/net/ixgbe/ixgbe_param.c
--- a/drivers/net/ixgbe/ixgbe_param.c   2009-02-10 14:15:35.000000000 -0800
+++ b/drivers/net/ixgbe/ixgbe_param.c   2009-02-10 14:32:40.000000000 -0800
@@ -723,7 +723,7 @@ void __devinit ixgbe_check_options(struc
                        adapter->flags |= IXGBE_FLAG_RX_PS_CAPABLE;
                }
 #endif
-#ifdef CONFIG_XEN_NETDEV2_BACKEND
+#ifdef CONFIG_XEN_NETDEV2_VMQ
        if ((adapter->flags &
              (IXGBE_FLAG_RX_PS_CAPABLE | IXGBE_FLAG_VMDQ_ENABLED)) ==
              (IXGBE_FLAG_RX_PS_CAPABLE | IXGBE_FLAG_VMDQ_ENABLED)) {

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted by RackSpace, which monitors our
servers 24x7x365, backed by RackSpace's Fanatical Support®.