[Xen-devel] [PATCH 10/11] kernel compatibility code


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Mitch Williams <mitch.a.williams@xxxxxxxxx>
  • Date: Wed, 28 Jan 2009 16:39:14 -0800
  • Delivery-date: Wed, 28 Jan 2009 16:52:20 -0800
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

This version of ixgbe has been written to the current upstream kernel
API.  The kcompat header and source files added here define the data
structures and functions needed for the driver to build and run on
older kernels.

Signed-off-by: Mitch Williams <mitch.a.williams@xxxxxxxxx>
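
The pattern throughout is the standard kcompat one: the driver code
calls the newest kernel API unconditionally, and kcompat.h back-fills
whatever an older kernel lacks behind LINUX_VERSION_CODE checks.  As a
minimal sketch of the idea (not itself part of the patch; it is
modeled on the gso_size and skb_is_gso() backfills the patch carries
for pre-2.6.18 kernels):

    #include <linux/version.h>
    #include <linux/skbuff.h>

    #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
    #ifndef NETIF_F_GSO
    #define gso_size tso_size   /* pre-GSO kernels call this tso_size */
    #endif
    /* skb_is_gso() does not exist yet; nonzero gso_size means GSO/TSO */
    static inline int skb_is_gso(const struct sk_buff *skb)
    {
            return skb_shinfo(skb)->gso_size != 0;
    }
    #endif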

diff -urpN -X dontdiff a/drivers/net/ixgbe/kcompat.c b/drivers/net/ixgbe/kcompat.c
--- a/drivers/net/ixgbe/kcompat.c       1969-12-31 16:00:00.000000000 -0800
+++ b/drivers/net/ixgbe/kcompat.c       2009-01-22 11:33:59.000000000 -0800
@@ -0,0 +1,442 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@xxxxxxxxxxxxxxxxxxxxx>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+
+
+
+
+
+#ifdef DRIVER_IXGBE
+#include "ixgbe.h"
+#endif
+
+#include "kcompat.h"
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#if defined(CONFIG_HIGHMEM)
+
+#ifndef PCI_DRAM_OFFSET
+#define PCI_DRAM_OFFSET 0
+#endif
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+                 size_t size, int direction)
+{
+       return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
+               PCI_DRAM_OFFSET);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+u64
+_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
+                 size_t size, int direction)
+{
+       return pci_map_single(dev, (void *)page_address(page) + offset, size,
+                             direction);
+}
+
+#endif /* CONFIG_HIGHMEM */
+
+void
+_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
+                   int direction)
+{
+       return pci_unmap_single(dev, dma_addr, size, direction);
+}
+
+#endif /* 2.4.13 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+int
+_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
+{
+       if (!pci_dma_supported(dev, mask))
+               return -EIO;
+       dev->dma_mask = mask;
+       return 0;
+}
+
+int
+_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
+{
+       int i;
+
+       for (i = 0; i < 6; i++) {
+               if (pci_resource_len(dev, i) == 0)
+                       continue;
+
+               if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
+                       if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+                               pci_release_regions(dev);
+                               return -EBUSY;
+                       }
+               } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+                       if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
+                               pci_release_regions(dev);
+                               return -EBUSY;
+                       }
+               }
+       }
+       return 0;
+}
+
+void
+_kc_pci_release_regions(struct pci_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < 6; i++) {
+               if (pci_resource_len(dev, i) == 0)
+                       continue;
+
+               if (pci_resource_flags(dev, i) & IORESOURCE_IO)
+                       release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+
+               else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
+                       release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
+       }
+}
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+struct net_device *
+_kc_alloc_etherdev(int sizeof_priv)
+{
+       struct net_device *dev;
+       int alloc_size;
+
+       alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
+       dev = kmalloc(alloc_size, GFP_KERNEL);
+       if (!dev)
+               return NULL;
+       memset(dev, 0, alloc_size);
+
+       if (sizeof_priv)
+               dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
+       dev->name[0] = '\0';
+       ether_setup(dev);
+
+       return dev;
+}
+
+int
+_kc_is_valid_ether_addr(u8 *addr)
+{
+       const char zaddr[6] = { 0, };
+
+       return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
+}
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+int
+_kc_pci_set_power_state(struct pci_dev *dev, int state)
+{
+       return 0;
+}
+
+int
+_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
+{
+       return 0;
+}
+
+#endif /* 2.4.6 => 2.4.3 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
+                            int off, int size)
+{
+       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+       frag->page = page;
+       frag->page_offset = off;
+       frag->size = size;
+       skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+/*
+ * Original Copyright:
+ * find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@xxxxxxxxxx)
+ */
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+                            unsigned long offset)
+{
+       const unsigned long *p = addr + BITOP_WORD(offset);
+       unsigned long result = offset & ~(BITS_PER_LONG-1);
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset %= BITS_PER_LONG;
+       if (offset) {
+               tmp = *(p++);
+               tmp &= (~0UL << offset);
+               if (size < BITS_PER_LONG)
+                       goto found_first;
+               if (tmp)
+                       goto found_middle;
+               size -= BITS_PER_LONG;
+               result += BITS_PER_LONG;
+       }
+       while (size & ~(BITS_PER_LONG-1)) {
+               if ((tmp = *(p++)))
+                       goto found_middle;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+
+found_first:
+       tmp &= (~0UL >> (BITS_PER_LONG - size));
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size;   /* Nope. */
+found_middle:
+       return result + ffs(tmp) - 1;   /* ffs() is 1-based */
+}
+
+#endif /* 2.6.0 => 2.4.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+void *_kc_kzalloc(size_t size, int flags)
+{
+       void *ret = kmalloc(size, flags);
+       if (ret)
+               memset(ret, 0, size);
+       return ret;
+}
+#endif /* <= 2.6.13 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
+                                     unsigned int length)
+{
+       /* 16 == NET_SKB_PAD */
+       struct sk_buff *skb;
+       skb = alloc_skb(length + 16, GFP_ATOMIC);
+       if (likely(skb != NULL)) {
+               skb_reserve(skb, 16);
+               skb->dev = dev;
+       }
+       return skb;
+}
+#endif /* <= 2.6.17 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+int _kc_pci_save_state(struct pci_dev *pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct adapter_struct *adapter = netdev_priv(netdev);
+       int size = PCI_CONFIG_SPACE_LEN, i;
+       u16 pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+       u16 pcie_link_status;
+
+       if (pcie_cap_offset) {
+               if (!pci_read_config_word(pdev,
+                                         pcie_cap_offset + PCIE_LINK_STATUS,
+                                         &pcie_link_status))
+                       size = PCIE_CONFIG_SPACE_LEN;
+       }
+       pci_config_space_ich8lan();
+#ifdef HAVE_PCI_ERS
+       if (adapter->config_space == NULL)
+#else
+       WARN_ON(adapter->config_space != NULL);
+#endif
+               adapter->config_space = kmalloc(size, GFP_KERNEL);
+       if (!adapter->config_space) {
+               printk(KERN_ERR "Out of memory in pci_save_state\n");
+               return -ENOMEM;
+       }
+       for (i = 0; i < (size / 4); i++)
+               pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
+       return 0;
+}
+
+void _kc_pci_restore_state(struct pci_dev * pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct adapter_struct *adapter = netdev_priv(netdev);
+       int size = PCI_CONFIG_SPACE_LEN, i;
+       u16 pcie_cap_offset;
+       u16 pcie_link_status;
+
+       if (adapter->config_space != NULL) {
+               pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+               if (pcie_cap_offset &&
+                   !pci_read_config_word(pdev,
+                                         pcie_cap_offset + PCIE_LINK_STATUS,
+                                         &pcie_link_status))
+                       size = PCIE_CONFIG_SPACE_LEN;
+       
+               pci_config_space_ich8lan();
+               for (i = 0; i < (size / 4); i++)
+                       pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
+#ifndef HAVE_PCI_ERS
+               kfree(adapter->config_space);
+               adapter->config_space = NULL;
+#endif
+       }
+}
+
+#ifdef HAVE_PCI_ERS
+void _kc_free_netdev(struct net_device *netdev)
+{
+       struct adapter_struct *adapter = netdev_priv(netdev);
+
+       if (adapter->config_space != NULL)
+               kfree(adapter->config_space);
+#ifdef CONFIG_SYSFS
+       if (netdev->reg_state == NETREG_UNINITIALIZED) {
+               kfree((char *)netdev - netdev->padded);
+       } else {
+               BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
+               netdev->reg_state = NETREG_RELEASED;
+               class_device_put(&netdev->class_dev);
+       }
+#else
+       kfree((char *)netdev - netdev->padded);
+#endif
+}
+#endif
+#endif /* <= 2.6.18 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
+#ifdef DRIVER_IXGBE
+int ixgbe_sysfs_create(struct ixgbe_adapter *adapter)
+{
+       return 0;
+}
+
+void ixgbe_sysfs_remove(struct ixgbe_adapter *adapter)
+{
+       return;
+}
+
+int ixgbe_dcb_netlink_register(void)
+{
+       return 0;
+}
+
+int ixgbe_dcb_netlink_unregister(void)
+{
+       return 0;
+}
+#endif /* DRIVER_IXGBE */
+#endif /* < 2.6.23 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+#ifdef NAPI
+int __kc_adapter_clean(struct net_device *netdev, int *budget)
+{
+       int work_done;
+       int work_to_do = min(*budget, netdev->quota);
+#ifdef DRIVER_IXGBE
+       /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
+       struct napi_struct *napi = netdev->priv;
+#else
+       struct adapter_struct *adapter = netdev_priv(netdev);
+       struct napi_struct *napi = &adapter->rx_ring[0].napi;
+#endif
+       work_done = napi->poll(napi, work_to_do);
+       *budget -= work_done;
+       netdev->quota -= work_done;
+       return work_done ? 1 : 0;
+}
+#endif /* NAPI */
+#endif /* < 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+#ifdef HAVE_TX_MQ
+void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
+{
+       struct adapter_struct *adapter = netdev_priv(netdev);
+       int i;
+
+       netif_stop_queue(netdev);
+       if (netif_is_multiqueue(netdev))
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       netif_stop_subqueue(netdev, i);
+}
+void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
+{
+       struct adapter_struct *adapter = netdev_priv(netdev);
+       int i;
+
+       netif_wake_queue(netdev);
+       if (netif_is_multiqueue(netdev))
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       netif_wake_subqueue(netdev, i);
+}
+void _kc_netif_tx_start_all_queues(struct net_device *netdev)
+{
+       struct adapter_struct *adapter = netdev_priv(netdev);
+       int i;
+
+       netif_start_queue(netdev);
+       if (netif_is_multiqueue(netdev))
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       netif_start_subqueue(netdev, i);
+}
+#endif /* HAVE_TX_MQ */
+#endif /* < 2.6.27 */
diff -urpN -X dontdiff a/drivers/net/ixgbe/kcompat.h b/drivers/net/ixgbe/kcompat.h
--- a/drivers/net/ixgbe/kcompat.h       1969-12-31 16:00:00.000000000 -0800
+++ b/drivers/net/ixgbe/kcompat.h       2009-01-22 11:33:59.000000000 -0800
@@ -0,0 +1,1776 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@xxxxxxxxxxxxxxxxxxxxx>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <asm/io.h>
+
+/* NAPI enable/disable flags here */
+
+
+
+#ifdef _E1000_H_
+#ifdef CONFIG_E1000_NAPI
+#define NAPI
+#endif
+#ifdef E1000_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef E1000E_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef E1000_NO_NAPI
+#undef NAPI
+#endif
+#ifdef E1000E_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+#ifdef _IGB_H_
+#define NAPI
+#endif
+
+#ifdef _IXGB_H_
+#ifdef CONFIG_IXGB_NAPI
+#define NAPI
+#endif
+#ifdef IXGB_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef IXGB_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+#ifdef DRIVER_IXGBE
+/* enable NAPI for ixgbe by default */
+#undef CONFIG_IXGBE_NAPI
+#define CONFIG_IXGBE_NAPI
+#define NAPI
+#endif
+
+#ifdef _IXGBE_H_
+#ifdef CONFIG_IXGBE_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef IXGBE_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef IXGBE_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+
+
+
+
+
+
+#ifdef DRIVER_IXGBE
+#define adapter_struct ixgbe_adapter
+#endif
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#ifndef CONFIG_E1000_NAPI
+#define CONFIG_E1000_NAPI
+#endif
+#ifndef CONFIG_E1000E_NAPI
+#define CONFIG_E1000E_NAPI
+#endif
+#ifndef CONFIG_IXGB_NAPI
+#define CONFIG_IXGB_NAPI
+#endif
+#ifdef _IXGBE_H_
+#ifndef CONFIG_IXGBE_NAPI
+#define CONFIG_IXGBE_NAPI
+#endif
+#endif /* _IXGBE_H */
+#else
+#undef CONFIG_E1000_NAPI
+#undef CONFIG_E1000E_NAPI
+#undef CONFIG_IXGB_NAPI
+#ifdef _IXGBE_H_
+#undef CONFIG_IXGBE_NAPI
+#endif /* _IXGBE_H */
+#endif
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#undef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT
+#undef CONFIG_IGB_DISABLE_PACKET_SPLIT
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT
+#endif
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+#ifndef CONFIG_PCI_MSI
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+struct msix_entry {
+       u16 vector; /* kernel uses to write allocated vector */
+       u16 entry;  /* driver uses to specify entry, OS writes */
+};
+#endif
+#define pci_enable_msi(a) -ENOTSUPP
+#define pci_disable_msi(a) do {} while (0)
+#define pci_enable_msix(a, b, c) -ENOTSUPP
+#define pci_disable_msix(a) do {} while (0)
+#define msi_remove_pci_irq_vectors(a) do {} while (0)
+#endif /* CONFIG_PCI_MSI */
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#endif
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK  0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK  0x00000000ffffffffULL
+#endif
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef PCIE_LINK_STATE_L0S
+#define PCIE_LINK_STATE_L0S 1
+#endif
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(x) kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* if we do not have the infrastructure to detect if skb_header is cloned
+   just return false in all cases */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef HAVE_NETIF_MSG
+#define HAVE_NETIF_MSG 1
+enum {
+       NETIF_MSG_DRV           = 0x0001,
+       NETIF_MSG_PROBE         = 0x0002,
+       NETIF_MSG_LINK          = 0x0004,
+       NETIF_MSG_TIMER         = 0x0008,
+       NETIF_MSG_IFDOWN        = 0x0010,
+       NETIF_MSG_IFUP          = 0x0020,
+       NETIF_MSG_RX_ERR        = 0x0040,
+       NETIF_MSG_TX_ERR        = 0x0080,
+       NETIF_MSG_TX_QUEUED     = 0x0100,
+       NETIF_MSG_INTR          = 0x0200,
+       NETIF_MSG_TX_DONE       = 0x0400,
+       NETIF_MSG_RX_STATUS     = 0x0800,
+       NETIF_MSG_PKTDATA       = 0x1000,
+       NETIF_MSG_HW            = 0x2000,
+       NETIF_MSG_WOL           = 0x4000,
+};
+
+#else
+#define NETIF_MSG_HW   0x2000
+#define NETIF_MSG_WOL  0x4000
+#endif /* HAVE_NETIF_MSG */
+
+#ifndef MII_RESV1
+#define MII_RESV1              0x17            /* Reserved...          */
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+       .vendor = (vend), .device = (dev), \
+       .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef num_online_cpus
+#define num_online_cpus() smp_num_cpus
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef DECLARE_BITMAP
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+#ifndef DCA_GET_TAG_TWO_ARGS
+#define dca3_get_tag(a,b) dca_get_tag(b)
+#endif
+
+
+/*****************************************************************************/
+/* Installations with ethtool version without eeprom, adapter id, or statistics
+ * support */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+       u32 cmd;
+       char driver[32];
+       char version[32];
+       char fw_version[32];
+       char bus_info[32];
+       char reserved1[32];
+       char reserved2[16];
+       u32 n_stats;
+       u32 testinfo_len;
+       u32 eedump_len;
+       u32 regdump_len;
+};
+
+struct ethtool_stats {
+       u32 cmd;
+       u32 n_stats;
+       u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+       ETH_SS_TEST             = 0,
+       ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+       u32 cmd;            /* ETHTOOL_GSTRINGS */
+       u32 string_set;     /* string set id, e.g. ETH_SS_TEST */
+       u32 len;            /* number of strings in the string set */
+       u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+       ETH_TEST_FL_OFFLINE     = (1 << 0),
+       ETH_TEST_FL_FAILED      = (1 << 1),
+};
+struct ethtool_test {
+       u32 cmd;
+       u32 flags;
+       u32 reserved;
+       u32 len;
+       u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+       u32 cmd;
+       u32 magic;
+       u32 offset;
+       u32 len;
+       u8 data[0];
+};
+
+struct ethtool_value {
+       u32 cmd;
+       u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS          0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+       u32 cmd;
+       u32 version; /* driver-specific, indicates different chips/revs */
+       u32 len; /* bytes */
+       u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL                0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL                0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST       0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK          0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM                0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM                0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE      0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+       u32     cmd;    /* ETHTOOL_{G,S}COALESCE */
+
+       /* How many usecs to delay an RX interrupt after
+        * a packet arrives.  If 0, only rx_max_coalesced_frames
+        * is used.
+        */
+       u32     rx_coalesce_usecs;
+
+       /* How many packets to delay an RX interrupt after
+        * a packet arrives.  If 0, only rx_coalesce_usecs is
+        * used.  It is illegal to set both usecs and max frames
+        * to zero as this would cause RX interrupts to never be
+        * generated.
+        */
+       u32     rx_max_coalesced_frames;
+
+       /* Same as above two parameters, except that these values
+        * apply while an IRQ is being serviced by the host.  Not
+        * all cards support this feature and the values are ignored
+        * in that case.
+        */
+       u32     rx_coalesce_usecs_irq;
+       u32     rx_max_coalesced_frames_irq;
+
+       /* How many usecs to delay a TX interrupt after
+        * a packet is sent.  If 0, only tx_max_coalesced_frames
+        * is used.
+        */
+       u32     tx_coalesce_usecs;
+
+       /* How many packets to delay a TX interrupt after
+        * a packet is sent.  If 0, only tx_coalesce_usecs is
+        * used.  It is illegal to set both usecs and max frames
+        * to zero as this would cause TX interrupts to never be
+        * generated.
+        */
+       u32     tx_max_coalesced_frames;
+
+       /* Same as above two parameters, except that these values
+        * apply while an IRQ is being serviced by the host.  Not
+        * all cards support this feature and the values are ignored
+        * in that case.
+        */
+       u32     tx_coalesce_usecs_irq;
+       u32     tx_max_coalesced_frames_irq;
+
+       /* How many usecs to delay in-memory statistics
+        * block updates.  Some drivers do not have an in-memory
+        * statistic block, and in such cases this value is ignored.
+        * This value must not be zero.
+        */
+       u32     stats_block_coalesce_usecs;
+
+       /* Adaptive RX/TX coalescing is an algorithm implemented by
+        * some drivers to improve latency under low packet rates and
+        * improve throughput under high packet rates.  Some drivers
+        * only implement one of RX or TX adaptive coalescing.  Anything
+        * not implemented by the driver causes these values to be
+        * silently ignored.
+        */
+       u32     use_adaptive_rx_coalesce;
+       u32     use_adaptive_tx_coalesce;
+
+       /* When the packet rate (measured in packets per second)
+        * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+        * used.
+        */
+       u32     pkt_rate_low;
+       u32     rx_coalesce_usecs_low;
+       u32     rx_max_coalesced_frames_low;
+       u32     tx_coalesce_usecs_low;
+       u32     tx_max_coalesced_frames_low;
+
+       /* When the packet rate is below pkt_rate_high but above
+        * pkt_rate_low (both measured in packets per second) the
+        * normal {rx,tx}_* coalescing parameters are used.
+        */
+
+       /* When the packet rate (measured in packets per second)
+        * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+        * used.
+        */
+       u32     pkt_rate_high;
+       u32     rx_coalesce_usecs_high;
+       u32     rx_max_coalesced_frames_high;
+       u32     tx_coalesce_usecs_high;
+       u32     tx_max_coalesced_frames_high;
+
+       /* How often to do adaptive coalescing packet rate sampling,
+        * measured in seconds.  Must not be zero.
+        */
+       u32     rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE      0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM     0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+       u32     cmd;    /* ETHTOOL_{G,S}RINGPARAM */
+
+       /* Read only attributes.  These indicate the maximum number
+        * of pending RX/TX ring entries the driver will allow the
+        * user to set.
+        */
+       u32     rx_max_pending;
+       u32     rx_mini_max_pending;
+       u32     rx_jumbo_max_pending;
+       u32     tx_max_pending;
+
+       /* Values changeable by the user.  The valid values are
+        * in the range 1 to the "*_max_pending" counterpart above.
+        */
+       u32     rx_pending;
+       u32     rx_mini_pending;
+       u32     rx_jumbo_pending;
+       u32     tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM     0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM    0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+       u32     cmd;    /* ETHTOOL_{G,S}PAUSEPARAM */
+
+       /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+        * being true) the user may set 'autoneg' here non-zero to have the
+        * pause parameters be auto-negotiated too.  In such a case, the
+        * {rx,tx}_pause values below determine what capabilities are
+        * advertised.
+        *
+        * If 'autoneg' is zero or the link is not being auto-negotiated,
+        * then {rx,tx}_pause force the driver to use/not-use pause
+        * flow control.
+        */
+       u32     autoneg;
+       u32     rx_pause;
+       u32     tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM    0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM                0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM                0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM                0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM                0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG            0x00000018 /* Get scatter-gather enable
+                                           * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG            0x00000019 /* Set scatter-gather enable
+                                           * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST           0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS       0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID                0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS         0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO           0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO           0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN    32
+#endif
+
+/*****************************************************************************/
+/* 2.4.3 => 2.4.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
+
+/**************************************/
+/* PCI DRIVER API */
+
+#ifndef pci_set_dma_mask
+#define pci_set_dma_mask _kc_pci_set_dma_mask
+extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
+#endif
+
+#ifndef pci_request_regions
+#define pci_request_regions _kc_pci_request_regions
+extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
+#endif
+
+#ifndef pci_release_regions
+#define pci_release_regions _kc_pci_release_regions
+extern void _kc_pci_release_regions(struct pci_dev *pdev);
+#endif
+
+/**************************************/
+/* NETWORK DRIVER API */
+
+#ifndef alloc_etherdev
+#define alloc_etherdev _kc_alloc_etherdev
+extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
+#endif
+
+#ifndef is_valid_ether_addr
+#define is_valid_ether_addr _kc_is_valid_ether_addr
+extern int _kc_is_valid_ether_addr(u8 *addr);
+#endif
+
+/**************************************/
+/* MISCELLANEOUS */
+
+#ifndef INIT_TQUEUE
+#define INIT_TQUEUE(_tq, _routine, _data)              \
+       do {                                            \
+               INIT_LIST_HEAD(&(_tq)->list);           \
+               (_tq)->sync = 0;                        \
+               (_tq)->routine = _routine;              \
+               (_tq)->data = _data;                    \
+       } while (0)
+#endif
+
+#endif /* 2.4.3 => 2.4.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
+/* Generic MII registers. */
+#define MII_BMCR            0x00        /* Basic mode control register */
+#define MII_BMSR            0x01        /* Basic mode status register  */
+#define MII_PHYSID1         0x02        /* PHYS ID 1                   */
+#define MII_PHYSID2         0x03        /* PHYS ID 2                   */
+#define MII_ADVERTISE       0x04        /* Advertisement control reg   */
+#define MII_LPA             0x05        /* Link partner ability reg    */
+#define MII_EXPANSION       0x06        /* Expansion register          */
+/* Basic mode control register. */
+#define BMCR_FULLDPLX           0x0100  /* Full duplex                 */
+#define BMCR_ANENABLE           0x1000  /* Enable auto negotiation     */
+/* Basic mode status register. */
+#define BMSR_ERCAP              0x0001  /* Ext-reg capability          */
+#define BMSR_ANEGCAPABLE        0x0008  /* Able to do auto-negotiation */
+#define BMSR_10HALF             0x0800  /* Can do 10mbps, half-duplex  */
+#define BMSR_10FULL             0x1000  /* Can do 10mbps, full-duplex  */
+#define BMSR_100HALF            0x2000  /* Can do 100mbps, half-duplex */
+#define BMSR_100FULL            0x4000  /* Can do 100mbps, full-duplex */
+/* Advertisement control register. */
+#define ADVERTISE_CSMA          0x0001  /* Only selector supported     */
+#define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex  */
+#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
+#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
+#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */
+#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+                       ADVERTISE_100HALF | ADVERTISE_100FULL)
+/* Expansion register for auto-negotiation. */
+#define EXPANSION_ENABLENPAGE   0x0004  /* This enables npage words    */
+#endif
+
+/*****************************************************************************/
+/* 2.4.6 => 2.4.3 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
+
+#ifndef pci_set_power_state
+#define pci_set_power_state _kc_pci_set_power_state
+extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
+#endif
+
+#ifndef pci_enable_wake
+#define pci_enable_wake _kc_pci_enable_wake
+extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
+#endif
+
+#ifndef pci_disable_device
+#define pci_disable_device _kc_pci_disable_device
+extern void _kc_pci_disable_device(struct pci_dev *pdev);
+#endif
+
+/* PCI PM entry point syntax changed, so don't support suspend/resume */
+#undef CONFIG_PM
+
+#endif /* 2.4.6 => 2.4.3 */
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+                              PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+                              PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+                              PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+                              ~PCI_COMMAND_INVALIDATE);
+#endif
+
+/*****************************************************************************/
+/* 2.4.10 => 2.4.9 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
+
+/**************************************/
+/* MODULE API */
+
+#ifndef MODULE_LICENSE
+       #define MODULE_LICENSE(X)
+#endif
+
+/**************************************/
+/* OTHER */
+
+#undef min
+#define min(x,y) ({ \
+       const typeof(x) _x = (x);       \
+       const typeof(y) _y = (y);       \
+       (void) (&_x == &_y);            \
+       _x < _y ? _x : _y; })
+
+#undef max
+#define max(x,y) ({ \
+       const typeof(x) _x = (x);       \
+       const typeof(y) _y = (y);       \
+       (void) (&_x == &_y);            \
+       _x > _y ? _x : _y; })
+
+#ifndef list_for_each_safe
+#define list_for_each_safe(pos, n, head) \
+       for (pos = (head)->next, n = pos->next; pos != (head); \
+               pos = n, n = pos->next)
+#endif
+
+#endif /* 2.4.10 => 2.4.9 */
+
+
+/*****************************************************************************/
+/* 2.4.13 => 2.4.10 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
+
+/**************************************/
+/* PCI DMA MAPPING */
+
+#ifndef virt_to_page
+       #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
+#endif
+
+#ifndef pci_map_page
+#define pci_map_page _kc_pci_map_page
+extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
+#endif
+
+#ifndef pci_unmap_page
+#define pci_unmap_page _kc_pci_unmap_page
+extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
+#endif
+
+/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
+
+#undef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0xffffffff
+#undef DMA_64BIT_MASK
+#define DMA_64BIT_MASK 0xffffffff
+
+/**************************************/
+/* OTHER */
+
+#ifndef cpu_relax
+#define cpu_relax()    rep_nop()
+#endif
+
+#endif /* 2.4.13 => 2.4.10 */
+
+/*****************************************************************************/
+/* 2.4.17 => 2.4.13 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
+
+#ifndef __devexit_p
+       #define __devexit_p(x) &(x)
+#endif
+
+#endif /* 2.4.17 => 2.4.13 */
+
+/*****************************************************************************/
+/* 2.4.20 => 2.4.19 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
+
+/* we won't support NAPI on less than 2.4.20 */
+#ifdef NAPI
+#undef CONFIG_E1000_NAPI
+#undef CONFIG_E1000E_NAPI
+#undef CONFIG_IXGB_NAPI
+#ifdef _IXGBE_H_
+#undef CONFIG_IXGBE_NAPI
+#endif /* _IXGBE_H */
+#endif
+
+#endif /* 2.4.20 => 2.4.19 */
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#define pci_name(x)    ((x)->slot_name)
+#endif
+
+/*****************************************************************************/
+/* 2.4.22 => 2.4.17 */
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
+#ifndef IXGBE_NO_LRO
+/* Don't enable LRO for these legacy kernels */
+#define IXGBE_NO_LRO
+#endif
+#endif
+
+/*****************************************************************************/
+/*****************************************************************************/
+/* 2.4.23 => 2.4.22 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
+/*****************************************************************************/
+#ifdef NAPI
+#ifndef netif_poll_disable
+#define netif_poll_disable(x) _kc_netif_poll_disable(x)
+static inline void _kc_netif_poll_disable(struct net_device *netdev)
+{
+       while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
+               /* No hurry */
+               current->state = TASK_INTERRUPTIBLE;
+               schedule_timeout(1);
+       }
+}
+#endif
+
+#ifndef netif_poll_enable
+#define netif_poll_enable(x) _kc_netif_poll_enable(x)
+static inline void _kc_netif_poll_enable(struct net_device *netdev)
+{
+       clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
+}
+#endif
+#endif /* NAPI */
+#ifndef netif_tx_disable
+#define netif_tx_disable(x) _kc_netif_tx_disable(x)
+static inline void _kc_netif_tx_disable(struct net_device *dev)
+{
+       spin_lock_bh(&dev->xmit_lock);
+       netif_stop_queue(dev);
+       spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+#endif /* 2.4.23 => 2.4.22 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
+#define ETHTOOL_OPS_COMPAT
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.5.71 => 2.4.x */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
+#include <net/sock.h>
+#define sk_protocol protocol
+
+#define pci_get_device pci_find_device
+#endif /* 2.5.71 => 2.4.x */
+
+/*****************************************************************************/
+/* < 2.4.27 or 2.6.0 <= 2.6.5 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
+    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
+
+#ifndef netif_msg_init
+#define netif_msg_init _kc_netif_msg_init
+static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+       /* use default */
+       if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+               return default_msg_enable_bits;
+       if (debug_value == 0) /* no output */
+               return 0;
+       /* set low N bits */
+       return (1 << debug_value) - 1;
+}
+#endif
+
+#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
+/*****************************************************************************/
+#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
+     (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
+      ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
+#define netdev_priv(x) x->priv
+#endif
+
+/*****************************************************************************/
+/* <= 2.5.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
+#undef pci_register_driver
+#define pci_register_driver pci_module_init
+
+#define dev_err(__unused_dev, format, arg...)            \
+       printk(KERN_ERR "%s: " format, pci_name(adapter->pdev) , ## arg)
+#define dev_warn(__unused_dev, format, arg...)            \
+       printk(KERN_WARNING "%s: " format, pci_name(pdev) , ## arg)
+
+/* hlist_* code - double linked lists */
+struct hlist_head {
+       struct hlist_node *first;
+};
+
+struct hlist_node {
+       struct hlist_node *next, **pprev;
+};
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+       struct hlist_node *next = n->next;
+       struct hlist_node **pprev = n->pprev;
+       *pprev = next;
+       if (next)
+               next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+       __hlist_del(n);
+       n->next = NULL;
+       n->pprev = NULL;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+       struct hlist_node *first = h->first;
+       n->next = first;
+       if (first)
+               first->pprev = &n->next;
+       h->first = n;
+       n->pprev = &h->first;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+       return !h->first;
+}
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+       h->next = NULL;
+       h->pprev = NULL;
+}
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each_entry(tpos, pos, head, member)                    \
+       for (pos = (head)->first;                                        \
+            pos && ({ prefetch(pos->next); 1;}) &&                      \
+               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+            pos = pos->next)
+
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member)            \
+       for (pos = (head)->first;                                        \
+            pos && ({ n = pos->next; 1; }) &&                           \
+               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+            pos = n)
+
+/* we ignore GFP here */
+#define dma_alloc_coherent(dv, sz, dma, gfp) \
+       pci_alloc_consistent(pdev, (sz), (dma))
+#define dma_free_coherent(dv, sz, addr, dma_addr) \
+       pci_free_consistent(pdev, (sz), (addr), (dma_addr))
+
+#ifndef might_sleep
+#define might_sleep()
+#endif
+
+#ifndef NETREG_REGISTERED
+#define NETREG_REGISTERED 1
+#define reg_state deadbeaf
+#endif
+#endif /* <= 2.5.0 */
+
+/*****************************************************************************/
+/* 2.5.28 => 2.4.23 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
+
+static inline void _kc_synchronize_irq(void)
+{
+       synchronize_irq();
+}
+#undef synchronize_irq
+#define synchronize_irq(X) _kc_synchronize_irq()
+
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#undef INIT_WORK
+#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
+#undef container_of
+#define container_of list_entry
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+#define cancel_work_sync(x) flush_scheduled_work()
+
+#endif /* 2.5.28 => 2.4.23 */
+
+/*****************************************************************************/
+/* 2.6.0 => 2.5.28 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
+#define MODULE_INFO(version, _version)
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+#endif
+#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
+
+#define pci_set_consistent_dma_mask(dev,mask) 1
+
+#undef dev_put
+#define dev_put(dev) __dev_put(dev)
+
+#ifndef skb_fill_page_desc
+#define skb_fill_page_desc _kc_skb_fill_page_desc
+extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
+#endif
+
+#undef ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+#ifndef page_count
+#define page_count(p) atomic_read(&(p)->count)
+#endif
+
+/* find_first_bit and find_next_bit are not defined for most
+ * 2.4 kernels (except for the redhat 2.4.21 kernels)
+ */
+#include <linux/bitops.h>
+#define BITOP_WORD(nr)          ((nr) / BITS_PER_LONG)
+#undef find_next_bit
+#define find_next_bit _kc_find_next_bit
+extern unsigned long _kc_find_next_bit(const unsigned long *addr,
+                                       unsigned long size,
+                                       unsigned long offset);
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
+#endif /* 2.6.0 => 2.5.28 */
+
+/*****************************************************************************/
+/* 2.6.4 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#endif /* 2.6.4 => 2.6.0 */
+
+/*****************************************************************************/
+/* 2.6.5 => 2.6.0 */
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define pci_dma_sync_single_for_cpu    pci_dma_sync_single
+#define pci_dma_sync_single_for_device pci_dma_sync_single_for_cpu
+#endif /* 2.6.5 => 2.6.0 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
+/* taken from 2.6 include/linux/bitmap.h */
+#undef bitmap_zero
+#define bitmap_zero _kc_bitmap_zero
+static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
+{
+        if (nbits <= BITS_PER_LONG)
+                *dst = 0UL;
+        else {
+                int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+                memset(dst, 0, len);
+        }
+}
+#endif /* < 2.6.6 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
+#undef if_mii
+#define if_mii _kc_if_mii
+static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
+{
+       return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+#endif /* < 2.6.7 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
+#ifndef PCI_EXP_DEVCTL
+#define PCI_EXP_DEVCTL 8
+#endif
+#ifndef PCI_EXP_DEVCTL_CERE
+#define PCI_EXP_DEVCTL_CERE 0x0001
+#endif
+#define msleep(x)      do { set_current_state(TASK_UNINTERRUPTIBLE); \
+                               schedule_timeout((x * HZ)/1000 + 2); \
+                       } while (0)
+
+#endif /* < 2.6.8 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
+#include <net/dsfield.h>
+#define __iomem
+
+#ifndef kcalloc
+#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+#define MSEC_PER_SEC    1000L
+static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
+{
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       return (MSEC_PER_SEC / HZ) * j;
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+#else
+       return (j * MSEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
+{
+       if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+       return m * (HZ / MSEC_PER_SEC);
+#else
+       return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
+#endif
+}
+
+#define msleep_interruptible _kc_msleep_interruptible
+static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
+{
+       unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
+
+       while (timeout && !signal_pending(current)) {
+               __set_current_state(TASK_INTERRUPTIBLE);
+               timeout = schedule_timeout(timeout);
+       }
+       return _kc_jiffies_to_msecs(timeout);
+}
+
+/* Basic mode control register. */
+#define BMCR_SPEED1000         0x0040  /* MSB of Speed (1000)         */
+
+#ifndef __le16
+#define __le16 u16
+#endif
+#ifndef __le32
+#define __le32 u32
+#endif
+#ifndef __le64
+#define __le64 u64
+#endif
+
+#ifdef pci_dma_mapping_error
+#undef pci_dma_mapping_error
+#endif
+#define pci_dma_mapping_error _kc_pci_dma_mapping_error
+static inline int _kc_pci_dma_mapping_error(struct pci_dev *pdev,
+                                            dma_addr_t dma_addr)
+{
+       return dma_addr == 0;
+}
+
+#endif /* < 2.6.9 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
+#ifdef module_param_array_named
+#undef module_param_array_named
+#define module_param_array_named(name, array, type, nump, perm)          \
+       static struct kparam_array __param_arr_##name                    \
+       = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
+           sizeof(array[0]), array };                                   \
+       module_param_call(name, param_array_set, param_array_get,        \
+                         &__param_arr_##name, perm)
+#endif /* module_param_array_named */
+#endif /* < 2.6.10 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
+#define PCI_D0      0
+#define PCI_D1      1
+#define PCI_D2      2
+#define PCI_D3hot   3
+#define PCI_D3cold  4
+#define pci_choose_state(pdev,state) state
+#define PMSG_SUSPEND 3
+#define PCI_EXP_LNKCTL 16
+
+#undef NETIF_F_LLTX
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#define KC_USEC_PER_SEC        1000000L
+#define usecs_to_jiffies _kc_usecs_to_jiffies
+static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
+{
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+       return (KC_USEC_PER_SEC / HZ) * j;
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+       return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
+#else
+       return (j * KC_USEC_PER_SEC) / HZ;
+#endif
+}
+static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
+{
+       if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
+       return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
+#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
+       return m * (HZ / KC_USEC_PER_SEC);
+#else
+       return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
+#endif
+}
+#endif /* < 2.6.11 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
+#include <linux/reboot.h>
+#define USE_REBOOT_NOTIFIER
+
+/* Generic MII registers. */
+#define MII_CTRL1000        0x09        /* 1000BASE-T control          */
+#define MII_STAT1000        0x0a        /* 1000BASE-T status           */
+/* Advertisement control register. */
+#define ADVERTISE_PAUSE_CAP     0x0400  /* Try for pause               */
+#define ADVERTISE_PAUSE_ASYM    0x0800  /* Try for asymmetric pause     */
+/* 1000BASE-T Control register */
+#define ADVERTISE_1000FULL      0x0200  /* Advertise 1000BASE-T full duplex */
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
+#define pm_message_t u32
+#ifndef kzalloc
+#define kzalloc _kc_kzalloc
+extern void *_kc_kzalloc(size_t size, int flags);
+#endif
+
+/* Generic MII registers. */
+#define MII_ESTATUS        0x0f        /* Extended Status */
+/* Basic mode status register. */
+#define BMSR_ESTATEN           0x0100  /* Extended Status in R15 */
+/* Extended status register. */
+#define ESTATUS_1000_TFULL     0x2000  /* Can do 1000BT Full */
+#define ESTATUS_1000_THALF     0x1000  /* Can do 1000BT Half */
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
+#ifndef device_can_wakeup
+#define device_can_wakeup(dev) (1)
+#endif
+#ifndef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val)     do{}while(0)
+#endif
+#endif /* < 2.6.15 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
+#undef HAVE_PCI_ERS
+#else /* 2.6.16 and above */
+#undef HAVE_PCI_ERS
+#define HAVE_PCI_ERS
+#endif
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
+
+#ifndef IRQF_PROBE_SHARED
+#ifdef SA_PROBEIRQ
+#define IRQF_PROBE_SHARED SA_PROBEIRQ
+#else
+#define IRQF_PROBE_SHARED 0
+#endif
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef netdev_alloc_skb
+#define netdev_alloc_skb _kc_netdev_alloc_skb
+extern struct sk_buff *_kc_netdev_alloc_skb(struct net_device *dev,
+                                            unsigned int length);
+#endif
+
+#ifndef skb_is_gso
+#ifdef NETIF_F_TSO
+#define skb_is_gso _kc_skb_is_gso
+static inline int _kc_skb_is_gso(const struct sk_buff *skb)
+{
+       return skb_shinfo(skb)->gso_size;
+}
+#else
+#define skb_is_gso(a) 0
+#endif
+#endif
+
+#endif /* < 2.6.18 */
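+
+/*
+ * Example (netdev and bufsz are placeholders): hot-path code can be
+ * written purely against the current API,
+ *
+ *   skb = netdev_alloc_skb(netdev, bufsz);
+ *   if (skb_is_gso(skb))
+ *           take the segmentation-offload path;
+ *
+ * and still build before 2.6.18, where skb_is_gso() falls back to a
+ * gso_size test or, without NETIF_F_TSO, to constant 0.
+ */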
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#endif
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
+#ifndef RHEL_RELEASE_CODE
+#define RHEL_RELEASE_CODE 0
+#endif
+#ifndef RHEL_RELEASE_VERSION
+#define RHEL_RELEASE_VERSION(a,b) 0
+#endif
+#ifndef AX_RELEASE_CODE
+#define AX_RELEASE_CODE 0
+#endif
+#ifndef AX_RELEASE_VERSION
+#define AX_RELEASE_VERSION(a,b) 0
+#endif
+#if (!(( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) ) && ( RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0) ) || ( RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0) ) || (AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0))))
+typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
+#endif
+#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))
+#undef CONFIG_INET_LRO
+#undef CONFIG_INET_LRO_MODULE
+#endif
+typedef irqreturn_t (*new_handler_t)(int, void*);
+static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler,
+                                          unsigned long flags, const char *devname,
+                                          void *dev_id)
+#else /* 2.4.x */
+typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
+typedef void (*new_handler_t)(int, void*);
+static inline int _kc_request_irq(unsigned int irq, new_handler_t handler,
+                                  unsigned long flags, const char *devname,
+                                  void *dev_id)
+#endif /* >= 2.5.x */
+{
+       irq_handler_t new_handler = (irq_handler_t) handler;
+       return request_irq(irq, new_handler, flags, devname, dev_id);
+}
+
+#undef request_irq
+#define request_irq(irq, handler, flags, devname, dev_id) \
+       _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
+
+#define irq_handler_t new_handler_t
+/* pci_restore_state and pci_save_state handle MSI/PCIe from 2.6.19 */
+#define PCIE_CONFIG_SPACE_LEN 256
+#define PCI_CONFIG_SPACE_LEN 64
+#define PCIE_LINK_STATUS 0x12
+#define pci_config_space_ich8lan() do {} while (0)
+#undef pci_save_state
+extern int _kc_pci_save_state(struct pci_dev *);
+#define pci_save_state(pdev) _kc_pci_save_state(pdev)
+#undef pci_restore_state
+extern void _kc_pci_restore_state(struct pci_dev *);
+#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
+#ifdef HAVE_PCI_ERS
+#undef free_netdev
+extern void _kc_free_netdev(struct net_device *);
+#define free_netdev(netdev) _kc_free_netdev(netdev)
+#endif
+#define pci_enable_pcie_error_reporting(dev) do {} while (0)
+#define pci_disable_pcie_error_reporting(dev) do {} while (0)
+#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
+#else /* 2.6.19 */
+#include <linux/aer.h>
+#endif /* < 2.6.19 */
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
+#undef INIT_WORK
+#define INIT_WORK(_work, _func) \
+do { \
+       INIT_LIST_HEAD(&(_work)->entry); \
+       (_work)->pending = 0; \
+       (_work)->func = (void (*)(void *))_func; \
+       (_work)->data = _work; \
+       init_timer(&(_work)->timer); \
+} while (0)
+#endif
+
+#ifndef PCI_VDEVICE
+#define PCI_VDEVICE(ven, dev)        \
+       PCI_VENDOR_ID_##ven, (dev),  \
+       PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#endif
+
+#ifndef round_jiffies
+#define round_jiffies(x) x
+#endif
+
+#define csum_offset csum
+
+#endif /* < 2.6.20 */
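+
+/*
+ * Example: PCI_VDEVICE() keeps device tables terse on every kernel.
+ * The table name and device ID below are illustrative only:
+ *
+ *   static struct pci_device_id foo_pci_tbl[] = {
+ *           { PCI_VDEVICE(INTEL, 0x10C6) },
+ *           { 0 }
+ *   };
+ */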
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
+#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
+#define vlan_group_set_device(vg, id, dev) \
+       do { if (vg) vg->vlan_devices[id] = dev; } while (0)
+#define pci_channel_offline(pdev) (pdev->error_state && \
+       pdev->error_state != pci_channel_io_normal)
+#define pci_request_selected_regions(pdev, bars, name) \
+        pci_request_regions(pdev, name)
+#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev)
+#endif /* < 2.6.21 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
+#define tcp_hdr(skb) (skb->h.th)
+#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
+#define skb_transport_offset(skb) (skb->h.raw - skb->data)
+#define skb_transport_header(skb) (skb->h.raw)
+#define ipv6_hdr(skb) (skb->nh.ipv6h)
+#define ip_hdr(skb) (skb->nh.iph)
+#define skb_network_offset(skb) (skb->nh.raw - skb->data)
+#define skb_network_header(skb) (skb->nh.raw)
+#define skb_tail_pointer(skb) skb->tail
+#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
+                                 memcpy(skb->data + offset, from, len)
+#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
+#define pci_register_driver pci_module_init
+#define skb_mac_header(skb) skb->mac.raw
+
+#ifdef NETIF_F_MULTI_QUEUE
+#ifndef alloc_etherdev_mq
+#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
+#endif
+#endif /* NETIF_F_MULTI_QUEUE */
+
+#ifndef ETH_FCS_LEN
+#define ETH_FCS_LEN 4
+#endif
+#define cancel_work_sync(x) flush_scheduled_work()
+#endif /* < 2.6.22 */
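+
+/*
+ * Example: offload code can use the 2.6.22 skb accessors everywhere;
+ * on older kernels the macros above rewrite them to the old union
+ * fields, e.g.
+ *
+ *   struct iphdr *iph = ip_hdr(skb);
+ *   hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ *
+ * becomes arithmetic on skb->nh.iph and skb->h.th.
+ */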
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
+#undef ETHTOOL_GPERMADDR
+#endif /* > 2.6.22 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
+/* NAPI API changes in 2.6.24 break everything */
+struct napi_struct {
+       /* used to look up the real NAPI polling routine */
+       int (*poll)(struct napi_struct *, int);
+       struct net_device poll_dev;
+       int weight;
+};
+#ifdef NAPI
+extern int __kc_adapter_clean(struct net_device *, int *);
+#if defined(DRIVER_IGB) || defined(DRIVER_IXGBE)
+#define netif_rx_complete(_netdev, napi) netif_rx_complete(&(napi)->poll_dev)
+#define netif_rx_schedule_prep(_netdev, napi) \
+       (netif_running(_netdev) && netif_rx_schedule_prep(&(napi)->poll_dev))
+#define netif_rx_schedule(_netdev, napi) netif_rx_schedule(&(napi)->poll_dev)
+#define __netif_rx_schedule(_netdev, napi) __netif_rx_schedule(&(napi)->poll_dev)
+#define napi_enable(napi) do { \
+       /* abuse if_port as a counter */ \
+       if (!adapter->netdev->if_port) { \
+               netif_poll_enable(adapter->netdev); \
+       } \
+       ++adapter->netdev->if_port; \
+       netif_poll_enable(&(napi)->poll_dev); \
+       } while (0)
+#define napi_disable(_napi) do { \
+       netif_poll_disable(&(_napi)->poll_dev); \
+       --adapter->netdev->if_port; \
+       if (!adapter->netdev->if_port) \
+               netif_poll_disable(adapter->netdev); \
+       } while (0)
+
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+       do { \
+               struct napi_struct *__napi = (_napi); \
+               __napi->poll_dev.poll = &(__kc_adapter_clean); \
+               __napi->poll_dev.priv = (_napi); \
+               __napi->poll_dev.weight = (_weight); \
+               dev_hold(&__napi->poll_dev); \
+               set_bit(__LINK_STATE_START, &__napi->poll_dev.state);\
+               _netdev->poll = &(__kc_adapter_clean); \
+               _netdev->weight = (_weight); \
+               __napi->poll = &(_poll); \
+               __napi->weight = (_weight); \
+               set_bit(__LINK_STATE_RX_SCHED, &(_netdev)->state); \
+               set_bit(__LINK_STATE_RX_SCHED, &__napi->poll_dev.state); \
+       } while (0)
+#define netif_napi_del(_napi) \
+       do { \
+               WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &(_napi)->poll_dev.state)); \
+               dev_put(&(_napi)->poll_dev); \
+               memset(&(_napi)->poll_dev, 0, sizeof(struct napi_struct));\
+       } while (0)
+#else /* DRIVER_IGB || DRIVER_IXGBE */
+#define netif_rx_complete(netdev, napi) netif_rx_complete(netdev)
+#define netif_rx_schedule_prep(netdev, napi) netif_rx_schedule_prep(netdev)
+#define netif_rx_schedule(netdev, napi) netif_rx_schedule(netdev)
+#define __netif_rx_schedule(netdev, napi) __netif_rx_schedule(netdev)
+#define napi_enable(napi) netif_poll_enable(adapter->netdev)
+#define napi_disable(napi) netif_poll_disable(adapter->netdev)
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+       do { \
+               struct napi_struct *__napi = (_napi); \
+               _netdev->poll = &(__kc_adapter_clean); \
+               _netdev->weight = (_weight); \
+               __napi->poll = &(_poll); \
+               __napi->weight = (_weight); \
+               netif_poll_disable(_netdev); \
+       } while (0)
+#define netif_napi_del(_a) do {} while (0)
+#endif /* DRIVER_IGB || DRIVER_IXGBE */
+#else /* NAPI */
+#define netif_napi_add(_netdev, _napi, _poll, _weight) \
+       do { \
+               struct napi_struct *__napi = _napi; \
+               _netdev->poll = &(_poll); \
+               _netdev->weight = (_weight); \
+               __napi->poll = &(_poll); \
+               __napi->weight = (_weight); \
+       } while (0)
+#define netif_napi_del(_a) do {} while (0)
+#endif /* NAPI */
+
+#undef dev_get_by_name
+#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
+#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
+#endif /* < 2.6.24 */
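+
+/*
+ * Example (ring and poll-routine names are placeholders): per-ring NAPI
+ * registration keeps its 2.6.24 form,
+ *
+ *   netif_napi_add(adapter->netdev, &ring->napi, foo_poll, 64);
+ *
+ * while on older NAPI kernels the macro points the embedded poll_dev at
+ * __kc_adapter_clean(), which recovers the napi_struct from
+ * poll_dev.priv and calls foo_poll(); the netif_rx_* wrappers above
+ * then schedule against that embedded device instead of the real
+ * netdev.
+ */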
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
+#include <linux/pm_qos_params.h>
+#endif /* > 2.6.24 */
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
+#define PM_QOS_CPU_DMA_LATENCY 1
+
+#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
+#include <linux/latency.h>
+#define PM_QOS_DEFAULT_VALUE   INFINITE_LATENCY
+#define pm_qos_add_requirement(pm_qos_class, name, value) \
+               set_acceptable_latency(name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name) \
+               remove_acceptable_latency(name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) \
+               modify_acceptable_latency(name, value)
+#else
+#define PM_QOS_DEFAULT_VALUE   -1
+#define pm_qos_add_requirement(pm_qos_class, name, value)
+#define pm_qos_remove_requirement(pm_qos_class, name)
+#define pm_qos_update_requirement(pm_qos_class, name, value) { \
+       if (value != PM_QOS_DEFAULT_VALUE) { \
+               printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
+                       pci_name(adapter->pdev)); \
+       } \
+}
+#endif /* > 2.6.18 */
+
+#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
+
+#endif /* < 2.6.25 */
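+
+/*
+ * Example (name string is a placeholder): a CPU DMA latency constraint
+ * written against the 2.6.25 API,
+ *
+ *   pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "foo", 55);
+ *
+ * maps onto set_acceptable_latency() on 2.6.18-2.6.24 kernels; earlier
+ * kernels compile the calls away, and pm_qos_update_requirement() only
+ * warns if a non-default value is requested.
+ */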
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
+#ifdef DRIVER_IXGBE
+#ifdef NETIF_F_TSO
+#ifdef NETIF_F_TSO6
+#define netif_set_gso_max_size(_netdev, size) \
+       do { \
+               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { \
+                       _netdev->features &= ~NETIF_F_TSO; \
+                       _netdev->features &= ~NETIF_F_TSO6; \
+               } else { \
+                       _netdev->features |= NETIF_F_TSO; \
+                       _netdev->features |= NETIF_F_TSO6; \
+               } \
+       } while (0)
+#else /* NETIF_F_TSO6 */
+#define netif_set_gso_max_size(_netdev, size) \
+       do { \
+               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
+                       _netdev->features &= ~NETIF_F_TSO; \
+               else \
+                       _netdev->features |= NETIF_F_TSO; \
+       } while (0)
+#endif /* NETIF_F_TSO6 */
+#else
+#define netif_set_gso_max_size(_netdev, size) do {} while (0)
+#endif /* NETIF_F_TSO */
+#endif /* DRIVER_IXGBE */
+#else /* < 2.6.26 */
+#include <linux/pci-aspm.h>
+#define HAVE_NETDEV_VLAN_FEATURES
+#endif /* < 2.6.26 */
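+
+/*
+ * Note: pre-2.6.26 kernels have no per-device gso_max_size, so the
+ * macro above ignores the size argument of netif_set_gso_max_size()
+ * and instead disables TSO/TSO6 while DCB is enabled, re-enabling them
+ * otherwise.
+ */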
+
+/*****************************************************************************/
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
+#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
+#undef device_set_wakeup_enable
+#define device_set_wakeup_enable(dev, val) \
+       do { \
+               u16 pmc = 0; \
+               int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
+               if (pm) { \
+                       pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
+                               &pmc); \
+               } \
+               if (val && (pmc >> 11)) \
+                       (dev)->power.can_wakeup = !!(val); \
+               (dev)->power.should_wakeup = !!(val); \
+       } while (0)
+#endif /* 2.6.15 through 2.6.26 */
+
+#ifndef netif_napi_del
+#define netif_napi_del(_a) do {} while (0)
+#ifdef NAPI
+#ifdef CONFIG_NETPOLL
+#undef netif_napi_del
+#define netif_napi_del(_a) list_del(&(_a)->dev_list)
+#endif
+#endif
+#endif /* netif_napi_del */
+#ifndef pci_dma_mapping_error
+#define pci_dma_mapping_error(pdev, dma_addr) pci_dma_mapping_error(dma_addr)
+#endif
+
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+#ifdef DRIVER_IXGBE
+#define HAVE_TX_MQ
+#endif
+#endif
+
+#ifdef HAVE_TX_MQ
+extern void _kc_netif_tx_stop_all_queues(struct net_device *);
+extern void _kc_netif_tx_wake_all_queues(struct net_device *);
+extern void _kc_netif_tx_start_all_queues(struct net_device *);
+#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
+#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
+#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
+#undef netif_stop_subqueue
+#define netif_stop_subqueue(_ndev,_qi) do { \
+       if (netif_is_multiqueue((_ndev))) \
+               netif_stop_subqueue((_ndev), (_qi)); \
+       else \
+               netif_stop_queue((_ndev)); \
+       } while (0)
+#undef netif_start_subqueue
+#define netif_start_subqueue(_ndev,_qi) do { \
+       if (netif_is_multiqueue((_ndev))) \
+               netif_start_subqueue((_ndev), (_qi)); \
+       else \
+               netif_start_queue((_ndev)); \
+       } while (0)
+#else /* CONFIG_NETDEVICES_MULTIQUEUE */
+#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
+#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
+#define netif_tx_start_all_queues(a) netif_start_queue(a)
+#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
+#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
+#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
+#ifndef NETIF_F_MULTI_QUEUE
+#define NETIF_F_MULTI_QUEUE 0
+#define netif_is_multiqueue(a) 0
+#define netif_wake_subqueue(a, b)
+#endif /* NETIF_F_MULTI_QUEUE */
+#else /* < 2.6.27 */
+#define HAVE_TX_MQ
+#endif /* < 2.6.27 */
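+
+/*
+ * Example (ring is a placeholder): queue control stays uniform.  With
+ * the wrappers above,
+ *
+ *   netif_stop_subqueue(netdev, ring->queue_index);
+ *
+ * stops the matching TX queue on multiqueue-capable kernels and falls
+ * back to stopping the single device queue everywhere else.
+ */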
+
+#endif /* _KCOMPAT_H_ */

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel
